[
  {
    "path": "ARTrack_env_cuda113.yaml",
    "content": "name: artrack\nchannels:\n  - http://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/pytorch\n  - http://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge\n  - http://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main\n  - defaults\ndependencies:\n  - ca-certificates=2021.10.8=ha878542_0\n  - certifi=2021.10.8=py39hf3d152e_2\n  - fvcore=0.1.5.post20220305=pyhd8ed1ab_0\n  - portalocker=2.4.0=py39hf3d152e_0\n  - python_abi=3.9=2_cp39\n  - tabulate=0.8.9=pyhd8ed1ab_0\n  - termcolor=1.1.0=py_2\n  - yacs=0.1.8=pyhd8ed1ab_0\n  - ffmpeg=4.3=hf484d3e_0\n  - pytorch=1.11.0=py3.9_cuda11.3_cudnn8.2.0_0\n  - pytorch-mutex=1.0=cuda\n  - torchvision=0.12.0=py39_cu113\n  - _anaconda_depends=2021.11=py39_0\n  - _libgcc_mutex=0.1=main\n  - _openmp_mutex=4.5=1_gnu\n  - alabaster=0.7.12=pyhd3eb1b0_0\n  - anaconda-client=1.9.0=py39h06a4308_0\n  - anaconda=custom=py39_1\n  - anaconda-project=0.10.2=pyhd3eb1b0_0\n  - anyio=3.5.0=py39h06a4308_0\n  - appdirs=1.4.4=pyhd3eb1b0_0\n  - argh=0.26.2=py39h06a4308_0\n  - argon2-cffi=21.3.0=pyhd3eb1b0_0\n  - argon2-cffi-bindings=21.2.0=py39h7f8727e_0\n  - arrow=0.13.1=py39h06a4308_0\n  - asn1crypto=1.4.0=py_0\n  - astroid=2.6.6=py39h06a4308_0\n  - astropy=5.0.3=py39hce1f21e_0\n  - asttokens=2.0.5=pyhd3eb1b0_0\n  - async_generator=1.10=pyhd3eb1b0_0\n  - atomicwrites=1.4.0=py_0\n  - attrs=21.4.0=pyhd3eb1b0_0\n  - autopep8=1.6.0=pyhd3eb1b0_0\n  - babel=2.9.1=pyhd3eb1b0_0\n  - backcall=0.2.0=pyhd3eb1b0_0\n  - backports=1.1=pyhd3eb1b0_0\n  - backports.shutil_get_terminal_size=1.0.0=pyhd3eb1b0_3\n  - beautifulsoup4=4.10.0=pyh06a4308_0\n  - binaryornot=0.4.4=pyhd3eb1b0_1\n  - bitarray=2.4.1=py39h7f8727e_0\n  - bkcharts=0.2=py39h06a4308_0\n  - black=19.10b0=py_0\n  - blas=1.0=mkl\n  - bleach=4.1.0=pyhd3eb1b0_0\n  - blosc=1.21.0=h8c45485_0\n  - bokeh=2.4.2=py39h06a4308_0\n  - boto=2.49.0=py39h06a4308_0\n  - bottleneck=1.3.4=py39hce1f21e_0\n  - brotli=1.0.9=he6710b0_2\n  - brotlipy=0.7.0=py39h27cfd23_1003\n  - brunsli=0.1=h2531618_0\n  - bzip2=1.0.8=h7b6447c_0\n  - c-ares=1.18.1=h7f8727e_0\n  - cached-property=1.5.2=py_0\n  - cairo=1.16.0=hf32fb01_1\n  - cffi=1.15.0=py39hd667e15_1\n  - cfitsio=3.470=hf0d0db6_6\n  - chardet=4.0.0=py39h06a4308_1003\n  - charls=2.2.0=h2531618_0\n  - charset-normalizer=2.0.4=pyhd3eb1b0_0\n  - click=8.0.4=py39h06a4308_0\n  - cloudpickle=2.0.0=pyhd3eb1b0_0\n  - clyent=1.2.2=py39h06a4308_1\n  - colorama=0.4.4=pyhd3eb1b0_0\n  - conda=4.12.0=py39h06a4308_0\n  - conda-content-trust=0.1.1=pyhd3eb1b0_0\n  - conda-pack=0.6.0=pyhd3eb1b0_0\n  - conda-package-handling=1.8.0=py39h7f8727e_0\n  - conda-token=0.3.0=pyhd3eb1b0_0\n  - contextlib2=0.6.0.post1=pyhd3eb1b0_0\n  - cookiecutter=1.7.2=pyhd3eb1b0_0\n  - cryptography=36.0.0=py39h9ce1e76_0\n  - cudatoolkit=11.3.1=h2bc3f7f_2\n  - curl=7.80.0=h7f8727e_0\n  - cycler=0.11.0=pyhd3eb1b0_0\n  - cython=0.29.28=py39h295c915_0\n  - cytoolz=0.11.0=py39h27cfd23_0\n  - daal4py=2021.5.0=py39h78b71dc_0\n  - dal=2021.5.1=h06a4308_803\n  - dask=2022.2.1=pyhd3eb1b0_0\n  - dask-core=2022.2.1=pyhd3eb1b0_0\n  - dataclasses=0.8=pyh6d0b6a4_7\n  - dbus=1.13.18=hb2f20db_0\n  - debugpy=1.5.1=py39h295c915_0\n  - decorator=5.1.1=pyhd3eb1b0_0\n  - defusedxml=0.7.1=pyhd3eb1b0_0\n  - diff-match-patch=20200713=pyhd3eb1b0_0\n  - distributed=2022.2.1=pyhd3eb1b0_0\n  - docutils=0.17.1=py39h06a4308_1\n  - entrypoints=0.3=py39h06a4308_0\n  - et_xmlfile=1.1.0=py39h06a4308_0\n  - executing=0.8.3=pyhd3eb1b0_0\n  - expat=2.4.4=h295c915_0\n  - fastcache=1.1.0=py39he8ac12f_0\n  - filelock=3.6.0=pyhd3eb1b0_0\n  - flake8=3.9.2=pyhd3eb1b0_0\n  
- flask=1.1.2=pyhd3eb1b0_0\n  - fontconfig=2.13.1=h6c09931_0\n  - fonttools=4.25.0=pyhd3eb1b0_0\n  - freetype=2.11.0=h70c0345_0\n  - fribidi=1.0.10=h7b6447c_0\n  - fsspec=2022.2.0=pyhd3eb1b0_0\n  - get_terminal_size=1.0.0=haa9412d_0\n  - gevent=21.8.0=py39h7f8727e_1\n  - giflib=5.2.1=h7b6447c_0\n  - glib=2.69.1=h4ff587b_1\n  - glob2=0.7=pyhd3eb1b0_0\n  - gmp=6.2.1=h2531618_2\n  - gmpy2=2.1.2=py39heeb90bb_0\n  - gnutls=3.6.15=he1e5248_0\n  - graphite2=1.3.14=h23475e2_0\n  - greenlet=1.1.1=py39h295c915_0\n  - gst-plugins-base=1.14.0=h8213a91_2\n  - gstreamer=1.14.0=h28cd5cc_2\n  - h5py=3.6.0=py39ha0f2276_0\n  - harfbuzz=2.8.1=h6f93f22_0\n  - hdf5=1.10.6=hb1b8bf9_0\n  - heapdict=1.0.1=pyhd3eb1b0_0\n  - html5lib=1.1=pyhd3eb1b0_0\n  - icu=58.2=he6710b0_3\n  - idna=3.3=pyhd3eb1b0_0\n  - imagecodecs=2021.8.26=py39h4cda21f_0\n  - imageio=2.9.0=pyhd3eb1b0_0\n  - imagesize=1.3.0=pyhd3eb1b0_0\n  - importlib-metadata=4.11.3=py39h06a4308_0\n  - importlib_metadata=4.11.3=hd3eb1b0_0\n  - inflection=0.5.1=py39h06a4308_0\n  - iniconfig=1.1.1=pyhd3eb1b0_0\n  - intel-openmp=2021.4.0=h06a4308_3561\n  - intervaltree=3.1.0=pyhd3eb1b0_0\n  - ipykernel=6.9.1=py39h06a4308_0\n  - ipython=8.1.1=py39h06a4308_0\n  - ipython_genutils=0.2.0=pyhd3eb1b0_1\n  - ipywidgets=7.6.5=pyhd3eb1b0_1\n  - isort=5.9.3=pyhd3eb1b0_0\n  - itsdangerous=2.0.1=pyhd3eb1b0_0\n  - jbig=2.1=hdba287a_0\n  - jdcal=1.4.1=pyhd3eb1b0_0\n  - jedi=0.18.1=py39h06a4308_1\n  - jeepney=0.7.1=pyhd3eb1b0_0\n  - jinja2=2.11.3=pyhd3eb1b0_0\n  - jinja2-time=0.2.0=pyhd3eb1b0_2\n  - joblib=1.1.0=pyhd3eb1b0_0\n  - jpeg=9d=h7f8727e_0\n  - json5=0.9.6=pyhd3eb1b0_0\n  - jsonschema=3.2.0=pyhd3eb1b0_2\n  - jupyter=1.0.0=py39h06a4308_7\n  - jupyter_client=6.1.12=pyhd3eb1b0_0\n  - jupyter_console=6.4.0=pyhd3eb1b0_0\n  - jupyter_core=4.9.2=py39h06a4308_0\n  - jupyter_server=1.13.5=pyhd3eb1b0_0\n  - jupyterlab=3.3.2=pyhd3eb1b0_0\n  - jupyterlab_pygments=0.1.2=py_0\n  - jupyterlab_server=2.10.3=pyhd3eb1b0_1\n  - jupyterlab_widgets=1.0.0=pyhd3eb1b0_1\n  - jxrlib=1.1=h7b6447c_2\n  - keyring=23.4.0=py39h06a4308_0\n  - kiwisolver=1.3.2=py39h295c915_0\n  - krb5=1.19.2=hac12032_0\n  - lame=3.100=h7b6447c_0\n  - lazy-object-proxy=1.6.0=py39h27cfd23_0\n  - lcms2=2.12=h3be6417_0\n  - ld_impl_linux-64=2.35.1=h7274673_9\n  - lerc=3.0=h295c915_0\n  - libaec=1.0.4=he6710b0_1\n  - libarchive=3.4.2=h62408e4_0\n  - libcurl=7.80.0=h0b77cf5_0\n  - libdeflate=1.8=h7f8727e_5\n  - libedit=3.1.20210910=h7f8727e_0\n  - libev=4.33=h7f8727e_1\n  - libffi=3.3=he6710b0_2\n  - libgcc-ng=9.3.0=h5101ec6_17\n  - libgfortran-ng=7.5.0=ha8ba4b0_17\n  - libgfortran4=7.5.0=ha8ba4b0_17\n  - libgomp=9.3.0=h5101ec6_17\n  - libiconv=1.15=h63c8f33_5\n  - libidn2=2.3.2=h7f8727e_0\n  - liblief=0.11.5=h295c915_1\n  - libllvm11=11.1.0=h3826bc1_1\n  - libnghttp2=1.46.0=hce63b2e_0\n  - libpng=1.6.37=hbc83047_0\n  - libsodium=1.0.18=h7b6447c_0\n  - libspatialindex=1.9.3=h2531618_0\n  - libssh2=1.9.0=h1ba5d50_1\n  - libstdcxx-ng=9.3.0=hd4cf53a_17\n  - libtasn1=4.16.0=h27cfd23_0\n  - libtiff=4.2.0=h85742a9_0\n  - libtool=2.4.6=h295c915_1008\n  - libunistring=0.9.10=h27cfd23_0\n  - libuuid=1.0.3=h7f8727e_2\n  - libuv=1.40.0=h7b6447c_0\n  - libwebp=1.2.2=h55f646e_0\n  - libwebp-base=1.2.2=h7f8727e_0\n  - libxcb=1.14=h7b6447c_0\n  - libxml2=2.9.12=h03d6c58_0\n  - libxslt=1.1.34=hc22bd24_0\n  - libzopfli=1.0.3=he6710b0_0\n  - llvmlite=0.38.0=py39h4ff587b_0\n  - locket=0.2.1=py39h06a4308_2\n  - lxml=4.8.0=py39h1f438cf_0\n  - lz4-c=1.9.3=h295c915_1\n  - lzo=2.10=h7b6447c_2\n  - markupsafe=1.1.1=py39h27cfd23_0\n  - 
matplotlib=3.5.1=py39h06a4308_1\n  - matplotlib-base=3.5.1=py39ha18d171_1\n  - matplotlib-inline=0.1.2=pyhd3eb1b0_2\n  - mccabe=0.6.1=py39h06a4308_1\n  - mistune=0.8.4=py39h27cfd23_1000\n  - mkl=2021.4.0=h06a4308_640\n  - mkl-service=2.4.0=py39h7f8727e_0\n  - mkl_fft=1.3.1=py39hd3c417c_0\n  - mkl_random=1.2.2=py39h51133e4_0\n  - mock=4.0.3=pyhd3eb1b0_0\n  - more-itertools=8.12.0=pyhd3eb1b0_0\n  - mpc=1.1.0=h10f8cd9_1\n  - mpfr=4.0.2=hb69a4c5_1\n  - mpi=1.0=mpich\n  - mpich=3.3.2=hc856adb_0\n  - mpmath=1.2.1=py39h06a4308_0\n  - msgpack-python=1.0.2=py39hff7bd54_1\n  - multipledispatch=0.6.0=py39h06a4308_0\n  - munkres=1.1.4=py_0\n  - mypy_extensions=0.4.3=py39h06a4308_1\n  - nbclassic=0.3.5=pyhd3eb1b0_0\n  - nbclient=0.5.11=pyhd3eb1b0_0\n  - nbconvert=6.3.0=py39h06a4308_0\n  - nbformat=5.1.3=pyhd3eb1b0_0\n  - ncurses=6.3=h7f8727e_2\n  - nest-asyncio=1.5.1=pyhd3eb1b0_0\n  - nettle=3.7.3=hbbd107a_1\n  - networkx=2.7.1=pyhd3eb1b0_0\n  - nltk=3.7=pyhd3eb1b0_0\n  - nose=1.3.7=pyhd3eb1b0_1008\n  - notebook=6.4.8=py39h06a4308_0\n  - numba=0.55.1=py39h51133e4_0\n  - numexpr=2.8.1=py39h6abb31d_0\n  - numpy=1.21.2=py39h20f2e39_0\n  - numpy-base=1.21.2=py39h79a1101_0\n  - numpydoc=1.2=pyhd3eb1b0_0\n  - olefile=0.46=pyhd3eb1b0_0\n  - openh264=2.1.1=h4ff587b_0\n  - openjpeg=2.4.0=h3ad879b_0\n  - openpyxl=3.0.9=pyhd3eb1b0_0\n  - openssl=1.1.1n=h7f8727e_0\n  - packaging=21.3=pyhd3eb1b0_0\n  - pandas=1.4.1=py39h295c915_1\n  - pandocfilters=1.5.0=pyhd3eb1b0_0\n  - pango=1.45.3=hd140c19_0\n  - parso=0.8.3=pyhd3eb1b0_0\n  - partd=1.2.0=pyhd3eb1b0_1\n  - patchelf=0.13=h295c915_0\n  - path=16.2.0=pyhd3eb1b0_0\n  - path.py=12.5.0=hd3eb1b0_0\n  - pathlib2=2.3.6=py39h06a4308_2\n  - pathspec=0.7.0=py_0\n  - patsy=0.5.2=py39h06a4308_1\n  - pcre=8.45=h295c915_0\n  - pep8=1.7.1=py39h06a4308_0\n  - pexpect=4.8.0=pyhd3eb1b0_3\n  - pickleshare=0.7.5=pyhd3eb1b0_1003\n  - pillow=9.0.1=py39h22f2fdc_0\n  - pip=21.2.4=py39h06a4308_0\n  - pixman=0.40.0=h7f8727e_1\n  - pkginfo=1.8.2=pyhd3eb1b0_0\n  - pluggy=1.0.0=py39h06a4308_1\n  - ply=3.11=py39h06a4308_0\n  - poyo=0.5.0=pyhd3eb1b0_0\n  - prometheus_client=0.13.1=pyhd3eb1b0_0\n  - prompt-toolkit=3.0.20=pyhd3eb1b0_0\n  - prompt_toolkit=3.0.20=hd3eb1b0_0\n  - psutil=5.8.0=py39h27cfd23_1\n  - ptyprocess=0.7.0=pyhd3eb1b0_2\n  - pure_eval=0.2.2=pyhd3eb1b0_0\n  - py=1.11.0=pyhd3eb1b0_0\n  - py-lief=0.11.5=py39h295c915_1\n  - pycodestyle=2.7.0=pyhd3eb1b0_0\n  - pycosat=0.6.3=py39h27cfd23_0\n  - pycparser=2.21=pyhd3eb1b0_0\n  - pycurl=7.44.1=py39h8f2d780_1\n  - pydocstyle=6.1.1=pyhd3eb1b0_0\n  - pyerfa=2.0.0=py39h27cfd23_0\n  - pyflakes=2.3.1=pyhd3eb1b0_0\n  - pygments=2.11.2=pyhd3eb1b0_0\n  - pylint=2.9.6=py39h06a4308_1\n  - pyls-spyder=0.4.0=pyhd3eb1b0_0\n  - pyodbc=4.0.32=py39h295c915_1\n  - pyopenssl=22.0.0=pyhd3eb1b0_0\n  - pyparsing=3.0.4=pyhd3eb1b0_0\n  - pyqt=5.9.2=py39h2531618_6\n  - pyrsistent=0.18.0=py39heee7806_0\n  - pysocks=1.7.1=py39h06a4308_0\n  - pytables=3.6.1=py39h77479fe_1\n  - pytest=6.2.5=py39h06a4308_2\n  - python=3.9.7=h12debd9_1\n  - python-dateutil=2.8.2=pyhd3eb1b0_0\n  - python-libarchive-c=2.9=pyhd3eb1b0_1\n  - python-lsp-black=1.0.0=pyhd3eb1b0_0\n  - python-lsp-jsonrpc=1.0.0=pyhd3eb1b0_0\n  - python-lsp-server=1.2.4=pyhd3eb1b0_0\n  - python-slugify=5.0.2=pyhd3eb1b0_0\n  - pytz=2021.3=pyhd3eb1b0_0\n  - pywavelets=1.3.0=py39h7f8727e_0\n  - pyxdg=0.27=pyhd3eb1b0_0\n  - pyyaml=6.0=py39h7f8727e_1\n  - pyzmq=22.3.0=py39h295c915_2\n  - qdarkstyle=3.0.2=pyhd3eb1b0_0\n  - qstylizer=0.1.10=pyhd3eb1b0_0\n  - qt=5.9.7=h5867ecd_1\n  - qtawesome=1.0.3=pyhd3eb1b0_0\n  - 
qtconsole=5.2.2=pyhd3eb1b0_0\n  - qtpy=1.11.2=pyhd3eb1b0_0\n  - readline=8.1.2=h7f8727e_1\n  - regex=2022.3.15=py39h7f8727e_0\n  - requests=2.27.1=pyhd3eb1b0_0\n  - ripgrep=12.1.1=0\n  - rope=0.22.0=pyhd3eb1b0_0\n  - rtree=0.9.7=py39h06a4308_1\n  - ruamel_yaml=0.15.100=py39h27cfd23_0\n  - scikit-image=0.19.2=py39h51133e4_0\n  - scikit-learn=1.0.2=py39h51133e4_1\n  - scikit-learn-intelex=2021.5.0=py39h06a4308_0\n  - scipy=1.7.3=py39hc147768_0\n  - seaborn=0.11.2=pyhd3eb1b0_0\n  - secretstorage=3.3.1=py39h06a4308_0\n  - send2trash=1.8.0=pyhd3eb1b0_1\n  - setuptools=58.0.4=py39h06a4308_0\n  - simplegeneric=0.8.1=py39h06a4308_2\n  - singledispatch=3.7.0=pyhd3eb1b0_1001\n  - sip=4.19.13=py39h295c915_0\n  - six=1.16.0=pyhd3eb1b0_1\n  - snappy=1.1.8=he6710b0_0\n  - sniffio=1.2.0=py39h06a4308_1\n  - snowballstemmer=2.2.0=pyhd3eb1b0_0\n  - sortedcollections=2.1.0=pyhd3eb1b0_0\n  - sortedcontainers=2.4.0=pyhd3eb1b0_0\n  - soupsieve=2.3.1=pyhd3eb1b0_0\n  - sphinx=4.4.0=pyhd3eb1b0_0\n  - sphinxcontrib=1.0=py39h06a4308_1\n  - sphinxcontrib-applehelp=1.0.2=pyhd3eb1b0_0\n  - sphinxcontrib-devhelp=1.0.2=pyhd3eb1b0_0\n  - sphinxcontrib-htmlhelp=2.0.0=pyhd3eb1b0_0\n  - sphinxcontrib-jsmath=1.0.1=pyhd3eb1b0_0\n  - sphinxcontrib-qthelp=1.0.3=pyhd3eb1b0_0\n  - sphinxcontrib-serializinghtml=1.1.5=pyhd3eb1b0_0\n  - sphinxcontrib-websupport=1.2.4=py_0\n  - spyder=5.1.5=py39h06a4308_1\n  - spyder-kernels=2.1.3=py39h06a4308_0\n  - sqlalchemy=1.4.32=py39h7f8727e_0\n  - sqlite=3.38.2=hc218d9a_0\n  - stack_data=0.2.0=pyhd3eb1b0_0\n  - statsmodels=0.13.2=py39h7f8727e_0\n  - sympy=1.10.1=py39h06a4308_0\n  - tbb=2021.5.0=hd09550d_0\n  - tbb4py=2021.5.0=py39hd09550d_0\n  - tblib=1.7.0=pyhd3eb1b0_0\n  - terminado=0.13.1=py39h06a4308_0\n  - testpath=0.5.0=pyhd3eb1b0_0\n  - text-unidecode=1.3=pyhd3eb1b0_0\n  - textdistance=4.2.1=pyhd3eb1b0_0\n  - threadpoolctl=2.2.0=pyh0d69192_0\n  - three-merge=0.1.1=pyhd3eb1b0_0\n  - tifffile=2021.7.2=pyhd3eb1b0_2\n  - tinycss=0.4=pyhd3eb1b0_1002\n  - tk=8.6.11=h1ccaba5_0\n  - toml=0.10.2=pyhd3eb1b0_0\n  - toolz=0.11.2=pyhd3eb1b0_0\n  - tornado=6.1=py39h27cfd23_0\n  - tqdm=4.63.0=pyhd3eb1b0_0\n  - traitlets=5.1.1=pyhd3eb1b0_0\n  - typed-ast=1.4.3=py39h7f8727e_1\n  - typing-extensions=4.1.1=hd3eb1b0_0\n  - typing_extensions=4.1.1=pyh06a4308_0\n  - tzdata=2022a=hda174b7_0\n  - ujson=5.1.0=py39h295c915_0\n  - unicodecsv=0.14.1=py39h06a4308_0\n  - unidecode=1.2.0=pyhd3eb1b0_0\n  - unixodbc=2.3.9=h7b6447c_0\n  - urllib3=1.26.8=pyhd3eb1b0_0\n  - watchdog=2.1.6=py39h06a4308_0\n  - wcwidth=0.2.5=pyhd3eb1b0_0\n  - webencodings=0.5.1=py39h06a4308_1\n  - websocket-client=0.58.0=py39h06a4308_4\n  - werkzeug=2.0.3=pyhd3eb1b0_0\n  - wheel=0.37.1=pyhd3eb1b0_0\n  - whichcraft=0.6.1=pyhd3eb1b0_0\n  - widgetsnbextension=3.5.2=py39h06a4308_0\n  - wrapt=1.12.1=py39he8ac12f_1\n  - wurlitzer=3.0.2=py39h06a4308_0\n  - xlrd=2.0.1=pyhd3eb1b0_0\n  - xlsxwriter=3.0.2=pyhd3eb1b0_0\n  - xlwt=1.3.0=py39h06a4308_0\n  - xz=5.2.5=h7b6447c_0\n  - yaml=0.2.5=h7b6447c_0\n  - yapf=0.31.0=pyhd3eb1b0_0\n  - zeromq=4.3.4=h2531618_0\n  - zfp=0.5.5=h295c915_6\n  - zict=2.0.0=pyhd3eb1b0_0\n  - zipp=3.7.0=pyhd3eb1b0_0\n  - zlib=1.2.11=h7f8727e_4\n  - zope=1.0=py39h06a4308_1\n  - zope.event=4.5.0=py39h06a4308_0\n  - zope.interface=5.4.0=py39h7f8727e_0\n  - zstd=1.4.9=haebb681_0\n  - pip:\n    - astor==0.8.1\n    - configparser==5.2.0\n    - data==0.4\n    - docker-pycreds==0.4.0\n    - easydict==1.9\n    - einops==0.4.1\n    - formulaic==0.5.2\n    - funcsigs==1.0.2\n    - future==0.18.2\n    - gitdb==4.0.9\n    - gitpython==3.1.27\n 
   - interface-meta==1.3.0\n    - iopath==0.1.9\n    - jpeg4py==0.1.4\n    - jsonpatch==1.32\n    - jsonpointer==2.3\n    - latex==0.7.0\n    - libarchive-c==2.9\n    - linearmodels==4.29\n    - lmdb==1.3.0\n    - loguru==0.6.0\n    - mat73==0.59\n    - memory-profiler==0.60.0\n    - msgpack==1.0.2\n    - ninja==1.11.1\n    - opencv-python==4.5.5.64\n    - pathtools==0.1.2\n    - promise==2.3\n    - property-cached==1.6.4\n    - protobuf==3.20.0\n    - pycocotools==2.0.4\n    - pyhdfe==0.1.2\n    - ruamel-yaml-conda==0.15.100\n    - sentry-sdk==1.5.8\n    - setproctitle==1.2.2\n    - setuptools-scm==7.1.0\n    - shapely==1.8.1.post1\n    - shortuuid==1.0.8\n    - shutilwhich==1.1.0\n    - smmap==5.0.0\n    - tables==3.6.1\n    - tempdir==0.7.1\n    - tensorboardx==2.5.1\n    - thop==0.1.0.post2207010342\n    - tikzplotlib==0.10.1\n    - timm==0.5.4\n    - tomli==2.0.1\n    - torch==1.11.0\n    - torchfile==0.1.0\n    - visdom==0.1.8.9\n    - wandb==0.12.11\n    - webcolors==1.12\n    - yaspin==2.1.0\nprefix: /public/baiyifan/conda_envs/artrack\n\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# ARTrack\r\n\r\n[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-got-10k)](https://paperswithcode.com/sota/visual-object-tracking-on-got-10k?p=artrackv2-prompting-autoregressive-tracker)\r\n[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-lasot)](https://paperswithcode.com/sota/visual-object-tracking-on-lasot?p=artrackv2-prompting-autoregressive-tracker)\r\n[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-trackingnet)](https://paperswithcode.com/sota/visual-object-tracking-on-trackingnet?p=artrackv2-prompting-autoregressive-tracker)\r\n[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-needforspeed)](https://paperswithcode.com/sota/visual-object-tracking-on-needforspeed?p=artrackv2-prompting-autoregressive-tracker)\r\n[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-tnl2k)](https://paperswithcode.com/sota/visual-object-tracking-on-tnl2k?p=artrackv2-prompting-autoregressive-tracker)\r\n[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-lasot-ext)](https://paperswithcode.com/sota/visual-object-tracking-on-lasot-ext?p=artrackv2-prompting-autoregressive-tracker)\r\n[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/artrackv2-prompting-autoregressive-tracker/visual-object-tracking-on-uav123)](https://paperswithcode.com/sota/visual-object-tracking-on-uav123?p=artrackv2-prompting-autoregressive-tracker)\r\n\r\nThe official PyTorch implementation of our **CVPR 2023 Highlight**  and  **CVPR 2024 Poster**  paper:\r\n\r\n**Autoregressive Visual Tracking**\r\n\r\n**ARTrackV2: Prompting Autoregressive Tracker Where to Look and How to Describe**\r\n\r\nGitHub maintainer: [Yifan Bai](https://github.com/AlexDotHam)\r\n\r\n[[CVPR2023](https://openaccess.thecvf.com/content/CVPR2023/papers/Wei_Autoregressive_Visual_Tracking_CVPR_2023_paper.pdf)] \r\n[[CVPR2024](https://artrackv2.github.io/)] \r\n\r\n### :bookmark: Update List:\r\n- [ ] Fastitnp\r\n- [ ] VastTrack and More Datasets\r\n\r\n### :bookmark: Checkpoints in Baidu Pan\r\n[[Baidu Pan](https://pan.baidu.com/s/1bpdLdwO39kHD5iH06aGIzg?pwd=g06j)]  passwd: g06j \r\n\r\nGoogle Drive:\r\n[ARTrackV2-B-256](https://drive.google.com/file/d/1tGaY5jQxZOTzJDWXgOgoHtBwc5l4NLQ2/view?usp=drive_link)       |     [ARTrackV2-B-256-GOT](https://drive.google.com/file/d/1RqsjHqTBsIN5ivD-C5tXDyhZlGZjJN88/view?usp=drive_link)     |     [ARTrackV2-L-384-GOT](https://drive.google.com/file/d/1KJ-TXFSn6K_OhchoRE29ePZbSm9sBHVS/view?usp=drive_link)     |\r\n\r\n### :bookmark:Our ARTrackV2 is accepted by CVPR2024!!!\r\n[[Deprecated Code](https://drive.google.com/file/d/15PHRN9utPfm1L4djr7U6MLHWIkx90EVD/view?usp=sharing)]\r\n[[Raw Result](https://drive.google.com/drive/folders/1Kd6IA60OQStfYCgsi42I20VRDTB7VcWl?usp=sharing)]\r\n\r\nWe have released the training code for ARTrackV2 and merged it into this repository. You can adjust the config to use the corresponding ARTrackV2-marked modules. 
The training process remains the same as V1.\r\n\r\n|             Variant             |       [ARTrackV2-B-256](https://drive.google.com/file/d/1tGaY5jQxZOTzJDWXgOgoHtBwc5l4NLQ2/view?usp=drive_link)       |     [ARTrackV2-B-256-GOT](https://drive.google.com/file/d/1RqsjHqTBsIN5ivD-C5tXDyhZlGZjJN88/view?usp=drive_link)     |     [ARTrackV2-L-384-GOT](https://drive.google.com/file/d/1KJ-TXFSn6K_OhchoRE29ePZbSm9sBHVS/view?usp=drive_link)     |\r\n|:-------------------------------:|:-----------------------:|:-----------------------:|:-----------------------:|\r\n|          Model Config           | ViT-B, 256^2 resolution | ViT-B, 256^2 resolution | ViT-L, 384^2 resolution |\r\n| GOT-10k (AO / SR 0.5 / SR 0.75) |   - / - / -    |   76.1 / 85.5 / 72.9    |   79.6 / 88.0 / 78.7    |\r\n|    LaSOT (AUC / Norm P / P)     |   71.5 / 80.3 / 77.5    |        - / - / -        |        - / - / -        |\r\n| TrackingNet (AUC / Norm P / P)  |   84.3 / 89.1 / 83.6    |        - / - / -        |        - / - / -        |\r\n|  LaSOT_ext (AUC / Norm P / P)   |   51.1 / 58.4 / 61.5    |        - / - / -        |        - / - / -        |\r\n\r\n## Highlight\r\n\r\n![](figure/overview.jpg)\r\n\r\n### :bookmark:Brief Introduction\r\n\r\nWe present **ARTrack**, an autoregressive framework for visual object tracking. ARTrack tackles tracking as a coordinate sequence interpretation task that estimates object trajectories progressively, where the current estimate is induced by previous states and in turn affects subsequences. This time-autoregressive approach models the sequential evolution of trajectories to keep tracing the object **across frames**, making it superior to existing template-matching-based trackers that consider only **per-frame** localization accuracy. ARTrack is simple and direct, eliminating customized localization heads and post-processing. 
Despite its simplicity, ARTrack achieves state-of-the-art performance on prevailing benchmark datasets.\r\n\r\n### :bookmark:Strong Performance\r\n\r\n|             Variant             |       ARTrack-256       |       ARTrack-384       |      ARTrack-L-384      |\r\n|:-------------------------------:|:-----------------------:|:-----------------------:|:-----------------------:|\r\n|          Model Config           | ViT-B, 256^2 resolution | ViT-B, 384^2 resolution | ViT-L, 384^2 resolution |\r\n| GOT-10k (AO / SR 0.5 / SR 0.75) |   73.5 / 82.2 / 70.9    |   75.5 / 84.3 / 74.3    |   78.5 / 87.4 / 77.8    |\r\n|    LaSOT (AUC / Norm P / P)     |   70.4 / 79.5 / 76.6    |   72.6 / 81.7 / 79.1    |   73.1 / 82.2 / 80.3    |\r\n| TrackingNet (AUC / Norm P / P)  |   84.2 / 88.7 / 83.5    |   85.1 / 89.1 / 84.8    |   85.6 / 89.6 / 84.8    |\r\n|  LaSOT_ext (AUC / Norm P / P)   |   46.4 / 56.5 / 52.3    |   51.9 / 62.0 / 58.5    |   52.8 / 62.9 / 59.7    |\r\n|          TNL-2K (AUC)           |          57.5           |          59.8           |          60.3           |\r\n|           NfS30 (AUC)           |          64.3           |          66.8           |          67.9           |\r\n|          UAV123 (AUC)           |          67.7           |          70.5           |          71.2           |\r\n\r\n### :bookmark:Inference Speed\r\n\r\nOur baseline model (backbone: ViT-B, resolution: 256x256) runs at **26 fps** (frames per second) on a single NVIDIA GeForce RTX 3090, and our alternative-decoder version runs at **45 fps** on the same GPU.\r\n\r\n## Bug of array of inhomogeneous shape\r\n\r\nThanks to [MrtXue](https://github.com/MrtXue): if you hit \"ValueError: setting an array element with a sequence.\" during second-stage training, try downgrading your numpy version to 1.23.\r\n\r\n## Update for checkpoint (ARTrack_large_384_full):\r\n\r\nYou can download the model weights from [Google Drive](https://drive.google.com/drive/folders/1KsH_MIZIdgjZpUZBmR4P88yeYDqM8yNW?usp=sharing)\r\n\r\n|             Variant             |      ARTrack-L-384      |\r\n|:-------------------------------:|:-----------------------:|\r\n|          Model Config           | ViT-L, 384^2 resolution |\r\n| GOT-10k (AO / SR 0.5 / SR 0.75) |   80.0 / 88.5 / 80.0    |\r\n|    LaSOT (AUC / Norm P / P)     |   73.5 / 82.4 / 80.6    |\r\n| TrackingNet (AUC / Norm P / P)  |   85.5 / 90.1 / 85.9    |\r\n|  LaSOT_ext (AUC / Norm P / P)   |   51.8 / 62.3 / 58.8    |\r\n\r\n## Update for checkpoint and raw_result (ARTrack_base_256_full):\r\n\r\nYou can download the model weights and raw results from [Google Drive](https://drive.google.com/drive/folders/1KsH_MIZIdgjZpUZBmR4P88yeYDqM8yNW?usp=sharing)\r\n\r\n|             Variant             |       ARTrack-256       |     ARTrack-256-got     |\r\n|:-------------------------------:|:-----------------------:|:-----------------------:|\r\n|          Model Config           | ViT-B, 256^2 resolution | ViT-B, 256^2 resolution |\r\n| GOT-10k (AO / SR 0.5 / SR 0.75) |   76.7 / 85.7 / 74.8    |   74.1 / 83.1 / 70.0    |\r\n|    LaSOT (AUC / Norm P / P)     |   70.8 / 79.6 / 76.3    |        - / - / -        |\r\n| TrackingNet (AUC / Norm P / P)  |   84.3 / 88.7 / 83.4    |        - / - / -        |\r\n|  LaSOT_ext (AUC / Norm P / P)   |   48.4 / 57.7 / 53.7    |        - / - / -        |\r\n\r\n## Install the environment\r\n\r\nUse Anaconda (CUDA 11.3):\r\n```\r\nconda env create -f ARTrack_env_cuda113.yaml\r\n```\r\n
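\r\nIf you later hit the error described in the \"Bug of array of inhomogeneous shape\" section above, a minimal sketch of the fix inside the freshly created environment is shown below (1.23.5 is an assumed patch release of the 1.23 series):\r\n```\r\nconda activate artrack\r\n# Pin numpy to the 1.23 series to avoid\r\n# \"ValueError: setting an array element with a sequence.\" in stage-two training\r\npip install numpy==1.23.5\r\n```\r\n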
\r\n## Set project paths\r\nRun the following command to set the paths for this project:\r\n```\r\npython tracking/create_default_local_file.py --workspace_dir . --data_dir ./data --save_dir ./output\r\n```\r\nAfter running this command, you can still adjust the paths by editing these two files:\r\n```\r\nlib/train/admin/local.py  # paths about training\r\nlib/test/evaluation/local.py  # paths about testing\r\n```\r\n\r\n## Data Preparation\r\nPut the tracking datasets in ./data. It should look like this:\r\n   ```\r\n   ${PROJECT_ROOT}\r\n    -- data\r\n        -- lasot\r\n            |-- airplane\r\n            |-- basketball\r\n            |-- bear\r\n            ...\r\n        -- got10k\r\n            |-- test\r\n            |-- train\r\n            |-- val\r\n        -- coco\r\n            |-- annotations\r\n            |-- images\r\n        -- trackingnet\r\n            |-- TRAIN_0\r\n            |-- TRAIN_1\r\n            ...\r\n            |-- TRAIN_11\r\n            |-- TEST\r\n   ```\r\n\r\n## Training\r\nDownload the pre-trained [MAE ViT-Base weights](https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth) and put them under `$PROJECT_ROOT$/pretrained_models` (different pretrained models can also be used, see [MAE](https://github.com/facebookresearch/mae) for more details).\r\n\r\n### One-stage pair-level training\r\n\r\nBecause sequence-level training requires video input while the COCO dataset contains only still images, the model is first trained with the traditional pair-level method so that it can be compared fairly with other trackers.\r\n```\r\npython tracking/train.py --script artrack --config artrack_256_full --save_dir ./output --mode multiple --nproc_per_node 4 --use_wandb 0\r\n```\r\n\r\nReplace `--config` with the desired model config under `experiments/artrack`. We use [wandb](https://github.com/wandb/client) to record detailed training logs; if you don't want to use wandb, set `--use_wandb 0`.\r\n\r\n### Two-stage sequence-level training\r\n\r\nTo enable sequence-level training, set `PRETRAIN_PTH` in the corresponding `experiments/artrack_seq/*.yaml` configuration file to the path of your first-stage checkpoint, such as `./output/artrack_256_full/checkpoints/train/artrack/artrack_256_full/ARTrack_ep0240.pth.tar` (see the sketch after the command below).\r\n\r\n```\r\npython tracking/train.py --script artrack_seq --config artrack_seq_256_full --save_dir ./output --mode multiple --nproc_per_node 4 --use_wandb 0\r\n```\r\n
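\r\nFor reference, a minimal sketch of that config edit; the placement under `MODEL` is an assumption (mirroring `PRETRAIN_FILE` in the MindSpore configs), so adjust to wherever `PRETRAIN_PTH` appears in your yaml:\r\n```\r\n# experiments/artrack_seq/artrack_seq_256_full.yaml (illustrative excerpt)\r\nMODEL:\r\n  PRETRAIN_PTH: \"./output/artrack_256_full/checkpoints/train/artrack/artrack_256_full/ARTrack_ep0240.pth.tar\"\r\n```\r\n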
\r\n## Evaluation\r\n\r\nChange the corresponding values of `lib/test/evaluation/local.py` to the actual benchmark saving paths.\r\n\r\nSome testing examples:\r\n- LaSOT or other offline-evaluated benchmarks (modify `--dataset` correspondingly)\r\n```\r\npython tracking/test.py artrack_seq artrack_seq_256_full --dataset lasot --threads 16 --num_gpus 4\r\npython tracking/analysis_results.py # need to modify tracker configs and names\r\n```\r\n- GOT10K-test\r\n```\r\npython tracking/test.py artrack_seq artrack_seq_256_full --dataset got10k_test --threads 16 --num_gpus 4\r\npython lib/test/utils/transform_got10k.py --tracker_name artrack_seq --cfg_name artrack_seq_256_full\r\n```\r\n- TrackingNet\r\n```\r\npython tracking/test.py artrack_seq artrack_seq_256_full --dataset trackingnet --threads 16 --num_gpus 4\r\npython lib/test/utils/transform_trackingnet.py --tracker_name artrack_seq --cfg_name artrack_seq_256_full\r\n```\r\n\r\n## Acknowledgement\r\n\r\n:heart::heart::heart:Our idea is implemented based on the following projects. We really appreciate their excellent open-source work!\r\n\r\n- [SIoU](https://github.com/AlexDotHam/SIoU-loss) [[related paper](https://arxiv.org/abs/2205.12740)]\r\n- [OSTrack](https://github.com/botaoye/OSTrack) [[related paper](https://arxiv.org/abs/2203.11991)]\r\n- [PyTracking](https://github.com/visionml/pytracking) [[related paper](https://arxiv.org/abs/2208.06888)]\r\n\r\n:heart::heart::heart:This project is not for commercial use. For commercial use, please contact the author.\r\n\r\n## Citation\r\n\r\nIf any part of our paper or code helps your research, please consider citing us and giving a star to our repository.\r\n\r\n```\r\n@InProceedings{Wei_2023_CVPR,\r\n    author    = {Wei, Xing and Bai, Yifan and Zheng, Yongchao and Shi, Dahu and Gong, Yihong},\r\n    title     = {Autoregressive Visual Tracking},\r\n    booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},\r\n    month     = {June},\r\n    year      = {2023},\r\n    pages     = {9697-9706}\r\n}\r\n@InProceedings{Bai_2024_CVPR,\r\n    author    = {Bai, Yifan and Zhao, Zeyang and Gong, Yihong and Wei, Xing},\r\n    title     = {ARTrackV2: Prompting Autoregressive Tracker Where to Look and How to Describe},\r\n    booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},\r\n    month     = {June},\r\n    year      = {2024}\r\n}\r\n```\r\n\r\n## Contact\r\n\r\nIf you have any questions or concerns, feel free to open an issue or contact me directly through my GitHub homepage (see the maintainer link below the papers' titles at the top of this README).\r\n"
  },
  {
    "path": "artrackv2_mindspore/.gitignore",
    "content": "test/\nlib/models/__pycache__/\nlib/config/__pycache__/\nlib/test/tracker/__pycache__/\nlib/models/__pycache__/\nlib/config/ostrack/__pycache__/\nlib/config/ostrack/__pycache__/\n"
  },
  {
    "path": "artrackv2_mindspore/README.md",
    "content": "# ARTrackV2\n\n## Evaluation\n\nMake sure you have installed the GPU version of MindSpore according to [link](https://www.mindspore.cn/install/en).\n\nChange the corresponding values of `lib/test/evaluation/local.py` to the actual benchmark saving paths\n\nSome testing examples:\n\n- GOT10K-test\n\n```python\ncd tracking\npython test.py ostrack 2stage_256_got --dataset got10k_test --thread 0 --num_gpus 1\n```\n"
  },
  {
    "path": "artrackv2_mindspore/experiments/ostrack/2stage_256_got.yaml",
    "content": "DATA:\r\n  MAX_SAMPLE_INTERVAL: 200\r\n  MEAN:\r\n  - 0.485\r\n  - 0.456\r\n  - 0.406\r\n  SEARCH:\r\n    CENTER_JITTER: 3\r\n    FACTOR: 4.0\r\n    SCALE_JITTER: 0.25\r\n    SIZE: 256\r\n    NUMBER: 14\r\n  STD:\r\n  - 0.229\r\n  - 0.224\r\n  - 0.225\r\n  TEMPLATE:\r\n    CENTER_JITTER: 0\r\n    FACTOR: 2.0\r\n    SCALE_JITTER: 0\r\n    SIZE: 128\r\n    NUMBER: 2\r\n  TRAIN:\r\n    DATASETS_NAME:\r\n    #- LASOT\r\n    - GOT10K_train_full\r\n    #- TRACKINGNET\r\n    DATASETS_RATIO:\r\n    #- 1\r\n    - 1\r\n    #- 1\r\n    SAMPLE_PER_EPOCH: 1000\r\n  VAL:\r\n    DATASETS_NAME:\r\n    - GOT10K_official_val\r\n    DATASETS_RATIO:\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 10000\r\nMODEL:\r\n  PRETRAIN_FILE: \"../checkpoint1.ckpt\"\r\n  EXTRA_MERGER: False\r\n  RETURN_INTER: False\r\n  DECODER:\r\n    TYPE: \"mask\"\r\n    MASK_RATIO: 0.75\r\n    EMBEDDIM: 512\r\n    DEPTH: 8\r\n    NUMHEADS: 16\r\n    MLPRATIO: 4\r\n  BACKBONE:\r\n    TYPE: vit_base_patch16_224\r\n    STRIDE: 16\r\n  HEAD:\r\n    TYPE: PIX\r\n    NUM_CHANNELS: 768\r\nTRAIN:\r\n  BACKBONE_MULTIPLIER: 0.1\r\n  DROP_PATH_RATE: 0.1\r\n  BATCH_SIZE: 8\r\n  EPOCH: 120\r\n  GIOU_WEIGHT: 2.0\r\n  L1_WEIGHT: 0.0\r\n  GRAD_CLIP_NORM: 0.1\r\n  LR: 0.00008\r\n  LR_DROP_EPOCH: 90\r\n  NUM_WORKER: 6\r\n  OPTIMIZER: ADAMW\r\n  PRINT_INTERVAL: 1\r\n  SCHEDULER:\r\n    TYPE: step\r\n    DECAY_RATE: 0.05\r\n  VAL_EPOCH_INTERVAL: 10\r\n  WEIGHT_DECAY: 0.05\r\n  AMP: False\r\nTEST:\r\n  EPOCH: 300\r\n  SEARCH_FACTOR: 3.95\r\n  SEARCH_SIZE: 256\r\n  TEMPLATE_FACTOR: 2.0\r\n  TEMPLATE_SIZE: 128\r\n"
  },
  {
    "path": "artrackv2_mindspore/experiments/ostrack/best_384.yaml",
    "content": "DATA:\r\n  MAX_SAMPLE_INTERVAL: 200\r\n  MEAN:\r\n  - 0.485\r\n  - 0.456\r\n  - 0.406\r\n  SEARCH:\r\n    CENTER_JITTER: 4.5\r\n    FACTOR: 5.0\r\n    SCALE_JITTER: 0.5\r\n    SIZE: 384\r\n  STD:\r\n  - 0.229\r\n  - 0.224\r\n  - 0.225\r\n  TEMPLATE:\r\n    CENTER_JITTER: 0\r\n    FACTOR: 2.0\r\n    SCALE_JITTER: 0\r\n    SIZE: 192\r\n  TRAIN:\r\n    DATASETS_NAME:\r\n    - LASOT\r\n    - GOT10K_vottrain\r\n    - COCO17\r\n    - TRACKINGNET\r\n    DATASETS_RATIO:\r\n    - 1\r\n    - 1\r\n    - 1\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 60000\r\n  VAL:\r\n    DATASETS_NAME:\r\n    - GOT10K_votval\r\n    DATASETS_RATIO:\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 10240\r\nMODEL:\r\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\r\n  EXTRA_MERGER: False\r\n  RETURN_INTER: False\r\n  BACKBONE:\r\n    TYPE: vit_base_patch16_224\r\n    STRIDE: 16\r\n  HEAD:\r\n    TYPE: PIX\r\n    NUM_CHANNELS: 768\r\nTRAIN:\r\n  BACKBONE_MULTIPLIER: 0.1\r\n  DROP_PATH_RATE: 0.1\r\n  BATCH_SIZE: 16\r\n  EPOCH: 500\r\n  GIOU_WEIGHT: 2.0\r\n  L1_WEIGHT: 0.0\r\n  GRAD_CLIP_NORM: 0.1\r\n  LR: 0.00008\r\n  LR_DROP_EPOCH: 400\r\n  NUM_WORKER: 16\r\n  OPTIMIZER: ADAMW\r\n  PRINT_INTERVAL: 10\r\n  SCHEDULER:\r\n    TYPE: step\r\n    DECAY_RATE: 0.1\r\n  VAL_EPOCH_INTERVAL: 10\r\n  WEIGHT_DECAY: 0.0001\r\n  AMP: False\r\nTEST:\r\n  EPOCH: 500\r\n  SEARCH_FACTOR: 5.0\r\n  SEARCH_SIZE: 384\r\n  TEMPLATE_FACTOR: 2.0\r\n  TEMPLATE_SIZE: 192"
  },
  {
    "path": "artrackv2_mindspore/experiments/ostrack/finetune.yaml",
    "content": "DATA:\r\n  MAX_SAMPLE_INTERVAL: 200\r\n  MEAN:\r\n  - 0.485\r\n  - 0.456\r\n  - 0.406\r\n  SEARCH:\r\n    CENTER_JITTER: 3\r\n    FACTOR: 4.0\r\n    SCALE_JITTER: 0.25\r\n    SIZE: 256\r\n    NUMBER: 8\r\n  STD:\r\n  - 0.229\r\n  - 0.224\r\n  - 0.225\r\n  TEMPLATE:\r\n    CENTER_JITTER: 0\r\n    FACTOR: 2.0\r\n    SCALE_JITTER: 0\r\n    SIZE: 128\r\n  TRAIN:\r\n    DATASETS_NAME:\r\n    - LASOT\r\n    - GOT10K_vottrain\r\n    - TRACKINGNET\r\n    DATASETS_RATIO:\r\n    - 1\r\n    - 1\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 960\r\n  VAL:\r\n    DATASETS_NAME:\r\n    - GOT10K_official_val\r\n    DATASETS_RATIO:\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 10000\r\nMODEL:\r\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\r\n  EXTRA_MERGER: False\r\n  RETURN_INTER: False\r\n  BACKBONE:\r\n    TYPE: vit_base_patch16_224\r\n    STRIDE: 16\r\n  HEAD:\r\n    TYPE: PIX\r\n    NUM_CHANNELS: 768\r\nTRAIN:\r\n  BACKBONE_MULTIPLIER: 0.1\r\n  DROP_PATH_RATE: 0.1\r\n  BATCH_SIZE: 8\r\n  EPOCH: 120\r\n  GIOU_WEIGHT: 2.0\r\n  L1_WEIGHT: 0.0\r\n  GRAD_CLIP_NORM: 0.1\r\n  LR: 0.000004\r\n  LR_DROP_EPOCH: 400\r\n  NUM_WORKER: 8\r\n  OPTIMIZER: ADAMW\r\n  PRINT_INTERVAL: 1\r\n  SCHEDULER:\r\n    TYPE: step\r\n    DECAY_RATE: 0.1\r\n  VAL_EPOCH_INTERVAL: 10\r\n  WEIGHT_DECAY: 0.05\r\n  AMP: False\r\nTEST:\r\n  EPOCH: 30\r\n  SEARCH_FACTOR: 4.0\r\n  SEARCH_SIZE: 256\r\n  TEMPLATE_FACTOR: 2.0\r\n  TEMPLATE_SIZE: 128"
  },
  {
    "path": "artrackv2_mindspore/experiments/ostrack/finetune_384.yaml",
    "content": "DATA:\r\n  MAX_SAMPLE_INTERVAL: 200\r\n  MEAN:\r\n  - 0.485\r\n  - 0.456\r\n  - 0.406\r\n  SEARCH:\r\n    CENTER_JITTER: 4.5\r\n    FACTOR: 5.0\r\n    SCALE_JITTER: 0.5\r\n    SIZE: 384\r\n    NUMBER: 12\r\n  STD:\r\n  - 0.229\r\n  - 0.224\r\n  - 0.225\r\n  TEMPLATE:\r\n    CENTER_JITTER: 0\r\n    FACTOR: 2.0\r\n    SCALE_JITTER: 0\r\n    SIZE: 192\r\n  TRAIN:\r\n    DATASETS_NAME:\r\n    - LASOT\r\n    - GOT10K_vottrain\r\n    - TRACKINGNET\r\n    DATASETS_RATIO:\r\n    - 1\r\n    - 1\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 960\r\n  VAL:\r\n    DATASETS_NAME:\r\n    - GOT10K_official_val\r\n    DATASETS_RATIO:\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 10000\r\nMODEL:\r\n  PRETRAIN_FILE: \"mae_pretrain_vit_large.pth\"\r\n  EXTRA_MERGER: False\r\n  RETURN_INTER: False\r\n  DECODER:\r\n    TYPE: \"mask\"\r\n    MASK_RATIO: 0.75\r\n    EMBEDDIM: 512\r\n    DEPTH: 8\r\n    NUMHEADS: 16\r\n    MLPRATIO: 4\r\n  BACKBONE:\r\n    TYPE: vit_large_patch16_224\r\n    STRIDE: 16\r\n    EMBEDDIM: 1024\r\n  HEAD:\r\n    TYPE: PIX\r\n    NUM_CHANNELS: 1024\r\nTRAIN:\r\n  BACKBONE_MULTIPLIER: 0.1\r\n  DROP_PATH_RATE: 0.1\r\n  BATCH_SIZE: 4\r\n  EPOCH: 120\r\n  GIOU_WEIGHT: 2.0\r\n  L1_WEIGHT: 0.0\r\n  GRAD_CLIP_NORM: 0.1\r\n  LR: 0.00008\r\n  LR_DROP_EPOCH: 400\r\n  NUM_WORKER: 8\r\n  OPTIMIZER: ADAMW\r\n  PRINT_INTERVAL: 1\r\n  SCHEDULER:\r\n    TYPE: step\r\n    DECAY_RATE: 0.1\r\n  VAL_EPOCH_INTERVAL: 10\r\n  WEIGHT_DECAY: 0.05\r\n  AMP: False\r\nTEST:\r\n  EPOCH: 500\r\n  SEARCH_FACTOR: 5.0\r\n  SEARCH_SIZE: 384\r\n  TEMPLATE_FACTOR: 2.0\r\n  TEMPLATE_SIZE: 192"
  },
  {
    "path": "artrackv2_mindspore/experiments/ostrack/finetune_384_got.yaml",
    "content": "DATA:\r\n  MAX_SAMPLE_INTERVAL: 200\r\n  MEAN:\r\n  - 0.485\r\n  - 0.456\r\n  - 0.406\r\n  SEARCH:\r\n    CENTER_JITTER: 4.5\r\n    FACTOR: 5.0\r\n    SCALE_JITTER: 0.5\r\n    SIZE: 384\r\n    NUMBER: 17\r\n  STD:\r\n  - 0.229\r\n  - 0.224\r\n  - 0.225\r\n  TEMPLATE:\r\n    CENTER_JITTER: 0\r\n    FACTOR: 2.0\r\n    SCALE_JITTER: 0\r\n    SIZE: 192\r\n  TRAIN:\r\n    DATASETS_NAME:\r\n      - GOT10K_train_full\r\n    DATASETS_RATIO:\r\n      - 1\r\n    SAMPLE_PER_EPOCH: 960\r\n  VAL:\r\n    DATASETS_NAME:\r\n    - GOT10K_official_val\r\n    DATASETS_RATIO:\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 10000\r\nMODEL:\r\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\r\n  EXTRA_MERGER: False\r\n  RETURN_INTER: False\r\n  BACKBONE:\r\n    TYPE: vit_base_patch16_224\r\n    STRIDE: 16\r\n  HEAD:\r\n    TYPE: PIX\r\n    NUM_CHANNELS: 768\r\nTRAIN:\r\n  BACKBONE_MULTIPLIER: 0.1\r\n  DROP_PATH_RATE: 0.1\r\n  BATCH_SIZE: 4\r\n  EPOCH: 120\r\n  GIOU_WEIGHT: 2.0\r\n  L1_WEIGHT: 0.0\r\n  GRAD_CLIP_NORM: 0.1\r\n  LR: 0.000004\r\n  LR_DROP_EPOCH: 400\r\n  NUM_WORKER: 4\r\n  OPTIMIZER: ADAMW\r\n  PRINT_INTERVAL: 1\r\n  SCHEDULER:\r\n    TYPE: step\r\n    DECAY_RATE: 0.1\r\n  VAL_EPOCH_INTERVAL: 10\r\n  WEIGHT_DECAY: 0.05\r\n  AMP: False\r\nTEST:\r\n  EPOCH: 500\r\n  SEARCH_FACTOR: 5.0\r\n  SEARCH_SIZE: 384\r\n  TEMPLATE_FACTOR: 2.0\r\n  TEMPLATE_SIZE: 192"
  },
  {
    "path": "artrackv2_mindspore/experiments/ostrack/vitb_256_mae_32x4_ep300.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 3\n    FACTOR: 4.0\n    SCALE_JITTER: 0.25\n    SIZE: 256\n    NUMBER: 1\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 128\n  TRAIN:\n    DATASETS_NAME:\n      - GOT10K_train_full\n    DATASETS_RATIO:\n      - 1\n    SAMPLE_PER_EPOCH: 60000\n\n#  TRAIN:\n#    DATASETS_NAME:\n#    - LASOT\n#    - GOT10K_vottrain\n#    - COCO17\n#    - TRACKINGNET\n#    DATASETS_RATIO:\n#    - 1\n#    - 1\n#    - 1\n#    - 1\n#    SAMPLE_PER_EPOCH: 60000\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_votval\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  BACKBONE:\n    TYPE: vit_base_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: CENTER\n    NUM_CHANNELS: 256\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 32\n  EPOCH: 300\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 5.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.0004\n  LR_DROP_EPOCH: 240\n  NUM_WORKER: 10\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 50\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.1\n  VAL_EPOCH_INTERVAL: 20\n  WEIGHT_DECAY: 0.0001\n  AMP: False\nTEST:\n  EPOCH: 300\n  SEARCH_FACTOR: 4.0\n  SEARCH_SIZE: 256\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 128"
  },
  {
    "path": "artrackv2_mindspore/experiments/ostrack/vitb_256_mae_ce_32x4_ep300.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 3\n    FACTOR: 4.0\n    SCALE_JITTER: 0.25\n    SIZE: 256\n    NUMBER: 1\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 128\n#  TRAIN:\n#    DATASETS_NAME:\n#      - GOT10K_train_full\n#    DATASETS_RATIO:\n#      - 1\n#    SAMPLE_PER_EPOCH: 60000\n\n  TRAIN:\n    DATASETS_NAME:\n    - LASOT\n    - GOT10K_vottrain\n    - COCO17\n    - TRACKINGNET\n    DATASETS_RATIO:\n    - 1\n    - 1\n    - 1\n    - 1\n    SAMPLE_PER_EPOCH: 60000\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_votval\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  BACKBONE:\n    TYPE: vit_base_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: PIX\n    NUM_CHANNELS: 768\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  CE_START_EPOCH: 600  # candidate elimination start epoch\n  CE_WARM_EPOCH: 560  # candidate elimination warm up epoch\n  BATCH_SIZE: 48\n  EPOCH: 500\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 0.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.00008\n  LR_DROP_EPOCH: 400\n  NUM_WORKER: 8\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 10\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.1\n  VAL_EPOCH_INTERVAL: 20\n  WEIGHT_DECAY: 0.0001\n  AMP: False\nTEST:\n  EPOCH: 500\n  SEARCH_FACTOR: 4.0\n  SEARCH_SIZE: 256\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 128"
  },
  {
    "path": "artrackv2_mindspore/experiments/ostrack/vitb_256_mae_ce_32x4_got10k_ep100.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 3\n    FACTOR: 4.0\n    SCALE_JITTER: 0.25\n    SIZE: 256\n    NUMBER: 24\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 128\n  TRAIN:\n    DATASETS_NAME:\n    - LASOT\n    - GOT10K_vottrain\n    - TRACKINGNET\n    DATASETS_RATIO:\n    - 1\n    - 1\n    - 1\n    SAMPLE_PER_EPOCH: 1000\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_official_val\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  BACKBONE:\n    TYPE: vit_base_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: PIX\n    NUM_CHANNELS: 768\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 8\n  EPOCH: 120\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 0.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.000001\n  LR_DROP_EPOCH: 400\n  NUM_WORKER: 4\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 1\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.1\n  VAL_EPOCH_INTERVAL: 10\n  WEIGHT_DECAY: 0.0001\n  AMP: False\nTEST:\n  EPOCH: 30\n  SEARCH_FACTOR: 4.2\n  SEARCH_SIZE: 256\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 128\n"
  },
  {
    "path": "artrackv2_mindspore/experiments/ostrack/vitb_384_mae_32x4_ep300.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 4.5\n    FACTOR: 5.0\n    SCALE_JITTER: 0.5\n    SIZE: 384\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 192\n#  TRAIN:\n#    DATASETS_NAME:\n#      - GOT10K_train_full\n#    DATASETS_RATIO:\n#      - 1\n#    SAMPLE_PER_EPOCH: 60000\n\n  TRAIN:\n    DATASETS_NAME:\n    - LASOT\n    - GOT10K_vottrain\n    - COCO17\n    - TRACKINGNET\n    DATASETS_RATIO:\n    - 1\n    - 1\n    - 1\n    - 1\n    SAMPLE_PER_EPOCH: 60000\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_votval\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  BACKBONE:\n    TYPE: vit_base_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: CENTER\n    NUM_CHANNELS: 256\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 32\n  EPOCH: 300\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 5.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.0004\n  LR_DROP_EPOCH: 240\n  NUM_WORKER: 10\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 50\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.1\n  VAL_EPOCH_INTERVAL: 20\n  WEIGHT_DECAY: 0.0001\n  AMP: False\nTEST:\n  EPOCH: 300\n  SEARCH_FACTOR: 5.0\n  SEARCH_SIZE: 384\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 192"
  },
  {
    "path": "artrackv2_mindspore/experiments/ostrack/vitb_384_mae_ce_32x4_ep300.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 4.5\n    FACTOR: 5.0\n    SCALE_JITTER: 0.5\n    SIZE: 384\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 192\n#  TRAIN:\n#    DATASETS_NAME:\n#      - GOT10K_train_full\n#    DATASETS_RATIO:\n#      - 1\n#    SAMPLE_PER_EPOCH: 60000\n\n  TRAIN:\n    DATASETS_NAME:\n    - LASOT\n    - GOT10K_vottrain\n    - COCO17\n    - TRACKINGNET\n    DATASETS_RATIO:\n    - 1\n    - 1\n    - 1\n    - 1\n    SAMPLE_PER_EPOCH: 60000\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_votval\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  BACKBONE:\n    TYPE: vit_base_patch16_224_ce\n    STRIDE: 16\n    CE_LOC: [3, 6, 9]\n    CE_KEEP_RATIO: [0.7, 0.7, 0.7]\n    CE_TEMPLATE_RANGE: 'CTR_POINT'  # choose between ALL, CTR_POINT, CTR_REC, GT_BOX\n  HEAD:\n    TYPE: CENTER\n    NUM_CHANNELS: 256\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 32\n  EPOCH: 300\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 5.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.0004\n  LR_DROP_EPOCH: 240\n  NUM_WORKER: 10\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 50\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.1\n  VAL_EPOCH_INTERVAL: 20\n  WEIGHT_DECAY: 0.0001\n  AMP: False\nTEST:\n  EPOCH: 300\n  SEARCH_FACTOR: 5.0\n  SEARCH_SIZE: 384\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 192"
  },
  {
    "path": "artrackv2_mindspore/experiments/ostrack/vitb_384_mae_ce_32x4_got10k_ep100.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 4.5\n    FACTOR: 5.0\n    SCALE_JITTER: 0.5\n    SIZE: 384\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 192\n  TRAIN:\n    DATASETS_NAME:\n      - GOT10K_train_full\n    DATASETS_RATIO:\n      - 1\n    SAMPLE_PER_EPOCH: 60000\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_official_val\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  BACKBONE:\n    TYPE: vit_base_patch16_224_ce\n    STRIDE: 16\n    CE_LOC: [3, 6, 9]\n    CE_KEEP_RATIO: [0.7, 0.7, 0.7]\n    CE_TEMPLATE_RANGE: 'CTR_POINT'  # choose between ALL, CTR_POINT, CTR_REC, GT_BOX\n  HEAD:\n    TYPE: CENTER\n    NUM_CHANNELS: 256\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  CE_START_EPOCH: 20  # candidate elimination start epoch\n  CE_WARM_EPOCH: 50  # candidate elimination warm up epoch\n  BATCH_SIZE: 32\n  EPOCH: 100\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 5.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.0004\n  LR_DROP_EPOCH: 80\n  NUM_WORKER: 10\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 50\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.1\n  VAL_EPOCH_INTERVAL: 20\n  WEIGHT_DECAY: 0.0001\n  AMP: False\nTEST:\n  EPOCH: 100\n  SEARCH_FACTOR: 5.0\n  SEARCH_SIZE: 384\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 192"
  },
  {
    "path": "artrackv2_mindspore/external/AR/README.md",
    "content": "# Alpha-Refine\n## Introduction\nAlpha-Refine is the winner of the VOT Real-Time Challenge 2020, which has great ability to predict high-quality masks. \nIn this work, we combine the STARK tracker with Alpha-Refine to test on the VOT2020 benchamark.\n\n## Installation\nAfter the environment has been installed according to the README.md of STARK, you only need to install a few more packages as shown below.\n\n* Install ninja-build for Precise ROI pooling  \n```bash\nsudo apt-get install ninja-build\n```\nIn case of issues, we refer to https://github.com/vacancy/PreciseRoIPooling.\n\n* Install the Precise ROI pooling\n```\ncd ltr/external\ngit clone https://github.com/vacancy/PreciseRoIPooling.git\ncd ../..\n```\n* Add the project path to environment variables\n```\nexport PYTHONPATH=<absolute_path_of_AR>:$PYTHONPATH\n```\n\n* Setup the environment  \n\nCreate the default environment setting files. \n```bash\n# Environment settings for pytracking. Saved at pytracking/evaluation/local.py\npython -c \"from pytracking.evaluation.environment import create_default_local_file; create_default_local_file()\"\n\n# Environment settings for ltr. Saved at ltr/admin/local.py\npython -c \"from ltr.admin.environment import create_default_local_file; create_default_local_file()\"\n```\n\nYou can modify these files to set the paths to datasets, results paths etc.  \n\n* Download the pre-trained Alpha-Refine network  \nDownload the network for [Alpha-Refine](https://drive.google.com/open?id=1qOQRfaRMbQ2nmgX1NFjoQHfXOAn609QM) \nand put it under the ltr/checkpoints/ltr/ARcm_seg/ARcm_coco_seg_only_mask_384 dir.\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/README.md",
    "content": "# LTR\n\nA general PyTorch based framework for learning tracking representations. \n## Table of Contents\n\n* [Quick Start](#quick-start)\n* [Overview](#overview)\n* [Trackers](#trackers)\n   * [PrDiMP](#PrDiMP)\n   * [DiMP](#DiMP)\n   * [ATOM](#ATOM)\n* [Training your own networks](#training-your-own-networks)\n\n## Quick Start\nThe installation script will automatically generate a local configuration file  \"admin/local.py\". In case the file was not generated, run ```admin.environment.create_default_local_file()``` to generate it. Next, set the paths to the training workspace, \ni.e. the directory where the checkpoints will be saved. Also set the paths to the datasets you want to use. If all the dependencies have been correctly installed, you can train a network using the run_training.py script in the correct conda environment.  \n```bash\nconda activate pytracking\npython run_training.py train_module train_name\n```\nHere, ```train_module``` is the sub-module inside ```train_settings``` and ```train_name``` is the name of the train setting file to be used.\n\nFor example, you can train using the included default ATOM settings by running:\n```bash\npython run_training bbreg atom_default\n```\n\n\n## Overview\nThe framework consists of the following sub-modules.  \n - [actors](actors): Contains the actor classes for different trainings. The actor class is responsible for passing the input data through the network can calculating losses.  \n - [admin](admin): Includes functions for loading networks, tensorboard etc. and also contains environment settings.  \n - [dataset](dataset): Contains integration of a number of training datasets, namely [TrackingNet](https://tracking-net.org/), [GOT-10k](http://got-10k.aitestunion.com/), [LaSOT](https://cis.temple.edu/lasot/), \n [ImageNet-VID](http://image-net.org/), [DAVIS](https://davischallenge.org), [YouTube-VOS](https://youtube-vos.org), [MS-COCO](http://cocodataset.org/#home), [SBD](http://home.bharathh.info/pubs/codes/SBD), [LVIS](https://www.lvisdataset.org), [ECSSD](http://www.cse.cuhk.edu.hk/leojia/projects/hsaliency/dataset.html), [MSRA10k](https://mmcheng.net/msra10k), and [HKU-IS](https://sites.google.com/site/ligb86/hkuis). Additionally, it includes modules to generate synthetic videos from image datasets. \n - [data_specs](data_specs): Information about train/val splits of different datasets.   \n - [data](data): Contains functions for processing data, e.g. loading images, data augmentations, sampling frames from videos.  \n - [external](external): External libraries needed for training. Added as submodules.  \n - [models](models): Contains different layers and network definitions.  \n - [trainers](trainers): The main class which runs the training.  \n - [train_settings](train_settings): Contains settings files, specifying the training of a network.   \n \n## Trackers\n The framework currently contains the training code for the following trackers.\n\n### PrDiMP\n The following setting files can be used train the DiMP networks, or to know the exact training details. \n - [dimp.prdimp18](train_settings/dimp/prdimp18.py): The default settings used for training the PrDiMP model with ResNet-18 backbone.\n - [dimp.prdimp50](train_settings/dimp/prdimp50.py): The default settings used for training the PrDiMP model with ResNet-50 backbone. \n - [dimp.super_dimp](train_settings/dimp/super_dimp.py): Combines the bounding-box regressor of PrDiMP with the standard DiMP classifier and better training and inference settings. 
\n \n### DiMP\n The following setting files can be used to train the DiMP networks, or to know the exact training details. \n - [dimp.dimp18](train_settings/dimp/dimp18.py): The default settings used for training the DiMP model with ResNet-18 backbone.\n - [dimp.dimp50](train_settings/dimp/dimp50.py): The default settings used for training the DiMP model with ResNet-50 backbone.\n \n### ATOM\n The following setting files can be used to train the ATOM network, or to know the exact training details. \n - [bbreg.atom_paper](train_settings/bbreg/atom_paper.py): The settings used in the paper for training the network in ATOM.\n - [bbreg.atom](train_settings/bbreg/atom.py): Newer settings used for training the network in ATOM, also utilizing the GOT10k dataset.\n - [bbreg.atom_prob_ml](train_settings/bbreg/atom_prob_ml.py): Settings for ATOM with the probabilistic bounding box regression proposed in [this paper](https://arxiv.org/abs/1909.12297). \n - [bbreg.atom_paper](train_settings/bbreg/atom_paper.py): The baseline ATOM* setting evaluated in [this paper](https://arxiv.org/abs/1909.12297).  \n \n## Training your own networks\nTo train a custom network using the toolkit, the following components need to be specified in the train settings. For reference, see [atom.py](train_settings/bbreg/atom.py) and the sketch after this list.  \n- Datasets: The datasets to be used for training. A number of standard tracking datasets are already available in the ```dataset``` module.  \n- Processing: This function should perform the necessary post-processing of the data, e.g. cropping of target region, data augmentations etc.  \n- Sampler: Determines how the frames are sampled from a video sequence to form the batches.  \n- Network: The network module to be trained.  \n- Objective: The training objective.  \n- Actor: The trainer passes the training batch to the actor, which is responsible for passing the data through the network correctly, and calculating the training loss.  \n- Optimizer: Optimizer to be used, e.g. Adam.  \n- Trainer: The main class which runs the epochs and saves checkpoints.\n
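\nThe sketch below shows how these components fit together in a hypothetical train settings file (the file and setting names are illustrative, not files shipped with the framework); a settings file is simply a module exposing a ```run(settings)``` function:\n```python\n# Hypothetical ltr/train_settings/bbreg/my_atom.py -- a sketch only.\n# See train_settings/bbreg/atom.py for a complete, real example.\n\ndef run(settings):\n    settings.description = 'Custom ATOM-style training.'\n    settings.batch_size = 64\n    # 1. Datasets:   build training/validation datasets from the dataset module.\n    # 2. Processing: e.g. ltr.data.processing.ATOMProcessing to crop search regions\n    #                and generate proposals.\n    # 3. Sampler + LTRLoader: sample frames from the videos and batch them.\n    # 4. Network:    the model to train, built by a @model_constructor function.\n    # 5. Objective:  the training loss.\n    # 6. Actor:      e.g. ltr.actors.AtomActor(net=net, objective=objective).\n    # 7. Optimizer:  e.g. torch.optim.Adam on the actor's parameters.\n    # 8. Trainer:    the trainer class that runs the epochs and saves checkpoints.\n```\n"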
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/__init__.py",
    "content": "from .admin.loading import load_network\nfrom .admin.model_constructor import model_constructor\nfrom .admin.multigpu import MultiGPU"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/actors/__init__.py",
    "content": "from .base_actor import BaseActor\nfrom .bbreg import AtomActor\nfrom .tracking import DiMPActor"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/actors/base_actor.py",
    "content": "from pytracking import TensorDict\n\n\nclass BaseActor:\n    \"\"\" Base class for actor. The actor class handles the passing of the data through the network\n    and calculation the loss\"\"\"\n    def __init__(self, net, objective):\n        \"\"\"\n        args:\n            net - The network to train\n            objective - The loss function\n        \"\"\"\n        self.net = net\n        self.objective = objective\n\n    def __call__(self, data: TensorDict):\n        \"\"\" Called in each training iteration. Should pass in input data through the network, calculate the loss, and\n        return the training stats for the input data\n        args:\n            data - A TensorDict containing all the necessary data blocks.\n\n        returns:\n            loss    - loss for the input data\n            stats   - a dict containing detailed losses\n        \"\"\"\n        raise NotImplementedError\n\n    def to(self, device):\n        \"\"\" Move the network to device\n        args:\n            device - device to use. 'cpu' or 'cuda'\n        \"\"\"\n        self.net.to(device)\n\n    def train(self, mode=True):\n        \"\"\" Set whether the network is in train mode.\n        args:\n            mode (True) - Bool specifying whether in training mode.\n        \"\"\"\n        self.net.train(mode)\n\n    def eval(self):\n        \"\"\" Set network to eval mode\"\"\"\n        self.train(False)"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/actors/bbreg.py",
    "content": "from . import BaseActor\n\n\nclass AtomActor(BaseActor):\n    \"\"\" Actor for training the IoU-Net in ATOM\"\"\"\n    def __call__(self, data):\n        \"\"\"\n        args:\n            data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno',\n                    'test_proposals' and 'proposal_iou'.\n\n        returns:\n            loss    - the training loss\n            states  -  dict containing detailed losses\n        \"\"\"\n        # Run network to obtain IoU prediction for each proposal in 'test_proposals'\n        iou_pred = self.net(data['train_images'], data['test_images'], data['train_anno'], data['test_proposals'])\n\n        iou_pred = iou_pred.view(-1, iou_pred.shape[2])\n        iou_gt = data['proposal_iou'].view(-1, data['proposal_iou'].shape[2])\n\n        # Compute loss\n        loss = self.objective(iou_pred, iou_gt)\n\n        # Return training stats\n        stats = {'Loss/total': loss.item(),\n                 'Loss/iou': loss.item()}\n\n        return loss, stats\n\n\nclass AtomBBKLActor(BaseActor):\n    \"\"\" Actor for training the IoU-Net in ATOM with BBKL\"\"\"\n    def __call__(self, data):\n        \"\"\"\n        args:\n            data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno',\n                    'test_proposals', 'proposal_density', and 'gt_density'.\n\n        returns:\n            loss    - the training loss\n            states  -  dict containing detailed losses\n        \"\"\"\n        # Run network to obtain IoU prediction for each proposal in 'test_proposals'\n        bb_scores = self.net(data['train_images'], data['test_images'], data['train_anno'], data['test_proposals'])\n\n        bb_scores = bb_scores.view(-1, bb_scores.shape[2])\n        proposal_density = data['proposal_density'].view(-1, data['proposal_density'].shape[2])\n        gt_density = data['gt_density'].view(-1, data['gt_density'].shape[2])\n\n        # Compute loss\n        loss = self.objective(bb_scores, sample_density=proposal_density, gt_density=gt_density, mc_dim=1)\n\n        # Return training stats\n        stats = {'Loss/total': loss.item(),\n                 'Loss/bb_ce': loss.item()}\n\n        return loss, stats\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/actors/tracking.py",
    "content": "from . import BaseActor\nimport torch\n\n\nclass DiMPActor(BaseActor):\n    \"\"\"Actor for training the DiMP network.\"\"\"\n    def __init__(self, net, objective, loss_weight=None):\n        super().__init__(net, objective)\n        if loss_weight is None:\n            loss_weight = {'iou': 1.0, 'test_clf': 1.0}\n        self.loss_weight = loss_weight\n\n    def __call__(self, data):\n        \"\"\"\n        args:\n            data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno',\n                    'test_proposals', 'proposal_iou' and 'test_label'.\n\n        returns:\n            loss    - the training loss\n            stats  -  dict containing detailed losses\n        \"\"\"\n        # Run network\n        target_scores, iou_pred = self.net(train_imgs=data['train_images'],\n                                           test_imgs=data['test_images'],\n                                           train_bb=data['train_anno'],\n                                           test_proposals=data['test_proposals'])\n\n        # Classification losses for the different optimization iterations\n        clf_losses_test = [self.objective['test_clf'](s, data['test_label'], data['test_anno']) for s in target_scores]\n\n        # Loss of the final filter\n        clf_loss_test = clf_losses_test[-1]\n        loss_target_classifier = self.loss_weight['test_clf'] * clf_loss_test\n\n        # Compute loss for ATOM IoUNet\n        loss_iou = self.loss_weight['iou'] * self.objective['iou'](iou_pred, data['proposal_iou'])\n\n        # Loss for the initial filter iteration\n        loss_test_init_clf = 0\n        if 'test_init_clf' in self.loss_weight.keys():\n            loss_test_init_clf = self.loss_weight['test_init_clf'] * clf_losses_test[0]\n\n        # Loss for the intermediate filter iterations\n        loss_test_iter_clf = 0\n        if 'test_iter_clf' in self.loss_weight.keys():\n            test_iter_weights = self.loss_weight['test_iter_clf']\n            if isinstance(test_iter_weights, list):\n                loss_test_iter_clf = sum([a*b for a, b in zip(test_iter_weights, clf_losses_test[1:-1])])\n            else:\n                loss_test_iter_clf = (test_iter_weights / (len(clf_losses_test) - 2)) * sum(clf_losses_test[1:-1])\n\n        # Total loss\n        loss = loss_iou + loss_target_classifier + loss_test_init_clf + loss_test_iter_clf\n\n        # Log stats\n        stats = {'Loss/total': loss.item(),\n                 'Loss/iou': loss_iou.item(),\n                 'Loss/target_clf': loss_target_classifier.item()}\n        if 'test_init_clf' in self.loss_weight.keys():\n            stats['Loss/test_init_clf'] = loss_test_init_clf.item()\n        if 'test_iter_clf' in self.loss_weight.keys():\n            stats['Loss/test_iter_clf'] = loss_test_iter_clf.item()\n        stats['ClfTrain/test_loss'] = clf_loss_test.item()\n        if len(clf_losses_test) > 0:\n            stats['ClfTrain/test_init_loss'] = clf_losses_test[0].item()\n            if len(clf_losses_test) > 2:\n                stats['ClfTrain/test_iter_loss'] = sum(clf_losses_test[1:-1]).item() / (len(clf_losses_test) - 2)\n\n        return loss, stats\n\n\nclass KLDiMPActor(BaseActor):\n    \"\"\"Actor for training the DiMP network.\"\"\"\n    def __init__(self, net, objective, loss_weight=None):\n        super().__init__(net, objective)\n        if loss_weight is None:\n            loss_weight = {'bb_ce': 1.0}\n        self.loss_weight = loss_weight\n\n    def __call__(self, 
data):\n        \"\"\"\n        args:\n            data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno', 'test_anno',\n                    'test_proposals', 'proposal_density' and 'gt_density', as well as 'test_label' and/or\n                    'test_label_density' when the corresponding classification losses are configured.\n\n        returns:\n            loss    - the training loss\n            stats  -  dict containing detailed losses\n        \"\"\"\n        # Run network\n        target_scores, bb_scores = self.net(train_imgs=data['train_images'],\n                                            test_imgs=data['test_images'],\n                                            train_bb=data['train_anno'],\n                                            test_proposals=data['test_proposals'])\n\n        # Reshape bb reg variables\n        is_valid = data['test_anno'][:, :, 0] < 99999.0\n        bb_scores = bb_scores[is_valid, :]\n        proposal_density = data['proposal_density'][is_valid, :]\n        gt_density = data['gt_density'][is_valid, :]\n\n        # Compute loss\n        bb_ce = self.objective['bb_ce'](bb_scores, sample_density=proposal_density, gt_density=gt_density, mc_dim=1)\n        loss_bb_ce = self.loss_weight['bb_ce'] * bb_ce\n\n        # If standard DiMP classifier is used\n        loss_target_classifier = 0\n        loss_test_init_clf = 0\n        loss_test_iter_clf = 0\n        if 'test_clf' in self.loss_weight.keys():\n            # Classification losses for the different optimization iterations\n            clf_losses_test = [self.objective['test_clf'](s, data['test_label'], data['test_anno']) for s in target_scores]\n\n            # Loss of the final filter\n            clf_loss_test = clf_losses_test[-1]\n            loss_target_classifier = self.loss_weight['test_clf'] * clf_loss_test\n\n            # Loss for the initial filter iteration\n            if 'test_init_clf' in self.loss_weight.keys():\n                loss_test_init_clf = self.loss_weight['test_init_clf'] * clf_losses_test[0]\n\n            # Loss for the intermediate filter iterations\n            if 'test_iter_clf' in self.loss_weight.keys():\n                test_iter_weights = self.loss_weight['test_iter_clf']\n                if isinstance(test_iter_weights, list):\n                    loss_test_iter_clf = sum([a * b for a, b in zip(test_iter_weights, clf_losses_test[1:-1])])\n                else:\n                    loss_test_iter_clf = (test_iter_weights / (len(clf_losses_test) - 2)) * sum(clf_losses_test[1:-1])\n\n        # If PrDiMP classifier is used\n        loss_clf_ce = 0\n        loss_clf_ce_init = 0\n        loss_clf_ce_iter = 0\n        if 'clf_ce' in self.loss_weight.keys():\n            # Classification losses for the different optimization iterations\n            clf_ce_losses = [self.objective['clf_ce'](s, data['test_label_density'], grid_dim=(-2,-1)) for s in target_scores]\n\n            # Loss of the final filter\n            clf_ce = clf_ce_losses[-1]\n            loss_clf_ce = self.loss_weight['clf_ce'] * clf_ce\n\n            # Loss for the initial filter iteration\n            if 'clf_ce_init' in self.loss_weight.keys():\n                loss_clf_ce_init = self.loss_weight['clf_ce_init'] * clf_ce_losses[0]\n\n            # Loss for the intermediate filter iterations\n            if 'clf_ce_iter' in self.loss_weight.keys() and len(clf_ce_losses) > 2:\n                test_iter_weights = self.loss_weight['clf_ce_iter']\n                if isinstance(test_iter_weights, list):\n                    loss_clf_ce_iter = sum([a * b for a, b in zip(test_iter_weights, clf_ce_losses[1:-1])])\n                else:\n                    loss_clf_ce_iter = (test_iter_weights / (len(clf_ce_losses) - 2)) * sum(clf_ce_losses[1:-1])\n\n        # Total loss\n        loss = loss_bb_ce + loss_clf_ce + loss_clf_ce_init + loss_clf_ce_iter + \\\n                            loss_target_classifier + loss_test_init_clf + loss_test_iter_clf\n\n        if torch.isinf(loss) or torch.isnan(loss):\n            raise Exception('ERROR: Loss was nan or inf!!!')\n\n        # Log stats\n        stats = {'Loss/total': loss.item(),\n                 'Loss/bb_ce': bb_ce.item(),\n                 'Loss/loss_bb_ce': loss_bb_ce.item()}\n        if 'test_clf' in self.loss_weight.keys():\n            stats['Loss/target_clf'] = loss_target_classifier.item()\n        if 'test_init_clf' in self.loss_weight.keys():\n            stats['Loss/test_init_clf'] = loss_test_init_clf.item()\n        if 'test_iter_clf' in self.loss_weight.keys():\n            stats['Loss/test_iter_clf'] = loss_test_iter_clf.item()\n        if 'clf_ce' in self.loss_weight.keys():\n            stats['Loss/clf_ce'] = loss_clf_ce.item()\n        if 'clf_ce_init' in self.loss_weight.keys():\n            stats['Loss/clf_ce_init'] = loss_clf_ce_init.item()\n        if 'clf_ce_iter' in self.loss_weight.keys() and len(clf_ce_losses) > 2:\n            stats['Loss/clf_ce_iter'] = loss_clf_ce_iter.item()\n\n        if 'test_clf' in self.loss_weight.keys():\n            stats['ClfTrain/test_loss'] = clf_loss_test.item()\n            if len(clf_losses_test) > 0:\n                stats['ClfTrain/test_init_loss'] = clf_losses_test[0].item()\n                if len(clf_losses_test) > 2:\n                    stats['ClfTrain/test_iter_loss'] = sum(clf_losses_test[1:-1]).item() / (len(clf_losses_test) - 2)\n\n        if 'clf_ce' in self.loss_weight.keys():\n            stats['ClfTrain/clf_ce'] = clf_ce.item()\n            if len(clf_ce_losses) > 0:\n                stats['ClfTrain/clf_ce_init'] = clf_ce_losses[0].item()\n                if len(clf_ce_losses) > 2:\n                    stats['ClfTrain/clf_ce_iter'] = sum(clf_ce_losses[1:-1]).item() / (len(clf_ce_losses) - 2)\n\n        return loss, stats\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/admin/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/admin/environment.py",
    "content": "import importlib\nimport os\nfrom collections import OrderedDict\n\n\ndef create_default_local_file():\n    path = os.path.join(os.path.dirname(__file__), 'local.py')\n\n    empty_str = '\\'\\''\n    default_settings = OrderedDict({\n        'workspace_dir': empty_str,\n        'tensorboard_dir': 'self.workspace_dir + \\'/tensorboard/\\'',\n        'lasot_dir': empty_str,\n        'got10k_dir': empty_str,\n        'trackingnet_dir': empty_str,\n        'coco_dir': empty_str,\n        'lvis_dir': empty_str,\n        'sbd_dir': empty_str,\n        'imagenet_dir': empty_str,\n        'imagenetdet_dir': empty_str,\n        'ecssd_dir': empty_str,\n        'hkuis_dir': empty_str,\n        'msra10k_dir': empty_str,\n        'davis_dir': empty_str,\n        'youtubevos_dir': empty_str})\n\n    comment = {'workspace_dir': 'Base directory for saving network checkpoints.',\n               'tensorboard_dir': 'Directory for tensorboard files.'}\n\n    with open(path, 'w') as f:\n        f.write('class EnvironmentSettings:\\n')\n        f.write('    def __init__(self):\\n')\n\n        for attr, attr_val in default_settings.items():\n            comment_str = None\n            if attr in comment:\n                comment_str = comment[attr]\n            if comment_str is None:\n                f.write('        self.{} = {}\\n'.format(attr, attr_val))\n            else:\n                f.write('        self.{} = {}    # {}\\n'.format(attr, attr_val, comment_str))\n\n\ndef env_settings():\n    env_module_name = 'ltr.admin.local'\n    try:\n        env_module = importlib.import_module(env_module_name)\n        return env_module.EnvironmentSettings()\n    except:\n        env_file = os.path.join(os.path.dirname(__file__), 'local.py')\n\n        create_default_local_file()\n        raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\\n Go to \"{}\" and set all the paths you need. Then try to run again.'.format(env_file))\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/admin/loading.py",
    "content": "import torch\nimport os\nimport sys\nfrom pathlib import Path\nimport importlib\nimport inspect\nfrom ltr.admin import settings as ws_settings\n\n\ndef load_trained_network(workspace_dir, network_path, checkpoint=None):\n    \"\"\"OUTDATED. Use load_pretrained instead!\"\"\"\n    checkpoint_dir = os.path.join(workspace_dir, 'checkpoints')\n    directory = '{}/{}'.format(checkpoint_dir, network_path)\n\n    net, _ = load_network(directory, checkpoint)\n    return net\n\n\ndef load_pretrained(module, name, checkpoint=None, **kwargs):\n    \"\"\"Load a network trained using the LTR framework. This is useful when you want to initialize your new network with\n    a previously trained model.\n    args:\n        module  -  Name of the train script module. I.e. the name of the folder in ltr/train_scripts.\n        name  -  The name of the train_script.\n        checkpoint  -  You can supply the checkpoint number or the full path to the checkpoint file (see load_network).\n        **kwargs  -  These are passed to load_network (see that function).\n    \"\"\"\n\n    settings = ws_settings.Settings()\n    network_dir = os.path.join(settings.env.workspace_dir, 'checkpoints', 'ltr', module, name)\n    return load_network(network_dir=network_dir, checkpoint=checkpoint, **kwargs)\n\n\ndef load_network(network_dir=None, checkpoint=None, constructor_fun_name=None, constructor_module=None, **kwargs):\n    \"\"\"Loads a network checkpoint file.\n\n    Can be called in two different ways:\n        load_checkpoint(network_dir):\n            Loads the checkpoint file given by the path. If checkpoint_dir is a directory,\n            it tries to find the latest checkpoint in that directory.\n\n        load_checkpoint(network_dir, checkpoint=epoch_num):\n            Loads the network at the given epoch number (int).\n\n    The extra keyword arguments are supplied to the network constructor to replace saved ones.\n    \"\"\"\n\n    if network_dir is not None:\n        net_path = Path(network_dir)\n    else:\n        net_path = None\n\n    if net_path.is_file():\n        checkpoint = str(net_path)\n\n    if checkpoint is None:\n        # Load most recent checkpoint\n        checkpoint_list = sorted(net_path.glob('*.pth.tar'))\n        if checkpoint_list:\n            checkpoint_path = checkpoint_list[-1]\n        else:\n            raise Exception('No matching checkpoint file found')\n    elif isinstance(checkpoint, int):\n        # Checkpoint is the epoch number\n        checkpoint_list = sorted(net_path.glob('*_ep{:04d}.pth.tar'.format(checkpoint)))\n        if not checkpoint_list or len(checkpoint_list) == 0:\n            raise Exception('No matching checkpoint file found')\n        if len(checkpoint_list) > 1:\n            raise Exception('Multiple matching checkpoint files found')\n        else:\n            checkpoint_path = checkpoint_list[0]\n    elif isinstance(checkpoint, str):\n        # Checkpoint is the path\n        checkpoint_path = os.path.expanduser(checkpoint)\n    else:\n        raise TypeError\n\n    # Load network\n    checkpoint_dict = torch_load_legacy(checkpoint_path)\n\n    # Construct network model\n    if 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None:\n        net_constr = checkpoint_dict['constructor']\n        if constructor_fun_name is not None:\n            net_constr.fun_name = constructor_fun_name\n        if constructor_module is not None:\n            net_constr.fun_module = constructor_module\n        # Legacy networks before 
refactoring\n        if net_constr.fun_module.startswith('dlframework.'):\n            net_constr.fun_module = net_constr.fun_module[len('dlframework.'):]\n        net_fun = getattr(importlib.import_module(net_constr.fun_module), net_constr.fun_name)\n        net_fun_args = list(inspect.signature(net_fun).parameters.keys())\n        for arg, val in kwargs.items():\n            if arg in net_fun_args:\n                net_constr.kwds[arg] = val\n            else:\n                print('WARNING: Keyword argument \"{}\" not found when loading network. It was ignored.'.format(arg))\n        net = net_constr.get()\n    else:\n        raise RuntimeError('No constructor for the given network.')\n\n    net.load_state_dict(checkpoint_dict['net'])\n\n    net.constructor = checkpoint_dict['constructor']\n    if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:\n        net.info = checkpoint_dict['net_info']\n\n    return net, checkpoint_dict\n\n\ndef load_weights(net, path, strict=True):\n    checkpoint_dict = torch.load(path)\n    weight_dict = checkpoint_dict['net']\n    net.load_state_dict(weight_dict, strict=strict)\n    return net\n\n\ndef torch_load_legacy(path):\n    \"\"\"Load network with legacy environment.\"\"\"\n\n    # Setup legacy env (for older networks)\n    _setup_legacy_env()\n\n    # Load network\n    checkpoint_dict = torch.load(path, map_location='cpu')\n\n    # Cleanup legacy\n    _cleanup_legacy_env()\n\n    return checkpoint_dict\n\n\ndef _setup_legacy_env():\n    importlib.import_module('ltr')\n    sys.modules['dlframework'] = sys.modules['ltr']\n    sys.modules['dlframework.common'] = sys.modules['ltr']\n    importlib.import_module('ltr.admin')\n    sys.modules['dlframework.common.utils'] = sys.modules['ltr.admin']\n    for m in ('model_constructor', 'stats', 'settings', 'local'):\n        importlib.import_module('ltr.admin.' + m)\n        sys.modules['dlframework.common.utils.' + m] = sys.modules['ltr.admin.' + m]\n\n\ndef _cleanup_legacy_env():\n    del_modules = []\n    for m in sys.modules.keys():\n        if m.startswith('dlframework'):\n            del_modules.append(m)\n    for m in del_modules:\n        del sys.modules[m]\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/admin/model_constructor.py",
    "content": "from functools import wraps\nimport importlib\n\n\ndef model_constructor(f):\n    \"\"\" Wraps the function 'f' which returns the network. An extra field 'constructor' is added to the network returned\n    by 'f'. This field contains an instance of the  'NetConstructor' class, which contains the information needed to\n    re-construct the network, such as the name of the function 'f', the function arguments etc. Thus, the network can\n    be easily constructed from a saved checkpoint by calling NetConstructor.get() function.\n    \"\"\"\n    @wraps(f)\n    def f_wrapper(*args, **kwds):\n        net_constr = NetConstructor(f.__name__, f.__module__, args, kwds)\n        output = f(*args, **kwds)\n        if isinstance(output, (tuple, list)):\n            # Assume first argument is the network\n            output[0].constructor = net_constr\n        else:\n            output.constructor = net_constr\n        return output\n    return f_wrapper\n\n\nclass NetConstructor:\n    \"\"\" Class to construct networks. Takes as input the function name (e.g. atom_resnet18), the name of the module\n    which contains the network function (e.g. ltr.models.bbreg.atom) and the arguments for the network\n    function. The class object can then be stored along with the network weights to re-construct the network.\"\"\"\n    def __init__(self, fun_name, fun_module, args, kwds):\n        \"\"\"\n        args:\n            fun_name - The function which returns the network\n            fun_module - the module which contains the network function\n            args - arguments which are passed to the network function\n            kwds - arguments which are passed to the network function\n        \"\"\"\n        self.fun_name = fun_name\n        self.fun_module = fun_module\n        self.args = args\n        self.kwds = kwds\n\n    def get(self):\n        \"\"\" Rebuild the network by calling the network function with the correct arguments. \"\"\"\n        net_module = importlib.import_module(self.fun_module)\n        net_fun = getattr(net_module, self.fun_name)\n        return net_fun(*self.args, **self.kwds)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/admin/multigpu.py",
    "content": "import torch.nn as nn\n\n\ndef is_multi_gpu(net):\n    return isinstance(net, (MultiGPU, nn.DataParallel))\n\n\nclass MultiGPU(nn.DataParallel):\n    \"\"\"Wraps a network to allow simple multi-GPU training.\"\"\"\n    def __getattr__(self, item):\n        try:\n            return super().__getattr__(item)\n        except:\n            pass\n        return getattr(self.module, item)"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/admin/settings.py",
    "content": "from ltr.admin.environment import env_settings\n\n\nclass Settings:\n    \"\"\" Training settings, e.g. the paths to datasets and networks.\"\"\"\n    def __init__(self):\n        self.set_default()\n\n    def set_default(self):\n        self.env = env_settings()\n        self.use_gpu = True\n\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/admin/stats.py",
    "content": "\n\nclass StatValue:\n    def __init__(self):\n        self.clear()\n\n    def reset(self):\n        self.val = 0\n\n    def clear(self):\n        self.reset()\n        self.history = []\n\n    def update(self, val):\n        self.val = val\n        self.history.append(self.val)\n\n\nclass AverageMeter(object):\n    \"\"\"Computes and stores the average and current value\"\"\"\n    def __init__(self):\n        self.clear()\n        self.has_new_data = False\n\n    def reset(self):\n        self.avg = 0\n        self.val = 0\n        self.sum = 0\n        self.count = 0\n\n    def clear(self):\n        self.reset()\n        self.history = []\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n\n    def new_epoch(self):\n        if self.count > 0:\n            self.history.append(self.avg)\n            self.reset()\n            self.has_new_data = True\n        else:\n            self.has_new_data = False\n\n\ndef topk_accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    single_input = not isinstance(topk, (tuple, list))\n    if single_input:\n        topk = (topk,)\n\n    maxk = max(topk)\n    batch_size = target.size(0)\n\n    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)[0]\n        res.append(correct_k * 100.0 / batch_size)\n\n    if single_input:\n        return res[0]\n\n    return res\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/admin/tensorboard.py",
    "content": "import os\nfrom collections import OrderedDict\ntry:\n    from torch.utils.tensorboard import SummaryWriter\nexcept:\n    print('WARNING: You are using tensorboardX instead sis you have a too old pytorch version.')\n    from tensorboardX import SummaryWriter\n\n\nclass TensorboardWriter:\n    def __init__(self, directory, loader_names):\n        self.directory = directory\n        self.writer = OrderedDict({name: SummaryWriter(os.path.join(self.directory, name)) for name in loader_names})\n\n    def write_info(self, module_name, script_name, description):\n        tb_info_writer = SummaryWriter(os.path.join(self.directory, 'info'))\n        tb_info_writer.add_text('Modulet_name', module_name)\n        tb_info_writer.add_text('Script_name', script_name)\n        tb_info_writer.add_text('Description', description)\n        tb_info_writer.close()\n\n    def write_epoch(self, stats: OrderedDict, epoch: int, ind=-1):\n        for loader_name, loader_stats in stats.items():\n            if loader_stats is None:\n                continue\n            for var_name, val in loader_stats.items():\n                if hasattr(val, 'history') and getattr(val, 'has_new_data', True):\n                    self.writer[loader_name].add_scalar(var_name, val.history[ind], epoch)"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/data/__init__.py",
    "content": "from .loader import LTRLoader"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/data/bounding_box_utils.py",
    "content": "import torch\n\n\ndef rect_to_rel(bb, sz_norm=None):\n    \"\"\"Convert standard rectangular parametrization of the bounding box [x, y, w, h]\n    to relative parametrization [cx/sw, cy/sh, log(w), log(h)], where [cx, cy] is the center coordinate.\n    args:\n        bb  -  N x 4 tensor of boxes.\n        sz_norm  -  [N] x 2 tensor of value of [sw, sh] (optional). sw=w and sh=h if not given.\n    \"\"\"\n\n    c = bb[...,:2] + 0.5 * bb[...,2:]\n    if sz_norm is None:\n        c_rel = c / bb[...,2:]\n    else:\n        c_rel = c / sz_norm\n    sz_rel = torch.log(bb[...,2:])\n    return torch.cat((c_rel, sz_rel), dim=-1)\n\n\ndef rel_to_rect(bb, sz_norm=None):\n    \"\"\"Inverts the effect of rect_to_rel. See above.\"\"\"\n\n    sz = torch.exp(bb[...,2:])\n    if sz_norm is None:\n        c = bb[...,:2] * sz\n    else:\n        c = bb[...,:2] * sz_norm\n    tl = c - 0.5 * sz\n    return torch.cat((tl, sz), dim=-1)\n\n\ndef masks_to_bboxes(mask, fmt='c'):\n\n    \"\"\" Convert a mask tensor to one or more bounding boxes.\n    Note: This function is a bit new, make sure it does what it says.  /Andreas\n    :param mask: Tensor of masks, shape = (..., H, W)\n    :param fmt: bbox layout. 'c' => \"center + size\" or (x_center, y_center, width, height)\n                             't' => \"top left + size\" or (x_left, y_top, width, height)\n                             'v' => \"vertices\" or (x_left, y_top, x_right, y_bottom)\n    :return: tensor containing a batch of bounding boxes, shape = (..., 4)\n    \"\"\"\n    batch_shape = mask.shape[:-2]\n    mask = mask.reshape((-1, *mask.shape[-2:]))\n    bboxes = []\n\n    for m in mask:\n        mx = m.sum(dim=-2).nonzero()\n        my = m.sum(dim=-1).nonzero()\n        bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0]\n        bboxes.append(bb)\n\n    bboxes = torch.tensor(bboxes, dtype=torch.float32, device=mask.device)\n    bboxes = bboxes.reshape(batch_shape + (4,))\n\n    if fmt == 'v':\n        return bboxes\n\n    x1 = bboxes[..., :2]\n    s = bboxes[..., 2:] - x1 + 1\n\n    if fmt == 'c':\n        return torch.cat((x1 + 0.5 * s, s), dim=-1)\n    elif fmt == 't':\n        return torch.cat((x1, s), dim=-1)\n\n    raise ValueError(\"Undefined bounding box layout '%s'\" % fmt)\n\n\ndef masks_to_bboxes_multi(mask, ids, fmt='c'):\n    assert mask.dim() == 2\n    bboxes = []\n\n    for id in ids:\n        mx = (mask == id).sum(dim=-2).nonzero()\n        my = (mask == id).float().sum(dim=-1).nonzero()\n        bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0]\n\n        bb = torch.tensor(bb, dtype=torch.float32, device=mask.device)\n\n        x1 = bb[:2]\n        s = bb[2:] - x1 + 1\n\n        if fmt == 'v':\n            pass\n        elif fmt == 'c':\n            bb = torch.cat((x1 + 0.5 * s, s), dim=-1)\n        elif fmt == 't':\n            bb = torch.cat((x1, s), dim=-1)\n        else:\n            raise ValueError(\"Undefined bounding box layout '%s'\" % fmt)\n        bboxes.append(bb)\n\n    return bboxes\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/data/image_loader.py",
    "content": "import jpeg4py\nimport cv2 as cv\nfrom PIL import Image\nimport numpy as np\n\ndavis_palette = np.repeat(np.expand_dims(np.arange(0,256), 1), 3, 1).astype(np.uint8)\ndavis_palette[:22, :] = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],\n                         [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],\n                         [64, 0, 0], [191, 0, 0], [64, 128, 0], [191, 128, 0],\n                         [64, 0, 128], [191, 0, 128], [64, 128, 128], [191, 128, 128],\n                         [0, 64, 0], [128, 64, 0], [0, 191, 0], [128, 191, 0],\n                         [0, 64, 128], [128, 64, 128]]\n\n\ndef default_image_loader(path):\n    \"\"\"The default image loader, reads the image from the given path. It first tries to use the jpeg4py_loader,\n    but reverts to the opencv_loader if the former is not available.\"\"\"\n    if default_image_loader.use_jpeg4py is None:\n        # Try using jpeg4py\n        im = jpeg4py_loader(path)\n        if im is None:\n            default_image_loader.use_jpeg4py = False\n            print('Using opencv_loader instead.')\n        else:\n            default_image_loader.use_jpeg4py = True\n            return im\n    if default_image_loader.use_jpeg4py:\n        return jpeg4py_loader(path)\n    return opencv_loader(path)\n\ndefault_image_loader.use_jpeg4py = None\n\n\ndef jpeg4py_loader(path):\n    \"\"\" Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py\"\"\"\n    try:\n        return jpeg4py.JPEG(path).decode()\n    except Exception as e:\n        print('ERROR: Could not read image \"{}\"'.format(path))\n        print(e)\n        return None\n\n\ndef opencv_loader(path):\n    \"\"\" Read image using opencv's imread function and returns it in rgb format\"\"\"\n    try:\n        im = cv.imread(path, cv.IMREAD_COLOR)\n\n        # convert to rgb and return\n        return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n    except Exception as e:\n        print('ERROR: Could not read image \"{}\"'.format(path))\n        print(e)\n        return None\n\n\ndef jpeg4py_loader_w_failsafe(path):\n    \"\"\" Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py\"\"\"\n    try:\n        return jpeg4py.JPEG(path).decode()\n    except:\n        try:\n            im = cv.imread(path, cv.IMREAD_COLOR)\n\n            # convert to rgb and return\n            return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n        except Exception as e:\n            print('ERROR: Could not read image \"{}\"'.format(path))\n            print(e)\n            return None\n\n\ndef opencv_seg_loader(path):\n    \"\"\" Read segmentation annotation using opencv's imread function\"\"\"\n    try:\n        return cv.imread(path)\n    except Exception as e:\n        print('ERROR: Could not read image \"{}\"'.format(path))\n        print(e)\n        return None\n\n\ndef imread_indexed(filename):\n    \"\"\" Load indexed image with given filename. Used to read segmentation annotations.\"\"\"\n\n    im = Image.open(filename)\n\n    annotation = np.atleast_3d(im)[...,0]\n    return annotation\n\n\ndef imwrite_indexed(filename, array, color_palette=None):\n    \"\"\" Save indexed image as png. Used to save segmentation annotation.\"\"\"\n\n    if color_palette is None:\n        color_palette = davis_palette\n\n    if np.atleast_3d(array).shape[2] != 1:\n        raise Exception(\"Saving indexed PNGs requires 2D array.\")\n\n    im = Image.fromarray(array)\n    im.putpalette(color_palette.ravel())\n    im.save(filename, format='PNG')"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/data/loader.py",
    "content": "import torch\nimport torch.utils.data.dataloader\nimport importlib\nimport collections\nfrom torch._six import string_classes, int_classes\nfrom pytracking import TensorDict, TensorList\n\n\ndef _check_use_shared_memory():\n    if hasattr(torch.utils.data.dataloader, '_use_shared_memory'):\n        return getattr(torch.utils.data.dataloader, '_use_shared_memory')\n    collate_lib = importlib.import_module('torch.utils.data._utils.collate')\n    if hasattr(collate_lib, '_use_shared_memory'):\n        return getattr(collate_lib, '_use_shared_memory')\n    return torch.utils.data.get_worker_info() is not None\n\n\ndef ltr_collate(batch):\n    \"\"\"Puts each data field into a tensor with outer dimension batch size\"\"\"\n\n    error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n    elem_type = type(batch[0])\n    if isinstance(batch[0], torch.Tensor):\n        out = None\n        if _check_use_shared_memory():\n            # If we're in a background process, concatenate directly into a\n            # shared memory tensor to avoid an extra copy\n            numel = sum([x.numel() for x in batch])\n            storage = batch[0].storage()._new_shared(numel)\n            out = batch[0].new(storage)\n        return torch.stack(batch, 0, out=out)\n        # if batch[0].dim() < 4:\n        #     return torch.stack(batch, 0, out=out)\n        # return torch.cat(batch, 0, out=out)\n    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n            and elem_type.__name__ != 'string_':\n        elem = batch[0]\n        if elem_type.__name__ == 'ndarray':\n            # array of string classes and object\n            if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None:\n                raise TypeError(error_msg.format(elem.dtype))\n\n            return torch.stack([torch.from_numpy(b) for b in batch], 0)\n        if elem.shape == ():  # scalars\n            py_type = float if elem.dtype.name.startswith('float') else int\n            return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n    elif isinstance(batch[0], int_classes):\n        return torch.LongTensor(batch)\n    elif isinstance(batch[0], float):\n        return torch.DoubleTensor(batch)\n    elif isinstance(batch[0], string_classes):\n        return batch\n    elif isinstance(batch[0], TensorDict):\n        return TensorDict({key: ltr_collate([d[key] for d in batch]) for key in batch[0]})\n    elif isinstance(batch[0], collections.Mapping):\n        return {key: ltr_collate([d[key] for d in batch]) for key in batch[0]}\n    elif isinstance(batch[0], TensorList):\n        transposed = zip(*batch)\n        return TensorList([ltr_collate(samples) for samples in transposed])\n    elif isinstance(batch[0], collections.Sequence):\n        transposed = zip(*batch)\n        return [ltr_collate(samples) for samples in transposed]\n    elif batch[0] is None:\n        return batch\n\n    raise TypeError((error_msg.format(type(batch[0]))))\n\n\ndef ltr_collate_stack1(batch):\n    \"\"\"Puts each data field into a tensor. 
The tensors are stacked at dim=1 to form the batch\"\"\"\n\n    error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n    elem_type = type(batch[0])\n    if isinstance(batch[0], torch.Tensor):\n        out = None\n        if _check_use_shared_memory():\n            # If we're in a background process, concatenate directly into a\n            # shared memory tensor to avoid an extra copy\n            numel = sum([x.numel() for x in batch])\n            storage = batch[0].storage()._new_shared(numel)\n            out = batch[0].new(storage)\n        return torch.stack(batch, 1, out=out)\n        # if batch[0].dim() < 4:\n        #     return torch.stack(batch, 0, out=out)\n        # return torch.cat(batch, 0, out=out)\n    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n            and elem_type.__name__ != 'string_':\n        elem = batch[0]\n        if elem_type.__name__ == 'ndarray':\n            # array of string classes and object\n            if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None:\n                raise TypeError(error_msg.format(elem.dtype))\n\n            return torch.stack([torch.from_numpy(b) for b in batch], 1)\n        if elem.shape == ():  # scalars\n            py_type = float if elem.dtype.name.startswith('float') else int\n            return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n    elif isinstance(batch[0], int_classes):\n        return torch.LongTensor(batch)\n    elif isinstance(batch[0], float):\n        return torch.DoubleTensor(batch)\n    elif isinstance(batch[0], string_classes):\n        return batch\n    elif isinstance(batch[0], TensorDict):\n        return TensorDict({key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]})\n    elif isinstance(batch[0], collections.Mapping):\n        return {key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]}\n    elif isinstance(batch[0], TensorList):\n        transposed = zip(*batch)\n        return TensorList([ltr_collate_stack1(samples) for samples in transposed])\n    elif isinstance(batch[0], collections.Sequence):\n        transposed = zip(*batch)\n        return [ltr_collate_stack1(samples) for samples in transposed]\n    elif batch[0] is None:\n        return batch\n\n    raise TypeError((error_msg.format(type(batch[0]))))\n\n\nclass LTRLoader(torch.utils.data.dataloader.DataLoader):\n    \"\"\"\n    Data loader. Combines a dataset and a sampler, and provides\n    single- or multi-process iterators over the dataset.\n\n    Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\n            select along which dimension the data should be stacked to form a batch.\n\n    Arguments:\n        dataset (Dataset): dataset from which to load the data.\n        batch_size (int, optional): how many samples per batch to load\n            (default: 1).\n        shuffle (bool, optional): set to ``True`` to have the data reshuffled\n            at every epoch (default: False).\n        sampler (Sampler, optional): defines the strategy to draw samples from\n            the dataset. If specified, ``shuffle`` must be False.\n        batch_sampler (Sampler, optional): like sampler, but returns a batch of\n            indices at a time. Mutually exclusive with batch_size, shuffle,\n            sampler, and drop_last.\n        num_workers (int, optional): how many subprocesses to use for data\n            loading. 
0 means that the data will be loaded in the main process.\n            (default: 0)\n        collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n        stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\n        pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n            into CUDA pinned memory before returning them.\n        drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n            if the dataset size is not divisible by the batch size. If ``False`` and\n            the size of dataset is not divisible by the batch size, then the last batch\n            will be smaller. (default: False)\n        timeout (numeric, optional): if positive, the timeout value for collecting a batch\n            from workers. Should always be non-negative. (default: 0)\n        worker_init_fn (callable, optional): If not None, this will be called on each\n            worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n            input, after seeding and before data loading. (default: None)\n\n    .. note:: By default, each worker will have its PyTorch seed set to\n              ``base_seed + worker_id``, where ``base_seed`` is a long generated\n              by main process using its RNG. However, seeds for other libraries\n              may be duplicated upon initializing workers (e.g., NumPy), causing\n              each worker to return identical random numbers. (See\n              :ref:`dataloader-workers-random-seed` section in FAQ.) You may\n              use ``torch.initial_seed()`` to access the PyTorch seed for each\n              worker in :attr:`worker_init_fn`, and use it to set other seeds\n              before data loading.\n\n    .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\n                 unpicklable object, e.g., a lambda function.\n    \"\"\"\n\n    __initialized = False\n\n    def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n                 num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\n                 timeout=0, worker_init_fn=None):\n        if collate_fn is None:\n            if stack_dim == 0:\n                collate_fn = ltr_collate\n            elif stack_dim == 1:\n                collate_fn = ltr_collate_stack1\n            else:\n                raise ValueError('Stack dim not supported. Must be 0 or 1.')\n\n        super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n                 num_workers, collate_fn, pin_memory, drop_last,\n                 timeout, worker_init_fn)\n\n        self.name = name\n        self.training = training\n        self.epoch_interval = epoch_interval\n        self.stack_dim = stack_dim"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/data/processing.py",
    "content": "import torch\nimport torchvision.transforms as transforms\nfrom pytracking import TensorDict\nimport ltr.data.processing_utils as prutils\n\n\ndef stack_tensors(x):\n    if isinstance(x, (list, tuple)) and isinstance(x[0], torch.Tensor):\n        return torch.stack(x)\n    return x\n\n\nclass BaseProcessing:\n    \"\"\" Base class for Processing. Processing class is used to process the data returned by a dataset, before passing it\n     through the network. For example, it can be used to crop a search region around the object, apply various data\n     augmentations, etc.\"\"\"\n    def __init__(self, transform=transforms.ToTensor(), train_transform=None, test_transform=None, joint_transform=None):\n        \"\"\"\n        args:\n            transform       - The set of transformations to be applied on the images. Used only if train_transform or\n                                test_transform is None.\n            train_transform - The set of transformations to be applied on the train images. If None, the 'transform'\n                                argument is used instead.\n            test_transform  - The set of transformations to be applied on the test images. If None, the 'transform'\n                                argument is used instead.\n            joint_transform - The set of transformations to be applied 'jointly' on the train and test images.  For\n                                example, it can be used to convert both test and train images to grayscale.\n        \"\"\"\n        self.transform = {'train': transform if train_transform is None else train_transform,\n                          'test':  transform if test_transform is None else test_transform,\n                          'joint': joint_transform}\n\n    def __call__(self, data: TensorDict):\n        raise NotImplementedError\n\n\nclass ATOMProcessing(BaseProcessing):\n    \"\"\" The processing class used for training ATOM. The images are processed in the following way.\n    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )\n    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is\n    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is\n    always at the center of the search region. The search region is then resized to a fixed size given by the\n    argument output_sz. A set of proposals are then generated for the test images by jittering the ground truth box.\n\n    \"\"\"\n\n    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,\n                 mode='pair', *args, **kwargs):\n        \"\"\"\n        args:\n            search_area_factor - The size of the search region  relative to the target size.\n            output_sz - An integer, denoting the size to which the search region is resized. The search region is always\n                        square.\n            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before\n                                    extracting the search region. 
See _get_jittered_box for how the jittering is done.\n            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.\n            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self.search_area_factor = search_area_factor\n        self.output_sz = output_sz\n        self.center_jitter_factor = center_jitter_factor\n        self.scale_jitter_factor = scale_jitter_factor\n        self.proposal_params = proposal_params\n        self.mode = mode\n\n    def _get_jittered_box(self, box, mode):\n        \"\"\" Jitter the input box\n        args:\n            box - input bounding box\n            mode - string 'train' or 'test' indicating train or test data\n\n        returns:\n            torch.Tensor - jittered box\n        \"\"\"\n\n        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])\n        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())\n        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n\n        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n    def _generate_proposals(self, box):\n        \"\"\" Generates proposals by adding noise to the input box\n        args:\n            box - input box\n\n        returns:\n            torch.Tensor - Array of shape (num_proposals, 4) containing proposals\n            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The\n                        IoU is mapped to [-1, 1]\n        \"\"\"\n        # Generate proposals\n        num_proposals = self.proposal_params['boxes_per_frame']\n        proposal_method = self.proposal_params.get('proposal_method', 'default')\n\n        if proposal_method == 'default':\n            proposals = torch.zeros((num_proposals, 4))\n            gt_iou = torch.zeros(num_proposals)\n            for i in range(num_proposals):\n                proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],\n                                                                 sigma_factor=self.proposal_params['sigma_factor'])\n        elif proposal_method == 'gmm':\n            proposals, _, _ = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],\n                                                     num_samples=num_proposals)\n            gt_iou = prutils.iou(box.view(1, 4), proposals.view(-1, 4))\n\n        # Map to [-1, 1]\n        gt_iou = gt_iou * 2 - 1\n        return proposals, gt_iou\n\n    def __call__(self, data: TensorDict):\n        \"\"\"\n        args:\n            data - The input data, should contain the following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno'\n        returns:\n            TensorDict - output data block with following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou'\n        \"\"\"\n        # Apply joint transforms\n        if self.transform['joint'] is not None:\n            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])\n            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)\n\n        for s in ['train', 'test']:\n            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \\\n                \"In pair mode, num train/test frames must be 1\"\n\n            # Add a uniform noise to the center pos\n            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]\n\n            # Crop image region centered at jittered_anno box (jittered_center_crop also returns crop masks; unused here)\n            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],\n                                                        self.search_area_factor, self.output_sz)\n\n            # Apply transforms\n            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)\n\n        # Generate proposals\n        frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])\n\n        data['test_proposals'] = list(frame2_proposals)\n        data['proposal_iou'] = list(gt_iou)\n\n        # Prepare output\n        if self.mode == 'sequence':\n            data = data.apply(stack_tensors)\n        else:\n            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)\n\n        return data\n\n\nclass KLBBregProcessing(BaseProcessing):\n    \"\"\" Based on ATOMProcessing. It supports training ATOM using the Maximum Likelihood or KL-divergence based learning\n    introduced in [https://arxiv.org/abs/1909.12297] and in PrDiMP [https://arxiv.org/abs/2003.12565].\n    \"\"\"\n\n    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,\n                 mode='pair', *args, **kwargs):\n        \"\"\"\n        args:\n            search_area_factor - The size of the search region  relative to the target size.\n            output_sz - An integer, denoting the size to which the search region is resized. The search region is always\n                        square.\n            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.\n            mode - Either 'pair' or 'sequence'. 
If mode='sequence', then output has an extra dimension for frames\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self.search_area_factor = search_area_factor\n        self.output_sz = output_sz\n        self.center_jitter_factor = center_jitter_factor\n        self.scale_jitter_factor = scale_jitter_factor\n        self.proposal_params = proposal_params\n        self.mode = mode\n\n    def _get_jittered_box(self, box, mode):\n        \"\"\" Jitter the input box\n        args:\n            box - input bounding box\n            mode - string 'train' or 'test' indicating train or test data\n\n        returns:\n            torch.Tensor - jittered box\n        \"\"\"\n\n        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])\n        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())\n        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n\n        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n    def _generate_proposals(self, box):\n        \"\"\" Generate proposal boxes from a GMM centered at the input box, along with their proposal\n        density and the ground-truth density. See prutils.sample_box_gmm for details.\n        args:\n            box - input bounding box\n        \"\"\"\n        # Generate proposals\n        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],\n                                                                         gt_sigma=self.proposal_params['gt_sigma'],\n                                                                         num_samples=self.proposal_params[\n                                                                             'boxes_per_frame'],\n                                                                         add_mean_box=self.proposal_params.get(\n                                                                             'add_mean_box', False))\n\n        return proposals, proposal_density, gt_density\n\n    def __call__(self, data: TensorDict):\n        \"\"\"\n        args:\n            data - The input data, should contain the following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno'\n        returns:\n            TensorDict - output data block with following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density'\n        \"\"\"\n        # Apply joint transforms\n        if self.transform['joint'] is not None:\n            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])\n            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)\n\n        for s in ['train', 'test']:\n            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \\\n                \"In pair mode, num train/test frames must be 1\"\n\n            # Add a uniform noise to the center pos\n            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]\n\n            # Crop image region centered at jittered_anno box\n            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],\n                                                        self.search_area_factor, self.output_sz)\n\n            # Apply transforms\n            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)\n\n        # Generate proposals\n        proposals, proposal_density, gt_density = zip(*[self._generate_proposals(a) for a in data['test_anno']])\n\n        data['test_proposals'] = proposals\n        data['proposal_density'] = proposal_density\n        data['gt_density'] = gt_density\n\n        # Prepare output\n        if self.mode == 'sequence':\n            data = data.apply(stack_tensors)\n        else:\n            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)\n\n        return data\n\n\nclass ATOMwKLProcessing(BaseProcessing):\n    \"\"\"Same as ATOMProcessing but using the GMM-based sampling of proposal boxes used in KLBBregProcessing.\"\"\"\n    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,\n                 mode='pair', *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.search_area_factor = search_area_factor\n        self.output_sz = output_sz\n        self.center_jitter_factor = center_jitter_factor\n        self.scale_jitter_factor = scale_jitter_factor\n        self.proposal_params = proposal_params\n        self.mode = mode\n\n    def _get_jittered_box(self, box, mode):\n        \"\"\" Jitter the input box\n        args:\n            box - input bounding box\n            mode - string 'train' or 'test' indicating train or test data\n\n        returns:\n            torch.Tensor - jittered box\n        \"\"\"\n\n        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])\n        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())\n        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n\n        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n    def _generate_proposals(self, box):\n        \"\"\" Generate GMM proposals as in KLBBregProcessing, and additionally compute the IoU of each\n        proposal with the input box.\n        args:\n            box - input bounding box\n        \"\"\"\n        # Generate proposals\n        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],\n                                                                         self.proposal_params['gt_sigma'],\n                                                                         self.proposal_params['boxes_per_frame'])\n\n        # processing_utils defines iou() (reference box first) but no iou_gen(), so use it here\n        iou = prutils.iou(box.view(1, 4), proposals)\n        return proposals, proposal_density, gt_density, iou\n\n    def __call__(self, data: TensorDict):\n        # Apply joint transforms\n        if self.transform['joint'] is not None:\n            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])\n            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)\n\n        for s in ['train', 'test']:\n            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \\\n                \"In pair mode, num train/test frames must be 1\"\n\n            # Add a uniform noise to the center pos\n            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]\n\n            # Crop image region centered at jittered_anno box\n            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],\n                                                        self.search_area_factor, self.output_sz)\n\n            # Apply transforms\n            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)\n\n        # Generate proposals\n        proposals, proposal_density, 
gt_density, proposal_iou = zip(\n            *[self._generate_proposals(a) for a in data['test_anno']])\n\n        data['test_proposals'] = proposals\n        data['proposal_density'] = proposal_density\n        data['gt_density'] = gt_density\n        data['proposal_iou'] = proposal_iou\n        # Prepare output\n        if self.mode == 'sequence':\n            data = data.apply(stack_tensors)\n        else:\n            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)\n\n        return data\n\n\n\nclass DiMPProcessing(BaseProcessing):\n    \"\"\" The processing class used for training DiMP. The images are processed in the following way.\n    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )\n    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is\n    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is\n    always at the center of the search region. The search region is then resized to a fixed size given by the\n    argument output_sz. A Gaussian label centered at the target is generated for each image. These label functions are\n    used for computing the loss of the predicted classification model on the test images. A set of proposals are\n    also generated for the test images by jittering the ground truth box. These proposals are used to train the\n    bounding box estimating branch.\n\n    \"\"\"\n\n    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',\n                 max_scale_change=None, mode='pair', proposal_params=None, label_function_params=None, *args, **kwargs):\n        \"\"\"\n        args:\n            search_area_factor - The size of the search region  relative to the target size.\n            output_sz - An integer, denoting the size to which the search region is resized. The search region is always\n                        square.\n            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.\n                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.\n                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.\n            max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')\n            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames\n            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.\n            label_function_params - Arguments for the label generation process. 
See _generate_label_function for details.\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self.search_area_factor = search_area_factor\n        self.output_sz = output_sz\n        self.center_jitter_factor = center_jitter_factor\n        self.scale_jitter_factor = scale_jitter_factor\n        self.crop_type = crop_type\n        self.mode = mode\n        self.max_scale_change = max_scale_change\n\n        self.proposal_params = proposal_params\n        self.label_function_params = label_function_params\n\n    def _get_jittered_box(self, box, mode):\n        \"\"\" Jitter the input box\n        args:\n            box - input bounding box\n            mode - string 'train' or 'test' indicating train or test data\n\n        returns:\n            torch.Tensor - jittered box\n        \"\"\"\n\n        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])\n        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())\n        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n\n        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n    def _generate_proposals(self, box):\n        \"\"\" Generates proposals by adding noise to the input box\n        args:\n            box - input box\n\n        returns:\n            torch.Tensor - Array of shape (num_proposals, 4) containing proposals\n            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The\n                        IoU is mapped to [-1, 1]\n        \"\"\"\n        # Generate proposals\n        num_proposals = self.proposal_params['boxes_per_frame']\n        proposal_method = self.proposal_params.get('proposal_method', 'default')\n\n        if proposal_method == 'default':\n            proposals = torch.zeros((num_proposals, 4))\n            gt_iou = torch.zeros(num_proposals)\n\n            for i in range(num_proposals):\n                proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],\n                                                                 sigma_factor=self.proposal_params['sigma_factor'])\n        elif proposal_method == 'gmm':\n            proposals, _, _ = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],\n                                                     num_samples=num_proposals)\n            gt_iou = prutils.iou(box.view(1, 4), proposals.view(-1, 4))\n        else:\n            raise ValueError('Unknown proposal method.')\n\n        # Map to [-1, 1]\n        gt_iou = gt_iou * 2 - 1\n        return proposals, gt_iou\n\n    def _generate_label_function(self, target_bb):\n        \"\"\" Generates the gaussian label function centered at target_bb\n        args:\n            target_bb - target bounding box (num_images, 4)\n\n        returns:\n            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample\n        \"\"\"\n\n        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],\n                                                      self.label_function_params['kernel_sz'],\n                                                      self.label_function_params['feature_sz'], self.output_sz,\n                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))\n\n        return 
gauss_label\n\n    def __call__(self, data: TensorDict):\n        \"\"\"\n        args:\n            data - The input data, should contain the following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno'\n        returns:\n            TensorDict - output data block with following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou',\n                'test_label' (optional), 'train_label' (optional), 'test_label_density' (optional), 'train_label_density' (optional)\n        \"\"\"\n\n        if self.transform['joint'] is not None:\n            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])\n            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)\n\n        for s in ['train', 'test']:\n            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \\\n                \"In pair mode, num train/test frames must be 1\"\n\n            # Add a uniform noise to the center pos\n            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]\n\n            crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],\n                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,\n                                                     max_scale_change=self.max_scale_change)\n\n            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)\n\n        # Generate proposals\n        if self.proposal_params:\n            frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])\n\n            data['test_proposals'] = list(frame2_proposals)\n            data['proposal_iou'] = list(gt_iou)\n\n        # Prepare output\n        if self.mode == 'sequence':\n            data = data.apply(stack_tensors)\n        else:\n            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)\n\n        # Generate label functions\n        if self.label_function_params is not None:\n            data['train_label'] = self._generate_label_function(data['train_anno'])\n            data['test_label'] = self._generate_label_function(data['test_anno'])\n\n        return data\n\n\nclass KLDiMPProcessing(BaseProcessing):\n    \"\"\" The processing class used for training PrDiMP that additionally supports the probabilistic classifier and\n    bounding box regressor. See DiMPProcessing for details.\n    \"\"\"\n\n    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',\n                 max_scale_change=None, mode='pair', proposal_params=None,\n                 label_function_params=None, label_density_params=None, *args, **kwargs):\n        \"\"\"\n        args:\n            search_area_factor - The size of the search region  relative to the target size.\n            output_sz - An integer, denoting the size to which the search region is resized. The search region is always\n                        square.\n            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before\n                                    extracting the search region. 
See _get_jittered_box for how the jittering is done.\n            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.\n                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.\n                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.\n            max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')\n            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames\n            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.\n            label_function_params - Arguments for the label generation process. See _generate_label_function for details.\n            label_density_params - Arguments for the label density generation process. See _generate_label_function for details.\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self.search_area_factor = search_area_factor\n        self.output_sz = output_sz\n        self.center_jitter_factor = center_jitter_factor\n        self.scale_jitter_factor = scale_jitter_factor\n        self.crop_type = crop_type\n        self.mode = mode\n        self.max_scale_change = max_scale_change\n\n        self.proposal_params = proposal_params\n        self.label_function_params = label_function_params\n        self.label_density_params = label_density_params\n\n    def _get_jittered_box(self, box, mode):\n        \"\"\" Jitter the input box\n        args:\n            box - input bounding box\n            mode - string 'train' or 'test' indicating train or test data\n\n        returns:\n            torch.Tensor - jittered box\n        \"\"\"\n\n        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])\n        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())\n        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n\n        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n    def _generate_proposals(self, box):\n        \"\"\" Generate proposal sample boxes from a GMM proposal distribution and compute their ground-truth density.\n        This is used for ML and KL based regression learning of the bounding box regressor.\n        args:\n            box - input bounding box\n        \"\"\"\n        # Generate proposals\n        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],\n                                                                         gt_sigma=self.proposal_params['gt_sigma'],\n                                                                         num_samples=self.proposal_params['boxes_per_frame'],\n                                                                         add_mean_box=self.proposal_params.get('add_mean_box', False))\n\n        return proposals, proposal_density, gt_density\n\n    def _generate_label_function(self, target_bb):\n        \"\"\" Generates the gaussian label function centered at target_bb\n        
args:\n            target_bb - target bounding box (num_images, 4)\n\n        returns:\n            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample\n        \"\"\"\n\n        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],\n                                                      self.label_function_params['kernel_sz'],\n                                                      self.label_function_params['feature_sz'], self.output_sz,\n                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))\n\n        return gauss_label\n\n    def _generate_label_density(self, target_bb):\n        \"\"\" Generates the gaussian label density centered at target_bb\n        args:\n            target_bb - target bounding box (num_images, 4)\n\n        returns:\n            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample\n        \"\"\"\n\n        feat_sz = self.label_density_params['feature_sz'] * self.label_density_params.get('interp_factor', 1)\n        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_density_params['sigma_factor'],\n                                                      self.label_density_params['kernel_sz'],\n                                                      feat_sz, self.output_sz,\n                                                      end_pad_if_even=self.label_density_params.get('end_pad_if_even', True),\n                                                      density=True,\n                                                      uni_bias=self.label_density_params.get('uni_weight', 0.0))\n\n        gauss_label *= (gauss_label > self.label_density_params.get('threshold', 0.0)).float()\n\n        if self.label_density_params.get('normalize', False):\n            g_sum = gauss_label.sum(dim=(-2, -1))\n            valid = g_sum > 0.01\n            gauss_label[valid, :, :] /= g_sum[valid].view(-1, 1, 1)\n            gauss_label[~valid, :, :] = 1.0 / (gauss_label.shape[-2] * gauss_label.shape[-1])\n\n        gauss_label *= 1.0 - self.label_density_params.get('shrink', 0.0)\n\n        return gauss_label\n\n    def __call__(self, data: TensorDict):\n        \"\"\"\n        args:\n            data - The input data, should contain the following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno'\n        returns:\n            TensorDict - output data block with following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density',\n                'test_label' (optional), 'train_label' (optional), 'test_label_density' (optional), 'train_label_density' (optional)\n        \"\"\"\n\n        if self.transform['joint'] is not None:\n            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])\n            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)\n\n        for s in ['train', 'test']:\n            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \\\n                \"In pair mode, num train/test frames must be 1\"\n\n            # Add a uniform noise to the center pos\n            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]\n\n            
crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],\n                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,\n                                                     max_scale_change=self.max_scale_change)\n\n            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)\n\n        # Generate proposals\n        proposals, proposal_density, gt_density = zip(*[self._generate_proposals(a) for a in data['test_anno']])\n\n        data['test_proposals'] = proposals\n        data['proposal_density'] = proposal_density\n        data['gt_density'] = gt_density\n\n        for s in ['train', 'test']:\n            is_distractor = data.get('is_distractor_{}_frame'.format(s), None)\n            if is_distractor is not None:\n                for is_dist, box in zip(is_distractor, data[s+'_anno']):\n                    if is_dist:\n                        box[0] = 99999999.9\n                        box[1] = 99999999.9\n\n        # Prepare output\n        if self.mode == 'sequence':\n            data = data.apply(stack_tensors)\n        else:\n            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)\n\n        # Generate label functions\n        if self.label_function_params is not None:\n            data['train_label'] = self._generate_label_function(data['train_anno'])\n            data['test_label'] = self._generate_label_function(data['test_anno'])\n        if self.label_density_params is not None:\n            data['train_label_density'] = self._generate_label_density(data['train_anno'])\n            data['test_label_density'] = self._generate_label_density(data['test_anno'])\n\n        return data\n"
  },
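To make the processing pipeline above concrete, here is a hypothetical wiring sketch for `ATOMProcessing` (the import path and all parameter values are illustrative, in the spirit of ATOM-style training settings, and are not taken from this repo's configs):

import torchvision.transforms as transforms
from ltr.data.processing import ATOMProcessing  # assumed import path

# 16 jittered proposals per test frame; perturb_box keeps each proposal's IoU
# with the ground truth above min_iou where possible (see _generate_proposals).
proposal_params = {'boxes_per_frame': 16, 'proposal_method': 'default',
                   'min_iou': 0.1, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}

processing = ATOMProcessing(
    search_area_factor=5.0,                          # crop area = 5^2 x box area
    output_sz=288,                                   # square search region, 288x288
    center_jitter_factor={'train': 0, 'test': 4.5},  # jitter only the test frames
    scale_jitter_factor={'train': 0, 'test': 0.5},
    proposal_params=proposal_params,
    mode='sequence',                                 # keep an explicit frame dimension
    transform=transforms.ToTensor())

# A dataset sampler calls processing(data) on a TensorDict holding
# 'train_images', 'test_images', 'train_anno', 'test_anno'; the output
# additionally carries 'test_proposals' and 'proposal_iou' mapped to [-1, 1].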
  {
    "path": "artrackv2_mindspore/external/AR/ltr/data/processing_utils.py",
    "content": "import torch\nimport math\nimport cv2 as cv\nimport random\nimport torch.nn.functional as F\nfrom .bounding_box_utils import rect_to_rel, rel_to_rect\n\n\ndef sample_target(im, target_bb, search_area_factor, output_sz=None, mask=None):\n    \"\"\" Extracts a square crop centered at target_bb box, of area search_area_factor^2 times target_bb area\n\n    args:\n        im - cv image\n        target_bb - target box [x, y, w, h]\n        search_area_factor - Ratio of crop size to target size\n        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.\n\n    returns:\n        cv image - extracted crop\n        float - the factor by which the crop has been resized to make the crop size equal output_size\n    \"\"\"\n    x, y, w, h = target_bb.tolist()\n\n    # Crop image\n    crop_sz = math.ceil(math.sqrt(w * h) * search_area_factor)\n\n    if crop_sz < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = round(x + 0.5 * w - crop_sz * 0.5)\n    x2 = x1 + crop_sz\n\n    y1 = round(y + 0.5 * h - crop_sz * 0.5)\n    y2 = y1 + crop_sz\n\n    x1_pad = max(0, -x1)\n    x2_pad = max(x2 - im.shape[1] + 1, 0)\n\n    y1_pad = max(0, -y1)\n    y2_pad = max(y2 - im.shape[0] + 1, 0)\n\n    # Crop target\n    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]\n    if mask is not None:\n        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]\n\n    # Pad\n    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_REPLICATE)\n    if mask is not None:\n        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)\n\n    if output_sz is not None:\n        resize_factor = output_sz / crop_sz\n        im_crop_padded = cv.resize(im_crop_padded, (output_sz, output_sz))\n\n        if mask is None:\n            return im_crop_padded, resize_factor\n        mask_crop_padded = \\\n        F.interpolate(mask_crop_padded[None, None], (output_sz, output_sz), mode='bilinear', align_corners=False)[0, 0]\n        return im_crop_padded, resize_factor, mask_crop_padded\n\n    else:\n        if mask is None:\n            return im_crop_padded, 1.0\n        return im_crop_padded, 1.0, mask_crop_padded\n\n\ndef transform_image_to_crop(box_in: torch.Tensor, box_extract: torch.Tensor, resize_factor: float,\n                            crop_sz: torch.Tensor) -> torch.Tensor:\n    \"\"\" Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image\n    args:\n        box_in - the box for which the co-ordinates are to be transformed\n        box_extract - the box about which the image crop has been extracted.\n        resize_factor - the ratio between the original image scale and the scale of the image crop\n        crop_sz - size of the cropped image\n\n    returns:\n        torch.Tensor - transformed co-ordinates of box_in\n    \"\"\"\n    box_extract_center = box_extract[0:2] + 0.5 * box_extract[2:4]\n\n    box_in_center = box_in[0:2] + 0.5 * box_in[2:4]\n\n    box_out_center = (crop_sz - 1) / 2 + (box_in_center - box_extract_center) * resize_factor\n    box_out_wh = box_in[2:4] * resize_factor\n\n    box_out = torch.cat((box_out_center - 0.5 * box_out_wh, box_out_wh))\n    return box_out\n\n\ndef jittered_center_crop(frames, box_extract, box_gt, search_area_factor, output_sz, masks=None):\n    \"\"\" For each frame in frames, extracts a square crop centered at box_extract, of area 
search_area_factor^2\n    times box_extract area. The extracted crops are then resized to output_sz. Further, the co-ordinates of the box\n    box_gt are transformed to the image crop co-ordinates\n\n    args:\n        frames - list of frames\n        box_extract - list of boxes of same length as frames. The crops are extracted using box_extract\n        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from\n                    image co-ordinates to the crop co-ordinates\n        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area\n        output_sz - The size to which the extracted crops are resized\n\n    returns:\n        list - list of image crops\n        list - box_gt location in the crop co-ordinates\n        \"\"\"\n\n    if masks is None:\n        crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz)\n                                for f, a in zip(frames, box_extract)]\n        frames_crop, resize_factors = zip(*crops_resize_factors)\n        masks_crop = None\n    else:\n        crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz, m)\n                                for f, a, m in zip(frames, box_extract, masks)]\n        frames_crop, resize_factors, masks_crop = zip(*crops_resize_factors)\n\n    crop_sz = torch.Tensor([output_sz, output_sz])\n\n    # find the bb location in the crop\n    box_crop = [transform_image_to_crop(a_gt, a_ex, rf, crop_sz)\n                for a_gt, a_ex, rf in zip(box_gt, box_extract, resize_factors)]\n\n    return frames_crop, box_crop, masks_crop\n\n\ndef sample_target_adaptive(im, target_bb, search_area_factor, output_sz, mode: str = 'replicate',\n                           max_scale_change=None, mask=None):\n    \"\"\" Extracts a crop centered at target_bb box, of area search_area_factor^2. If the crop area contains regions\n    outside the image, it is shifted so that it is inside the image. Further, if the crop area exceeds the image\n    size, a smaller crop which fits the image is returned instead.\n\n    args:\n        im - Input numpy image to crop.\n        target_bb - target box [x, y, w, h]\n        search_area_factor - Ratio of crop size to target size\n        output_sz - (float) Size to which the extracted crop is resized (always square). 
If None, no resizing is done.\n        mode - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.\n               If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.\n               If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.\n        max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')\n        mask - Optional mask to apply the same crop.\n\n    returns:\n        numpy image - Extracted crop.\n        torch.Tensor - A bounding box denoting the cropped region in the image.\n        numpy mask - Cropped mask returned only if mask is not None.\n    \"\"\"\n\n    if max_scale_change is None:\n        max_scale_change = float('inf')\n    if isinstance(output_sz, (float, int)):\n        output_sz = (output_sz, output_sz)\n    output_sz = torch.Tensor(output_sz)\n\n    im_h = im.shape[0]\n    im_w = im.shape[1]\n\n    bbx, bby, bbw, bbh = target_bb.tolist()\n\n    # Crop image\n    crop_sz_x, crop_sz_y = (output_sz * (\n                target_bb[2:].prod() / output_sz.prod()).sqrt() * search_area_factor).ceil().long().tolist()\n\n    # Get new sample size if forced inside the image\n    if mode == 'inside' or mode == 'inside_major':\n        # Calculate rescaling factor if outside the image\n        rescale_factor = [crop_sz_x / im_w, crop_sz_y / im_h]\n        if mode == 'inside':\n            rescale_factor = max(rescale_factor)\n        elif mode == 'inside_major':\n            rescale_factor = min(rescale_factor)\n        rescale_factor = min(max(1, rescale_factor), max_scale_change)\n\n        crop_sz_x = math.floor(crop_sz_x / rescale_factor)\n        crop_sz_y = math.floor(crop_sz_y / rescale_factor)\n\n    if crop_sz_x < 1 or crop_sz_y < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = round(bbx + 0.5 * bbw - crop_sz_x * 0.5)\n    x2 = x1 + crop_sz_x\n\n    y1 = round(bby + 0.5 * bbh - crop_sz_y * 0.5)\n    y2 = y1 + crop_sz_y\n\n    # Move box inside image\n    shift_x = max(0, -x1) + min(0, im_w - x2)\n    x1 += shift_x\n    x2 += shift_x\n\n    shift_y = max(0, -y1) + min(0, im_h - y2)\n    y1 += shift_y\n    y2 += shift_y\n\n    out_x = (max(0, -x1) + max(0, x2 - im_w)) // 2\n    out_y = (max(0, -y1) + max(0, y2 - im_h)) // 2\n    shift_x = (-x1 - out_x) * (out_x > 0)\n    shift_y = (-y1 - out_y) * (out_y > 0)\n\n    x1 += shift_x\n    x2 += shift_x\n    y1 += shift_y\n    y2 += shift_y\n\n    x1_pad = max(0, -x1)\n    x2_pad = max(x2 - im.shape[1] + 1, 0)\n\n    y1_pad = max(0, -y1)\n    y2_pad = max(y2 - im.shape[0] + 1, 0)\n\n    # Crop target\n    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]\n\n    if mask is not None:\n        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]\n\n    # Pad\n    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_REPLICATE)\n\n    if mask is not None:\n        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)\n\n    # Resize image\n    im_out = cv.resize(im_crop_padded, tuple(output_sz.long().tolist()))\n\n    if mask is not None:\n        mask_out = \\\n        F.interpolate(mask_crop_padded[None, None], tuple(output_sz.flip(0).long().tolist()), mode='nearest')[0, 0]\n\n    crop_box = torch.Tensor([x1, y1, x2 - x1, y2 - y1])\n\n    if mask is None:\n        return im_out, crop_box\n    
else:\n        return im_out, crop_box, mask_out\n\n\ndef crop_and_resize(im, box, crop_bb, output_sz, mask=None):\n    if isinstance(output_sz, (float, int)):\n        output_sz = (output_sz, output_sz)\n\n    im_h = im.shape[0]\n    im_w = im.shape[1]\n\n    if crop_bb[2] < 1 or crop_bb[3] < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = crop_bb[0]\n    x2 = crop_bb[0] + crop_bb[2]\n\n    y1 = crop_bb[1]\n    y2 = crop_bb[1] + crop_bb[3]\n\n    x1_pad = max(0, -x1)\n    x2_pad = max(x2 - im.shape[1] + 1, 0)\n\n    y1_pad = max(0, -y1)\n    y2_pad = max(y2 - im.shape[0] + 1, 0)\n\n    # Crop target\n    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]\n\n    if mask is not None:\n        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]\n\n    # Pad\n    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_REPLICATE)\n\n    if mask is not None:\n        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)\n\n    # Resize image\n    im_out = cv.resize(im_crop_padded, output_sz)\n\n    if mask is not None:\n        mask_out = F.interpolate(mask_crop_padded[None, None], (output_sz[1], output_sz[0]), mode='nearest')[0, 0]\n\n    rescale_factor = output_sz[0] / crop_bb[2]\n\n    # Hack\n    if box is not None:\n        box_crop = box.clone()\n        box_crop[0] -= crop_bb[0]\n        box_crop[1] -= crop_bb[1]\n\n        box_crop *= rescale_factor\n    else:\n        box_crop = None\n\n    if mask is None:\n        return im_out, box_crop\n    else:\n        return im_out, box_crop, mask_out\n\n\ndef transform_box_to_crop(box: torch.Tensor, crop_box: torch.Tensor, crop_sz: torch.Tensor) -> torch.Tensor:\n    \"\"\" Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image\n    args:\n        box - the box for which the co-ordinates are to be transformed\n        crop_box - bounding box defining the crop in the original image\n        crop_sz - size of the cropped image\n\n    returns:\n        torch.Tensor - transformed co-ordinates of box_in\n    \"\"\"\n\n    box_out = box.clone()\n    box_out[:2] -= crop_box[:2]\n\n    scale_factor = crop_sz / crop_box[2:]\n\n    box_out[:2] *= scale_factor\n    box_out[2:] *= scale_factor\n    return box_out\n\n\ndef target_image_crop(frames, box_extract, box_gt, search_area_factor, output_sz, mode: str = 'replicate',\n                      max_scale_change=None, masks=None):\n    \"\"\" For each frame in frames, extracts a square crop centered at box_extract, of area search_area_factor^2\n    times box_extract area. If the crop area contains regions outside the image, it is shifted / shrunk so that it\n    completely fits inside the image. The extracted crops are then resized to output_sz. Further, the co-ordinates of\n    the box box_gt are transformed to the image crop co-ordinates\n\n    args:\n        frames - list of frames\n        box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract\n        box_gt - list of boxes of same length as frames. 
The co-ordinates of these boxes are transformed from\n                    image co-ordinates to the crop co-ordinates\n        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area\n        output_sz - The size to which the extracted crops are resized\n        mode - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.\n               If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.\n               If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.\n        max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')\n        masks - Optional masks to apply the same crop.\n\n    returns:\n        list - list of image crops\n        list - box_gt location in the crop co-ordinates\n        \"\"\"\n\n    if isinstance(output_sz, (float, int)):\n        output_sz = (output_sz, output_sz)\n\n    if masks is None:\n        frame_crops_boxes = [sample_target_adaptive(f, a, search_area_factor, output_sz, mode, max_scale_change)\n                             for f, a in zip(frames, box_extract)]\n\n        frames_crop, crop_boxes = zip(*frame_crops_boxes)\n    else:\n        frame_crops_boxes_masks = [\n            sample_target_adaptive(f, a, search_area_factor, output_sz, mode, max_scale_change, mask=m)\n            for f, a, m in zip(frames, box_extract, masks)]\n\n        frames_crop, crop_boxes, masks_crop = zip(*frame_crops_boxes_masks)\n\n    crop_sz = torch.Tensor(output_sz)\n\n    # find the bb location in the crop\n    box_crop = [transform_box_to_crop(bb_gt, crop_bb, crop_sz)\n                for bb_gt, crop_bb in zip(box_gt, crop_boxes)]\n\n    if masks is None:\n        return frames_crop, box_crop\n    else:\n        return frames_crop, box_crop, masks_crop\n\n\ndef iou(reference, proposals):\n    \"\"\"Compute the IoU between a reference box with multiple proposal boxes.\n\n    args:\n        reference - Tensor of shape (1, 4).\n        proposals - Tensor of shape (num_proposals, 4)\n\n    returns:\n        torch.Tensor - Tensor of shape (num_proposals,) containing IoU of reference box with each proposal box.\n    \"\"\"\n\n    # Intersection box\n    tl = torch.max(reference[:, :2], proposals[:, :2])\n    br = torch.min(reference[:, :2] + reference[:, 2:], proposals[:, :2] + proposals[:, 2:])\n    sz = (br - tl).clamp(0)\n\n    # Area\n    intersection = sz.prod(dim=1)\n    union = reference[:, 2:].prod(dim=1) + proposals[:, 2:].prod(dim=1) - intersection\n\n    return intersection / union\n\n\ndef rand_uniform(a, b, shape=1):\n    \"\"\" sample numbers uniformly between a and b.\n    args:\n        a - lower bound\n        b - upper bound\n        shape - shape of the output tensor\n\n    returns:\n        torch.Tensor - tensor of shape=shape\n    \"\"\"\n    return (b - a) * torch.rand(shape) + a\n\n\ndef perturb_box(box, min_iou=0.5, sigma_factor=0.1):\n    \"\"\" Perturb the input box by adding gaussian noise to the co-ordinates\n\n     args:\n        box - input box\n        min_iou - minimum IoU overlap between input box and the perturbed box\n        sigma_factor - amount of perturbation, relative to the box size. Can be either a single element, or a list of\n                        sigma_factors, in which case one of them will be uniformly sampled. 
Further, each of the\n                        sigma_factor element can be either a float, or a tensor\n                        of shape (4,) specifying the sigma_factor per co-ordinate\n\n    returns:\n        torch.Tensor - the perturbed box\n    \"\"\"\n\n    if isinstance(sigma_factor, list):\n        # If list, sample one sigma_factor as current sigma factor\n        c_sigma_factor = random.choice(sigma_factor)\n    else:\n        c_sigma_factor = sigma_factor\n\n    if not isinstance(c_sigma_factor, torch.Tensor):\n        c_sigma_factor = c_sigma_factor * torch.ones(4)\n\n    perturb_factor = torch.sqrt(box[2] * box[3]) * c_sigma_factor\n\n    # multiple tries to ensure that the perturbed box has iou > min_iou with the input box\n    for i_ in range(100):\n        c_x = box[0] + 0.5 * box[2]\n        c_y = box[1] + 0.5 * box[3]\n        c_x_per = random.gauss(c_x, perturb_factor[0])\n        c_y_per = random.gauss(c_y, perturb_factor[1])\n\n        w_per = random.gauss(box[2], perturb_factor[2])\n        h_per = random.gauss(box[3], perturb_factor[3])\n\n        if w_per <= 1:\n            w_per = box[2] * rand_uniform(0.15, 0.5)\n\n        if h_per <= 1:\n            h_per = box[3] * rand_uniform(0.15, 0.5)\n\n        box_per = torch.Tensor([c_x_per - 0.5 * w_per, c_y_per - 0.5 * h_per, w_per, h_per]).round()\n\n        if box_per[2] <= 1:\n            box_per[2] = box[2] * rand_uniform(0.15, 0.5)\n\n        if box_per[3] <= 1:\n            box_per[3] = box[3] * rand_uniform(0.15, 0.5)\n\n        box_iou = iou(box.view(1, 4), box_per.view(1, 4))\n\n        # if there is sufficient overlap, return\n        if box_iou > min_iou:\n            return box_per, box_iou\n\n        # else reduce the perturb factor\n        perturb_factor *= 0.9\n\n    return box_per, box_iou\n\n\ndef gauss_1d(sz, sigma, center, end_pad=0, density=False):\n    k = torch.arange(-(sz - 1) / 2, (sz + 1) / 2 + end_pad).reshape(1, -1)\n    gauss = torch.exp(-1.0 / (2 * sigma ** 2) * (k - center.reshape(-1, 1)) ** 2)\n    if density:\n        gauss /= math.sqrt(2 * math.pi) * sigma\n    return gauss\n\n\ndef gauss_2d(sz, sigma, center, end_pad=(0, 0), density=False):\n    if isinstance(sigma, (float, int)):\n        sigma = (sigma, sigma)\n    return gauss_1d(sz[0].item(), sigma[0], center[:, 0], end_pad[0], density).reshape(center.shape[0], 1, -1) * \\\n           gauss_1d(sz[1].item(), sigma[1], center[:, 1], end_pad[1], density).reshape(center.shape[0], -1, 1)\n\n\ndef gaussian_label_function(target_bb, sigma_factor, kernel_sz, feat_sz, image_sz, end_pad_if_even=True, density=False,\n                            uni_bias=0):\n    \"\"\"Construct Gaussian label function.\"\"\"\n\n    if isinstance(kernel_sz, (float, int)):\n        kernel_sz = (kernel_sz, kernel_sz)\n    if isinstance(feat_sz, (float, int)):\n        feat_sz = (feat_sz, feat_sz)\n    if isinstance(image_sz, (float, int)):\n        image_sz = (image_sz, image_sz)\n\n    image_sz = torch.Tensor(image_sz)\n    feat_sz = torch.Tensor(feat_sz)\n\n    target_center = target_bb[:, 0:2] + 0.5 * target_bb[:, 2:4]\n    target_center_norm = (target_center - image_sz / 2) / image_sz\n\n    center = feat_sz * target_center_norm + 0.5 * \\\n             torch.Tensor([(kernel_sz[0] + 1) % 2, (kernel_sz[1] + 1) % 2])\n\n    sigma = sigma_factor * feat_sz.prod().sqrt().item()\n\n    if end_pad_if_even:\n        end_pad = (int(kernel_sz[0] % 2 == 0), int(kernel_sz[1] % 2 == 0))\n    else:\n        end_pad = (0, 0)\n\n    gauss_label = gauss_2d(feat_sz, sigma, 
center, end_pad, density=density)\n    if density:\n        sz = (feat_sz + torch.Tensor(end_pad)).prod()\n        label = (1.0 - uni_bias) * gauss_label + uni_bias / sz\n    else:\n        label = gauss_label + uni_bias\n    return label\n\n\ndef gauss_density_centered(x, std):\n    \"\"\"Evaluate the probability density of a Gaussian centered at zero.\n    args:\n        x - Samples.\n        std - List of standard deviations\n    \"\"\"\n    return torch.exp(-0.5 * (x / std) ** 2) / (math.sqrt(2 * math.pi) * std)\n\n\ndef gmm_density_centered(x, std):\n    \"\"\"Evaluate the probability density of a GMM centered at zero.\n    args:\n        x - Samples. Assumes dim=-1 is the component dimension and dim=-2 is feature dimension. Rest are sample dimension.\n        std - Tensor of standard deviations\n    \"\"\"\n    if x.dim() == std.dim() - 1:\n        x = x.unsqueeze(-1)\n    elif not (x.dim() == std.dim() and x.shape[-1] == 1):\n        raise ValueError('Last dimension must be the gmm stds.')\n    return gauss_density_centered(x, std).prod(-2).mean(-1)\n\n\ndef sample_gmm_centered(std, num_samples=1):\n    \"\"\"Sample from a GMM distribution centered at zero:\n    args:\n        std - Tensor of standard deviations\n        num_samples - number of samples\n    \"\"\"\n    num_components = std.shape[-1]\n    num_dims = std.numel() // num_components\n\n    std = std.view(1, num_dims, num_components)\n\n    # Sample component ids\n    k = torch.randint(num_components, (num_samples,), dtype=torch.int64)\n    std_samp = std[0, :, k].t()\n\n    # Sample\n    x_centered = std_samp * torch.randn(num_samples, num_dims)\n    prob_dens = gmm_density_centered(x_centered, std)\n\n    return x_centered, prob_dens\n\n\ndef sample_gmm(mean, std, num_samples=1):\n    \"\"\"Sample from a GMM distribution:\n    args:\n        mean - a single mean vector\n        std - Tensor of standard deviations\n        num_samples - number of samples\n    \"\"\"\n    num_dims = mean.numel()\n    num_components = std.shape[-1]\n\n    mean = mean.view(1, num_dims)\n    std = std.view(1, -1, num_components)\n\n    # Sample component ids\n    k = torch.randint(num_components, (num_samples,), dtype=torch.int64)\n    std_samp = std[0, :, k].t()\n\n    # Sample\n    x_centered = std_samp * torch.randn(num_samples, num_dims)\n    x = x_centered + mean\n    prob_dens = gmm_density_centered(x_centered, std)\n\n    return x, prob_dens\n\n\ndef sample_box_gmm(mean_box, proposal_sigma, gt_sigma=None, num_samples=1, add_mean_box=False):\n    \"\"\"Sample boxes from a Gaussian mixture model.\n    args:\n        mean_box - Center (or mean) bounding box\n        proposal_sigma - List of standard deviations for each Gaussian\n        gt_sigma - Standard deviation of the ground truth distribution\n        num_samples - Number of sampled boxes\n        add_mean_box - Also add mean box as first element\n\n    returns:\n        proposals, proposal density and ground truth density for all samples\n    \"\"\"\n    center_std = torch.Tensor([s[0] for s in proposal_sigma])\n    sz_std = torch.Tensor([s[1] for s in proposal_sigma])\n    std = torch.stack([center_std, center_std, sz_std, sz_std])\n\n    mean_box = mean_box.view(1, 4)\n    sz_norm = mean_box[:, 2:].clone()\n\n    # Sample boxes\n    proposals_rel_centered, proposal_density = sample_gmm_centered(std, num_samples)\n\n    # Add mean and map back\n    mean_box_rel = rect_to_rel(mean_box, sz_norm)\n    proposals_rel = proposals_rel_centered + mean_box_rel\n    proposals = 
rel_to_rect(proposals_rel, sz_norm)\n\n    if gt_sigma is None or gt_sigma[0] == 0 and gt_sigma[1] == 0:\n        gt_density = torch.zeros_like(proposal_density)\n    else:\n        std_gt = torch.Tensor([gt_sigma[0], gt_sigma[0], gt_sigma[1], gt_sigma[1]]).view(1, 4)\n        gt_density = gauss_density_centered(proposals_rel_centered, std_gt).prod(-1)\n\n    if add_mean_box:\n        proposals = torch.cat((mean_box, proposals))\n        proposal_density = torch.cat((torch.Tensor([-1]), proposal_density))\n        gt_density = torch.cat((torch.Tensor([1]), gt_density))\n\n    return proposals, proposal_density, gt_density"
  },
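A small numeric sanity check for the utilities above (a sketch; the import path is assumed, and the values are easy to verify by hand). Boxes are in [x, y, w, h] format:

import torch
import ltr.data.processing_utils as prutils  # assumed import path

ref = torch.Tensor([[10., 10., 20., 20.]])    # reference box, shape (1, 4)
props = torch.Tensor([[10., 10., 20., 20.],   # identical box -> IoU 1.0
                      [20., 20., 20., 20.]])  # 100 px intersection / 700 px union -> 1/7
print(prutils.iou(ref, props))                # tensor([1.0000, 0.1429])

# perturb_box retries up to 100 times, shrinking the noise each round, until the
# jittered box overlaps the input by more than min_iou; the returned IoU is what
# ATOM-style processing later maps to [-1, 1] as 'proposal_iou'.
box = torch.Tensor([10., 10., 20., 20.])
box_per, box_iou = prutils.perturb_box(box, min_iou=0.5, sigma_factor=0.1)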
  {
    "path": "artrackv2_mindspore/external/AR/ltr/data/processing_utils_SE.py",
    "content": "import torch\nimport math\nimport cv2 as cv\nimport random\n\nimport numpy as np\n\ndef stack_tensors(x):\n    if isinstance(x, list) and isinstance(x[0], torch.Tensor):\n        return torch.stack(x)\n    return x\n\n\n'''Added on 2019.12.23'''\ndef sample_target_SE(im, target_bb, search_area_factor, output_sz=None, mode=cv.BORDER_REPLICATE):\n    \"\"\" Extracts a crop centered at target_bb box, of size search_area_factor times target_bb(Both height and width)\n\n    args:\n        im - cv image\n        target_bb - target box [x, y, w, h]\n        search_area_factor - Ratio of crop size to target size\n        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.\n\n    returns:\n        cv image - extracted crop\n        float - the factor by which the crop has been resized to make the crop size equal output_size\n    \"\"\"\n\n    x, y, w, h = target_bb.tolist()\n\n    # Crop image\n    ws = math.ceil(search_area_factor * w)\n    hs = math.ceil(search_area_factor * h)\n\n    if ws < 1 or hs < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = round(x + 0.5*w - ws*0.5)\n    x2 = x1 + ws\n\n    y1 = round(y + 0.5 * h - hs * 0.5)\n    y2 = y1 + hs\n\n    x1_pad = max(0, -x1)\n    x2_pad = max(x2-im.shape[1]+1, 0)\n\n    y1_pad = max(0, -y1)\n    y2_pad = max(y2-im.shape[0]+1, 0)\n\n    # Crop target\n    im_crop = im[y1+y1_pad:y2-y2_pad, x1+x1_pad:x2-x2_pad, :]\n\n    # Pad\n    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, mode)\n\n    if output_sz is not None:\n        w_rsz_f = output_sz / ws\n        h_rsz_f = output_sz / hs\n        im_crop_padded_rsz = cv.resize(im_crop_padded, (output_sz, output_sz))\n        if len(im_crop_padded_rsz.shape)==2:\n            im_crop_padded_rsz = im_crop_padded_rsz[...,np.newaxis]\n        return im_crop_padded_rsz, h_rsz_f, w_rsz_f\n    else:\n        return im_crop_padded, 1.0, 1.0\n'''把mask映射到原图上'''\ndef map_mask_back(im, target_bb, search_area_factor, mask, mode=cv.BORDER_REPLICATE):\n    \"\"\" Extracts a crop centered at target_bb box, of size search_area_factor times target_bb(Both height and width)\n\n    args:\n        im - cv image\n        target_bb - target box [x, y, w, h]\n        search_area_factor - Ratio of crop size to target size\n        output_sz - (float) Size to which the extracted crop is resized (always square). 
\n\n    returns:\n        np.array - mask of shape (H, W), i.e. the size of the original image\n    \"\"\"\n    H,W = (im.shape[0],im.shape[1])\n    base = np.zeros((H,W))\n    x, y, w, h = target_bb.tolist()\n\n    # Crop image\n    ws = math.ceil(search_area_factor * w)\n    hs = math.ceil(search_area_factor * h)\n\n    if ws < 1 or hs < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = round(x + 0.5*w - ws*0.5)\n    x2 = x1 + ws\n\n    y1 = round(y + 0.5 * h - hs * 0.5)\n    y2 = y1 + hs\n\n    x1_pad = max(0, -x1)\n    x2_pad = max(x2-im.shape[1]+1, 0)\n\n    y1_pad = max(0, -y1)\n    y2_pad = max(y2-im.shape[0]+1, 0)\n\n    '''pad base'''\n    base_padded = cv.copyMakeBorder(base, y1_pad, y2_pad, x1_pad, x2_pad, mode)\n    '''Resize mask'''\n    mask_rsz = cv.resize(mask,(ws,hs))\n    '''fill region with mask'''\n    base_padded[y1+y1_pad:y2+y1_pad, x1+x1_pad:x2+x1_pad] = mask_rsz.copy()\n    '''crop base_padded to get final mask'''\n    final_mask = base_padded[y1_pad:y1_pad+H,x1_pad:x1_pad+W]\n    assert (final_mask.shape == (H,W))\n    return final_mask\n\n'''Added on 2019.12.23'''\ndef transform_image_to_crop_SE(box_in: torch.Tensor, box_extract: torch.Tensor, resize_factor_h: float, resize_factor_w: float,\n                            crop_sz: torch.Tensor) -> torch.Tensor:\n    \"\"\" Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image\n    args:\n        box_in - the box for which the co-ordinates are to be transformed\n        box_extract - the box about which the image crop has been extracted.\n        resize_factor_h, resize_factor_w - the ratios between the original image scale and the scale of the image crop, per axis\n        crop_sz - size of the cropped image\n\n    returns:\n        torch.Tensor - transformed co-ordinates of box_in\n    \"\"\"\n    box_extract_center = box_extract[0:2] + 0.5*box_extract[2:4]\n\n    box_in_center = box_in[0:2] + 0.5*box_in[2:4]\n\n    box_out_xc = (crop_sz[0] -1)/2 + (box_in_center[0] - box_extract_center[0])*resize_factor_w\n    box_out_yc = (crop_sz[0] -1)/2 + (box_in_center[1] - box_extract_center[1])*resize_factor_h\n    box_out_w = box_in[2] * resize_factor_w\n    box_out_h = box_in[3] * resize_factor_h\n\n    '''2019.12.28 Clamp the transformed box so that (x1, y1) cannot fall below 0 and (x2, y2) cannot exceed the crop size (e.g. 256)'''\n    max_sz = crop_sz[0].item()\n    box_out_x1 = torch.clamp(box_out_xc - 0.5 * box_out_w,0,max_sz)\n    box_out_y1 = torch.clamp(box_out_yc - 0.5 * box_out_h,0,max_sz)\n    box_out_x2 = torch.clamp(box_out_xc + 0.5 * box_out_w,0,max_sz)\n    box_out_y2 = torch.clamp(box_out_yc + 0.5 * box_out_h,0,max_sz)\n    box_out_w_new = box_out_x2 - box_out_x1\n    box_out_h_new = box_out_y2 - box_out_y1\n    box_out = torch.stack((box_out_x1, box_out_y1, box_out_w_new, box_out_h_new))\n    return box_out\n\ndef centered_crop(frames, anno, area_factor, output_sz):\n    # NOTE: relies on sample_target and transform_image_to_crop, which are\n    # defined in the non-SE processing_utils module and must be in scope here.\n    crops_resize_factors = [sample_target(f, a, area_factor, output_sz)\n                            for f, a in zip(frames, anno)]\n\n    frames_crop, resize_factors = zip(*crops_resize_factors)\n\n    crop_sz = torch.Tensor([output_sz, output_sz])\n\n    # find the bb location in the crop\n    anno_crop = [transform_image_to_crop(a, a, rf, crop_sz)\n                 for a, rf in zip(anno, resize_factors)]\n\n    return frames_crop, anno_crop\n\n'''Added by Bin Yan 2019.12.23, \nchanged on 2020.1.4 (added a new arg: \"get_bbox_coord\")'''\ndef jittered_center_crop_SE(frames, 
box_extract, box_gt, search_area_factor, output_sz, get_bbox_coord=True, mode=cv.BORDER_REPLICATE):\n    \"\"\"\n    Crop a patch centered at box_extract. The height and width of the cropped region are search_area_factor times those of box_extract.\n    The extracted crops are then resized to output_sz. Further, the co-ordinates of the box box_gt are transformed to the image crop co-ordinates\n    args:\n        frames - list of frames\n        box_extract - list of boxes of same length as frames. The crops are extracted using box_extract\n        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from\n                    image co-ordinates to the crop co-ordinates\n        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area\n        output_sz - The size to which the extracted crops are resized\n\n    returns:\n        list - list of image crops\n        list - box_gt location in the crop co-ordinates\n    \"\"\"\n    '''call function \"sample_target_SE\" and function \"transform_image_to_crop_SE\"'''\n    '''get cropped patch (fixed size)'''\n    crops_resize_factors = [sample_target_SE(f, a, search_area_factor, output_sz, mode=mode)\n                            for f, a in zip(frames, box_extract)]\n\n    frames_crop, resize_factors_h, resize_factors_w = zip(*crops_resize_factors)\n    if get_bbox_coord:\n        crop_sz = torch.Tensor([output_sz, output_sz])\n\n        # find the bb location in the crop\n        '''get GT's coordinate on the cropped patch'''\n        box_crop = [transform_image_to_crop_SE(a_gt, a_ex, h_rsf, w_rsf, crop_sz)\n                    for a_gt, a_ex, h_rsf, w_rsf in zip(box_gt, box_extract, resize_factors_h, resize_factors_w)]\n\n        return frames_crop, box_crop\n    else:\n        return frames_crop\n\ndef sample_target_nopad(im, target_bb, search_area_factor, output_sz):\n    \"\"\" Extracts a crop centered at target_bb box, of area search_area_factor^2 times the target box area. If the crop area contains regions\n    outside the image, it is shifted so that it is inside the image. Further, if the crop area exceeds the image\n    size, a smaller crop which fits the image is returned instead.\n\n    args:\n        im - cv image\n        target_bb - target box [x, y, w, h]\n        search_area_factor - Ratio of crop size to target size\n        output_sz - (float) Size to which the extracted crop is resized (always square).
\n\n    returns:\n        cv image - extracted crop\n        torch.Tensor - a bounding box denoting the cropped region in the image.\n    \"\"\"\n\n    if isinstance(output_sz, (float, int)):\n        output_sz = (output_sz, output_sz)\n    output_sz = torch.Tensor(output_sz)\n\n    im_h = im.shape[0]\n    im_w = im.shape[1]\n\n    bbx, bby, bbw, bbh = target_bb.tolist()\n\n    # Crop image\n    crop_sz_x, crop_sz_y = (output_sz * (target_bb[2:].prod()/output_sz.prod()).sqrt() * search_area_factor).ceil()\n\n    # Calculate rescaling factor if outside the image\n    rescale_factor = max(1, crop_sz_x/im_w, crop_sz_y/im_h)\n    crop_sz_x = math.floor(crop_sz_x / rescale_factor)\n    crop_sz_y = math.floor(crop_sz_y / rescale_factor)\n\n    if crop_sz_x < 1 or crop_sz_y < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = round(bbx + 0.5*bbw - crop_sz_x*0.5)\n    x2 = x1 + crop_sz_x\n\n    y1 = round(bby + 0.5*bbh - crop_sz_y*0.5)\n    y2 = y1 + crop_sz_y\n\n    # Move box inside image\n    shift_x = max(0, -x1) + min(0, im_w - x2)\n    x1 += shift_x\n    x2 += shift_x\n\n    shift_y = max(0, -y1) + min(0, im_h - y2)\n    y1 += shift_y\n    y2 += shift_y\n\n    # Crop and resize image\n    im_crop = im[y1:y2, x1:x2, :]\n    im_out = cv.resize(im_crop, tuple(output_sz.long().tolist()))\n\n    crop_box = torch.Tensor([x1, y1, x2-x1, y2-y1])\n    return im_out, crop_box\n\n\ndef transform_box_to_crop(box: torch.Tensor, crop_box: torch.Tensor, crop_sz: torch.Tensor) -> torch.Tensor:\n    \"\"\" Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image\n    args:\n        box - the box for which the co-ordinates are to be transformed\n        crop_box - bounding box defining the crop in the original image\n        crop_sz - size of the cropped image\n\n    returns:\n        torch.Tensor - transformed co-ordinates of box\n    \"\"\"\n\n    box_out = box.clone()\n    box_out[:2] -= crop_box[:2]\n\n    scale_factor = crop_sz / crop_box[2:]\n\n    box_out[:2] *= scale_factor\n    box_out[2:] *= scale_factor\n    return box_out\n\n\ndef jittered_center_crop_nopad(frames, box_extract, box_gt, search_area_factor, output_sz):\n    \"\"\" For each frame in frames, extracts a square crop centered at box_extract, of area search_area_factor^2\n    times box_extract area. If the crop area contains regions outside the image, it is shifted / shrunk so that it\n    completely fits inside the image. The extracted crops are then resized to output_sz. Further, the co-ordinates of\n    the box box_gt are transformed to the image crop co-ordinates\n\n    args:\n        frames - list of frames\n        box_extract - list of boxes of same length as frames. The crops are extracted using box_extract\n        box_gt - list of boxes of same length as frames. 
The co-ordinates of these boxes are transformed from\n                    image co-ordinates to the crop co-ordinates\n        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area\n        output_sz - The size to which the extracted crops are resized\n\n    returns:\n        list - list of image crops\n        list - box_gt location in the crop co-ordinates\n        \"\"\"\n\n    if isinstance(output_sz, (float, int)):\n        output_sz = (output_sz, output_sz)\n\n    frame_crops_boxes = [sample_target_nopad(f, a, search_area_factor, output_sz)\n                            for f, a in zip(frames, box_extract)]\n\n    frames_crop, crop_boxes = zip(*frame_crops_boxes)\n\n    crop_sz = torch.Tensor(output_sz)\n\n    # find the bb location in the crop\n    box_crop = [transform_box_to_crop(bb_gt, crop_bb, crop_sz)\n                for bb_gt, crop_bb in zip(box_gt, crop_boxes)]\n\n    return frames_crop, box_crop\n\n\ndef iou(reference, proposals):\n    \"\"\"Compute the IoU between a reference box with multiple proposal boxes.\n\n    args:\n        reference - Tensor of shape (1, 4).\n        proposals - Tensor of shape (num_proposals, 4)\n\n    returns:\n        torch.Tensor - Tensor of shape (num_proposals,) containing IoU of reference box with each proposal box.\n    \"\"\"\n\n    # Intersection box\n    tl = torch.max(reference[:,:2], proposals[:,:2])\n    br = torch.min(reference[:,:2] + reference[:,2:], proposals[:,:2] + proposals[:,2:])\n    sz = (br - tl).clamp(0)\n\n    # Area\n    intersection = sz.prod(dim=1)\n    union = reference[:,2:].prod(dim=1) + proposals[:,2:].prod(dim=1) - intersection\n\n    return intersection / union\n\n\ndef rand_uniform(a, b, shape=1):\n    \"\"\" sample numbers uniformly between a and b.\n    args:\n        a - lower bound\n        b - upper bound\n        shape - shape of the output tensor\n\n    returns:\n        torch.Tensor - tensor of shape=shape\n    \"\"\"\n    return (b - a) * torch.rand(shape) + a\n\n\ndef perturb_box(box, min_iou=0.5, sigma_factor=0.1):\n    \"\"\" Perturb the input box by adding gaussian noise to the co-ordinates\n\n     args:\n        box - input box\n        min_iou - minimum IoU overlap between input box and the perturbed box\n        sigma_factor - amount of perturbation, relative to the box size. Can be either a single element, or a list of\n                        sigma_factors, in which case one of them will be uniformly sampled. 
Further, each of the\n                        sigma_factor element can be either a float, or a tensor\n                        of shape (4,) specifying the sigma_factor per co-ordinate\n\n    returns:\n        torch.Tensor - the perturbed box\n    \"\"\"\n\n    if isinstance(sigma_factor, list):\n        # If list, sample one sigma_factor as current sigma factor\n        c_sigma_factor = random.choice(sigma_factor)\n    else:\n        c_sigma_factor = sigma_factor\n\n    if not isinstance(c_sigma_factor, torch.Tensor):\n        c_sigma_factor = c_sigma_factor * torch.ones(4)\n\n    perturb_factor = torch.sqrt(box[2]*box[3])*c_sigma_factor\n\n    # multiple tries to ensure that the perturbed box has iou > min_iou with the input box\n    for i_ in range(100):\n        c_x = box[0] + 0.5*box[2]\n        c_y = box[1] + 0.5 * box[3]\n        c_x_per = random.gauss(c_x, perturb_factor[0])\n        c_y_per = random.gauss(c_y, perturb_factor[1])\n\n        w_per = random.gauss(box[2], perturb_factor[2])\n        h_per = random.gauss(box[3], perturb_factor[3])\n\n        if w_per <= 1:\n            w_per = box[2]*rand_uniform(0.15, 0.5)\n\n        if h_per <= 1:\n            h_per = box[3]*rand_uniform(0.15, 0.5)\n\n        box_per = torch.Tensor([c_x_per - 0.5*w_per, c_y_per - 0.5*h_per, w_per, h_per]).round()\n\n        if box_per[2] <= 1:\n            box_per[2] = box[2]*rand_uniform(0.15, 0.5)\n\n        if box_per[3] <= 1:\n            box_per[3] = box[3]*rand_uniform(0.15, 0.5)\n\n        box_iou = iou(box.view(1, 4), box_per.view(1, 4))\n\n        # if there is sufficient overlap, return\n        if box_iou > min_iou:\n            return box_per, box_iou\n\n        # else reduce the perturb factor\n        perturb_factor *= 0.9\n\n    return box_per, box_iou\n\n\ndef gauss_1d(sz, sigma, center, end_pad=0):\n    k = torch.arange(-(sz-1)/2, (sz+1)/2 + end_pad).reshape(1, -1)\n    return torch.exp(-1.0/(2*sigma**2) * (k - center.reshape(-1, 1))**2)\n\n\ndef gauss_2d(sz, sigma, center, end_pad=(0, 0)):\n    if isinstance(sigma, (float, int)):\n        sigma = (sigma, sigma)\n    return gauss_1d(sz[0].item(), sigma[0], center[:, 0], end_pad[0]).reshape(center.shape[0], 1, -1) * \\\n           gauss_1d(sz[1].item(), sigma[1], center[:, 1], end_pad[1]).reshape(center.shape[0], -1, 1)\n\n\ndef gaussian_label_function(target_bb, sigma_factor, kernel_sz, feat_sz, image_sz, end_pad_if_even=True):\n    \"\"\"Construct Gaussian label function.\"\"\"\n\n    if isinstance(kernel_sz, (float, int)):\n        kernel_sz = (kernel_sz, kernel_sz)\n    if isinstance(feat_sz, (float, int)):\n        feat_sz = (feat_sz, feat_sz)\n    if isinstance(image_sz, (float, int)):\n        image_sz = (image_sz, image_sz)\n\n    image_sz = torch.Tensor(image_sz)\n    feat_sz = torch.Tensor(feat_sz)\n\n    target_center = target_bb[:, 0:2] + 0.5 * target_bb[:, 2:4]\n    target_center_norm = (target_center - image_sz / 2) / image_sz\n\n    center = feat_sz * target_center_norm + 0.5 * \\\n             torch.Tensor([(kernel_sz[0] + 1) % 2, (kernel_sz[1] + 1) % 2])\n\n    sigma = sigma_factor * feat_sz.prod().sqrt().item()\n\n    if end_pad_if_even:\n        end_pad = (int(kernel_sz[0]%2 == 0), int(kernel_sz[1]%2 == 0))\n    else:\n        end_pad = (0, 0)\n\n    gauss_label = gauss_2d(feat_sz, sigma, center, end_pad)\n    return gauss_label\n\n"
  },
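  {
    "path": "artrackv2_mindspore/external/AR/ltr/data/_sketch_processing_utils_SE_usage.py",
    "content": "# Hedged usage sketch for processing_utils_SE above; this file is an\n# illustration and is not part of the original repository. It crops a search\n# region around a target box and maps the ground-truth box into crop\n# coordinates. The image and boxes are synthetic.\nimport numpy as np\nimport torch\n\nfrom ltr.data.processing_utils_SE import jittered_center_crop_SE\n\nim = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)\nbox = torch.Tensor([100.0, 80.0, 60.0, 40.0])  # [x, y, w, h]\n\n# Crop a region search_area_factor times the box size and resize to 256x256.\ncrops, boxes_in_crop = jittered_center_crop_SE(\n    [im], [box], [box], search_area_factor=2.0, output_sz=256)\n\nprint(crops[0].shape)    # (256, 256, 3)\nprint(boxes_in_crop[0])  # the ground-truth box in crop coordinates\n"
  },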
  {
    "path": "artrackv2_mindspore/external/AR/ltr/data/sampler.py",
    "content": "import random\nimport torch.utils.data\nfrom pytracking import TensorDict\n\n\ndef no_processing(data):\n    return data\n\n\nclass TrackingSampler(torch.utils.data.Dataset):\n    \"\"\" Class responsible for sampling frames from training sequences to form batches. Each training sample is a\n    tuple consisting of i) a set of train frames, used to learn the DiMP classification model and obtain the\n    modulation vector for IoU-Net, and ii) a set of test frames on which target classification loss for the predicted\n    DiMP model, and the IoU prediction loss for the IoU-Net is calculated.\n\n    The sampling is done in the following ways. First a dataset is selected at random. Next, a sequence is selected\n    from that dataset. A base frame is then sampled randomly from the sequence. Next, a set of 'train frames' and\n    'test frames' are sampled from the sequence from the range [base_frame_id - max_gap, base_frame_id]  and\n    (base_frame_id, base_frame_id + max_gap] respectively. Only the frames in which the target is visible are sampled.\n    If enough visible frames are not found, the 'max_gap' is increased gradually till enough frames are found.\n\n    The sampled frames are then passed through the input 'processing' function for the necessary processing-\n    \"\"\"\n\n    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n                 num_test_frames, num_train_frames=1, processing=no_processing, frame_sample_mode='causal'):\n        \"\"\"\n        args:\n            datasets - List of datasets to be used for training\n            p_datasets - List containing the probabilities by which each dataset will be sampled\n            samples_per_epoch - Number of training samples per epoch\n            max_gap - Maximum gap, in frame numbers, between the train frames and the test frames.\n            num_test_frames - Number of test frames to sample.\n            num_train_frames - Number of train frames to sample.\n            processing - An instance of Processing class which performs the necessary processing of the data.\n            frame_sample_mode - Either 'causal' or 'interval'. If 'causal', then the test frames are sampled in a causally,\n                                otherwise randomly within the interval.\n        \"\"\"\n        self.datasets = datasets\n\n        # If p not provided, sample uniformly from all videos\n        if p_datasets is None:\n            p_datasets = [len(d) for d in self.datasets]\n\n        # Normalize\n        p_total = sum(p_datasets)\n        self.p_datasets = [x / p_total for x in p_datasets]\n\n        self.samples_per_epoch = samples_per_epoch\n        self.max_gap = max_gap\n        self.num_test_frames = num_test_frames\n        self.num_train_frames = num_train_frames\n        self.processing = processing\n        self.frame_sample_mode = frame_sample_mode\n\n    def __len__(self):\n        return self.samples_per_epoch\n\n    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None):\n        \"\"\" Samples num_ids frames between min_id and max_id for which target is visible\n\n        args:\n            visible - 1d Tensor indicating whether target is visible for each frame\n            num_ids - number of frames to be samples\n            min_id - Minimum allowed frame number\n            max_id - Maximum allowed frame number\n\n        returns:\n            list - List of sampled frame numbers. 
None if not sufficient visible frames could be found.\n        \"\"\"\n        if num_ids == 0:\n            return []\n        if min_id is None or min_id < 0:\n            min_id = 0\n        if max_id is None or max_id > len(visible):\n            max_id = len(visible)\n\n        valid_ids = [i for i in range(min_id, max_id) if visible[i]]\n\n        # No visible ids\n        if len(valid_ids) == 0:\n            return None\n\n        return random.choices(valid_ids, k=num_ids)\n\n    def __getitem__(self, index):\n        \"\"\"\n        args:\n            index (int): Index (Ignored since we sample randomly)\n\n        returns:\n            TensorDict - dict containing all the data blocks\n        \"\"\"\n\n        # Select a dataset\n        dataset = random.choices(self.datasets, self.p_datasets)[0]\n        is_video_dataset = dataset.is_video_sequence()\n\n        # Sample a sequence with enough visible frames\n        enough_visible_frames = False\n        while not enough_visible_frames:\n            # Sample a sequence\n            seq_id = random.randint(0, dataset.get_num_sequences() - 1)\n\n            # Sample frames\n            seq_info_dict = dataset.get_sequence_info(seq_id)\n            visible = seq_info_dict['visible']\n\n            enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * (\n                    self.num_test_frames + self.num_train_frames) and len(visible) >= 20\n\n            enough_visible_frames = enough_visible_frames or not is_video_dataset\n\n        if is_video_dataset:\n            train_frame_ids = None\n            test_frame_ids = None\n            gap_increase = 0\n\n            if self.frame_sample_mode == 'interval':\n                # Sample frame numbers within interval defined by the first frame\n                while test_frame_ids is None:\n                    base_frame_id = self._sample_visible_ids(visible, num_ids=1)\n                    extra_train_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_train_frames - 1,\n                                                                     min_id=base_frame_id[\n                                                                                0] - self.max_gap - gap_increase,\n                                                                     max_id=base_frame_id[\n                                                                                0] + self.max_gap + gap_increase)\n                    if extra_train_frame_ids is None:\n                        gap_increase += 5\n                        continue\n                    train_frame_ids = base_frame_id + extra_train_frame_ids\n                    test_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_test_frames,\n                                                              min_id=train_frame_ids[0] - self.max_gap - gap_increase,\n                                                              max_id=train_frame_ids[0] + self.max_gap + gap_increase)\n                    gap_increase += 5  # Increase gap until a frame is found\n\n            elif self.frame_sample_mode == 'causal':\n                # Sample test and train frames in a causal manner, i.e. 
test_frame_ids > train_frame_ids\n                while test_frame_ids is None:\n                    base_frame_id = self._sample_visible_ids(visible, num_ids=1, min_id=self.num_train_frames - 1,\n                                                             max_id=len(visible) - self.num_test_frames)\n                    prev_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_train_frames - 1,\n                                                              min_id=base_frame_id[0] - self.max_gap - gap_increase,\n                                                              max_id=base_frame_id[0])\n                    if prev_frame_ids is None:\n                        gap_increase += 5\n                        continue\n                    train_frame_ids = base_frame_id + prev_frame_ids\n                    test_frame_ids = self._sample_visible_ids(visible, min_id=train_frame_ids[0] + 1,\n                                                              max_id=train_frame_ids[0] + self.max_gap + gap_increase,\n                                                              num_ids=self.num_test_frames)\n                    # Increase gap until a frame is found\n                    gap_increase += 5\n        else:\n            # In case of image dataset, just repeat the image to generate synthetic video\n            train_frame_ids = [1] * self.num_train_frames\n            test_frame_ids = [1] * self.num_test_frames\n\n        train_frames, train_anno, meta_obj_train = dataset.get_frames(seq_id, train_frame_ids, seq_info_dict)\n        test_frames, test_anno, meta_obj_test = dataset.get_frames(seq_id, test_frame_ids, seq_info_dict)\n\n        data = TensorDict({'train_images': train_frames,\n                           'train_anno': train_anno['bbox'],\n                           'test_images': test_frames,\n                           'test_anno': test_anno['bbox'],\n                           'dataset': dataset.get_name(),\n                           'test_class': meta_obj_test.get('object_class_name')})\n\n        return self.processing(data)\n\n\nclass DiMPSampler(TrackingSampler):\n    \"\"\" See TrackingSampler.\"\"\"\n\n    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n                 num_test_frames, num_train_frames=1, processing=no_processing, frame_sample_mode='causal'):\n        super().__init__(datasets=datasets, p_datasets=p_datasets, samples_per_epoch=samples_per_epoch, max_gap=max_gap,\n                         num_test_frames=num_test_frames, num_train_frames=num_train_frames, processing=processing,\n                         frame_sample_mode=frame_sample_mode)\n\n\nclass ATOMSampler(TrackingSampler):\n    \"\"\" See TrackingSampler.\"\"\"\n\n    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n                 num_test_frames=1, num_train_frames=1, processing=no_processing, frame_sample_mode='interval'):\n        super().__init__(datasets=datasets, p_datasets=p_datasets, samples_per_epoch=samples_per_epoch, max_gap=max_gap,\n                         num_test_frames=num_test_frames, num_train_frames=num_train_frames, processing=processing,\n                         frame_sample_mode=frame_sample_mode)"
  },
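  {
    "path": "artrackv2_mindspore/external/AR/ltr/data/_sketch_sampler_usage.py",
    "content": "# Hedged wiring sketch for TrackingSampler above; this file is an illustration\n# and is not part of the original repository. It assumes the Lasot dataset\n# class accepts a 'split' argument (as in upstream ltr) and that its root path\n# is configured in the local environment settings.\nfrom ltr.data.sampler import TrackingSampler\nfrom ltr.dataset import Lasot\n\nlasot_train = Lasot(split='train')\n\n# Two train frames and one test frame per sample, drawn causally at most\n# 30 frames apart. In real training settings a 'processing' callable is also\n# passed so that raw frames become fixed-size, augmented tensors.\nsampler = TrackingSampler([lasot_train], p_datasets=[1.0],\n                          samples_per_epoch=1000, max_gap=30,\n                          num_test_frames=1, num_train_frames=2,\n                          frame_sample_mode='causal')\n\ndata = sampler[0]  # TensorDict with 'train_images', 'train_anno', 'test_images', ...\nprint(data['dataset'], len(data['train_images']))\n"
  },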
  {
    "path": "artrackv2_mindspore/external/AR/ltr/data/transforms.py",
    "content": "import random\nimport numpy as np\nimport math\nimport cv2 as cv\nimport torch\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as tvisf\n\n\nclass Transform:\n    \"\"\"A set of transformations, used for e.g. data augmentation.\n    Args of constructor:\n        transforms: An arbitrary number of transformations, derived from the TransformBase class.\n                    They are applied in the order they are given.\n\n    The Transform object can jointly transform images, bounding boxes and segmentation masks.\n    This is done by calling the object with the following key-word arguments (all are optional).\n\n    The following arguments are inputs to be transformed. They are either supplied as a single instance, or a list of instances.\n        image  -  Image\n        coords  -  2xN dimensional Tensor of 2D image coordinates [y, x]\n        bbox  -  Bounding box on the form [x, y, w, h]\n        mask  -  Segmentation mask with discrete classes\n\n    The following parameters can be supplied with calling the transform object:\n        joint [Bool]  -  If True then transform all images/coords/bbox/mask in the list jointly using the same transformation.\n                         Otherwise each tuple (images, coords, bbox, mask) will be transformed independently using\n                         different random rolls. Default: True.\n        new_roll [Bool]  -  If False, then no new random roll is performed, and the saved result from the previous roll\n                            is used instead. Default: True.\n\n    Check the DiMPProcessing class for examples.\n    \"\"\"\n\n    def __init__(self, *transforms):\n        if len(transforms) == 1 and isinstance(transforms[0], (list, tuple)):\n            transforms = transforms[0]\n        self.transforms = transforms\n        self._valid_inputs = ['image', 'coords', 'bbox', 'mask']\n        self._valid_args = ['joint', 'new_roll']\n        self._valid_all = self._valid_inputs + self._valid_args\n\n    def __call__(self, **inputs):\n        var_names = [k for k in inputs.keys() if k in self._valid_inputs]\n        for v in inputs.keys():\n            if v not in self._valid_all:\n                raise ValueError('Incorrect input \\\"{}\\\" to transform. 
Only supports inputs {} and arguments {}.'.format(v, self._valid_inputs, self._valid_args))\n\n        joint_mode = inputs.get('joint', True)\n        new_roll = inputs.get('new_roll', True)\n\n        if not joint_mode:\n            out = zip(*[self(**inp) for inp in self._split_inputs(inputs)])\n            return tuple(list(o) for o in out)\n\n        out = {k: v for k, v in inputs.items() if k in self._valid_inputs}\n\n        for t in self.transforms:\n            out = t(**out, joint=joint_mode, new_roll=new_roll)\n        if len(var_names) == 1:\n            return out[var_names[0]]\n        # Make sure order is correct\n        return tuple(out[v] for v in var_names)\n\n    def _split_inputs(self, inputs):\n        var_names = [k for k in inputs.keys() if k in self._valid_inputs]\n        split_inputs = [{k: v for k, v in zip(var_names, vals)} for vals in zip(*[inputs[vn] for vn in var_names])]\n        for arg_name, arg_val in filter(lambda it: it[0]!='joint' and it[0] in self._valid_args, inputs.items()):\n            if isinstance(arg_val, list):\n                for inp, av in zip(split_inputs, arg_val):\n                    inp[arg_name] = av\n            else:\n                for inp in split_inputs:\n                    inp[arg_name] = arg_val\n        return split_inputs\n\n    def __repr__(self):\n        format_string = self.__class__.__name__ + '('\n        for t in self.transforms:\n            format_string += '\\n'\n            format_string += '    {0}'.format(t)\n        format_string += '\\n)'\n        return format_string\n\n\nclass TransformBase:\n    \"\"\"Base class for transformation objects. See the Transform class for details.\"\"\"\n    def __init__(self):\n        self._valid_inputs = ['image', 'coords', 'bbox', 'mask']\n        self._valid_args = ['new_roll']\n        self._valid_all = self._valid_inputs + self._valid_args\n        self._rand_params = None\n\n    def __call__(self, **inputs):\n        # Split input\n        input_vars = {k: v for k, v in inputs.items() if k in self._valid_inputs}\n        input_args = {k: v for k, v in inputs.items() if k in self._valid_args}\n\n        # Roll random parameters for the transform\n        if input_args.get('new_roll', True):\n            rand_params = self.roll()\n            if rand_params is None:\n                rand_params = ()\n            elif not isinstance(rand_params, tuple):\n                rand_params = (rand_params,)\n            self._rand_params = rand_params\n\n        outputs = dict()\n        for var_name, var in input_vars.items():\n            if var is not None:\n                transform_func = getattr(self, 'transform_' + var_name)\n                if var_name in ['coords', 'bbox']:\n                    params = (self._get_image_size(input_vars),) + self._rand_params\n                else:\n                    params = self._rand_params\n                if isinstance(var, (list, tuple)):\n                    outputs[var_name] = [transform_func(x, *params) for x in var]\n                else:\n                    outputs[var_name] = transform_func(var, *params)\n        return outputs\n\n    def _get_image_size(self, inputs):\n        im = None\n        for var_name in ['image', 'mask']:\n            if inputs.get(var_name) is not None:\n                im = inputs[var_name]\n                break\n        if im is None:\n            return None\n        if isinstance(im, (list, tuple)):\n            im = im[0]\n        if isinstance(im, np.ndarray):\n            return 
im.shape[:2]\n        if torch.is_tensor(im):\n            return (im.shape[-2], im.shape[-1])\n        raise Exception('Unknown image type')\n\n    def roll(self):\n        return None\n\n    def transform_image(self, image, *rand_params):\n        \"\"\"Must be deterministic\"\"\"\n        return image\n\n    def transform_coords(self, coords, image_shape, *rand_params):\n        \"\"\"Must be deterministic\"\"\"\n        return coords\n\n    def transform_bbox(self, bbox, image_shape, *rand_params):\n        \"\"\"Assumes [x, y, w, h]\"\"\"\n        # Check if not overloaded\n        if self.transform_coords.__code__ == TransformBase.transform_coords.__code__:\n            return bbox\n\n        coord = bbox.clone().view(-1,2).t().flip(0)\n\n        x1 = coord[1, 0]\n        x2 = coord[1, 0] + coord[1, 1]\n\n        y1 = coord[0, 0]\n        y2 = coord[0, 0] + coord[0, 1]\n\n        coord_all = torch.tensor([[y1, y1, y2, y2], [x1, x2, x2, x1]])\n\n        coord_transf = self.transform_coords(coord_all, image_shape, *rand_params).flip(0)\n        tl = torch.min(coord_transf, dim=1)[0]\n        sz = torch.max(coord_transf, dim=1)[0] - tl\n        bbox_out = torch.cat((tl, sz), dim=-1).reshape(bbox.shape)\n        return bbox_out\n\n    def transform_mask(self, mask, *rand_params):\n        \"\"\"Must be deterministic\"\"\"\n        return mask\n\n\nclass ToTensor(TransformBase):\n    \"\"\"Convert to a Tensor\"\"\"\n\n    def transform_image(self, image):\n        # handle numpy array\n        if image.ndim == 2:\n            image = image[:, :, None]\n\n        image = torch.from_numpy(image.transpose((2, 0, 1)))\n        # backward compatibility\n        if isinstance(image, torch.ByteTensor):\n            return image.float().div(255)\n        else:\n            return image\n\n    def transform_mask(self, mask):\n        if isinstance(mask, np.ndarray):\n            return torch.from_numpy(mask)\n        return mask\n\n\nclass ToTensorAndJitter(TransformBase):\n    \"\"\"Convert to a Tensor and jitter brightness\"\"\"\n    def __init__(self, brightness_jitter=0.0, normalize=True):\n        super().__init__()\n        self.brightness_jitter = brightness_jitter\n        self.normalize = normalize\n\n    def roll(self):\n        return np.random.uniform(max(0, 1 - self.brightness_jitter), 1 + self.brightness_jitter)\n\n    def transform_image(self, image, brightness_factor):\n        # handle numpy array\n        image = torch.from_numpy(image.transpose((2, 0, 1)))\n\n        # backward compatibility\n        if self.normalize:\n            return image.float().mul(brightness_factor/255.0).clamp(0.0, 1.0)\n        else:\n            return image.float().mul(brightness_factor).clamp(0.0, 255.0)\n\n    def transform_mask(self, mask, brightness_factor):\n        if isinstance(mask, np.ndarray):\n            return torch.from_numpy(mask)\n        else:\n            return mask\n\n\nclass Normalize(TransformBase):\n    \"\"\"Normalize image\"\"\"\n    def __init__(self, mean, std, inplace=False):\n        super().__init__()\n        self.mean = mean\n        self.std = std\n        self.inplace = inplace\n\n    def transform_image(self, image):\n        return tvisf.normalize(image, self.mean, self.std, self.inplace)\n\n\nclass ToGrayscale(TransformBase):\n    \"\"\"Converts image to grayscale with probability\"\"\"\n    def __init__(self, probability = 0.5):\n        super().__init__()\n        self.probability = probability\n        self.color_weights = np.array([0.2989, 0.5870, 0.1140], dtype=np.float32)\n\n    def roll(self):\n        return random.random() < self.probability\n\n    def transform_image(self, image, do_grayscale):\n        if do_grayscale:\n            if torch.is_tensor(image):\n                raise NotImplementedError('Implement torch variant.')\n            img_gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY)\n            return np.stack([img_gray, img_gray, img_gray], axis=2)\n            # return np.repeat(np.sum(img * self.color_weights, axis=2, keepdims=True).astype(np.uint8), 3, axis=2)\n        return image\n\n\nclass ToBGR(TransformBase):\n    \"\"\"Converts image to BGR\"\"\"\n    def transform_image(self, image):\n        if torch.is_tensor(image):\n            raise NotImplementedError('Implement torch variant.')\n        img_bgr = cv.cvtColor(image, cv.COLOR_RGB2BGR)\n        return img_bgr\n\n\nclass RandomHorizontalFlip(TransformBase):\n    \"\"\"Horizontally flip image randomly with a probability p.\"\"\"\n    def __init__(self, probability = 0.5):\n        super().__init__()\n        self.probability = probability\n\n    def roll(self):\n        return random.random() < self.probability\n\n    def transform_image(self, image, do_flip):\n        if do_flip:\n            if torch.is_tensor(image):\n                return image.flip((2,))\n            return np.fliplr(image).copy()\n        return image\n\n    def transform_coords(self, coords, image_shape, do_flip):\n        if do_flip:\n            coords = coords.clone()\n            coords[1,:] = (image_shape[1] - 1) - coords[1,:]\n        return coords\n\n    def transform_mask(self, mask, do_flip):\n        if do_flip:\n            if torch.is_tensor(mask):\n                return mask.flip((-1,))\n            return np.fliplr(mask).copy()\n        return mask\n\n\nclass Blur(TransformBase):\n    \"\"\" Blur the image by applying a gaussian kernel with given sigma\"\"\"\n    def __init__(self, sigma):\n        super().__init__()\n        if isinstance(sigma, (float, int)):\n            sigma = (sigma, sigma)\n        self.sigma = sigma\n        self.filter_size = [math.ceil(2*s) for s in self.sigma]\n        x_coord = [torch.arange(-sz, sz+1, dtype=torch.float32) for sz in self.filter_size]\n        self.filter = [torch.exp(-(x**2)/(2*s**2)) for x, s in zip(x_coord, self.sigma)]\n        self.filter[0] = self.filter[0].view(1,1,-1,1) / self.filter[0].sum()\n        self.filter[1] = self.filter[1].view(1,1,1,-1) / self.filter[1].sum()\n\n    def transform_image(self, image):\n        if torch.is_tensor(image):\n            sz = image.shape[2:]\n            im1 = F.conv2d(image.view(-1, 1, sz[0], sz[1]), self.filter[0], padding=(self.filter_size[0], 0))\n            return F.conv2d(im1, self.filter[1], padding=(0,self.filter_size[1])).view(-1,sz[0],sz[1])\n        else:\n            raise NotImplementedError\n\n\nclass RandomBlur(TransformBase):\n    \"\"\" Blur the image, with a given probability, by applying a gaussian kernel with given sigma\"\"\"\n    def __init__(self, sigma, probability=0.1):\n        super().__init__()\n        self.probability = probability\n\n        if isinstance(sigma, (float, int)):\n            sigma = (sigma, sigma)\n        self.sigma = sigma\n        self.filter_size = [math.ceil(2*s) for s in self.sigma]\n        x_coord = [torch.arange(-sz, sz+1, dtype=torch.float32) for sz in self.filter_size]\n        self.filter = [torch.exp(-(x**2)/(2*s**2)) for x, s in zip(x_coord, self.sigma)]\n        self.filter[0] = self.filter[0].view(1,1,-1,1) / self.filter[0].sum()\n        self.filter[1] = self.filter[1].view(1,1,1,-1) / self.filter[1].sum()\n\n    def roll(self):\n        return random.random() < self.probability\n\n    def transform_image(self, image, do_blur=None):\n        if do_blur is None:\n            do_blur = False\n\n        if do_blur:\n            if torch.is_tensor(image):\n                sz = image.shape[1:]\n                im1 = F.conv2d(image.view(-1, 1, sz[0], sz[1]), self.filter[0], padding=(self.filter_size[0], 0))\n                return F.conv2d(im1, self.filter[1], padding=(0,self.filter_size[1])).view(-1,sz[0],sz[1])\n            else:\n                raise NotImplementedError\n        else:\n            return image\n\n\nclass RandomAffine(TransformBase):\n    \"\"\"Apply random affine transformation.\"\"\"\n    def __init__(self, p_flip=0.0, max_rotation=0.0, max_shear=0.0, max_scale=0.0, max_ar_factor=0.0,\n                 border_mode='constant', pad_amount=0):\n        super().__init__()\n        self.p_flip = p_flip\n        self.max_rotation = max_rotation\n        self.max_shear = max_shear\n        self.max_scale = max_scale\n        self.max_ar_factor = max_ar_factor\n\n        if border_mode == 'constant':\n            self.border_flag = cv.BORDER_CONSTANT\n        elif border_mode == 'replicate':\n            self.border_flag = cv.BORDER_REPLICATE\n        else:\n            raise Exception\n\n        self.pad_amount = pad_amount\n\n    def roll(self):\n        do_flip = random.random() < self.p_flip\n        theta = random.uniform(-self.max_rotation, self.max_rotation)\n\n        shear_x = random.uniform(-self.max_shear, self.max_shear)\n        shear_y = random.uniform(-self.max_shear, self.max_shear)\n\n        ar_factor = np.exp(random.uniform(-self.max_ar_factor, self.max_ar_factor))\n        scale_factor = np.exp(random.uniform(-self.max_scale, self.max_scale))\n\n        return do_flip, theta, (shear_x, shear_y), (scale_factor, scale_factor * ar_factor)\n\n    def _construct_t_mat(self, image_shape, do_flip, theta, shear_values, scale_factors):\n        im_h, im_w = image_shape\n        t_mat = np.identity(3)\n\n        if do_flip:\n            t_mat[0, 0] = -1.0\n            t_mat[0, 2] = im_w\n\n        t_rot = cv.getRotationMatrix2D((im_w * 0.5, im_h * 0.5), theta, 1.0)\n        t_rot = np.concatenate((t_rot, np.array([0.0, 0.0, 1.0]).reshape(1, 3)))\n\n        t_shear = np.array([[1.0, shear_values[0], -shear_values[0] * 0.5 * im_w],\n                            [shear_values[1], 1.0, -shear_values[1] * 0.5 * im_h],\n                            [0.0, 0.0, 1.0]])\n\n        t_scale = np.array([[scale_factors[0], 0.0, (1.0 - scale_factors[0]) * 0.5 * im_w],\n                            [0.0, scale_factors[1], (1.0 - scale_factors[1]) * 0.5 * im_h],\n                            [0.0, 0.0, 1.0]])\n\n        t_mat = t_scale @ t_rot @ t_shear @ t_mat\n\n        t_mat[0, 2] += self.pad_amount\n        t_mat[1, 2] += self.pad_amount\n\n        t_mat = t_mat[:2, :]\n\n        return t_mat\n\n    def transform_image(self, image, do_flip, theta, shear_values, scale_factors):\n        if torch.is_tensor(image):\n            raise Exception('Only supported for numpy input')\n\n        t_mat = self._construct_t_mat(image.shape[:2], do_flip, theta, shear_values, scale_factors)\n        output_sz = (image.shape[1] + 2*self.pad_amount, image.shape[0] + 2*self.pad_amount)\n        image_t = cv.warpAffine(image, t_mat, output_sz, flags=cv.INTER_LINEAR,\n                                
borderMode=self.border_flag)\n\n        return image_t\n\n    def transform_coords(self, coords, image_shape, do_flip, theta, shear_values, scale_factors):\n        t_mat = self._construct_t_mat(image_shape, do_flip, theta, shear_values, scale_factors)\n\n        t_mat_tensor = torch.from_numpy(t_mat).float()\n\n        coords_xy1 = torch.stack((coords[1, :], coords[0, :], torch.ones_like(coords[1, :])))\n\n        coords_xy_t = torch.mm(t_mat_tensor, coords_xy1)\n\n        return coords_xy_t[[1, 0], :]\n\n    def transform_mask(self, mask, do_flip, theta, shear_values, scale_factors):\n        t_mat = self._construct_t_mat(mask.shape[:2], do_flip, theta, shear_values, scale_factors)\n        output_sz = (mask.shape[1] + 2*self.pad_amount, mask.shape[0] + 2*self.pad_amount)\n\n        mask_t = cv.warpAffine(mask.numpy(), t_mat, output_sz, flags=cv.INTER_NEAREST,\n                               borderMode=self.border_flag)\n\n        return torch.from_numpy(mask_t)\n"
  },
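  {
    "path": "artrackv2_mindspore/external/AR/ltr/data/_sketch_transforms_usage.py",
    "content": "# Hedged composition sketch for transforms.py above; this file is an\n# illustration and is not part of the original repository. It chains the\n# transform classes the way upstream ltr processing settings typically do.\nimport numpy as np\n\nimport ltr.data.transforms as tfm\n\ntransform = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                          tfm.Normalize(mean=[0.485, 0.456, 0.406],\n                                        std=[0.229, 0.224, 0.225]))\n\nimg = np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)\n\n# Only one input name is given, so the call returns that output directly:\n# a 3x128x128 float tensor, brightness-jittered and then normalized.\nimg_t = transform(image=img)\nprint(img_t.shape, img_t.dtype)\n"
  },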
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/__init__.py",
    "content": "from .lasot import Lasot\nfrom .got10k import Got10k\nfrom .tracking_net import TrackingNet\nfrom .imagenetvid import ImagenetVID\nfrom .coco import MSCOCO\nfrom .coco_seq import MSCOCOSeq\nfrom .youtubevos import YouTubeVOS\nfrom .davis import Davis\nfrom .lvis import LVIS\nfrom .ecssd import ECSSD\nfrom .msra10k import MSRA10k\nfrom .hku_is import HKUIS\nfrom .sbd import SBD\nfrom .synthetic_video import SyntheticVideo\nfrom .synthetic_video_blend import SyntheticVideoBlend\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/base_image_dataset.py",
    "content": "import torch.utils.data\nfrom ltr.data.image_loader import jpeg4py_loader\n\n\nclass BaseImageDataset(torch.utils.data.Dataset):\n    \"\"\" Base class for image datasets \"\"\"\n\n    def __init__(self, name, root, image_loader=jpeg4py_loader):\n        \"\"\"\n        args:\n            root - The root path to the dataset\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n        \"\"\"\n        self.name = name\n        self.root = root\n        self.image_loader = image_loader\n\n        self.image_list = []     # Contains the list of sequences.\n        self.class_list = []\n\n    def __len__(self):\n        \"\"\" Returns size of the dataset\n        returns:\n            int - number of samples in the dataset\n        \"\"\"\n        return self.get_num_images()\n\n    def __getitem__(self, index):\n        \"\"\" Not to be used! Check get_frames() instead.\n        \"\"\"\n        return None\n\n    def get_name(self):\n        \"\"\" Name of the dataset\n\n        returns:\n            string - Name of the dataset\n        \"\"\"\n        raise NotImplementedError\n\n    def get_num_images(self):\n        \"\"\" Number of sequences in a dataset\n\n        returns:\n            int - number of sequences in the dataset.\"\"\"\n        return len(self.image_list)\n\n    def has_class_info(self):\n        return False\n\n    def get_class_name(self, image_id):\n        return None\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_class_list(self):\n        return self.class_list\n\n    def get_images_in_class(self, class_name):\n        raise NotImplementedError\n\n    def has_segmentation_info(self):\n        return False\n\n    def get_image_info(self, seq_id):\n        \"\"\" Returns information about a particular image,\n\n        args:\n            seq_id - index of the image\n\n        returns:\n            Dict\n            \"\"\"\n        raise NotImplementedError\n\n    def get_image(self, image_id, anno=None):\n        \"\"\" Get a image\n\n        args:\n            image_id      - index of image\n            anno(None)  - The annotation for the sequence (see get_sequence_info). If None, they will be loaded.\n\n        returns:\n            image -\n            anno -\n            dict - A dict containing meta information about the sequence, e.g. class of the target object.\n\n        \"\"\"\n        raise NotImplementedError\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/base_video_dataset.py",
    "content": "import torch.utils.data\nfrom ltr.data.image_loader import jpeg4py_loader\n\n\nclass BaseVideoDataset(torch.utils.data.Dataset):\n    \"\"\" Base class for video datasets \"\"\"\n\n    def __init__(self, name, root, image_loader=jpeg4py_loader):\n        \"\"\"\n        args:\n            root - The root path to the dataset\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n        \"\"\"\n        self.name = name\n        self.root = root\n        self.image_loader = image_loader\n\n        self.sequence_list = []     # Contains the list of sequences.\n        self.class_list = []\n\n    def __len__(self):\n        \"\"\" Returns size of the dataset\n        returns:\n            int - number of samples in the dataset\n        \"\"\"\n        return self.get_num_sequences()\n\n    def __getitem__(self, index):\n        \"\"\" Not to be used! Check get_frames() instead.\n        \"\"\"\n        return None\n\n    def is_video_sequence(self):\n        \"\"\" Returns whether the dataset is a video dataset or an image dataset\n\n        returns:\n            bool - True if a video dataset\n        \"\"\"\n        return True\n\n    def is_synthetic_video_dataset(self):\n        \"\"\" Returns whether the dataset contains real videos or synthetic\n\n        returns:\n            bool - True if a video dataset\n        \"\"\"\n        return False\n\n    def get_name(self):\n        \"\"\" Name of the dataset\n\n        returns:\n            string - Name of the dataset\n        \"\"\"\n        raise NotImplementedError\n\n    def get_num_sequences(self):\n        \"\"\" Number of sequences in a dataset\n\n        returns:\n            int - number of sequences in the dataset.\"\"\"\n        return len(self.sequence_list)\n\n    def has_class_info(self):\n        return False\n\n    def has_occlusion_info(self):\n        return False\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_class_list(self):\n        return self.class_list\n\n    def get_sequences_in_class(self, class_name):\n        raise NotImplementedError\n\n    def has_segmentation_info(self):\n        return False\n\n    def get_sequence_info(self, seq_id):\n        \"\"\" Returns information about a particular sequences,\n\n        args:\n            seq_id - index of the sequence\n\n        returns:\n            Dict\n            \"\"\"\n        raise NotImplementedError\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        \"\"\" Get a set of frames from a particular sequence\n\n        args:\n            seq_id      - index of sequence\n            frame_ids   - a list of frame numbers\n            anno(None)  - The annotation for the sequence (see get_sequence_info). If None, they will be loaded.\n\n        returns:\n            list - List of frames corresponding to frame_ids\n            list - List of dicts for each frame\n            dict - A dict containing meta information about the sequence, e.g. class of the target object.\n\n        \"\"\"\n        raise NotImplementedError\n\n"
  },
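  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/_sketch_toy_video_dataset.py",
    "content": "# Hedged subclassing sketch for BaseVideoDataset above; this file is an\n# illustration and is not part of the original repository. It shows the\n# minimal surface a video dataset must implement to work with the sampler:\n# sequence_list, get_name, get_sequence_info and get_frames. All data is\n# synthetic.\nimport numpy as np\nimport torch\n\nfrom ltr.dataset.base_video_dataset import BaseVideoDataset\n\n\nclass ToyVideoDataset(BaseVideoDataset):\n    def __init__(self, num_frames=50):\n        # No images are read from disk, so no image loader is needed.\n        super().__init__('toy', root='', image_loader=None)\n        self.sequence_list = ['seq_0']\n        self.num_frames = num_frames\n\n    def get_name(self):\n        return 'toy'\n\n    def get_sequence_info(self, seq_id):\n        # A static box that is visible in every frame.\n        bbox = torch.Tensor([[10.0, 10.0, 30.0, 30.0]]).repeat(self.num_frames, 1)\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        return {'bbox': bbox, 'valid': valid, 'visible': valid.byte()}\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        frames = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in frame_ids]\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n        anno_frames = {key: [value[f_id, ...] for f_id in frame_ids]\n                       for key, value in anno.items()}\n        return frames, anno_frames, {'object_class_name': None}\n"
  },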
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/coco.py",
    "content": "import os\nfrom .base_image_dataset import BaseImageDataset\nfrom ltr.data.image_loader import jpeg4py_loader\nimport torch\nfrom pycocotools.coco import COCO\nimport random\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\n\n\nclass MSCOCO(BaseImageDataset):\n    \"\"\" The COCO object detection dataset.\n\n    Publication:\n        Microsoft COCO: Common Objects in Context.\n        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n        ECCV, 2014\n        https://arxiv.org/pdf/1405.0312.pdf\n\n    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n    organized as follows.\n        - coco_root\n            - annotations\n                - instances_train2014.json\n                - instances_train2017.json\n            - images\n                - train2014\n                - train2017\n\n    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None,\n                 split=\"train\", version=\"2014\"):\n        \"\"\"\n        args:\n            root - path to coco root folder\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            min_area - Objects with area less than min_area are filtered out. Default is 0.0\n            split - 'train' or 'val'.\n            version - version of coco dataset (2014 or 2017)\n        \"\"\"\n\n        root = env_settings().coco_dir if root is None else root\n        super().__init__('COCO', root, image_loader)\n\n        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n        self.coco_set = COCO(self.anno_path)\n\n        self.cats = self.coco_set.cats\n\n        self.class_list = self.get_class_list()  # the parent class thing would happen in the sampler\n\n        self.image_list = self._get_image_list(min_area=min_area)\n\n        if data_fraction is not None:\n            self.image_list = random.sample(self.image_list, int(len(self.image_list) * data_fraction))\n        self.im_per_class = self._build_im_per_class()\n\n    def _get_image_list(self, min_area=None):\n        ann_list = list(self.coco_set.anns.keys())\n        image_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n        if min_area is not None:\n            image_list = [a for a in image_list if self.coco_set.anns[a]['area'] > min_area]\n\n        return image_list\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_name(self):\n        return 'coco'\n\n    def has_class_info(self):\n        return True\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_class_list(self):\n        class_list = []\n        for cat_id in self.cats.keys():\n            class_list.append(self.cats[cat_id]['name'])\n        return class_list\n\n    def _build_im_per_class(self):\n        im_per_class = {}\n        for i, im in enumerate(self.image_list):\n            
class_name = self.cats[self.coco_set.anns[im]['category_id']]['name']\n            if class_name not in im_per_class:\n                im_per_class[class_name] = [i]\n            else:\n                im_per_class[class_name].append(i)\n\n        return im_per_class\n\n    def get_images_in_class(self, class_name):\n        return self.im_per_class[class_name]\n\n    def get_image_info(self, im_id):\n        anno = self._get_anno(im_id)\n\n        bbox = torch.Tensor(anno['bbox']).view(4,)\n\n        mask = torch.Tensor(self.coco_set.annToMask(anno))\n\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_anno(self, im_id):\n        anno = self.coco_set.anns[self.image_list[im_id]]\n\n        return anno\n\n    def _get_image(self, im_id):\n        path = self.coco_set.loadImgs([self.coco_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name']\n        img = self.image_loader(os.path.join(self.img_pth, path))\n        return img\n\n    def get_meta_info(self, im_id):\n        try:\n            cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']]\n            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n                                       'motion_class': None,\n                                       'major_class': cat_dict_current['supercategory'],\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n    def get_class_name(self, im_id):\n        cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_image(self, image_id, anno=None):\n        frame = self._get_image(image_id)\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return frame, anno, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/coco_seq.py",
    "content": "import os\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.image_loader import jpeg4py_loader\nimport torch\nimport random\nfrom pycocotools.coco import COCO\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\n\n\nclass MSCOCOSeq(BaseVideoDataset):\n    \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n    Publication:\n        Microsoft COCO: Common Objects in Context.\n        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n        ECCV, 2014\n        https://arxiv.org/pdf/1405.0312.pdf\n\n    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n    organized as follows.\n        - coco_root\n            - annotations\n                - instances_train2014.json\n                - instances_train2017.json\n            - images\n                - train2014\n                - train2017\n\n    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n        \"\"\"\n        args:\n            root - path to the coco dataset.\n            image_loader (default_image_loader) -  The function to read the images. If installed,\n                                                   jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n                                                   opencv's imread is used.\n            data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n                                  images  will be used\n            split - 'train' or 'val'.\n            version - version of coco dataset (2014 or 2017)\n        \"\"\"\n        root = env_settings().coco_dir if root is None else root\n        super().__init__('COCO', root, image_loader)\n\n        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n        # Load the COCO set.\n        self.coco_set = COCO(self.anno_path)\n\n        self.cats = self.coco_set.cats\n\n        self.class_list = self.get_class_list()\n\n        self.sequence_list = self._get_sequence_list()\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n        self.seq_per_class = self._build_seq_per_class()\n\n    def _get_sequence_list(self):\n        ann_list = list(self.coco_set.anns.keys())\n        seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n        return seq_list\n\n    def is_video_sequence(self):\n        return False\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_name(self):\n        return 'coco'\n\n    def has_class_info(self):\n        return True\n\n    def get_class_list(self):\n        class_list = []\n        for cat_id in self.cats.keys():\n            class_list.append(self.cats[cat_id]['name'])\n        return class_list\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def _build_seq_per_class(self):\n        seq_per_class = {}\n        for i, seq in enumerate(self.sequence_list):\n            class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n            if class_name not in seq_per_class:\n                seq_per_class[class_name] = [i]\n            else:\n                seq_per_class[class_name].append(i)\n\n        return seq_per_class\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def get_sequence_info(self, seq_id):\n        anno = self._get_anno(seq_id)\n\n        bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n        mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_anno(self, seq_id):\n        anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n        return anno\n\n    def _get_frames(self, seq_id):\n        path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n        img = self.image_loader(os.path.join(self.img_pth, path))\n        return img\n\n    def get_meta_info(self, seq_id):\n        try:\n            cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n                                       'motion_class': None,\n                                       'major_class': cat_dict_current['supercategory'],\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        except:\n            object_meta = 
OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n\n    def get_class_name(self, seq_id):\n        cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n        # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n        # list containing these replicated images.\n        frame = self._get_frames(seq_id)\n\n        frame_list = [frame.copy() for _ in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n        object_meta = self.get_meta_info(seq_id)\n\n        return frame_list, anno_frames, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/davis.py",
    "content": "from pathlib import Path\nfrom ltr.dataset.vos_base import VOSDatasetBase, VOSMeta\nfrom pytracking.evaluation import Sequence\nfrom ltr.admin.environment import env_settings\nfrom ltr.data.image_loader import jpeg4py_loader\n\n\nclass Davis(VOSDatasetBase):\n    \"\"\" The Davis VOS dataset\n\n        Publication:\n            A Benchmark Dataset and Evaluation Methodology for Video Object Segmentation\n            F. Perazzi, J. Pont-Tuset, B. McWilliams, L. Van Gool, M. Gross, and A. Sorkine-Hornung\n            CVPR, 2016\n            http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Perazzi_A_Benchmark_Dataset_CVPR_2016_paper.pdf\n\n        Download the dataset from https://davischallenge.org/davis2017/code.html\n        \"\"\"\n    def __init__(self, root=None, sequences=None, version='2017', split='train', multiobj=True,\n                 vis_threshold=10, image_loader=jpeg4py_loader):\n        \"\"\"\n        args:\n             root - Dataset root path. If unset, it uses the path in your local.py config.\n             sequences - List of sequence names. Limit to a subset of sequences if not None.\n             version - '2016' or '2017\n             split - Any name in DAVIS/ImageSets/<year>\n             multiobj - Whether the dataset will return all objects in a sequence or multiple sequences with one object\n                        in each.\n             vis_threshold - Minimum number of pixels required to consider a target object \"visible\".\n             image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n        \"\"\"\n        if version == '2017':\n            if split in ['train', 'val']:\n                root = env_settings().davis_dir if root is None else root\n            elif split in ['test-dev']:\n                root = env_settings().davis_testdev_dir if root is None else root\n            else:\n                raise Exception('Unknown split {}'.format(split))\n        else:\n            root = env_settings().davis16_dir if root is None else root\n            \n        super().__init__(name='DAVIS', root=Path(root), version=version, split=split, multiobj=multiobj,\n                         vis_threshold=vis_threshold, image_loader=image_loader)\n\n        dset_path = self.root\n        self._jpeg_path = dset_path / 'JPEGImages' / '480p'\n        self._anno_path = dset_path / 'Annotations' / '480p'\n\n        meta_path = dset_path / \"generated_meta.json\"\n        if meta_path.exists():\n            self.gmeta = VOSMeta(filename=meta_path)\n        else:\n            self.gmeta = VOSMeta.generate('DAVIS', self._jpeg_path, self._anno_path)\n            self.gmeta.save(meta_path)\n\n        if sequences is None:\n            if self.split != 'all':\n                fname = dset_path / 'ImageSets' / self.version / (self.split + '.txt')\n                sequences = open(fname).read().splitlines()\n            else:\n                sequences = [p for p in sorted(self._jpeg_path.glob(\"*\")) if p.is_dir()]\n\n        self.sequence_names = sequences\n        self._samples = []\n\n        for seq in sequences:\n            obj_ids = self.gmeta.get_obj_ids(seq)\n            if self.multiobj:  # Multiple objects per sample\n                self._samples.append((seq, obj_ids))\n            else:  # One object per sample\n                self._samples.extend([(seq, [obj_id]) for obj_id in obj_ids])\n\n        print(\"%s 
loaded.\" % self.get_name())\n\n    def _construct_sequence(self, sequence_info):\n\n        seq_name = sequence_info['sequence']\n        images, gt_labels, gt_bboxes = self.get_paths_and_bboxes(sequence_info)\n\n        return Sequence(name=seq_name, frames=images, dataset='DAVIS', ground_truth_rect=gt_bboxes,\n                        ground_truth_seg=gt_labels, object_ids=sequence_info['object_ids'],\n                        multiobj_mode=self.multiobj)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/ecssd.py",
    "content": "import os\nfrom .base_image_dataset import BaseImageDataset\nfrom ltr.data.image_loader import jpeg4py_loader, opencv_loader, imread_indexed\nimport torch\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\n\n\nclass ECSSD(BaseImageDataset):\n    \"\"\"\n    Extended Complex Scene Saliency Dataset (ECSSD)\n\n    Publication:\n            Hierarchical Image Saliency Detection on Extended CSSD\n            Jianping Shi, Qiong Yan, Li Xu, Jiaya Jia\n            TPAMI, 2016\n            https://arxiv.org/pdf/1408.5418.pdf\n\n        Download the dataset from http://www.cse.cuhk.edu.hk/leojia/projects/hsaliency/dataset.html\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None):\n        \"\"\"\n        args:\n            root - path to ECSSD root folder\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            min_area - Objects with area less than min_area are filtered out. Default is 0.0\n        \"\"\"\n        root = env_settings().ecssd_dir if root is None else root\n        super().__init__('ECSSD', root, image_loader)\n\n        self.image_list = self._load_dataset(min_area=min_area)\n\n        if data_fraction is not None:\n            raise NotImplementedError\n\n    def _load_dataset(self, min_area=None):\n        images = []\n\n        for i in range(1, 1001):\n            a = imread_indexed(os.path.join(self.root, 'ground_truth_mask', '{:04d}.png'.format(i)))\n\n            if min_area is None or (a > 0).sum() > min_area:\n                images.append(i)\n\n        return images\n\n    def get_name(self):\n        return 'ecssd'\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_image_info(self, im_id):\n        mask = imread_indexed(os.path.join(self.root, 'ground_truth_mask', '{:04d}.png'.format(self.image_list[im_id])))\n\n        mask = torch.Tensor(mask == 255)\n        bbox = masks_to_bboxes(mask, fmt='t').view(4,)\n\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def get_meta_info(self, im_id):\n        object_meta = OrderedDict({'object_class_name': None,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return object_meta\n\n    def get_image(self, image_id, anno=None):\n        frame = self.image_loader(os.path.join(self.root, 'images', '{:04d}.jpg'.format(self.image_list[image_id])))\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return frame, anno, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/got10k.py",
    "content": "import os\nimport os.path\nimport numpy as np\nimport torch\nimport csv\nimport pandas\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.image_loader import jpeg4py_loader\nfrom ltr.admin.environment import env_settings\n\n\nclass Got10k(BaseVideoDataset):\n    \"\"\" GOT-10k dataset.\n\n    Publication:\n        GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n        Lianghua Huang, Xin Zhao, and Kaiqi Huang\n        arXiv:1810.11981, 2018\n        https://arxiv.org/pdf/1810.11981.pdf\n\n    Download dataset from http://got-10k.aitestunion.com/downloads\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n                    not NOT the official got-10k validation split. To use the official validation split, provide that as\n                    the root folder instead.\n            seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n                        options can be used at the same time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().got10k_dir if root is None else root\n        super().__init__('GOT10k', root, image_loader)\n\n        # all folders inside the root\n        self.sequence_list = self._get_sequence_list()\n\n        # seq_id is the index of the folder inside the got10k root path\n        if split is not None:\n            if seq_ids is not None:\n                raise ValueError('Cannot set both split_name and seq_ids.')\n            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\n            elif split == 'val':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\n            elif split == 'vottrain':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\n            elif split == 'votval':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n        elif seq_ids is None:\n            seq_ids = list(range(0, len(self.sequence_list)))\n\n        self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        self.sequence_meta_info = self._load_meta_info()\n        self.seq_per_class = self._build_seq_per_class()\n\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def get_name(self):\n        return 'got10k'\n\n    
def has_class_info(self):\n        return True\n\n    def has_occlusion_info(self):\n        return True\n\n    def _load_meta_info(self):\n        sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\n        return sequence_meta_info\n\n    def _read_meta(self, seq_path):\n        try:\n            with open(os.path.join(seq_path, 'meta_info.ini')) as f:\n                meta_info = f.readlines()\n            object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\n                                       'motion_class': meta_info[6].split(': ')[-1][:-1],\n                                       'major_class': meta_info[7].split(': ')[-1][:-1],\n                                       'root_class': meta_info[8].split(': ')[-1][:-1],\n                                       'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n    def _build_seq_per_class(self):\n        seq_per_class = {}\n\n        for i, s in enumerate(self.sequence_list):\n            object_class = self.sequence_meta_info[s]['object_class_name']\n            if object_class in seq_per_class:\n                seq_per_class[object_class].append(i)\n            else:\n                seq_per_class[object_class] = [i]\n\n        return seq_per_class\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _get_sequence_list(self):\n        with open(os.path.join(self.root, 'list.txt')) as f:\n            dir_list = list(csv.reader(f))\n        dir_list = [dir_name[0] for dir_name in dir_list]\n        return dir_list\n\n    def _read_bb_anno(self, seq_path):\n        bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n        return torch.tensor(gt)\n\n    def _read_target_visible(self, seq_path):\n        # Read full occlusion and out_of_view\n        occlusion_file = os.path.join(seq_path, \"absence.label\")\n        cover_file = os.path.join(seq_path, \"cover.label\")\n\n        with open(occlusion_file, 'r', newline='') as f:\n            occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n        with open(cover_file, 'r', newline='') as f:\n            cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n\n        target_visible = ~occlusion & (cover>0).byte()\n\n        visible_ratio = cover.float() / 8\n        return target_visible, visible_ratio\n\n    def _get_sequence_path(self, seq_id):\n        return os.path.join(self.root, self.sequence_list[seq_id])\n\n    def get_sequence_info(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        bbox = self._read_bb_anno(seq_path)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible, visible_ratio = self._read_target_visible(seq_path)\n        visible = visible & valid.byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n    def _get_frame_path(self, seq_path, frame_id):\n        return os.path.join(seq_path, 
'{:08}.jpg'.format(frame_id+1))    # frames start from 1\n\n    def _get_frame(self, seq_path, frame_id):\n        return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n    def get_class_name(self, seq_id):\n        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n        return obj_meta['object_class_name']\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        seq_path = self._get_sequence_path(seq_id)\n        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        return frame_list, anno_frames, obj_meta\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/hku_is.py",
    "content": "import os\nfrom .base_image_dataset import BaseImageDataset\nfrom ltr.data.image_loader import jpeg4py_loader, opencv_loader, imread_indexed\nimport torch\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\n\n\nclass HKUIS(BaseImageDataset):\n    \"\"\"\n    HKU-IS salient object detection dataset\n\n    Publication:\n        Visual saliency based on multiscale deep features\n        Guanbin Li and Yizhou Yu\n        CVPR, 2015\n        https://arxiv.org/pdf/1503.08663.pdf\n\n    Dowload dataset from https://sites.google.com/site/ligb86/hkuis\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None):\n        \"\"\"\n        args:\n            root - path to HKU-IS root folder\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            min_area - Objects with area less than min_area are filtered out. Default is 0.0\n        \"\"\"\n        root = env_settings().hkuis_dir if root is None else root\n        super().__init__('HKUIS', root, image_loader)\n\n        self.image_list, self.anno_list = self._load_dataset(min_area=min_area)\n\n        if data_fraction is not None:\n            raise NotImplementedError\n\n    def _load_dataset(self, min_area=None):\n        files_list = os.listdir(os.path.join(self.root, 'imgs'))\n        image_list = [f[:-4] for f in files_list]\n\n        images = []\n        annos = []\n\n        for f in image_list:\n            a = imread_indexed(os.path.join(self.root, 'gt', '{}.png'.format(f)))\n\n            if min_area is None or (a > 0).sum() > min_area:\n                im = opencv_loader(os.path.join(self.root, 'imgs', '{}.png'.format(f)))\n                images.append(im)\n                annos.append(a)\n\n        return images, annos\n\n    def get_name(self):\n        return 'hku-is'\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_image_info(self, im_id):\n        mask = self.anno_list[im_id]\n        mask = torch.Tensor(mask == 255)\n        bbox = masks_to_bboxes(mask, fmt='t').view(4,)\n\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def get_meta_info(self, im_id):\n        object_meta = OrderedDict({'object_class_name': None,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return object_meta\n\n    def get_image(self, image_id, anno=None):\n        frame = self.image_list[image_id]\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return frame, anno, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/imagenetvid.py",
    "content": "import os\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.image_loader import default_image_loader\nimport xml.etree.ElementTree as ET\nimport json\nimport torch\nimport random\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\n\n\ndef get_target_to_image_ratio(seq):\n    anno = torch.Tensor(seq['anno'])\n    img_sz = torch.Tensor(seq['image_size'])\n    return (anno[0, 2:4].prod() / (img_sz.prod())).sqrt()\n\n\nclass ImagenetVID(BaseVideoDataset):\n    \"\"\" Imagenet VID dataset.\n\n    Publication:\n        ImageNet Large Scale Visual Recognition Challenge\n        Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n        Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\n        IJCV, 2015\n        https://arxiv.org/pdf/1409.0575.pdf\n\n    Download the dataset from http://image-net.org/\n    \"\"\"\n    def __init__(self, root=None, image_loader=default_image_loader, min_length=0, max_target_area=1):\n        \"\"\"\n        args:\n            root - path to the imagenet vid dataset.\n            image_loader (default_image_loader) -  The function to read the images. If installed,\n                                                   jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n                                                   opencv's imread is used.\n            min_length - Minimum allowed sequence length.\n            max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\n                                which cover complete image.\n        \"\"\"\n        root = env_settings().imagenet_dir if root is None else root\n        super().__init__(root, image_loader)\n\n        cache_file = os.path.join(root, 'cache.json')\n        if os.path.isfile(cache_file):\n            # If available, load the pre-processed cache file containing meta-info for each sequence\n            with open(cache_file, 'r') as f:\n                sequence_list_dict = json.load(f)\n\n            self.sequence_list = sequence_list_dict\n        else:\n            # Else process the imagenet annotations and generate the cache file\n            self.sequence_list = self._process_anno(root)\n\n            with open(cache_file, 'w') as f:\n                json.dump(self.sequence_list, f)\n\n        # Filter the sequences based on min_length and max_target_area in the first frame\n        self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n                              get_target_to_image_ratio(x) < max_target_area]\n\n    def get_name(self):\n        return 'imagenetvid'\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def get_sequence_info(self, seq_id):\n        bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n        valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n        visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n        return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n    def _get_frame(self, sequence, frame_id):\n        set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n        vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n        frame_number = frame_id + sequence['start_frame']\n\n        frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\n                                  
'{:06d}.JPEG'.format(frame_number))\n        return self.image_loader(frame_path)\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        sequence = self.sequence_list[seq_id]\n\n        frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        # Create anno dict\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        # added the class info to the meta info\n        object_meta = OrderedDict({'object_class': sequence['class_name'],\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n\n    def _process_anno(self, root):\n        # Builds individual tracklets\n        base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train')\n\n        all_sequences = []\n        for set in sorted(os.listdir(base_vid_anno_path)):\n            set_id = int(set.split('_')[-1])\n            for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\n\n                vid_id = int(vid.split('_')[-1])\n                anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\n\n                frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\n                image_size = [int(frame1_anno.find('size/width').text), int(frame1_anno.find('size/height').text)]\n\n                objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\n                           for f in anno_files]\n\n                tracklets = {}\n\n                # Find all tracklets along with start frame\n                for f_id, all_targets in enumerate(objects):\n                    for target in all_targets:\n                        tracklet_id = target.find('trackid').text\n                        if tracklet_id not in tracklets:\n                            tracklets[tracklet_id] = f_id\n\n                for tracklet_id, tracklet_start in tracklets.items():\n                    tracklet_anno = []\n                    target_visible = []\n                    class_name_id = None\n\n                    for f_id in range(tracklet_start, len(objects)):\n                        found = False\n                        for target in objects[f_id]:\n                            if target.find('trackid').text == tracklet_id:\n                                if not class_name_id:\n                                    class_name_id = target.find('name').text\n                                x1 = int(target.find('bndbox/xmin').text)\n                                y1 = int(target.find('bndbox/ymin').text)\n                                x2 = int(target.find('bndbox/xmax').text)\n                                y2 = int(target.find('bndbox/ymax').text)\n\n                                tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\n                                target_visible.append(target.find('occluded').text == '0')\n\n                                found = True\n                                break\n                        if not found:\n                            break\n\n                    new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\n                  
                  'start_frame': tracklet_start, 'anno': tracklet_anno,\n                                    'target_visible': target_visible, 'image_size': image_size}\n                    all_sequences.append(new_sequence)\n\n        return all_sequences\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/lasot.py",
    "content": "import os\nimport os.path\nimport torch\nimport numpy as np\nimport pandas\nimport csv\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.image_loader import jpeg4py_loader\nfrom ltr.admin.environment import env_settings\n\n\nclass Lasot(BaseVideoDataset):\n    \"\"\" LaSOT dataset.\n\n    Publication:\n        LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n        Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n        CVPR, 2019\n        https://arxiv.org/pdf/1809.07845.pdf\n\n    Download the dataset from https://cis.temple.edu/lasot/download.html\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the lasot dataset.\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n                    videos with subscripts -1, -3, and -5 from each class will be used for training.\n            split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n                    vid_ids or split option can be used at a time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().lasot_dir if root is None else root\n        super().__init__('LaSOT', root, image_loader)\n\n        # Keep a list of all classes\n        self.class_list = [f for f in os.listdir(self.root)]\n        self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n        self.sequence_list = self._build_sequence_list(vid_ids, split)\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        self.seq_per_class = self._build_class_list()\n\n    def _build_sequence_list(self, vid_ids=None, split=None):\n        if split is not None:\n            if vid_ids is not None:\n                raise ValueError('Cannot set both split_name and vid_ids.')\n            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n        elif vid_ids is not None:\n            sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n        else:\n            raise ValueError('Set either split_name or vid_ids.')\n\n        return sequence_list\n\n    def _build_class_list(self):\n        seq_per_class = {}\n        for seq_id, seq_name in enumerate(self.sequence_list):\n            class_name = seq_name.split('-')[0]\n            if class_name in seq_per_class:\n                seq_per_class[class_name].append(seq_id)\n            else:\n                seq_per_class[class_name] = [seq_id]\n\n        return seq_per_class\n\n    def get_name(self):\n        return 'lasot'\n\n    def 
has_class_info(self):\n        return True\n\n    def has_occlusion_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _read_bb_anno(self, seq_path):\n        bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n        return torch.tensor(gt)\n\n    def _read_target_visible(self, seq_path):\n        # Read full occlusion and out_of_view\n        occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\n        out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n        with open(occlusion_file, 'r', newline='') as f:\n            occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n        with open(out_of_view_file, 'r') as f:\n            out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n\n        target_visible = ~occlusion & ~out_of_view\n\n        return target_visible\n\n    def _get_sequence_path(self, seq_id):\n        seq_name = self.sequence_list[seq_id]\n        class_name = seq_name.split('-')[0]\n        vid_id = seq_name.split('-')[1]\n\n        return os.path.join(self.root, class_name, class_name + '-' + vid_id)\n\n    def get_sequence_info(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        bbox = self._read_bb_anno(seq_path)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = self._read_target_visible(seq_path) & valid.byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def _get_frame_path(self, seq_path, frame_id):\n        return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1))    # frames start from 1\n\n    def _get_frame(self, seq_path, frame_id):\n        return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n    def _get_class(self, seq_path):\n        raw_class = seq_path.split('/')[-2]\n        return raw_class\n\n    def get_class_name(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        obj_class = self._get_class(seq_path)\n\n        return obj_class\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        seq_path = self._get_sequence_path(seq_id)\n\n        obj_class = self._get_class(seq_path)\n        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        object_meta = OrderedDict({'object_class_name': obj_class,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/lvis.py",
    "content": "import os\nfrom .base_image_dataset import BaseImageDataset\nfrom ltr.data.image_loader import jpeg4py_loader_w_failsafe\nimport torch\nimport random\nimport lvis.lvis as lvis_pk\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\n\n\nclass LVIS(BaseImageDataset):\n    \"\"\" The LVIS object detection dataset\n\n    Publication:\n        LVIS: A Dataset for Large Vocabulary Instance Segmentation\n        Agrim Gupta, Piotr Dollár, and Ross Girshick\n        CVPR, 2019\n        https://arxiv.org/pdf/1908.03195.pdf\n\n    Download the images along with annotations from https://www.lvisdataset.org/dataset. The root folder should be\n    organized as follows.\n        - lvis_root\n            - annotations\n                - lvis_v0.5_train.json\n                - lvis_v0.5_val.json\n            - images\n                - val2017\n                - train2017\n\n    Note: You also have to install the lvis Python API from https://github.com/lvis-dataset/lvis-api\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader_w_failsafe, data_fraction=None, min_area=None, split=\"train\"):\n        \"\"\"\n        args:\n            root - path to lvis root folder\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            min_area - Objects with area less than min_area are filtered out. Default is 0.0\n            split - 'train' or 'val'.\n        \"\"\"\n        root = env_settings().lvis_dir if root is None else root\n        super().__init__('LVIS', root, image_loader)\n\n        self.img_pth = os.path.join(root, 'images', f'{split}2017/')\n        self.anno_path = os.path.join(root, 'annotations', f'lvis_v0.5_{split}.json')\n\n        # Load the LVIS set.\n        self.lvis_set = lvis_pk.LVIS(self.anno_path)\n\n        self.cats = self.lvis_set.cats\n\n        self.class_list = self.get_class_list()     # the parent class thing would happen in the sampler\n\n        self.image_list = self._get_image_list(min_area=min_area)\n\n        if data_fraction is not None:\n            self.image_list = random.sample(self.image_list, int(len(self.image_list) * data_fraction))\n        self.im_per_class = self._build_im_per_class()\n\n    def _get_image_list(self, min_area=None):\n        im_list = list(self.lvis_set.anns.keys())  # No 'iscrowd' information in LVIS\n\n        if min_area is not None:\n            im_list = [s for s in im_list if self.lvis_set.anns[s]['area'] > min_area]\n\n        return im_list\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_name(self):\n        return 'lvis'\n\n    def has_class_info(self):\n        return True\n\n    def get_class_list(self):\n        class_list = []\n        for cat_id in self.cats.keys():\n            class_list.append(self.cats[cat_id]['name'])\n        return class_list\n\n    def has_segmentation_info(self):\n        return True\n\n    def _build_im_per_class(self):\n        im_per_class = {}\n        for i, im in enumerate(self.image_list):\n            class_name = self.cats[self.lvis_set.anns[im]['category_id']]['name']\n            if class_name not in im_per_class:\n                im_per_class[class_name] = [i]\n            else:\n                im_per_class[class_name].append(i)\n\n        return 
im_per_class\n\n    def get_images_in_class(self, class_name):\n        return self.im_per_class[class_name]\n\n    def get_image_info(self, im_id):\n        anno = self._get_anno(im_id)\n\n        bbox = torch.Tensor(anno['bbox']).view(4,)\n\n        mask = torch.Tensor(self.lvis_set.ann_to_mask(anno))\n\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_anno(self, im_id):\n        anno = self.lvis_set.anns[self.image_list[im_id]]\n\n        return anno\n\n    def _get_image(self, im_id):\n        path = self.lvis_set.load_imgs([self.lvis_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name']\n        img = self.image_loader(os.path.join(self.img_pth, path))\n        return img\n\n    def get_meta_info(self, im_id):\n        try:\n            cat_dict_current = self.cats[self.lvis_set.anns[self.image_list[im_id]]['category_id']]\n            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n                                       'motion_class': None,\n                                       'major_class': None,  # No 'supercategory' information available in LVIS\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n    def get_class_name(self, im_id):\n        cat_dict_current = self.cats[self.lvis_set.anns[self.image_list[im_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_image(self, image_id, anno=None):\n        frame = self._get_image(image_id)\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return frame, anno, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/msra10k.py",
    "content": "import os\nfrom .base_image_dataset import BaseImageDataset\nfrom ltr.data.image_loader import jpeg4py_loader, imread_indexed\nimport torch\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\n\n\nclass MSRA10k(BaseImageDataset):\n    \"\"\"\n    MSRA10k salient object detection dataset\n\n    Publication:\n        Global contrast based salient region detection\n        Ming-Ming Cheng, Niloy J. Mitra, Xiaolei Huang, Philip H. S. Torr, and Shi-Min Hu\n        TPAMI, 2015\n        https://mmcheng.net/mftp/Papers/SaliencyTPAMI.pdf\n\n    Download dataset from https://mmcheng.net/msra10k/\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None):\n        \"\"\"\n        args:\n            root - path to MSRA10k root folder\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            min_area - Objects with area less than min_area are filtered out. Default is 0.0\n        \"\"\"\n        root = env_settings().msra10k_dir if root is None else root\n        super().__init__('MSRA10k', root, image_loader)\n\n        self.image_list = self._load_dataset(min_area=min_area)\n\n        if data_fraction is not None:\n            raise NotImplementedError\n\n    def _load_dataset(self, min_area=None):\n        files_list = os.listdir(os.path.join(self.root, 'Imgs'))\n        image_list = [f[:-4] for f in files_list if f[-3:] == 'jpg']\n\n        images = []\n\n        for f in image_list:\n            a = imread_indexed(os.path.join(self.root, 'Imgs', '{}.png'.format(f)))\n\n            if min_area is None or (a > 0).sum() > min_area:\n                images.append(f)\n\n        return images\n\n    def get_name(self):\n        return 'msra10k'\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_image_info(self, im_id):\n        mask = imread_indexed(os.path.join(self.root, 'Imgs', '{}.png'.format(self.image_list[im_id])))\n        mask = torch.Tensor(mask == 255)\n        bbox = masks_to_bboxes(mask, fmt='t').view(4,)\n\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def get_meta_info(self, im_id):\n        object_meta = OrderedDict({'object_class_name': None,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return object_meta\n\n    def get_image(self, image_id, anno=None):\n        frame = self.image_loader(os.path.join(self.root, 'Imgs', '{}.jpg'.format(self.image_list[image_id])))\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return frame, anno, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/sbd.py",
    "content": "from .base_image_dataset import BaseImageDataset\nfrom ltr.data.image_loader import jpeg4py_loader_w_failsafe\nimport torch\nfrom collections import OrderedDict\nimport os\nfrom scipy.io import loadmat\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\n\nfrom ltr.admin.environment import env_settings\n\n\nclass SBD(BaseImageDataset):\n    \"\"\"\n    Semantic Boundaries Dataset and Benchmark (SBD)\n\n    Publication:\n        Semantic contours from inverse detectors\n        Bharath Hariharan, Pablo Arbelaez, Lubomir Bourdev, Subhransu Maji and Jitendra Malik\n        ICCV, 2011\n        http://home.bharathh.info/pubs/pdfs/BharathICCV2011.pdf\n\n    Download dataset from: http://home.bharathh.info/pubs/codes/SBD/download.html\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader_w_failsafe, data_fraction=None, split=\"train\"):\n        \"\"\"\n        args:\n            root - path to SBD root folder\n            image_loader - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                           is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            split - dataset split (\"train\", \"train_noval\", \"val\")\n        \"\"\"\n        root = env_settings().sbd_dir if root is None else root\n        super().__init__('SBD', root, image_loader)\n\n        assert split in [\"train\", \"train_noval\", \"val\"]\n\n        self.root = root\n\n        self.image_path_list, self.anno_file_list = self._load_dataset(split)\n\n        # Load mat fine\n        anno_list = [loadmat(a) for a in self.anno_file_list]\n\n        self.image_list = self._construct_image_list(anno_list)\n        if data_fraction is not None:\n            raise NotImplementedError\n\n    def _load_dataset(self, split):\n        split_f = os.path.join(self.root, split.rstrip('\\n') + '.txt')\n\n        with open(os.path.join(split_f), \"r\") as f:\n            file_names = [x.strip() for x in f.readlines()]\n\n        image_list = [os.path.join(self.root, 'img', x + \".jpg\") for x in file_names]\n        anno_list = [os.path.join(self.root, 'inst', x + \".mat\") for x in file_names]\n\n        assert (len(image_list) == len(anno_list))\n\n        return image_list, anno_list\n\n    def _get_mask_from_mat(self, mat):\n        return torch.tensor(mat['GTinst'][0]['Segmentation'][0])\n\n    def _construct_image_list(self, anno_list):\n        image_list = []\n\n        for im_id, a in enumerate(anno_list):\n            mask = self._get_mask_from_mat(a)\n            for instance_id in range(1, mask.max().item() + 1):\n                image_list.append((im_id, instance_id))\n\n        return image_list\n\n    def get_name(self):\n        return 'sbd'\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_image_info(self, im_id):\n        image_id, instance_id = self.image_list[im_id]\n        anno_mat = loadmat(self.anno_file_list[image_id])\n        mask = self._get_mask_from_mat(anno_mat)\n\n        mask = (mask == instance_id).float()\n        bbox = masks_to_bboxes(mask, fmt='t')\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_image(self, im_id):\n        image_id, _ = self.image_list[im_id]\n\n        img = self.image_loader(self.image_path_list[image_id])\n        return img\n\n    def get_meta_info(self, im_id):\n  
      object_meta = OrderedDict({'object_class_name': None,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n        return object_meta\n\n    def get_image(self, image_id, anno=None):\n        image = self._get_image(image_id)\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return image, anno, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/synthetic_video.py",
    "content": "from collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\n\n\nclass SyntheticVideo(BaseVideoDataset):\n    \"\"\"\n    Create a synthetic video dataset from an image dataset by applying a random transformation to images.\n    \"\"\"\n    def __init__(self, base_image_dataset, transform=None):\n        \"\"\"\n        args:\n            base_image_dataset - Image dataset used for generating synthetic videos\n            transform - Set of transforms to be applied to the images to generate synthetic video.\n        \"\"\"\n        super().__init__(base_image_dataset.get_name() + '_syn_vid', base_image_dataset.root,\n                         base_image_dataset.image_loader)\n        self.base_image_dataset = base_image_dataset\n        self.transform = transform\n\n    def get_name(self):\n        return self.name\n\n    def is_video_sequence(self):\n        return False\n\n    def has_class_info(self):\n        return self.base_image_dataset.has_class_info()\n\n    def has_occlusion_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return self.base_image_dataset.get_num_images()\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_sequences_in_class(self, class_name):\n        return self.get_images_in_class[class_name]\n\n    def get_sequence_info(self, seq_id):\n        image_info = self.base_image_dataset.get_image_info(seq_id)\n\n        image_info = {k: v.unsqueeze(0) for k, v in image_info.items()}\n        return image_info\n\n    def get_class_name(self, seq_id):\n        return self.base_image_dataset.get_class_name(seq_id)\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        frame, anno, object_meta = self.base_image_dataset.get_image(seq_id, anno=anno)\n\n        frame_list = [frame.copy() for _ in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[0].clone() for f_id in frame_ids]\n\n        if self.transform is not None:\n            if 'mask' in anno_frames.keys():\n                frame_list, anno_frames['bbox'], anno_frames['mask'] = self.transform(image=frame_list,\n                                                                                      bbox=anno_frames['bbox'],\n                                                                                      mask=anno_frames['mask'],\n                                                                                      joint=False)\n\n                anno_frames['bbox'] = [masks_to_bboxes(m, fmt='t') for m in anno_frames['mask']]\n            else:\n                frame_list, anno_frames['bbox'] = self.transform(image=frame_list,\n                                                                 bbox=anno_frames['bbox'],\n                                                                 joint=False)\n\n        object_meta = OrderedDict({'object_class_name': self.get_class_name(seq_id),\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/synthetic_video_blend.py",
    "content": "from collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\nimport random\nimport torch\n\n\nclass SyntheticVideoBlend(BaseVideoDataset):\n    \"\"\"\n    Create a synthetic video by applying random transformations to an object (foreground) and pasting it in a\n    background image.  Currently, the foreground object is pasted at random locations in different frames.\n    \"\"\"\n    def __init__(self, foreground_image_dataset, background_image_dataset, foreground_transform=None,\n                 background_transform=None):\n        \"\"\"\n        args:\n            foreground_image_dataset - A segmentation dataset from which foreground objects are cropped using the\n                                       segmentation mask\n            background_image_dataset - Dataset used to sample background image for the synthetic video\n            foreground_transform - Random transformations to be applied to the foreground object in every frame\n            background_transform - Random transformations to be applied to the background image in every frame\n        \"\"\"\n        assert foreground_image_dataset.has_segmentation_info()\n\n        super().__init__(foreground_image_dataset.get_name() + '_syn_vid_blend', foreground_image_dataset.root,\n                         foreground_image_dataset.image_loader)\n        self.foreground_image_dataset = foreground_image_dataset\n        self.background_image_dataset = background_image_dataset\n\n        self.foreground_transform = foreground_transform\n        self.background_transform = background_transform\n\n    def get_name(self):\n        return self.name\n\n    def is_video_sequence(self):\n        return False\n\n    def has_class_info(self):\n        return self.foreground_image_dataset.has_class_info()\n\n    def has_occlusion_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return self.foreground_image_dataset.get_num_images()\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_sequences_in_class(self, class_name):\n        return self.get_images_in_class[class_name]\n\n    def get_sequence_info(self, seq_id):\n        image_info = self.foreground_image_dataset.get_image_info(seq_id)\n\n        image_info = {k: v.unsqueeze(0) for k, v in image_info.items()}\n        return image_info\n\n    def get_class_name(self, seq_id):\n        return self.foreground_image_dataset.get_class_name(seq_id)\n\n    def _paste_target(self, fg_image, fg_box, fg_mask, bg_image, paste_loc):\n        fg_mask = fg_mask.view(fg_mask.shape[0], fg_mask.shape[1], 1)\n        fg_box = fg_box.long().tolist()\n\n        x1 = int(paste_loc[0] - 0.5 * fg_box[2])\n        x2 = x1 + fg_box[2]\n\n        y1 = int(paste_loc[1] - 0.5 * fg_box[3])\n        y2 = y1 + fg_box[3]\n\n        x1_pad = max(-x1, 0)\n        y1_pad = max(-y1, 0)\n\n        x2_pad = max(x2 - bg_image.shape[1], 0)\n        y2_pad = max(y2 - bg_image.shape[0], 0)\n\n        bg_mask = torch.zeros((bg_image.shape[0], bg_image.shape[1], 1), dtype=fg_mask.dtype,\n                              device=fg_mask.device)\n\n        if x1_pad >= fg_mask.shape[1] or x2_pad >= fg_mask.shape[1] or y1_pad >= fg_mask.shape[0] or y2_pad >= \\\n                fg_mask.shape[0]:\n            return bg_image, bg_mask.squeeze(-1)\n\n        fg_mask_patch = fg_mask[fg_box[1] + y1_pad:fg_box[1] + fg_box[3] - y2_pad,\n                                fg_box[0] + 
x1_pad:fg_box[0] + fg_box[2] - x2_pad, :]\n\n        fg_image_patch = fg_image[fg_box[1] + y1_pad:fg_box[1] + fg_box[3] - y2_pad,\n                         fg_box[0] + x1_pad:fg_box[0] + fg_box[2] - x2_pad, :]\n\n        bg_image[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] = \\\n            bg_image[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] * (1 - fg_mask_patch.numpy()) \\\n            + fg_mask_patch.numpy() * fg_image_patch\n\n        bg_mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] = fg_mask_patch\n\n        return bg_image, bg_mask.squeeze(-1)\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        # Handle foreground\n        fg_frame, fg_anno, fg_object_meta = self.foreground_image_dataset.get_image(seq_id, anno=anno)\n\n        fg_frame_list = [fg_frame.copy() for _ in frame_ids]\n\n        fg_anno_frames = {}\n        for key, value in fg_anno.items():\n            fg_anno_frames[key] = [value[0].clone() for f_id in frame_ids]\n\n        if self.foreground_transform is not None:\n            fg_frame_list, fg_anno_frames['bbox'], fg_anno_frames['mask'] = self.foreground_transform(\n                image=fg_frame_list,\n                bbox=fg_anno_frames['bbox'],\n                mask=fg_anno_frames['mask'],\n                joint=False)\n\n        # Sample a random background\n        bg_seq_id = random.randint(0, self.background_image_dataset.get_num_images() - 1)\n\n        bg_frame, bg_anno, _ = self.background_image_dataset.get_image(bg_seq_id)\n\n        bg_frame_list = [bg_frame.copy() for _ in frame_ids]\n\n        bg_anno_frames = {}\n        for key, value in bg_anno.items():\n            # Note: Since we get bg anno from image dataset, it does not has frame dimension\n            bg_anno_frames[key] = [value.clone() for f_id in frame_ids]\n\n        if self.background_transform is not None:\n            if 'mask' in bg_anno_frames.keys():\n                bg_frame_list, bg_anno_frames['bbox'], bg_anno_frames['mask'] = self.background_transform(\n                    image=bg_frame_list,\n                    bbox=bg_anno_frames['bbox'],\n                    mask=bg_anno_frames['mask'],\n                    joint=False)\n            else:\n                bg_frame_list, bg_anno_frames['bbox'] = self.background_transform(\n                    image=bg_frame_list,\n                    bbox=bg_anno_frames['bbox'],\n                    joint=False)\n\n        for i in range(len(frame_ids)):\n            # To be safe, get target bb for the mask\n            bbox = masks_to_bboxes(fg_anno_frames['mask'][i], fmt='t')\n\n            loc_y = random.randint(0, bg_frame_list[i].shape[0] - 1)\n            loc_x = random.randint(0, bg_frame_list[i].shape[1] - 1)\n\n            paste_loc = (loc_x, loc_y)\n            fg_frame_list[i], fg_anno_frames['mask'][i] = self._paste_target(fg_frame_list[i], bbox,\n                                                                             fg_anno_frames['mask'][i],\n                                                                             bg_frame_list[i], paste_loc)\n\n            fg_anno_frames['bbox'][i] = masks_to_bboxes(fg_anno_frames['mask'][i], fmt='t')\n\n        object_meta = OrderedDict({'object_class_name': self.get_class_name(seq_id),\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return 
fg_frame_list, fg_anno_frames, object_meta\n"
  },
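  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/_sketch_synthetic_video_blend_usage.py",
    "content": "# Editor's sketch (not part of the original repository): illustrates the\n# intended call pattern of SyntheticVideoBlend above. 'fg_set' and 'bg_set'\n# are hypothetical placeholders for any datasets that satisfy the documented\n# contract: fg_set must be an image dataset with segmentation info, bg_set\n# any image dataset to sample backgrounds from.\nfrom ltr.dataset.synthetic_video_blend import SyntheticVideoBlend\n\n\ndef make_blend_sequence(fg_set, bg_set, fg_tf=None, bg_tf=None):\n    # __init__ asserts fg_set.has_segmentation_info(), so a COCO-style\n    # segmentation dataset is the typical foreground choice.\n    syn = SyntheticVideoBlend(fg_set, bg_set,\n                              foreground_transform=fg_tf,\n                              background_transform=bg_tf)\n    # Each 'sequence' is one foreground image; three synthetic frames are\n    # generated by pasting it at random locations in the background.\n    frames, annos, meta = syn.get_frames(seq_id=0, frame_ids=[0, 1, 2])\n    return frames, annos['bbox'], annos['mask']\n"
  },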
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/tracking_net.py",
    "content": "import torch\nimport os\nimport os.path\nimport numpy as np\nimport pandas\nimport random\nfrom collections import OrderedDict\n\nfrom ltr.data.image_loader import jpeg4py_loader\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.admin.environment import env_settings\n\n\ndef list_sequences(root, set_ids):\n    \"\"\" Lists all the videos in the input set_ids. Returns a list of tuples (set_id, video_name)\n\n    args:\n        root: Root directory to TrackingNet\n        set_ids: Sets (0-11) which are to be used\n\n    returns:\n        list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence\n    \"\"\"\n    sequence_list = []\n\n    for s in set_ids:\n        anno_dir = os.path.join(root, \"TRAIN_\" + str(s), \"anno\")\n\n        sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')]\n        sequence_list += sequences_cur_set\n\n    return sequence_list\n\n\nclass TrackingNet(BaseVideoDataset):\n    \"\"\" TrackingNet dataset.\n\n    Publication:\n        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n        Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n        ECCV, 2018\n        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root        - The path to the TrackingNet folder, containing the training sets.\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n                            sets (0 - 11) will be used.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().trackingnet_dir if root is None else root\n        super().__init__('TrackingNet', root, image_loader)\n\n        if set_ids is None:\n            set_ids = [i for i in range(12)]\n\n        self.set_ids = set_ids\n\n        # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n        # video_name for each sequence\n        self.sequence_list = list_sequences(self.root, self.set_ids)\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n        self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n        # we do not have the class_lists for the tracking net\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def _load_class_info(self):\n        ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n        class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n        with open(class_map_path, 'r') as f:\n            seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n        seq_per_class = {}\n        for i, seq in enumerate(self.sequence_list):\n            class_name = seq_to_class_map[seq[1]]\n            if class_name not in seq_per_class:\n                seq_per_class[class_name] = [i]\n            else:\n                seq_per_class[class_name].append(i)\n\n        return seq_to_class_map, seq_per_class\n\n    def get_name(self):\n        return 'trackingnet'\n\n    def has_class_info(self):\n        return True\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _read_bb_anno(self, seq_id):\n        set_id = self.sequence_list[seq_id][0]\n        vid_name = self.sequence_list[seq_id][1]\n        bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\n        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\n                             low_memory=False).values\n        return torch.tensor(gt)\n\n    def get_sequence_info(self, seq_id):\n        bbox = self._read_bb_anno(seq_id)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = valid.clone().byte()\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def _get_frame(self, seq_id, frame_id):\n        set_id = self.sequence_list[seq_id][0]\n        vid_name = self.sequence_list[seq_id][1]\n        frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\n        return self.image_loader(frame_path)\n\n    def _get_class(self, seq_id):\n        seq_name = self.sequence_list[seq_id][1]\n        return self.seq_to_class_map[seq_name]\n\n    def get_class_name(self, seq_id):\n        obj_class = self._get_class(seq_id)\n\n        return obj_class\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        obj_class = self._get_class(seq_id)\n\n        object_meta = OrderedDict({'object_class_name': obj_class,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n"
  },
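  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/_sketch_tracking_net_layout.py",
    "content": "# Editor's sketch (not part of the original repository): shows the on-disk\n# layout TrackingNet expects, per list_sequences() and _get_frame() above,\n# plus a minimal loading call. <root> is a hypothetical path:\n#   <root>/TRAIN_<k>/anno/<video>.txt        one 'x,y,w,h' row per frame\n#   <root>/TRAIN_<k>/frames/<video>/<i>.jpg  frames indexed from 0\nfrom ltr.dataset.tracking_net import TrackingNet\n\n\ndef peek_first_frame(root):\n    # Restrict to TRAIN_0 and TRAIN_1 instead of all 12 training sets.\n    dset = TrackingNet(root=root, set_ids=[0, 1])\n    frames, anno, meta = dset.get_frames(seq_id=0, frame_ids=[0])\n    return frames[0], anno['bbox'][0], meta['object_class_name']\n"
  },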
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/vos_base.py",
    "content": "import torch\nfrom pathlib import Path\nfrom collections import OrderedDict, defaultdict\nimport json\nimport numpy as np\nimport os\n\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.image_loader import jpeg4py_loader, imread_indexed\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\n\n\nclass VOSMeta:\n    def __init__(self, data=None, filename=None):\n        if filename is not None:\n            self.load(filename)\n        elif data is not None:\n            self._data = data\n        else:\n            raise ValueError(\"Must set either data or filename parameter\")\n\n    def save(self, gen_meta: Path):\n        gen_meta.parent.mkdir(exist_ok=True, parents=True)\n        json.dump(self._data, open(gen_meta, \"w\"))\n\n    def load(self, gen_meta: Path):\n        if not gen_meta.exists():\n            print(\"Generated metadata file %s is not found.\" % gen_meta)\n            print(\"Find and run VOSMeta.generate() to create it.\")\n            raise FileNotFoundError(gen_meta)\n        self._data = json.load(open(gen_meta), object_pairs_hook=OrderedDict)\n\n    @classmethod\n    def generate(cls, dset_name: str, dset_images_path: Path, dset_annos_path: Path):\n        \"\"\"\n        Count the annotation mask pixels per object, per frame, in all sequences in a dataset\n        :param dset_name:        Dataset name, for printing the progress bar.\n        :param dset_annos_path:  Path to annotations directory, containing sequence directories,\n                                 with annotation frames in them.\n\n        :return: Dataset meta dict:\n\n        {'sequence0':\n            {\n             'shape': (height, width)\n\n             'obj_sizes':  # Object pixels per frame\n                {'frame0': {'object0': px_count, 'object1': px_count, ...},\n                 'frame1': {'object0': px_count, 'object1': px_count, ...},\n                ... },\n\n             'bboxes':  # Bounding boxes per frame\n                {'frame0': {'object0': bbox, 'object1': bbox, ...},\n                 'frame1': {'object0': bbox, 'object1': bbox, ...},\n                ... 
},\n            ...\n        }\n        \"\"\"\n        assert(dset_annos_path.exists())\n\n        dset_meta = OrderedDict()\n        sequences = [p.stem for p in sorted(dset_annos_path.glob(\"*\")) if p.is_dir()]\n\n        try:\n            from tqdm import tqdm\n        except:\n            def tqdm(x, *args, **kwargs):\n                return x\n\n        for seq in tqdm(sequences, desc=dset_name, unit=\"seq\"):\n\n            obj_sizes2 = defaultdict(OrderedDict)\n            bboxes = defaultdict(OrderedDict)\n            shape = None\n            frame_names = [file.stem for file in sorted((dset_images_path / seq).glob(\"*.jpg\"))]\n            anno_paths = list(sorted((dset_annos_path / seq).glob(\"*.png\")))\n\n            # Extract information from the given label frames\n            for path in anno_paths:\n                f_id = path.stem\n\n                # Count label-pixels per frame\n                labels = imread_indexed(path)\n                # labels = np.array(Image.open(path))\n                obj_ids, obj_sizes = np.unique(labels, return_counts=True)\n                obj_ids = [str(oid) for oid in obj_ids]\n                obj_sizes = obj_sizes.tolist()\n\n                if '0' in obj_ids:  # Remove background id\n                    obj_ids = obj_ids[1:]\n                    obj_sizes = obj_sizes[1:]\n                obj_sizes2[f_id] = OrderedDict(zip(obj_ids, obj_sizes))\n\n                # Generate per-label bounding boxes\n                for obj_id in obj_ids:\n                    bboxes[f_id][obj_id] = cls._mask_to_bbox(labels == int(obj_id))\n\n                if shape is None:\n                    shape = labels.shape[:2]\n\n            # Format result\n\n            dset_meta[seq] = dict(shape=shape, obj_sizes=obj_sizes2, bboxes=bboxes, frame_names=frame_names)\n\n        return VOSMeta(dset_meta)\n\n    @staticmethod\n    def _mask_to_bbox(mask: np.ndarray):\n\n        mask = mask.astype(int)\n        xs = mask.sum(axis=-2).nonzero()[0].tolist()\n        ys = mask.sum(axis=-1).nonzero()[0].tolist()\n\n        if len(ys) > 0 and len(xs) > 0:\n            x, y, w, h = xs[0], ys[0], xs[-1] - xs[0], ys[-1] - ys[0]\n        else:\n            x, y, w, h = 0, 0, 0, 0\n\n        return [x, y, w, h]\n\n    @staticmethod\n    def _transpose_nested_dict(d):\n        \"\"\" Permute a 2-level nested dict such that the inner and outer keys swap places. 
\"\"\"\n        d2 = defaultdict(OrderedDict)\n        for key1, inner in d.items():\n            for key2, value in inner.items():\n                d2[key2][key1] = value\n        return d2\n\n    def select_split(self, dataset_name, split):\n        ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n        sequences = set([s.strip() for s in open(os.path.join(ltr_path, 'data_specs', dataset_name + '_' + split + '.txt')).readlines()])\n        all_sequences = set(self._data.keys())\n        to_remove = all_sequences.difference(sequences)\n        for seq_name in to_remove:\n            self._data.pop(seq_name)\n\n    def get_sequence_names(self):\n        return list(self._data.keys())\n\n    def get_shape(self, seq_name):\n        \"\"\" Sequence image shape (h,w) \"\"\"\n        h, w = self._data[seq_name]['shape']\n        return h, w\n\n    def get_obj_ids(self, seq_name):\n        \"\"\" All objects in the sequence \"\"\"\n        return list(self.get_obj_sizes_per_object(seq_name).keys())\n\n    def get_frame_names(self, seq_name):\n        \"\"\" All filename stems of the frames in the sequence \"\"\"\n        return self._data[seq_name]['frame_names']\n\n    def enable_all_frames(self, dset_images_path):\n        \"\"\" For YouTubeVOS: Update the frame names with (jpeg) files from the <split>_all_frames set\n        :param dset_images_path:  /path/to/train_all_frames/JPEGImages (or valid or test)\n        :param seq: Sequence name\n        :return:\n        \"\"\"\n\n        # Try load the cached index\n        idx_file = dset_images_path.parent / \"frame_names.json\"\n        if idx_file.exists():\n            print('Loading cached frame names from %s' % idx_file)\n            all_frame_names = json.load(open(idx_file))\n        else:\n            # Cache the data to the user's home directory (guaranteed to be writable)\n            all_frame_names = dict()\n            user_idx_file = Path.home() / (dset_images_path.parent.stem + \"_frame_names.json\")\n            print('Indexing YouTubeVOS \"all_frames\" frame names to %s' % user_idx_file)\n            for seq in self._data:\n                all_frame_names[seq] = [file.stem for file in sorted((dset_images_path / seq).glob(\"*.jpg\"))]\n            json.dump(all_frame_names, open(user_idx_file, \"w\"))\n            print('Done. Move %s to %s to load faster next time.' 
% (user_idx_file, idx_file))\n\n        for seq, frame_names in all_frame_names.items():\n            self._data[seq]['frame_names'] = frame_names\n\n    def get_aspect_ratio(self, seq_name):\n        \"\"\" Sequence aspect ratio \"\"\"\n        h, w = self._data[seq_name]['shape']\n        return w / h\n\n    def get_obj_sizes_per_frame(self, seq_name):\n        \"\"\" Get object pixel counts, grouped by frame names \"\"\"\n        return self._data[seq_name]['obj_sizes']\n\n    def get_bboxes_per_frame(self, seq_name):\n        \"\"\" Object bounding boxes, grouped by frame names \"\"\"\n        return self._data[seq_name]['bboxes']\n\n    def get_obj_sizes_per_object(self, seq_name):\n        \"\"\" Object pixel counts, grouped by object \"\"\"\n        return self._transpose_nested_dict(self.get_obj_sizes_per_frame(seq_name))\n\n    def get_bboxes_per_object(self, seq_name):\n        \"\"\" Object bounding boxes, grouped by object \"\"\"\n        return self._transpose_nested_dict(self.get_bboxes_per_frame(seq_name))\n\n    @staticmethod\n    def generate_datasets_meta(src, dst=Path(\"~/vosdataset_meta\").expanduser()):\n        VOSMeta.generate(\"SyntheticCoco\", src / \"JPEGImages\", src / \"Annotations\").save(src / \"generated_meta.json\")\n\n\nclass VOSDatasetBase(BaseVideoDataset):\n\n    \"\"\" Generic VOS dataset reader base class, for both DAVIS and YouTubeVOS \"\"\"\n\n    def __init__(self, name: str, root: Path, version=None, split='train',\n                 multiobj=True, vis_threshold=10, image_loader=jpeg4py_loader):\n        \"\"\"\n        :param root:            Dataset root path, eg /path/to/DAVIS or /path/to/YouTubeVOS/\n                                Note: YouTubeVOS 2018 and 2019 are expected to be in\n                                /path/to/YouTubeVOS/2018 and /path/to/YouTubeVOS/2019, respectively\n        :param name:            'DAVIS' or 'YouTubeVOS' (case sensitive)\n        :param version:         DAVIS: '2016', '2017, YouTubeVOS: '2018' or '2019'\n        :param split:           DAVIS: Any name in DAVIS/ImageSets/<year>,\n                                YouTubeVOS: 'test', 'train', 'valid' or 'jjtrain', 'jjvalid'\n        :param multiobj:        Whether the dataset will return all objects in a sequence or\n                                multiple sequences with one object in each.\n        :param vis_threshold:   Minimum number of pixels required to consider a target object \"visible\".\n        :param image_loader:    Image loader.\n        \"\"\"\n\n        assert root.exists() and root.is_dir()\n\n        super().__init__(name, root, image_loader)\n\n        self.version = version\n        self.split = split\n        self.vis_threshold = vis_threshold\n        self.multiobj = multiobj\n\n    def _load_image(self, path):\n        im = self.image_loader(str(path))\n        assert im is not None\n        im = np.atleast_3d(im)\n        return im\n\n    @staticmethod\n    def _load_anno(path):\n        if not path.exists():\n            return None\n        # im = np.atleast_3d(np.array(Image.open(path)))\n        im = imread_indexed(path)\n        return im\n\n    def get_num_sequences(self):\n        return len(self._samples)\n\n    def get_sequence_info(self, sample_id):\n        \"\"\" Get sample meta data.\n        :param sample_id:  Sample to query.\n        :return: dict of metadata:\n                sequence:    Sequence name\n                frame_shape: (height, width) of the images\n                frame_names: List of frame filename stems in 
the sequence\n                object_ids:  Id numbers of all objects occurring in the sequence\n                obj_sizes:   Matrix shape=(frames, object) of the number of pixels for each object in each frame\n                             Coordinates in this matrix relate to the frame_names and object_ids\n                visible:     Boolean matrix of the same shape as obj_sizes. Entries with more pixels\n                             than self.visible_threshold are True.\n        \"\"\"\n        m = self.gmeta\n        seq_name, obj_ids = self._samples[sample_id]\n        f_names = m.get_frame_names(seq_name)  # All frames\n\n        f2i = {f: i for i, f in enumerate(f_names)}  # Frame name to matrix index\n        o2i = {o: i for i, o in enumerate(obj_ids)}  # Object id to matrix index\n\n        # Get a matrix of object sizes: shape=(frames, objects)\n        obj_sizes = torch.zeros((len(f_names), len(obj_ids)), dtype=torch.int)\n        sizes_per_object = m.get_obj_sizes_per_object(seq_name)\n\n        for obj_id in obj_ids:\n            frames = sizes_per_object[obj_id]\n            oid = o2i[obj_id]\n            for f, sz in frames.items():\n                obj_sizes[f2i[f], oid] = sz\n\n        visible = (obj_sizes > self.vis_threshold).byte()\n\n        return dict(sequence=seq_name, frame_shape=m.get_shape(seq_name), frame_names=f_names, object_ids=obj_ids,\n                    object_sizes=obj_sizes, visible=visible, valid=visible)\n\n    def get_paths_and_bboxes(self, sequence_info):\n\n        seq_name = sequence_info['sequence']\n        annos_root = self._anno_path / seq_name\n        images_root = self._jpeg_path / seq_name\n\n        frame_names = sequence_info['frame_names']\n        f2i = {f: i for i, f in enumerate(frame_names)}\n\n        images = [str(images_root / (f + \".jpg\")) for f in frame_names]\n\n        # Find the frames where ground truth is available and\n        # get the bounding boxes and segmentation labels of those frames\n        all_bboxes = self.gmeta.get_bboxes_per_frame(seq_name)\n        gt_labels = [str(annos_root / (f + \".png\")) if f in all_bboxes.keys() else None for f in frame_names]\n\n        gt_bboxes = OrderedDict()\n        for obj_id in sequence_info['object_ids']:\n            gt_bboxes[obj_id] = np.array([all_bboxes.get(frame, {}).get(obj_id, [-1, -1, -1, -1]) for frame in frame_names])\n\n        return images, gt_labels, gt_bboxes\n\n    def _construct_sequence(self, sequence_info):\n        raise NotImplementedError\n\n    def get_sequence_list(self):\n        if len(self.sequence_list) > 0:\n            return self.sequence_list\n        self.sequence_list = [self._construct_sequence(self.get_sequence_info(i)) for i in range(len(self._samples))]\n        return self.sequence_list\n\n    def __len__(self):\n        return len(self._samples)\n\n    def _get_image_path(self, meta, frame_id):\n        return self._jpeg_path / meta['sequence'] / (meta['frame_names'][frame_id] + \".jpg\")\n\n    def _get_anno_path(self, meta, frame_id):\n        return self._anno_path / meta['sequence'] / (meta['frame_names'][frame_id] + \".png\")\n\n    def get_frames(self, sample_id, frame_ids, anno=None):\n        \"\"\"  Fetch frames with the given ids.\n        :param sample_id:  Sample to get.\n        :param frame_ids:  List of frame indices in the sequence belonging to the sample_id\n        :return: dict of metadata and data:\n                sequence:  Sequence name\n                images:    List of images. 
No entries may be None\n                labels:    List of label/mask images. Entries may be None if the data is missing\n                bboxes:    List of bounding boxes. Entries may be None if the data is missing\n        \"\"\"\n        seq_name, obj_ids = self._samples[sample_id]\n\n        meta = self.get_sequence_info(sample_id) if anno is None else anno\n        frame_names = meta['frame_names']\n        images = [self._load_image(self._jpeg_path / seq_name / (frame_names[f] + \".jpg\")) for f in frame_ids]\n        labels = [self._load_anno(self._anno_path / seq_name / (frame_names[f] + \".png\")) for f in frame_ids]\n\n        # Generate bounding boxes for the requested objects\n        bboxes = []\n        for lb in labels:\n            lb = torch.from_numpy(lb.squeeze())\n            frame_bbs = {}\n            for obj_id in obj_ids:\n                bbox = masks_to_bboxes(lb == int(obj_id), fmt='t')\n                if bbox[3] == 0 or bbox[2] == 0:\n                    print(\"!\")\n                frame_bbs[obj_id] = bbox\n            bboxes.append(frame_bbs)\n\n        # Insert empty bboxes for missing object ids\n        for bbox in bboxes:\n            for obj_id in obj_ids:\n                if obj_id not in bbox:\n                    bbox[obj_id] = torch.zeros(4, dtype=torch.float32)\n\n        # Remap to object id 1, if requested - for training\n        if not self.multiobj:\n            assert len(obj_ids) == 1\n            obj_id = obj_ids[0]\n            labels = [torch.Tensor(lb == int(obj_id)) for lb in labels]\n            bboxes = [bbox[obj_id] for bbox in bboxes]\n        else:\n            labels = [torch.Tensor(lb) for lb in labels]\n\n        object_meta = {key: meta[key] for key in ['sequence', 'frame_shape', 'frame_names', 'object_ids']}\n\n        anno_frames = dict(bbox=bboxes, mask=labels)\n        for key in ['object_sizes', 'visible', 'valid']:\n            value = meta[key]\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        return images, anno_frames, object_meta\n\n    def get_name(self):\n        return \"%s/%s/%s\" % (self.name, self.version, self.split)\n\n    def has_class_info(self):\n        return False\n\n    def has_occlusion_info(self):\n        return True\n\n    def get_num_classes(self):\n        return 0\n\n    def get_class_list(self):\n        return []\n\n    def get_sequences_in_class(self, class_name):\n        raise []\n\n    def has_segmentation_info(self):\n        return True\n"
  },
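  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/_sketch_vos_meta_usage.py",
    "content": "# Editor's sketch (not part of the original repository): walks the generated\n# metadata structure documented in VOSMeta.generate() above. 'meta_file' is a\n# hypothetical path to a previously saved generated_meta.json.\nfrom pathlib import Path\n\nfrom ltr.dataset.vos_base import VOSMeta\n\n\ndef summarize(meta_file):\n    meta = VOSMeta(filename=Path(meta_file))\n    for seq in meta.get_sequence_names()[:3]:\n        h, w = meta.get_shape(seq)\n        for obj_id in meta.get_obj_ids(seq):\n            # {frame_name: pixel_count} for this object, as per generate()\n            sizes = meta.get_obj_sizes_per_object(seq)[obj_id]\n            print(seq, (h, w), obj_id, len(sizes))\n"
  },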
  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/youtubevos.py",
    "content": "from pathlib import Path\nimport os\nfrom ltr.dataset.vos_base import VOSDatasetBase, VOSMeta\nfrom pytracking.evaluation import Sequence\nimport json\nfrom ltr.admin.environment import env_settings\nfrom ltr.data.image_loader import jpeg4py_loader\n\n\nclass YouTubeVOSMeta:\n    \"\"\" Thin wrapper for YouTubeVOS meta data\n    meta.json\n    {\n        \"videos\": {\n            \"<video_id>\": {\n                \"objects\": {\n                    \"<object_id>\": {\n                        \"category\": \"<category>\",\n                        \"frames\": [\n                            \"<frame_id>\",\n                            \"<frame_id>\",\n                        ]\n                    }\n                }\n            }\n        }\n    }\n    # <object_id> is the same as the pixel values of object in annotated segmentation PNG files.\n    # <frame_id> is the 5-digit index of frame in video, and not necessary to start from 0.\n    \"\"\"\n\n    def __init__(self, dset_split_path):\n        self._data = json.load(open(dset_split_path / 'meta.json'))['videos']\n\n    def sequences(self):\n        return list(self._data.keys())\n\n    def seq_frames(self, seq_name):\n        \"\"\" All filename stems of the frames in the sequence \"\"\"\n        frames = set()\n        for obj_id in self.object_ids(seq_name):\n            for f in self.object_frames(seq_name, obj_id):\n                frames.add(f)\n        return list(sorted(frames))\n\n    def object_ids(self, seq_name):\n        \"\"\" All objects in the sequence \"\"\"\n        return list(self._data[seq_name]['objects'].keys())\n\n    def object_category(self, seq_name, obj_id):\n        return self._data[seq_name]['objects'][str(obj_id)]['category']\n\n    def object_frames(self, seq_name, obj_id):\n        return self._data[seq_name]['objects'][str(obj_id)]['frames']\n\n    def object_first_frame(self, seq_name, obj_id):\n        return self.object_frames(seq_name, obj_id)[0]\n\n\nclass YouTubeVOS(VOSDatasetBase):\n    \"\"\"\n    YoutubeVOS video object segmentation dataset.\n\n    Publication:\n        YouTube-VOS: A Large-Scale Video Object Segmentation Benchmark\n        Ning Xu, Linjie Yang, Yuchen Fan, Dingcheng Yue, Yuchen Liang, Jianchao Yang, and Thomas Huang\n        ECCV, 2018\n        https://arxiv.org/pdf/1809.03327.pdf\n\n    Download dataset from: https://youtube-vos.org/dataset/\n    \"\"\"\n    def __init__(self, root=None, version='2019', split='train', cleanup=None, all_frames=False, sequences=None,\n                 multiobj=True, vis_threshold=10, image_loader=jpeg4py_loader):\n        \"\"\"\n        args:\n            root - Dataset root path. If unset, it uses the path in your local.py config.\n            version - '2018' or '2019'\n            split - 'test', 'train', 'valid', or 'jjtrain', 'jjvalid'. 'jjvalid' corresponds to a custom validation\n                    dataset consisting of 300 videos randomly sampled from the train set. 'jjtrain' contains the\n                    remaining videos used for training.\n            cleanup - List of actions to take to to clean up known problems in the dataset.\n                      'aspects': remove frames with weird aspect ratios,\n                      'starts': fix up start frames from original meta data\n            all_frames - Whether to use an \"all_frames\" split.\n            sequences - List of sequence names. 
Limit to a subset of sequences if not None.\n            multiobj - Whether the dataset will return all objects in a sequence or multiple sequences with one\n                       object in each.\n            vis_threshold - Minimum number of pixels required to consider a target object \"visible\".\n            image_loader - Image loader.\n        \"\"\"\n        root = env_settings().youtubevos_dir if root is None else root\n        super().__init__(name=\"YouTubeVOS\", root=Path(root), version=version, split=split, multiobj=multiobj,\n                         vis_threshold=vis_threshold, image_loader=image_loader)\n\n        split_folder = self.split\n        if self.split.startswith(\"jj\"):\n            split_folder = \"train\"\n\n        dset_path = self.root / self.version / split_folder\n\n        self._anno_path = dset_path / 'Annotations'\n\n        if all_frames:\n            self._jpeg_path = self.root / self.version / (split_folder + \"_all_frames\") / 'JPEGImages'\n        else:\n            self._jpeg_path = dset_path / 'JPEGImages'\n\n        self.meta = YouTubeVOSMeta(dset_path)\n        meta_path = dset_path / \"generated_meta.json\"\n        if meta_path.exists():\n            self.gmeta = VOSMeta(filename=meta_path)\n        else:\n            self.gmeta = VOSMeta.generate('YouTubeVOS', self._jpeg_path, self._anno_path)\n            self.gmeta.save(meta_path)\n\n        if all_frames:\n            self.gmeta.enable_all_frames(self._jpeg_path)\n\n        if self.split not in ['train', 'valid', 'test']:\n            self.gmeta.select_split('youtubevos', self.split)\n\n        if sequences is None:\n            sequences = self.gmeta.get_sequence_names()\n\n        to_remove = set()\n        cleanup = {} if cleanup is None else set(cleanup)\n\n        if 'aspect' in cleanup:\n            # Remove sequences with unusual aspect ratios\n            for seq_name in sequences:\n                a = self.gmeta.get_aspect_ratio(seq_name)\n                if a < 1.45 or a > 1.9:\n                    to_remove.add(seq_name)\n\n        if 'starts' in cleanup:\n            # Fix incorrect start frames for some objects found with ytvos_start_frames_test()\n            bad_start_frames = [(\"0e27472bea\", '2', ['00055', '00060'], '00065'),\n                                (\"5937b08d69\", '4', ['00000'], '00005'),\n                                (\"5e1ce354fd\", '5', ['00010', '00015'], '00020'),\n                                (\"7053e4f41e\", '2', ['00000', '00005', '00010', '00015'], '00020'),\n                                (\"720e3fa04c\", '2', ['00050'], '00055'),\n                                (\"c73c8e747f\", '2', ['00035'], '00040')]\n            for seq_name, obj_id, bad_frames, good_frame in bad_start_frames:\n                # bad_frames is from meta.json included with the dataset\n                # good_frame is from the generated meta - and the first actual frame where the object was seen.\n                if seq_name in self.meta._data:\n                    frames = self.meta.object_frames(seq_name, obj_id)\n                    for f in bad_frames:\n                        frames.remove(f)\n                    assert frames[0] == good_frame\n\n        sequences = [seq for seq in sequences if seq not in to_remove]\n\n        self.sequence_names = sequences\n        self._samples = []\n\n        for seq in sequences:\n            obj_ids = self.meta.object_ids(seq)\n            if self.multiobj:  # Multiple objects per sample\n                
self._samples.append((seq, obj_ids))\n            else:  # One object per sample\n                self._samples.extend([(seq, [obj_id]) for obj_id in obj_ids])\n\n        print(\"%s loaded.\" % self.get_name())\n        if len(to_remove) > 0:\n            print(\"   %d sequences were removed, (%d remaining).\" % (len(to_remove), len(sequences)))\n\n    def _construct_sequence(self, sequence_info):\n\n        seq_name = sequence_info['sequence']\n        frame_names = sequence_info['frame_names']\n        fname_to_fid = {f: i for i, f in enumerate(frame_names)}\n        images, gt_segs, gt_bboxes = self.get_paths_and_bboxes(sequence_info)\n\n        init_data = dict()\n        for obj_id in sequence_info['object_ids']:\n            if obj_id == '0':\n                print(\"!\")\n            f_name = self.meta.object_first_frame(seq_name, obj_id)\n            f_id = fname_to_fid[f_name]\n            if f_id not in init_data:\n                init_data[f_id] = {'object_ids': [obj_id],\n                                   'bbox': {obj_id: gt_bboxes[obj_id][f_id,:]},\n                                   'mask': os.path.join(os.path.dirname(gt_segs[f_id]), (f_name + \".png\"))}\n                assert init_data[f_id]['mask'] in gt_segs  # If this fails, some file is missing\n            else:\n                init_data[f_id]['object_ids'].append(obj_id)\n                init_data[f_id]['bbox'][obj_id] = gt_bboxes[obj_id][f_id,:]\n\n        return Sequence(name=seq_name, frames=images, dataset='YouTubeVOS', ground_truth_rect=gt_bboxes,\n                        init_data=init_data, ground_truth_seg=gt_segs, object_ids=sequence_info['object_ids'],\n                        multiobj_mode=self.multiobj)\n"
  },
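  {
    "path": "artrackv2_mindspore/external/AR/ltr/dataset/_sketch_youtubevos_meta.py",
    "content": "# Editor's sketch (not part of the original repository): reads the meta.json\n# structure documented in YouTubeVOSMeta above. 'split_path' is a hypothetical\n# path such as <root>/<version>/train.\nimport json\nfrom pathlib import Path\n\n\ndef first_frame_per_object(split_path):\n    videos = json.load(open(Path(split_path) / 'meta.json'))['videos']\n    # <object_id> equals the object's pixel value in the annotation PNGs;\n    # frame ids are 5-digit stems and need not start at 00000.\n    return {seq: {oid: obj['frames'][0] for oid, obj in v['objects'].items()}\n            for seq, v in videos.items()}\n"
  },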
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/.gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n.vim-template*\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2018 Jiayuan Mao\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/README.md",
    "content": "# PreciseRoIPooling\nThis repo implements the **Precise RoI Pooling** (PrRoI Pooling), proposed in the paper **Acquisition of Localization Confidence for Accurate Object Detection** published at ECCV 2018 (Oral Presentation).\n\n**Acquisition of Localization Confidence for Accurate Object Detection**\n\n_Borui Jiang*, Ruixuan Luo*, Jiayuan Mao*, Tete Xiao, Yuning Jiang_ (* indicates equal contribution.)\n\nhttps://arxiv.org/abs/1807.11590\n\n## Brief\n\nIn short, Precise RoI Pooling is an integration-based (bilinear interpolation) average pooling method for RoI Pooling. It avoids any quantization and has a continuous gradient on bounding box coordinates. It is:\n\n- different from the original RoI Pooling proposed in [Fast R-CNN](https://arxiv.org/abs/1504.08083). PrRoI Pooling uses average pooling instead of max pooling for each bin and has a continuous gradient on bounding box coordinates. That is, one can take the derivatives of some loss function w.r.t the coordinates of each RoI and optimize the RoI coordinates.\n- different from the RoI Align proposed in [Mask R-CNN](https://arxiv.org/abs/1703.06870). PrRoI Pooling uses a full integration-based average pooling instead of sampling a constant number of points. This makes the gradient w.r.t. the coordinates continuous.\n\nFor a better illustration, we illustrate RoI Pooling, RoI Align and PrRoI Pooing in the following figure. More details including the gradient computation can be found in our paper.\n\n<center><img src=\"./_assets/prroi_visualization.png\" width=\"80%\"></center>\n\n## Implementation\n\nPrRoI Pooling was originally implemented by [Tete Xiao](http://tetexiao.com/) based on MegBrain, an (internal) deep learning framework built by Megvii Inc. It was later adapted into open-source deep learning frameworks. Currently, we only support PyTorch. Unfortunately, we don't have any specific plan for the adaptation into other frameworks such as TensorFlow, but any contributions (pull requests) will be more than welcome.\n\n## Usage (PyTorch 1.0)\n\nIn the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 1.0+ and only supports CUDA (CPU mode is not implemented).\nSince we use PyTorch JIT for cxx/cuda code compilation, to use the module in your code, simply do:\n\n```\nfrom prroi_pool import PrRoIPool2D\n\navg_pool = PrRoIPool2D(window_height, window_width, spatial_scale)\nroi_features = avg_pool(features, rois)\n\n# for those who want to use the \"functional\"\n\nfrom prroi_pool.functional import prroi_pool2d\nroi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale)\n```\n\n\n## Usage (PyTorch 0.4)\n\n**!!! Please first checkout to the branch pytorch0.4.**\n\nIn the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 0.4 and only supports CUDA (CPU mode is not implemented).\nTo use the PrRoI Pooling module, first goto `pytorch/prroi_pool` and execute `./travis.sh` to compile the essential components (you may need `nvcc` for this step). 
To use the module in your code, simply do:\n\n```\nfrom prroi_pool import PrRoIPool2D\n\navg_pool = PrRoIPool2D(window_height, window_width, spatial_scale)\nroi_features = avg_pool(features, rois)\n\n# for those who want to use the \"functional\"\n\nfrom prroi_pool.functional import prroi_pool2d\nroi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale)\n```\n\nHere,\n\n- RoI is an `m * 5` float tensor of format `(batch_index, x0, y0, x1, y1)`, following the convention in the original Caffe implementation of RoI Pooling, although in some frameworks the batch indices are provided by an integer tensor.\n- `spatial_scale` is multiplied to the RoIs. For example, if your feature maps are down-sampled by a factor of 16 (w.r.t. the input image), you should use a spatial scale of `1/16`.\n- The coordinates for RoI follows the [L, R) convension. That is, `(0, 0, 4, 4)` denotes a box of size `4x4`.\n"
  },
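  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/_sketch_roi_format.py",
    "content": "# Editor's sketch (not part of the original repository): a worked example of\n# the RoI format described in the README. Each row of 'rois' is\n# (batch_index, x0, y0, x1, y1) in input-image coordinates; spatial_scale\n# maps those coordinates onto the feature map. Requires a CUDA build of the\n# extension.\nimport torch\n\nfrom prroi_pool import PrRoIPool2D\n\n\ndef pool_two_boxes():\n    # Stride-16 feature map of a hypothetical 512x512 input image.\n    feats = torch.randn(2, 16, 32, 32).cuda()\n    rois = torch.tensor([[0., 0., 0., 64., 64.],       # 64x64 box in image 0\n                         [1., 128., 96., 256., 192.]]).cuda()\n    pool = PrRoIPool2D(7, 7, spatial_scale=1 / 16)  # 64 px -> 4 feature cells\n    return pool(feats, rois)  # shape: (2, 16, 7, 7)\n"
  },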
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/.gitignore",
    "content": "*.o\n/_prroi_pooling\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/__init__.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : __init__.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n# Date   : 07/13/2018\n# \n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nfrom .prroi_pool import *\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/functional.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : functional.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n# Date   : 07/13/2018\n#\n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nimport torch\nimport torch.autograd as ag\n\n__all__ = ['prroi_pool2d']\n\n\n_prroi_pooling = None\n\n\ndef _import_prroi_pooling():\n    global _prroi_pooling\n\n    if _prroi_pooling is None:\n        try:\n            from os.path import join as pjoin, dirname\n            from torch.utils.cpp_extension import load as load_extension\n            root_dir = pjoin(dirname(__file__), 'src')\n\n            _prroi_pooling = load_extension(\n                '_prroi_pooling',\n                [pjoin(root_dir, 'prroi_pooling_gpu.c'), pjoin(root_dir, 'prroi_pooling_gpu_impl.cu')],\n                verbose=True\n            )\n        except ImportError:\n            raise ImportError('Can not compile Precise RoI Pooling library.')\n\n    return _prroi_pooling\n\n\nclass PrRoIPool2DFunction(ag.Function):\n    @staticmethod\n    def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale):\n        _prroi_pooling = _import_prroi_pooling()\n\n        assert 'FloatTensor' in features.type() and 'FloatTensor' in rois.type(), \\\n                'Precise RoI Pooling only takes float input, got {} for features and {} for rois.'.format(features.type(), rois.type())\n\n        pooled_height = int(pooled_height)\n        pooled_width = int(pooled_width)\n        spatial_scale = float(spatial_scale)\n\n        features = features.contiguous()\n        rois = rois.contiguous()\n        params = (pooled_height, pooled_width, spatial_scale)\n\n        if features.is_cuda:\n            output = _prroi_pooling.prroi_pooling_forward_cuda(features, rois, *params)\n            ctx.params = params\n            # everything here is contiguous.\n            ctx.save_for_backward(features, rois, output)\n        else:\n            raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.')\n\n        return output\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        _prroi_pooling = _import_prroi_pooling()\n\n        features, rois, output = ctx.saved_tensors\n        grad_input = grad_coor = None\n\n        if features.requires_grad:\n            grad_output = grad_output.contiguous()\n            grad_input = _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, *ctx.params)\n        if rois.requires_grad:\n            grad_output = grad_output.contiguous()\n            grad_coor = _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, *ctx.params)\n\n        return grad_input, grad_coor, None, None, None\n\n\nprroi_pool2d = PrRoIPool2DFunction.apply\n\n"
  },
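  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/_sketch_coor_grad.py",
    "content": "# Editor's sketch (not part of the original repository): demonstrates the\n# property implemented by PrRoIPool2DFunction.backward above: PrRoI pooling\n# is differentiable w.r.t. the RoI coordinates themselves, so box coordinates\n# can be optimized directly. Requires a CUDA build of the extension.\nimport torch\n\nfrom prroi_pool import PrRoIPool2D\n\n\ndef roi_coordinate_gradients():\n    feats = torch.randn(1, 8, 32, 32, device='cuda')\n    rois = torch.tensor([[0., 4., 4., 20., 20.]], device='cuda',\n                        requires_grad=True)\n    out = PrRoIPool2D(3, 3, spatial_scale=1.0)(feats, rois)\n    out.sum().backward()\n    # Entry 0 (batch index) stays 0; entries 1..4 carry\n    # d(loss)/d(x0, y0, x1, y1) from prroi_pooling_coor_backward_cuda.\n    return rois.grad\n"
  },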
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/prroi_pool.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : prroi_pool.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n# Date   : 07/13/2018\n#\n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nimport torch.nn as nn\n\nfrom .functional import prroi_pool2d\n\n__all__ = ['PrRoIPool2D']\n\n\nclass PrRoIPool2D(nn.Module):\n    def __init__(self, pooled_height, pooled_width, spatial_scale):\n        super().__init__()\n\n        self.pooled_height = int(pooled_height)\n        self.pooled_width = int(pooled_width)\n        self.spatial_scale = float(spatial_scale)\n\n    def forward(self, features, rois):\n        return prroi_pool2d(features, rois, self.pooled_height, self.pooled_width, self.spatial_scale)\n\n    def extra_repr(self):\n        return 'kernel_size=({pooled_height}, {pooled_width}), spatial_scale={spatial_scale}'.format(**self.__dict__)\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.c",
    "content": "/*\n * File   : prroi_pooling_gpu.c\n * Author : Jiayuan Mao, Tete Xiao\n * Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n * Date   : 07/13/2018\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#include <math.h>\n#include <torch/extension.h>\n\n#include <ATen/ATen.h>\n#include <ATen/cuda/CUDAContext.h>\n\n#include <THC/THC.h>\n\n#include \"prroi_pooling_gpu_impl.cuh\"\n\n\nat::Tensor prroi_pooling_forward_cuda(const at::Tensor &features, const at::Tensor &rois, int pooled_height, int pooled_width, float spatial_scale) {\n    int nr_rois = rois.size(0);\n    int nr_channels = features.size(1);\n    int height = features.size(2);\n    int width = features.size(3);\n    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;\n    auto output = at::zeros({nr_rois, nr_channels, pooled_height, pooled_width}, features.options());\n\n    if (output.numel() == 0) {\n        THCudaCheck(cudaGetLastError());\n        return output;\n    }\n\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n    PrRoIPoolingForwardGpu(\n        stream, features.data<float>(), rois.data<float>(), output.data<float>(),\n        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,\n        top_count\n    );\n\n    THCudaCheck(cudaGetLastError());\n    return output;\n}\n\nat::Tensor prroi_pooling_backward_cuda(\n    const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff,\n    int pooled_height, int pooled_width, float spatial_scale) {\n\n    auto features_diff = at::zeros_like(features);\n\n    int nr_rois = rois.size(0);\n    int batch_size = features.size(0);\n    int nr_channels = features.size(1);\n    int height = features.size(2);\n    int width = features.size(3);\n    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;\n    int bottom_count = batch_size * nr_channels * height * width;\n\n    if (output.numel() == 0) {\n        THCudaCheck(cudaGetLastError());\n        return features_diff;\n    }\n\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n    PrRoIPoolingBackwardGpu(\n        stream,\n        features.data<float>(), rois.data<float>(), output.data<float>(), output_diff.data<float>(),\n        features_diff.data<float>(),\n        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,\n        top_count, bottom_count\n    );\n\n    THCudaCheck(cudaGetLastError());\n    return features_diff;\n}\n\nat::Tensor prroi_pooling_coor_backward_cuda(\n    const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff,\n    int pooled_height, int pooled_width, float spatial_scale) {\n\n    auto coor_diff = at::zeros_like(rois);\n\n    int nr_rois = rois.size(0);\n    int nr_channels = features.size(1);\n    int height = features.size(2);\n    int width = features.size(3);\n    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;\n    int bottom_count = nr_rois * 5;\n\n    if (output.numel() == 0) {\n        THCudaCheck(cudaGetLastError());\n        return coor_diff;\n    }\n\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n    PrRoIPoolingCoorBackwardGpu(\n        stream,\n        features.data<float>(), rois.data<float>(), output.data<float>(), output_diff.data<float>(),\n        coor_diff.data<float>(),\n        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,\n        top_count, 
bottom_count\n    );\n\n    THCudaCheck(cudaGetLastError());\n    return coor_diff;\n}\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n    m.def(\"prroi_pooling_forward_cuda\", &prroi_pooling_forward_cuda, \"PRRoIPooling_forward\");\n    m.def(\"prroi_pooling_backward_cuda\", &prroi_pooling_backward_cuda, \"PRRoIPooling_backward\");\n    m.def(\"prroi_pooling_coor_backward_cuda\", &prroi_pooling_coor_backward_cuda, \"PRRoIPooling_backward_coor\");\n}\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.h",
    "content": "/*\n * File   : prroi_pooling_gpu.h\n * Author : Jiayuan Mao, Tete Xiao\n * Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com \n * Date   : 07/13/2018\n * \n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\nint prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale);\n\nint prroi_pooling_backward_cuda(\n    THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,\n    int pooled_height, int pooled_width, float spatial_scale\n);\n\nint prroi_pooling_coor_backward_cuda(\n    THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,\n    int pooled_height, int pooled_width, float spatial_scal\n);\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cu",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cu\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#include \"prroi_pooling_gpu_impl.cuh\"\n\n#include <cstdio>\n#include <cfloat>\n\n#define CUDA_KERNEL_LOOP(i, n) \\\n    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \\\n        i < (n); \\\n        i += blockDim.x * gridDim.x)\n\n#define CUDA_POST_KERNEL_CHECK \\\n    do { \\\n        cudaError_t err = cudaGetLastError(); \\\n        if (cudaSuccess != err) { \\\n            fprintf(stderr, \"cudaCheckError() failed : %s\\n\", cudaGetErrorString(err)); \\\n            exit(-1); \\\n        } \\\n    } while(0)\n\n#define CUDA_NUM_THREADS 512\n\nnamespace {\n\nstatic int CUDA_NUM_BLOCKS(const int N) {\n  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;\n}\n\n__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    float retVal = overflow ? 0.0f : data[h * width + w];\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingGetCoeff(float dh, float dw){\n    dw = dw > 0 ? dw : -dw;\n    dh = dh > 0 ? dh : -dh;\n    return (1.0f - dh) * (1.0f - dw);\n}\n\n__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {\n    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;\n}\n\n__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){\n    float retVal = 0.0f;\n    int h1 = floorf(h);\n    int w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h);\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n    float sum_out = 0;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * 
lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;\n\n    return sum_out;\n}\n\n__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    if (!overflow)\n        atomicAdd(diff + h * width + w, top_diff * coeff);\n}\n\n__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);\n}\n\n__global__ void PrRoIPoolingForward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_OUT top_data,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n\n    bottom_rois += n * 5;\n    int roi_batch_ind = bottom_rois[0];\n\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));\n    float 
roi_height = max(roi_end_h - roi_start_h, ((float)0.0));\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    float *this_out = top_data + index;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n    if (win_size == 0) {\n        *this_out = 0;\n        return;\n    }\n\n    float sum_out = 0;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n    *this_out = sum_out / win_size;\n  }\n}\n\n__global__ void PrRoIPoolingBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n\n  }\n}\n\n__global__ void PrRoIPoolingCoorBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_data,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    const float *this_top_data = top_data + index;\n    float *this_data_grad = bottom_diff + n * 5;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    // WARNING: to be discussed\n    if (sum_out == 0)\n        return;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;\n    for (int h_iter = s_h; h_iter < e_h; ++h_iter) {\n        g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));\n\n        g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));\n    }\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter) {\n        g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));\n\n        g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));\n    }\n\n    float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);\n    float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);\n\n    partial_x1 = partial_x1 / win_size * spatial_scale;\n    partial_x2 = partial_x2 / win_size * spatial_scale;\n    partial_y1 = partial_y1 / win_size * spatial_scale;\n    partial_y2 = partial_y2 / win_size * spatial_scale;\n\n    // (b, x1, y1, x2, y2)\n\n    this_data_grad[0] = 0;\n    atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width)\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height)\n            * (*this_out_grad));\n  }\n}\n\n} /* !anonymous namespace */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count) {\n\n    
PrRoIPoolingForward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_rois, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingCoorBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\n} /* !extern \"C\" */\n\n"
  },
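  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/tests/prroi_reference_sketch.py",
    "content": "# -*- coding: utf-8 -*-\n# File   : prroi_reference_sketch.py\n#\n# Illustrative sketch, not part of the upstream PreciseRoIPooling sources: a\n# dense-sampling approximation of the PrRoIPoolingForward kernel above, handy\n# for sanity-checking the CUDA output on small inputs. The kernel integrates\n# the bilinearly interpolated feature map over each pooled bin and divides by\n# the bin area; here the integral is approximated by averaging a fine grid of\n# bilinear samples. All names in this file are our own.\n\nimport torch\n\n\ndef _bilinear(fmap, y, x):\n    # Bilinear interpolation with zero padding outside the feature map,\n    # mirroring PrRoIPoolingInterpolation / PrRoIPoolingGetData.\n    c, h, w = fmap.shape\n    y0 = torch.floor(y).long()\n    x0 = torch.floor(x).long()\n    out = fmap.new_zeros((c,) + tuple(y.shape))\n    for dy in (0, 1):\n        for dx in (0, 1):\n            yy = y0 + dy\n            xx = x0 + dx\n            inside = (yy >= 0) & (yy < h) & (xx >= 0) & (xx < w)\n            coeff = (1 - (y - yy).abs()) * (1 - (x - xx).abs())\n            vals = fmap[:, yy.clamp(0, h - 1), xx.clamp(0, w - 1)]\n            out += vals * (coeff * inside).unsqueeze(0)\n    return out\n\n\ndef prroi_pool2d_reference(features, rois, pooled_height, pooled_width, spatial_scale, samples=32):\n    # features: (N, C, H, W); rois: (R, 5) rows of (batch_index, x1, y1, x2, y2).\n    n_rois, channels = rois.shape[0], features.shape[1]\n    top = features.new_zeros((n_rois, channels, pooled_height, pooled_width))\n    grid = (torch.arange(samples, dtype=features.dtype) + 0.5) / samples\n    for r in range(n_rois):\n        ind = int(rois[r, 0])\n        x1, y1, x2, y2 = (rois[r, 1:] * spatial_scale).tolist()\n        bin_w = (x2 - x1) / pooled_width\n        bin_h = (y2 - y1) / pooled_height\n        if bin_w * bin_h <= 0:\n            continue  # degenerate RoI: the CUDA kernel also leaves zeros here\n        for ph in range(pooled_height):\n            for pw in range(pooled_width):\n                ys = y1 + bin_h * (ph + grid)\n                xs = x1 + bin_w * (pw + grid)\n                gy, gx = torch.meshgrid(ys, xs, indexing='ij')\n                top[r, :, ph, pw] = _bilinear(features[ind], gy, gx).mean(dim=(-2, -1))\n    return top\n"
  },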
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cuh",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cuh\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#ifndef PRROI_POOLING_GPU_IMPL_CUH\n#define PRROI_POOLING_GPU_IMPL_CUH\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#define F_DEVPTR_IN const float *\n#define F_DEVPTR_OUT float *\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count);\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\n#ifdef __cplusplus\n} /* !extern \"C\" */\n#endif\n\n#endif /* !PRROI_POOLING_GPU_IMPL_CUH */\n\n"
  },
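  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/indexing_sketch.py",
    "content": "# -*- coding: utf-8 -*-\n# File   : indexing_sketch.py\n#\n# Illustrative sketch, not part of the upstream sources: the flat-index\n# bookkeeping shared by the three kernels declared above. Each output element\n# index decomposes into (n, c, ph, pw), and the launchers use\n# ceil(top_count / CUDA_NUM_THREADS) blocks of CUDA_NUM_THREADS threads, so\n# the grid-stride loop normally runs a single iteration per thread.\n\nCUDA_NUM_THREADS = 512\n\n\ndef cuda_num_blocks(n):\n    # Same rounding-up division as CUDA_NUM_BLOCKS in the .cu file.\n    return (n + CUDA_NUM_THREADS - 1) // CUDA_NUM_THREADS\n\n\ndef decompose(index, channels, pooled_height, pooled_width):\n    # Mirrors the modulo/division chain at the top of each kernel.\n    pw = index % pooled_width\n    ph = (index // pooled_width) % pooled_height\n    c = (index // (pooled_width * pooled_height)) % channels\n    n = index // (pooled_width * pooled_height * channels)\n    return n, c, ph, pw\n\n\nif __name__ == '__main__':\n    # 2 RoIs x 16 channels x 7x7 bins = 1568 outputs -> 4 blocks of 512 threads\n    print(cuda_num_blocks(2 * 16 * 7 * 7))  # 4\n    print(decompose(1567, 16, 7, 7))        # (1, 15, 6, 6): last bin of last RoI\n"
  },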
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/tests/test_prroi_pooling2d.py",
    "content": "# -*- coding: utf-8 -*-\n# File   : test_prroi_pooling2d.py\n# Author : Jiayuan Mao\n# Email  : maojiayuan@gmail.com\n# Date   : 18/02/2018\n#\n# This file is part of Jacinle.\n\nimport unittest\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom jactorch.utils.unittest import TorchTestCase\n\nfrom prroi_pool import PrRoIPool2D\n\n\nclass TestPrRoIPool2D(TorchTestCase):\n    def test_forward(self):\n        pool = PrRoIPool2D(7, 7, spatial_scale=0.5)\n        features = torch.rand((4, 16, 24, 32)).cuda()\n        rois = torch.tensor([\n            [0, 0, 0, 14, 14],\n            [1, 14, 14, 28, 28],\n        ]).float().cuda()\n\n        out = pool(features, rois)\n        out_gold = F.avg_pool2d(features, kernel_size=2, stride=1)\n\n        self.assertTensorClose(out, torch.stack((\n            out_gold[0, :, :7, :7],\n            out_gold[1, :, 7:14, 7:14],\n        ), dim=0))\n\n    def test_backward_shapeonly(self):\n        pool = PrRoIPool2D(2, 2, spatial_scale=0.5)\n\n        features = torch.rand((4, 2, 24, 32)).cuda()\n        rois = torch.tensor([\n            [0, 0, 0, 4, 4],\n            [1, 14, 14, 18, 18],\n        ]).float().cuda()\n        features.requires_grad = rois.requires_grad = True\n        out = pool(features, rois)\n\n        loss = out.sum()\n        loss.backward()\n\n        self.assertTupleEqual(features.size(), features.grad.size())\n        self.assertTupleEqual(rois.size(), rois.grad.size())\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
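  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/pytorch/tests/prroi_usage_sketch.py",
    "content": "# -*- coding: utf-8 -*-\n# File   : prroi_usage_sketch.py\n#\n# Minimal usage sketch for the PrRoIPool2D module exercised by the unit test\n# above. Not part of the upstream distribution: it assumes the CUDA extension\n# has been built and a GPU is available, and the tensor sizes are\n# illustrative only.\n\nimport torch\n\nfrom prroi_pool import PrRoIPool2D\n\n# rois are rows of (batch_index, x1, y1, x2, y2) in image coordinates;\n# spatial_scale maps them onto the feature map (0.5 for stride-2 features).\npool = PrRoIPool2D(7, 7, spatial_scale=0.5)\n\nfeatures = torch.rand(2, 16, 24, 32).cuda()\nrois = torch.tensor([\n    [0, 0, 0, 14, 14],   # RoI taken from image 0\n    [1, 4, 4, 20, 20],   # RoI taken from image 1\n]).float().cuda()\n\nout = pool(features, rois)\nprint(out.shape)  # torch.Size([2, 16, 7, 7])\n"
  },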
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cu",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cu\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#include \"prroi_pooling_gpu_impl.cuh\"\n\n#include <cstdio>\n#include <cfloat>\n\n#define CUDA_KERNEL_LOOP(i, n) \\\n    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \\\n        i < (n); \\\n        i += blockDim.x * gridDim.x)\n\n#define CUDA_POST_KERNEL_CHECK \\\n    do { \\\n        cudaError_t err = cudaGetLastError(); \\\n        if (cudaSuccess != err) { \\\n            fprintf(stderr, \"cudaCheckError() failed : %s\\n\", cudaGetErrorString(err)); \\\n            exit(-1); \\\n        } \\\n    } while(0)\n\n#define CUDA_NUM_THREADS 512\n\nnamespace {\n\nstatic int CUDA_NUM_BLOCKS(const int N) {\n  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;\n}\n\n__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    float retVal = overflow ? 0.0f : data[h * width + w];\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingGetCoeff(float dh, float dw){\n    dw = dw > 0 ? dw : -dw;\n    dh = dh > 0 ? dh : -dh;\n    return (1.0f - dh) * (1.0f - dw);\n}\n\n__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {\n    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;\n}\n\n__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){\n    float retVal = 0.0f;\n    int h1 = floorf(h);\n    int w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h);\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n    float sum_out = 0;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * 
lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;\n\n    return sum_out;\n}\n\n__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    if (!overflow)\n        atomicAdd(diff + h * width + w, top_diff * coeff);\n}\n\n__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);\n}\n\n__global__ void PrRoIPoolingForward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_OUT top_data,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n\n    bottom_rois += n * 5;\n    int roi_batch_ind = bottom_rois[0];\n\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));\n    float 
roi_height = max(roi_end_h - roi_start_h, ((float)0.0));\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    float *this_out = top_data + index;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n    if (win_size == 0) {\n        *this_out = 0;\n        return;\n    }\n\n    float sum_out = 0;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n    *this_out = sum_out / win_size;\n  }\n}\n\n__global__ void PrRoIPoolingBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n\n  }\n}\n\n__global__ void PrRoIPoolingCoorBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_data,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    const float *this_top_data = top_data + index;\n    float *this_data_grad = bottom_diff + n * 5;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    // WARNING: to be discussed\n    if (sum_out == 0)\n        return;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;\n    for (int h_iter = s_h; h_iter < e_h; ++h_iter) {\n        g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));\n\n        g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));\n    }\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter) {\n        g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));\n\n        g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));\n    }\n\n    float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);\n    float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);\n\n    partial_x1 = partial_x1 / win_size * spatial_scale;\n    partial_x2 = partial_x2 / win_size * spatial_scale;\n    partial_y1 = partial_y1 / win_size * spatial_scale;\n    partial_y2 = partial_y2 / win_size * spatial_scale;\n\n    // (b, x1, y1, x2, y2)\n\n    this_data_grad[0] = 0;\n    atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width)\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height)\n            * (*this_out_grad));\n  }\n}\n\n} /* !anonymous namespace */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count) {\n\n    
PrRoIPoolingForward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_rois, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingCoorBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\n} /* !extern \"C\" */\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cuh",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cuh\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#ifndef PRROI_POOLING_GPU_IMPL_CUH\n#define PRROI_POOLING_GPU_IMPL_CUH\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#define F_DEVPTR_IN const float *\n#define F_DEVPTR_OUT float *\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count);\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\n#ifdef __cplusplus\n} /* !extern \"C\" */\n#endif\n\n#endif /* !PRROI_POOLING_GPU_IMPL_CUH */\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/AR_seg_mask/AR_seg_mask.py",
    "content": "import torch.nn as nn\nfrom ltr.models.neck import CorrNL\nfrom ltr import model_constructor\nimport torch\nimport ltr.models.backbone.resnet_seg as resnet_seg\n\nfrom ltr.models.head import seg_network\nfrom easydict import EasyDict as edict\n\n'''2020.4.14 replace mask head with frtm for higher-quality mask'''\n'''2020.4.22 Only use the mask branch'''\n\n\nclass ARnet_seg_mask(nn.Module):\n    \"\"\" Scale Estimation network module with three branches: bbox, coner and mask. \"\"\"\n    def __init__(self, feature_extractor, neck_module, head_module, used_layers,\n                 extractor_grad=True,output_size=(256,256)):\n        \"\"\"\n        args:\n            feature_extractor - backbone feature extractor\n            bb_regressor - IoU prediction module\n            bb_regressor_layer - List containing the name of the layers from feature_extractor, which are input to\n                                    bb_regressor\n            extractor_grad - Bool indicating whether backbone feature extractor requires gradients\n        \"\"\"\n        super(ARnet_seg_mask, self).__init__()\n\n        self.feature_extractor = feature_extractor\n        self.neck = neck_module\n        self.refiner = head_module\n        self.used_layers = used_layers\n        self.output_size = output_size\n\n        if not extractor_grad:\n            for p in self.feature_extractor.parameters():\n                p.requires_grad_(False)\n\n    def forward(self, train_imgs, test_imgs, train_bb, mode='train'):\n        \"\"\" Forward pass\n        Note: If the training is done in sequence mode, that is, test_imgs.dim() == 5, then the batch dimension\n        corresponds to the first dimensions. test_imgs is thus of the form [sequence, batch, feature, row, col]\n        \"\"\"\n        self.forward_ref(train_imgs, train_bb)\n        pred_dict = self.forward_test(test_imgs, mode)\n        return pred_dict\n\n    def forward_ref(self, train_imgs, train_bb):\n        \"\"\" Forward pass of reference branch.\n        size of train_imgs is (1,batch,3,H,W), train_bb is (1,batch,4)\"\"\"\n        num_sequences = train_imgs.shape[-4] # batch\n        num_train_images = train_imgs.shape[0] if train_imgs.dim() == 5 else 1 # 1\n\n        # Extract backbone features\n        '''train_feat OrderedDict, key:'layer4' '''\n        train_feat_dict = self.extract_backbone_features(train_imgs.view(-1, *train_imgs.shape[-3:])) # 输入size是(batch,3,256,256)\n\n        train_feat_list = [feat for feat in train_feat_dict.values()] #list,其中每个元素对应一层输出的特征(tensor)\n\n        # get reference feature\n        self.neck.get_ref_kernel(train_feat_list, train_bb.view(num_train_images, num_sequences, 4))\n\n\n    def forward_test(self, test_imgs, mode='train'):\n        \"\"\" Forward pass of test branch. 
size of test_imgs is (1,batch,3,256,256)\"\"\"\n        output = {}\n        # Extract backbone features\n        test_feat_dict = self.extract_backbone_features(test_imgs.view(-1, *test_imgs.shape[-3:]),\n                                                        layers=['layer1','layer2','layer3','layer4','layer5'])# 输入size是(batch,3,256,256)\n        '''list,tensor'''\n        # Save low-level feature list\n        # Lfeat_list = [feat for name, feat in test_feat_dict.items() if name != 'layer3']\n\n        # fuse feature from two branches\n        fusion_feat = self.neck.fuse_feat([test_feat_dict['layer4']])\n        # Obtain bbox prediction\n        if mode=='train':\n            output['mask'] = torch.sigmoid(self.refiner(fusion_feat, test_feat_dict, self.output_size))\n        elif mode == 'mask':\n            output = torch.sigmoid(self.refiner(fusion_feat, test_feat_dict, self.output_size))\n        else:\n            raise ValueError(\"mode should be train or test\")\n        return output\n\n    def extract_backbone_features(self, im, layers=None):\n        if layers is None:\n            layers = self.used_layers\n        return self.feature_extractor(im, layers)\n\n    def extract_features(self, im, layers):\n        return self.feature_extractor(im, layers)\n\n\n\n@model_constructor\ndef ARnet_seg_mask_resnet50(backbone_pretrained=True,used_layers=('layer4',),pool_size=None):\n    # backbone\n    backbone_net = resnet_seg.resnet50(pretrained=backbone_pretrained)\n    # neck\n    neck_net = CorrNL.CorrNL(pool_size=pool_size)\n    # multiple heads\n    '''create segnet'''\n    in_channels = 1024\n    # disc_params = edict(layer=\"layer4\", in_channels=in_channels, c_channels=96, out_channels=64) # non-local feat (64 channels rather than 1)\n    '''2020.4.22 change \"out_channels\" to pool_size * pool_size'''\n    disc_params = edict(layer=\"layer4\", in_channels=in_channels, c_channels=96, out_channels=pool_size*pool_size) # non-local feat (64 channels rather than 1)\n    refnet_params = edict(\n        layers=(\"layer5\", \"layer4\", \"layer3\", \"layer2\"),\n        nchannels=64, use_batch_norm=True)\n    disc_params.in_channels = backbone_net.get_out_channels()[disc_params.layer]\n\n    p = refnet_params\n    refinement_layers_channels = {L: nch for L, nch in backbone_net.get_out_channels().items() if L in p.layers}\n    refiner = seg_network.SegNetwork(disc_params.out_channels, p.nchannels, refinement_layers_channels, p.use_batch_norm)\n    '''create Alpha-Refine'''\n    net = ARnet_seg_mask(feature_extractor=backbone_net, neck_module=neck_net,\n                         head_module=refiner,\n                         used_layers=used_layers, extractor_grad=True,\n                         output_size=(int(pool_size*2*16),int(pool_size*2*16)))\n    return net\n"
  },
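  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/AR_seg_mask/usage_sketch.py",
    "content": "# -*- coding: utf-8 -*-\n# Rough usage sketch for ARnet_seg_mask_resnet50 defined above; not a tested\n# script. Shapes follow the docstrings in AR_seg_mask.py: images are\n# (1, batch, 3, 256, 256) and reference boxes are (1, batch, 4). The box\n# values below are placeholders, and backbone_pretrained=False avoids a\n# weight download.\n\nimport torch\n\nfrom ltr.models.AR_seg_mask.AR_seg_mask import ARnet_seg_mask_resnet50\n\n# pool_size=8 yields a (256, 256) mask, since output_size = pool_size*2*16.\nnet = ARnet_seg_mask_resnet50(backbone_pretrained=False, pool_size=8).cuda()\n\nbatch = 2\ntrain_imgs = torch.rand(1, batch, 3, 256, 256).cuda()  # reference frames\ntest_imgs = torch.rand(1, batch, 3, 256, 256).cuda()   # test frames\ntrain_bb = torch.tensor([[[64., 64., 128., 128.]] * batch]).cuda()  # (1, batch, 4)\n\npred = net(train_imgs, test_imgs, train_bb, mode='train')\nprint(pred['mask'].shape)  # expected (batch, 1, 256, 256) for pool_size=8\n"
  },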
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/AR_seg_mask/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/backbone/__init__.py",
    "content": "from .resnet import resnet18, resnet50, resnet_baby\nfrom .resnet18_vggm import resnet18_vggmconv1\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/backbone/base.py",
    "content": "import torch\nimport torch.nn as nn\n\n\nclass Backbone(nn.Module):\n    \"\"\"Base class for backbone networks. Handles freezing layers etc.\n    args:\n        frozen_layers  -  Name of layers to freeze. Either list of strings, 'none' or 'all'. Default: 'none'.\n    \"\"\"\n    def __init__(self, frozen_layers=()):\n        super().__init__()\n\n        if isinstance(frozen_layers, str):\n            if frozen_layers.lower() == 'none':\n                frozen_layers = ()\n            elif frozen_layers.lower() != 'all':\n                raise ValueError('Unknown option for frozen layers: \\\"{}\\\". Should be \\\"all\\\", \\\"none\\\" or list of layer names.'.format(frozen_layers))\n\n        self.frozen_layers = frozen_layers\n        self._is_frozen_nograd = False\n\n\n    def train(self, mode=True):\n        super().train(mode)\n        if mode == True:\n            self._set_frozen_to_eval()\n        if not self._is_frozen_nograd:\n            self._set_frozen_to_nograd()\n            self._is_frozen_nograd = True\n\n\n    def _set_frozen_to_eval(self):\n        if isinstance(self.frozen_layers, str) and self.frozen_layers.lower() == 'all':\n            self.eval()\n        else:\n            for layer in self.frozen_layers:\n                getattr(self, layer).eval()\n\n\n    def _set_frozen_to_nograd(self):\n        if isinstance(self.frozen_layers, str) and self.frozen_layers.lower() == 'all':\n            for p in self.parameters():\n                p.requires_grad_(False)\n        else:\n            for layer in self.frozen_layers:\n                for p in getattr(self, layer).parameters():\n                    p.requires_grad_(False)"
  },
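  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/backbone/base_usage_sketch.py",
    "content": "# -*- coding: utf-8 -*-\n# Toy sketch (illustrative, not part of the original tree) showing the\n# freezing contract of the Backbone base class above: layers listed in\n# frozen_layers are switched to eval() on every train() call, and their\n# parameters get requires_grad=False the first time train() runs.\n\nimport torch.nn as nn\n\nfrom ltr.models.backbone.base import Backbone\n\n\nclass TinyBackbone(Backbone):\n    def __init__(self):\n        super().__init__(frozen_layers=['stem'])\n        self.stem = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8))\n        self.head = nn.Conv2d(8, 8, 3, padding=1)\n\n    def forward(self, x):\n        return self.head(self.stem(x))\n\n\nnet = TinyBackbone()\nnet.train()\nprint(net.stem[1].training)                                 # False: BN stays in eval\nprint(any(p.requires_grad for p in net.stem.parameters()))  # False: frozen\nprint(all(p.requires_grad for p in net.head.parameters()))  # True: still trainable\n"
  },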
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/backbone/resnet.py",
    "content": "import math\nimport torch.nn as nn\nfrom collections import OrderedDict\nimport torch.utils.model_zoo as model_zoo\nfrom torchvision.models.resnet import model_urls\nfrom .base import Backbone\n\n\ndef conv3x3(in_planes, out_planes, stride=1, dilation=1):\n    \"\"\"3x3 convolution with padding\"\"\"\n    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n                     padding=dilation, bias=False, dilation=dilation)\n\n\nclass BasicBlock(nn.Module):\n    expansion = 1\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, use_bn=True):\n        super(BasicBlock, self).__init__()\n        self.use_bn = use_bn\n        self.conv1 = conv3x3(inplanes, planes, stride, dilation=dilation)\n\n        if use_bn:\n            self.bn1 = nn.BatchNorm2d(planes)\n        self.relu = nn.ReLU(inplace=True)\n        self.conv2 = conv3x3(planes, planes, dilation=dilation)\n\n        if use_bn:\n            self.bn2 = nn.BatchNorm2d(planes)\n        self.downsample = downsample\n        self.stride = stride\n\n    def forward(self, x):\n        residual = x\n\n        out = self.conv1(x)\n\n        if self.use_bn:\n            out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n\n        if self.use_bn:\n            out = self.bn2(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.relu(out)\n\n        return out\n\n\nclass Bottleneck(nn.Module):\n    expansion = 4\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):\n        super(Bottleneck, self).__init__()\n        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(planes)\n        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n                               padding=dilation, bias=False, dilation=dilation)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n        self.bn3 = nn.BatchNorm2d(planes * 4)\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n        self.stride = stride\n\n    def forward(self, x):\n        residual = x\n\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n        out = self.relu(out)\n\n        out = self.conv3(out)\n        out = self.bn3(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.relu(out)\n\n        return out\n\n\nclass ResNet(Backbone):\n    \"\"\" ResNet network module. 
Allows extracting specific feature blocks.\"\"\"\n    def __init__(self, block, layers, output_layers, num_classes=1000, inplanes=64, dilation_factor=1, frozen_layers=()):\n        self.inplanes = inplanes\n        super(ResNet, self).__init__(frozen_layers=frozen_layers)\n        self.output_layers = output_layers\n        self.conv1 = nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3,\n                               bias=False)\n        self.bn1 = nn.BatchNorm2d(inplanes)\n        self.relu = nn.ReLU(inplace=True)\n        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n        stride = [1 + (dilation_factor < l) for l in (8, 4, 2)]\n        self.layer1 = self._make_layer(block, inplanes, layers[0], dilation=max(dilation_factor//8, 1))\n        self.layer2 = self._make_layer(block, inplanes*2, layers[1], stride=stride[0], dilation=max(dilation_factor//4, 1))\n        self.layer3 = self._make_layer(block, inplanes*4, layers[2], stride=stride[1], dilation=max(dilation_factor//2, 1))\n        self.layer4 = self._make_layer(block, inplanes*8, layers[3], stride=stride[2], dilation=dilation_factor)\n\n        out_feature_strides = {'conv1': 4, 'layer1': 4, 'layer2': 4*stride[0], 'layer3': 4*stride[0]*stride[1],\n                               'layer4': 4*stride[0]*stride[1]*stride[2]}\n\n        # TODO better way?\n        if isinstance(self.layer1[0], BasicBlock):\n            out_feature_channels = {'conv1': inplanes, 'layer1': inplanes, 'layer2': inplanes*2, 'layer3': inplanes*4,\n                               'layer4': inplanes*8}\n        elif isinstance(self.layer1[0], Bottleneck):\n            base_num_channels = 4 * inplanes\n            out_feature_channels = {'conv1': inplanes, 'layer1': base_num_channels, 'layer2': base_num_channels * 2,\n                                    'layer3': base_num_channels * 4, 'layer4': base_num_channels * 8}\n        else:\n            raise Exception('block not supported')\n\n        self._out_feature_strides = out_feature_strides\n        self._out_feature_channels = out_feature_channels\n\n        # self.avgpool = nn.AvgPool2d(7, stride=1)\n        self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n        self.fc = nn.Linear(inplanes*8 * block.expansion, num_classes)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n    def out_feature_strides(self, layer=None):\n        if layer is None:\n            return self._out_feature_strides\n        else:\n            return self._out_feature_strides[layer]\n\n    def out_feature_channels(self, layer=None):\n        if layer is None:\n            return self._out_feature_channels\n        else:\n            return self._out_feature_channels[layer]\n\n    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(self.inplanes, planes * block.expansion,\n                          kernel_size=1, stride=stride, bias=False),\n                nn.BatchNorm2d(planes * block.expansion),\n            )\n\n        layers = []\n        layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))\n        self.inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(block(self.inplanes, planes))\n\n        return nn.Sequential(*layers)\n\n    def _add_output_and_check(self, name, x, outputs, output_layers):\n        if name in output_layers:\n            outputs[name] = x\n        return len(output_layers) == len(outputs)\n\n    def forward(self, x, output_layers=None):\n        \"\"\" Forward pass with input x. The output_layers specify the feature blocks which must be returned \"\"\"\n        outputs = OrderedDict()\n\n        if output_layers is None:\n            output_layers = self.output_layers\n\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.relu(x)\n\n        if self._add_output_and_check('conv1', x, outputs, output_layers):\n            return outputs\n\n        x = self.maxpool(x)\n\n        x = self.layer1(x)\n\n        if self._add_output_and_check('layer1', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer2(x)\n\n        if self._add_output_and_check('layer2', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer3(x)\n\n        if self._add_output_and_check('layer3', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer4(x)\n\n        if self._add_output_and_check('layer4', x, outputs, output_layers):\n            return outputs\n\n        x = self.avgpool(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc(x)\n\n        if self._add_output_and_check('fc', x, outputs, output_layers):\n            return outputs\n\n        if len(output_layers) == 1 and output_layers[0] == 'default':\n            return x\n\n        raise ValueError('output_layer is wrong.')\n\n\ndef resnet_baby(output_layers=None, pretrained=False, inplanes=16, **kwargs):\n    \"\"\"Constructs a ResNet-18 model.\n    \"\"\"\n\n    if output_layers is None:\n        output_layers = ['default']\n    else:\n        for l in output_layers:\n            if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, inplanes=inplanes, **kwargs)\n\n    if pretrained:\n        raise NotImplementedError\n    return model\n\n\ndef resnet18(output_layers=None, pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-18 model.\n    \"\"\"\n\n    if output_layers is None:\n        
output_layers = ['default']\n    else:\n        for l in output_layers:\n            if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, **kwargs)\n\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n    return model\n\n\ndef resnet50(output_layers=None, pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-50 model.\n    \"\"\"\n\n    if output_layers is None:\n        output_layers = ['default']\n    else:\n        for l in output_layers:\n            if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNet(Bottleneck, [3, 4, 6, 3], output_layers, **kwargs)\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n    return model"
  },
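  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/backbone/resnet_usage_sketch.py",
    "content": "# -*- coding: utf-8 -*-\n# Short sketch (illustrative file) of the output_layers mechanism implemented\n# above: forward() returns an OrderedDict of the requested blocks and stops\n# early at the deepest one. The shapes below are for a 224x224 input to\n# resnet50.\n\nimport torch\n\nfrom ltr.models.backbone.resnet import resnet50\n\nnet = resnet50(output_layers=['layer2', 'layer3'], pretrained=False)\nnet.eval()\nfeats = net(torch.rand(1, 3, 224, 224))\nprint({k: tuple(v.shape) for k, v in feats.items()})\n# {'layer2': (1, 512, 28, 28), 'layer3': (1, 1024, 14, 14)}\nprint(net.out_feature_channels('layer3'))  # 1024\n"
  },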
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/backbone/resnet18_vggm.py",
    "content": "import math\nimport torch\nimport torch.nn as nn\nfrom collections import OrderedDict\nfrom torchvision.models.resnet import BasicBlock\nfrom .base import Backbone\n\n\nclass SpatialCrossMapLRN(nn.Module):\n    def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1, ACROSS_CHANNELS=True):\n        super(SpatialCrossMapLRN, self).__init__()\n        self.ACROSS_CHANNELS = ACROSS_CHANNELS\n        if ACROSS_CHANNELS:\n            self.average=nn.AvgPool3d(kernel_size=(local_size, 1, 1),\n                    stride=1,\n                    padding=(int((local_size-1.0)/2), 0, 0))\n        else:\n            self.average=nn.AvgPool2d(kernel_size=local_size,\n                    stride=1,\n                    padding=int((local_size-1.0)/2))\n        self.alpha = alpha\n        self.beta = beta\n        self.k = k\n\n    def forward(self, x):\n        if self.ACROSS_CHANNELS:\n            div = x.pow(2).unsqueeze(1)\n            div = self.average(div).squeeze(1)\n            div = div.mul(self.alpha).add(self.k).pow(self.beta)\n        else:\n            div = x.pow(2)\n            div = self.average(div)\n            div = div.mul(self.alpha).add(self.k).pow(self.beta)\n        x = x.div(div)\n        return x\n\n\nclass ResNetVGGm1(Backbone):\n\n    def __init__(self, block, layers, output_layers, num_classes=1000, frozen_layers=()):\n        self.inplanes = 64\n        super(ResNetVGGm1, self).__init__(frozen_layers=frozen_layers)\n        self.output_layers = output_layers\n        self.vggmconv1 = nn.Conv2d(3,96,(7, 7),(2, 2), padding=3)\n        self.vgglrn = SpatialCrossMapLRN(5, 0.0005, 0.75, 2)\n        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n                               bias=False)\n        self.bn1 = nn.BatchNorm2d(64)\n        self.relu = nn.ReLU(inplace=True)\n        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n        self.layer1 = self._make_layer(block, 64, layers[0])\n        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n        # self.avgpool = nn.AvgPool2d(7, stride=1)\n        self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n        self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n    def _make_layer(self, block, planes, blocks, stride=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(self.inplanes, planes * block.expansion,\n                          kernel_size=1, stride=stride, bias=False),\n                nn.BatchNorm2d(planes * block.expansion),\n            )\n\n        layers = []\n        layers.append(block(self.inplanes, planes, stride, downsample))\n        self.inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(block(self.inplanes, planes))\n\n        return nn.Sequential(*layers)\n\n\n    def _add_output_and_check(self, name, x, outputs, output_layers):\n        if name in output_layers:\n            outputs[name] = x\n        return len(output_layers) == len(outputs)\n\n\n    def forward(self, x, output_layers=None):\n        outputs = OrderedDict()\n\n        if output_layers is None:\n            output_layers = self.output_layers\n\n        if 'vggconv1' in output_layers:\n            c1 = self.vgglrn(self.relu(self.vggmconv1(x)))\n            if self._add_output_and_check('vggconv1', c1, outputs, output_layers):\n                return outputs\n\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.relu(x)\n\n        if self._add_output_and_check('conv1', x, outputs, output_layers):\n            return outputs\n\n        x = self.maxpool(x)\n\n        x = self.layer1(x)\n\n        if self._add_output_and_check('layer1', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer2(x)\n\n        if self._add_output_and_check('layer2', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer3(x)\n\n        if self._add_output_and_check('layer3', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer4(x)\n\n        if self._add_output_and_check('layer4', x, outputs, output_layers):\n            return outputs\n\n        x = self.avgpool(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc(x)\n\n        if self._add_output_and_check('fc', x, outputs, output_layers):\n            return outputs\n\n        if len(output_layers) == 1 and output_layers[0] == 'default':\n            return x\n\n        raise ValueError('output_layer is wrong.')\n\n\ndef resnet18_vggmconv1(output_layers=None, path=None, **kwargs):\n    \"\"\"Constructs a ResNet-18 model with first-layer VGGm features.\n    \"\"\"\n\n    if output_layers is None:\n        output_layers = ['default']\n    else:\n        for l in output_layers:\n            if l not in ['vggconv1', 'conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNetVGGm1(BasicBlock, [2, 2, 2, 2], output_layers, **kwargs)\n\n    if path is not None:\n        model.load_state_dict(torch.load(path), strict=False)\n    return model"
  },
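  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/backbone/resnet18_vggm_usage_sketch.py",
    "content": "# -*- coding: utf-8 -*-\n# Sketch (illustrative file) of the extra VGG-m branch defined above:\n# requesting 'vggconv1' returns the LRN-normalised 96-channel VGG-m conv1\n# features instead of the ResNet trunk outputs. path=None skips loading\n# pretrained weights.\n\nimport torch\n\nfrom ltr.models.backbone.resnet18_vggm import resnet18_vggmconv1\n\nnet = resnet18_vggmconv1(output_layers=['vggconv1'], path=None)\nfeats = net(torch.rand(1, 3, 224, 224))\nprint(tuple(feats['vggconv1'].shape))  # (1, 96, 112, 112)\n"
  },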
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/backbone/resnet_seg.py",
    "content": "import math\nimport torch.nn as nn\nfrom collections import OrderedDict\nimport torch.utils.model_zoo as model_zoo\nfrom torchvision.models.resnet import model_urls\n'''2020.4.14 newly added'''\nfrom collections import OrderedDict as odict\nfrom ltr.models.head.utils import get_out_channels\n\ndef conv3x3(in_planes, out_planes, stride=1, dilation=1):\n    \"\"\"3x3 convolution with padding\"\"\"\n    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n                     padding=dilation, bias=False, dilation=dilation)\n\n\nclass BasicBlock(nn.Module):\n    expansion = 1\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):\n        super(BasicBlock, self).__init__()\n        self.conv1 = conv3x3(inplanes, planes, stride, dilation=dilation)\n        self.bn1 = nn.BatchNorm2d(planes)\n        self.relu = nn.ReLU(inplace=True)\n        self.conv2 = conv3x3(planes, planes, dilation=dilation)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.downsample = downsample\n        self.stride = stride\n\n    def forward(self, x):\n        residual = x\n\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.relu(out)\n\n        return out\n\n\nclass Bottleneck(nn.Module):\n    expansion = 4\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):\n        super(Bottleneck, self).__init__()\n        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(planes)\n        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n                               padding=dilation, bias=False, dilation=dilation)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n        self.bn3 = nn.BatchNorm2d(planes * 4)\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n        self.stride = stride\n\n    def forward(self, x):\n        residual = x\n\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n        out = self.relu(out)\n\n        out = self.conv3(out)\n        out = self.bn3(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.relu(out)\n\n        return out\n\n\nclass ResNet(nn.Module):\n    \"\"\" ResNet network module. 
Allows extracting specific feature blocks.\"\"\"\n    def __init__(self, block, layers, output_layers, num_classes=1000, inplanes=64, dilation_factor=1):\n        self.inplanes = inplanes\n        super(ResNet, self).__init__()\n        self.output_layers = output_layers\n        self.conv1 = nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3,\n                               bias=False)\n        self.bn1 = nn.BatchNorm2d(inplanes)\n        self.relu = nn.ReLU(inplace=True)\n        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n        # For large dilation factors, the deeper layers keep stride 1 and use dilation instead\n        stride = [1 + (dilation_factor < l) for l in (8, 4, 2)]\n        self.layer1 = self._make_layer(block, inplanes, layers[0], dilation=max(dilation_factor//8, 1))\n        self.layer2 = self._make_layer(block, inplanes*2, layers[1], stride=stride[0], dilation=max(dilation_factor//4, 1))\n        self.layer3 = self._make_layer(block, inplanes*4, layers[2], stride=stride[1], dilation=max(dilation_factor//2, 1))\n        self.layer4 = self._make_layer(block, inplanes*8, layers[3], stride=stride[2], dilation=dilation_factor)\n        # self.avgpool = nn.AvgPool2d(7, stride=1)\n        self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n        self.fc = nn.Linear(inplanes*8 * block.expansion, num_classes)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n        '''2020.4.14 newly added'''\n        self._out_channels = odict(  # Deep-to-shallow order is required by SegNetwork\n            layer5=get_out_channels(self.layer4),\n            layer4=get_out_channels(self.layer3),\n            layer3=get_out_channels(self.layer2),\n            layer2=get_out_channels(self.layer1),\n            layer1=get_out_channels(self.conv1))\n\n    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(self.inplanes, planes * block.expansion,\n                          kernel_size=1, stride=stride, bias=False),\n                nn.BatchNorm2d(planes * block.expansion),\n            )\n\n        layers = []\n        layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))\n        self.inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            # Propagate the dilation to every block in the stage, not only the first one\n            layers.append(block(self.inplanes, planes, dilation=dilation))\n\n        return nn.Sequential(*layers)\n\n    def _add_output_and_check(self, name, x, outputs, output_layers):\n        if name in output_layers:\n            outputs[name] = x\n        return len(output_layers) == len(outputs)\n\n    def forward(self, x, output_layers=None):\n
        \"\"\" Forward pass with input x. The output_layers argument specifies which feature blocks to return. \"\"\"\n        outputs = OrderedDict()\n\n        if output_layers is None:\n            output_layers = self.output_layers\n\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.relu(x)\n        x = self.maxpool(x)  # after conv1+maxpool: e.g. (batch,64,128,128) for a 512x512 input\n        '''2020.4.14 renamed the outputs of all layers'''\n        if self._add_output_and_check('layer1', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer1(x) # (batch,256,64,64)\n        if self._add_output_and_check('layer2', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer2(x) # (batch,512,32,32)\n\n        if self._add_output_and_check('layer3', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer3(x) # (batch,1024,16,16)\n\n        if self._add_output_and_check('layer4', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer4(x)\n\n        if self._add_output_and_check('layer5', x, outputs, output_layers):\n            return outputs\n\n        x = self.avgpool(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc(x)\n\n        if self._add_output_and_check('fc', x, outputs, output_layers):\n            return outputs\n\n        if len(output_layers) == 1 and output_layers[0] == 'default':\n            return x\n\n        raise ValueError('Invalid output_layers: {}'.format(output_layers))\n\n    '''2020.4.14 newly added'''\n    def get_out_channels(self):\n        return self._out_channels\n\n\ndef resnet18(output_layers=None, pretrained=False, dilation_factor=1):\n    \"\"\"Constructs a ResNet-18 model.\n    \"\"\"\n\n    if output_layers is None:\n        output_layers = ['default']\n    else:\n        for l in output_layers:\n            # names follow the renamed outputs in forward() above\n            if l not in ['layer1', 'layer2', 'layer3', 'layer4', 'layer5', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, dilation_factor=dilation_factor)\n\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n    return model\n\n\ndef resnet50(output_layers=None, pretrained=False, dilation_factor=1):\n    \"\"\"Constructs a ResNet-50 model.\n    \"\"\"\n\n    if output_layers is None:\n        output_layers = ['default']\n    else:\n        for l in output_layers:\n            # names follow the renamed outputs in forward() above\n            if l not in ['layer1', 'layer2', 'layer3', 'layer4', 'layer5', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNet(Bottleneck, [3, 4, 6, 3], output_layers, dilation_factor=dilation_factor)\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n    return model\n\n'''newly added'''\ndef resnet101(output_layers=None, pretrained=False, dilation_factor=1):\n    \"\"\"Constructs a ResNet-101 model.\n    \"\"\"\n\n    if output_layers is None:\n        output_layers = ['default']\n    else:\n        for l in output_layers:\n            # names follow the renamed outputs in forward() above\n            if l not in ['layer1', 'layer2', 'layer3', 'layer4', 'layer5', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNet(Bottleneck, [3, 4, 23, 3], output_layers, dilation_factor=dilation_factor)\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n    return model\n\n
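\n# ----- Usage sketch (illustrative, not part of the original file) -----\n# The valid output names follow the renamed checks in ResNet.forward() above.\n#\n#   import torch\n#   net = resnet50(output_layers=['layer2', 'layer3'])\n#   feats = net(torch.randn(1, 3, 256, 256))\n#   # feats['layer2']: (1, 256, 64, 64), feats['layer3']: (1, 512, 32, 32)\n"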
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/bbreg/__init__.py",
    "content": "from .atom_iou_net import AtomIoUNet\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/bbreg/atom.py",
    "content": "import torch.nn as nn\nimport ltr.models.backbone as backbones\nimport ltr.models.bbreg as bbmodels\nfrom ltr import model_constructor\n\n\nclass ATOMnet(nn.Module):\n    \"\"\" ATOM network module\"\"\"\n    def __init__(self, feature_extractor, bb_regressor, bb_regressor_layer, extractor_grad=True):\n        \"\"\"\n        args:\n            feature_extractor - backbone feature extractor\n            bb_regressor - IoU prediction module\n            bb_regressor_layer - List containing the name of the layers from feature_extractor, which are input to\n                                    bb_regressor\n            extractor_grad - Bool indicating whether backbone feature extractor requires gradients\n        \"\"\"\n        super(ATOMnet, self).__init__()\n\n        self.feature_extractor = feature_extractor\n        self.bb_regressor = bb_regressor\n        self.bb_regressor_layer = bb_regressor_layer\n\n        if not extractor_grad:\n            for p in self.feature_extractor.parameters():\n                p.requires_grad_(False)\n\n    def forward(self, train_imgs, test_imgs, train_bb, test_proposals):\n        \"\"\" Forward pass\n        Note: If the training is done in sequence mode, that is, test_imgs.dim() == 5, then the batch dimension\n        corresponds to the first dimensions. test_imgs is thus of the form [sequence, batch, feature, row, col]\n        \"\"\"\n        num_sequences = train_imgs.shape[-4]\n        num_train_images = train_imgs.shape[0] if train_imgs.dim() == 5 else 1\n        num_test_images = test_imgs.shape[0] if test_imgs.dim() == 5 else 1\n\n        # Extract backbone features\n        train_feat = self.extract_backbone_features(train_imgs.reshape(-1, *train_imgs.shape[-3:]))\n        test_feat = self.extract_backbone_features(test_imgs.reshape(-1, *test_imgs.shape[-3:]))\n\n        train_feat_iou = [feat for feat in train_feat.values()]\n        test_feat_iou = [feat for feat in test_feat.values()]\n\n        # Obtain iou prediction\n        iou_pred = self.bb_regressor(train_feat_iou, test_feat_iou,\n                                     train_bb.reshape(num_train_images, num_sequences, 4),\n                                     test_proposals.reshape(num_train_images, num_sequences, -1, 4))\n        return iou_pred\n\n    def extract_backbone_features(self, im, layers=None):\n        if layers is None:\n            layers = self.bb_regressor_layer\n        return self.feature_extractor(im, layers)\n\n    def extract_features(self, im, layers):\n        return self.feature_extractor(im, layers)\n\n\n\n@model_constructor\ndef atom_resnet18(iou_input_dim=(256,256), iou_inter_dim=(256,256), backbone_pretrained=True):\n    # backbone\n    backbone_net = backbones.resnet18(pretrained=backbone_pretrained)\n\n    # Bounding box regressor\n    iou_predictor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    net = ATOMnet(feature_extractor=backbone_net, bb_regressor=iou_predictor, bb_regressor_layer=['layer2', 'layer3'],\n                  extractor_grad=False)\n\n    return net\n\n\n@model_constructor\ndef atom_resnet50(iou_input_dim=(256,256), iou_inter_dim=(256,256), backbone_pretrained=True):\n    # backbone\n    backbone_net = backbones.resnet50(pretrained=backbone_pretrained)\n\n    # Bounding box regressor\n    iou_predictor = bbmodels.AtomIoUNet(input_dim=(4*128,4*256), pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    net = ATOMnet(feature_extractor=backbone_net, 
bb_regressor=iou_predictor, bb_regressor_layer=['layer2', 'layer3'],\n                  extractor_grad=False)\n\n    return net\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/bbreg/atom_iou_net.py",
    "content": "import torch.nn as nn\nimport torch\nfrom ltr.models.layers.blocks import LinearBlock\nfrom ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D\n\n\ndef conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):\n    return nn.Sequential(\n            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n                      padding=padding, dilation=dilation, bias=True),\n            nn.BatchNorm2d(out_planes),\n            nn.ReLU(inplace=True))\n\n\nclass AtomIoUNet(nn.Module):\n    \"\"\"Network module for IoU prediction. Refer to the ATOM paper for an illustration of the architecture.\n    It uses two backbone feature layers as input.\n    args:\n        input_dim:  Feature dimensionality of the two input backbone layers.\n        pred_input_dim:  Dimensionality input the the prediction network.\n        pred_inter_dim:  Intermediate dimensionality in the prediction network.\"\"\"\n\n    def __init__(self, input_dim=(128,256), pred_input_dim=(256,256), pred_inter_dim=(256,256)):\n        super().__init__()\n        # _r for reference, _t for test\n        self.conv3_1r = conv(input_dim[0], 128, kernel_size=3, stride=1)\n        self.conv3_1t = conv(input_dim[0], 256, kernel_size=3, stride=1)\n\n        self.conv3_2t = conv(256, pred_input_dim[0], kernel_size=3, stride=1)\n\n        self.prroi_pool3r = PrRoIPool2D(3, 3, 1/8)\n        self.prroi_pool3t = PrRoIPool2D(5, 5, 1/8)\n\n        self.fc3_1r = conv(128, 256, kernel_size=3, stride=1, padding=0)\n\n        self.conv4_1r = conv(input_dim[1], 256, kernel_size=3, stride=1)\n        self.conv4_1t = conv(input_dim[1], 256, kernel_size=3, stride=1)\n\n        self.conv4_2t = conv(256, pred_input_dim[1], kernel_size=3, stride=1)\n\n        self.prroi_pool4r = PrRoIPool2D(1, 1, 1/16)\n        self.prroi_pool4t = PrRoIPool2D(3, 3, 1 / 16)\n\n        self.fc34_3r = conv(256 + 256, pred_input_dim[0], kernel_size=1, stride=1, padding=0)\n        self.fc34_4r = conv(256 + 256, pred_input_dim[1], kernel_size=1, stride=1, padding=0)\n\n        self.fc3_rt = LinearBlock(pred_input_dim[0], pred_inter_dim[0], 5)\n        self.fc4_rt = LinearBlock(pred_input_dim[1], pred_inter_dim[1], 3)\n\n        self.iou_predictor = nn.Linear(pred_inter_dim[0]+pred_inter_dim[1], 1, bias=True)\n\n        # Init weights\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear):\n                nn.init.kaiming_normal_(m.weight.data, mode='fan_in')\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                # In earlier versions batch norm parameters was initialized with default initialization,\n                # which changed in pytorch 1.2. In 1.1 and earlier the weight was set to U(0,1).\n                # So we use the same initialization here.\n                # m.weight.data.fill_(1)\n                m.weight.data.uniform_()\n                m.bias.data.zero_()\n\n    def forward(self, feat1, feat2, bb1, proposals2):\n        \"\"\"Runs the ATOM IoUNet during training operation.\n        This forward pass is mainly used for training. Call the individual functions during tracking instead.\n        args:\n            feat1:  Features from the reference frames (4 or 5 dims).\n            feat2:  Features from the test frames (4 or 5 dims).\n            bb1:  Target boxes (x,y,w,h) in image coords in the reference samples. 
Dims (images, sequences, 4).\n            proposals2:  Proposal boxes for which the IoU will be predicted (images, sequences, num_proposals, 4).\"\"\"\n\n        assert bb1.dim() == 3\n        assert proposals2.dim() == 4\n\n        num_images = proposals2.shape[0]\n        num_sequences = proposals2.shape[1]\n\n        # Extract first train sample\n        feat1 = [f[0,...] if f.dim()==5 else f.reshape(-1, num_sequences, *f.shape[-3:])[0,...] for f in feat1]\n        bb1 = bb1[0,...]\n\n        # Get modulation vector\n        modulation = self.get_modulation(feat1, bb1)\n\n        iou_feat = self.get_iou_feat(feat2)\n\n        modulation = [f.reshape(1, num_sequences, -1).repeat(num_images, 1, 1).reshape(num_sequences*num_images, -1) for f in modulation]\n\n        proposals2 = proposals2.reshape(num_sequences*num_images, -1, 4)\n        pred_iou = self.predict_iou(modulation, iou_feat, proposals2)\n        return pred_iou.reshape(num_images, num_sequences, -1)\n\n    def predict_iou(self, modulation, feat, proposals):\n        \"\"\"Predicts IoU for the give proposals.\n        args:\n            modulation:  Modulation vectors for the targets. Dims (batch, feature_dim).\n            feat:  IoU features (from get_iou_feat) for test images. Dims (batch, feature_dim, H, W).\n            proposals:  Proposal boxes for which the IoU will be predicted (batch, num_proposals, 4).\"\"\"\n\n        fc34_3_r, fc34_4_r = modulation\n        c3_t, c4_t = feat\n\n        batch_size = c3_t.size()[0]\n\n        # Modulation\n        c3_t_att = c3_t * fc34_3_r.reshape(batch_size, -1, 1, 1)\n        c4_t_att = c4_t * fc34_4_r.reshape(batch_size, -1, 1, 1)\n\n        # Add batch_index to rois\n        batch_index = torch.arange(batch_size, dtype=torch.float32).reshape(-1, 1).to(c3_t.device)\n\n        # Push the different rois for the same image along the batch dimension\n        num_proposals_per_batch = proposals.shape[1]\n\n        # input proposals2 is in format xywh, convert it to x0y0x1y1 format\n        proposals_xyxy = torch.cat((proposals[:, :, 0:2], proposals[:, :, 0:2] + proposals[:, :, 2:4]), dim=2)\n\n        # Add batch index\n        roi2 = torch.cat((batch_index.reshape(batch_size, -1, 1).expand(-1, num_proposals_per_batch, -1),\n                          proposals_xyxy), dim=2)\n        roi2 = roi2.reshape(-1, 5).to(proposals_xyxy.device)\n\n        roi3t = self.prroi_pool3t(c3_t_att, roi2)\n        roi4t = self.prroi_pool4t(c4_t_att, roi2)\n\n        fc3_rt = self.fc3_rt(roi3t)\n        fc4_rt = self.fc4_rt(roi4t)\n\n        fc34_rt_cat = torch.cat((fc3_rt, fc4_rt), dim=1)\n\n        iou_pred = self.iou_predictor(fc34_rt_cat).reshape(batch_size, num_proposals_per_batch)\n\n        return iou_pred\n\n    def get_modulation(self, feat, bb):\n        \"\"\"Get modulation vectors for the targets.\n        args:\n            feat: Backbone features from reference images. Dims (batch, feature_dim, H, W).\n            bb:  Target boxes (x,y,w,h) in image coords in the reference samples. 
Dims (batch, 4).\"\"\"\n\n        feat3_r, feat4_r = feat\n\n        c3_r = self.conv3_1r(feat3_r)\n\n        # Add batch_index to rois\n        batch_size = bb.shape[0]\n        batch_index = torch.arange(batch_size, dtype=torch.float32).reshape(-1, 1).to(bb.device)\n\n        # input bb is in format xywh, convert it to x0y0x1y1 format\n        bb = bb.clone()\n        bb[:, 2:4] = bb[:, 0:2] + bb[:, 2:4]\n        roi1 = torch.cat((batch_index, bb), dim=1)\n\n        roi3r = self.prroi_pool3r(c3_r, roi1)  # (batch, 128, 3, 3)\n\n        c4_r = self.conv4_1r(feat4_r)\n        roi4r = self.prroi_pool4r(c4_r, roi1)  # (batch, 256, 1, 1)\n\n        fc3_r = self.fc3_1r(roi3r)  # 3x3 conv without padding collapses to (batch, 256, 1, 1)\n\n        # Concatenate from block 3 and 4\n        fc34_r = torch.cat((fc3_r, roi4r), dim=1)  # (batch, 512, 1, 1)\n\n        fc34_3_r = self.fc34_3r(fc34_r)\n        fc34_4_r = self.fc34_4r(fc34_r)\n\n        return fc34_3_r, fc34_4_r\n\n    def get_iou_feat(self, feat2):\n        \"\"\"Get IoU prediction features from a 4 or 5 dimensional backbone input.\"\"\"\n        feat2 = [f.reshape(-1, *f.shape[-3:]) if f.dim()==5 else f for f in feat2]\n        feat3_t, feat4_t = feat2\n        c3_t = self.conv3_2t(self.conv3_1t(feat3_t))\n        c4_t = self.conv4_2t(self.conv4_1t(feat4_t))\n\n        return c3_t, c4_t\n
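\n\n# ----- Usage sketch (illustrative, not part of the original file) -----\n# PrRoIPool2D only runs on CUDA tensors, so this is left as a comment.\n# Channel sizes assume ResNet-50 layer2/layer3 features, as in atom_resnet50().\n#\n#   net = AtomIoUNet(input_dim=(512, 1024)).cuda()\n#   feat1 = [torch.randn(1, 1, 512, 36, 36).cuda(), torch.randn(1, 1, 1024, 18, 18).cuda()]\n#   feat2 = [torch.randn(1, 1, 512, 36, 36).cuda(), torch.randn(1, 1, 1024, 18, 18).cuda()]\n#   bb1 = torch.tensor([[[64., 64., 96., 96.]]]).cuda()    # (images, sequences, 4), xywh\n#   proposals = torch.rand(1, 1, 16, 4).cuda() * 100       # (images, sequences, num_proposals, 4)\n#   iou = net(feat1, feat2, bb1, proposals)                # -> (1, 1, 16)\n"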
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/head/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/head/seg_network.py",
    "content": "import torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\nfrom ltr.models.head.utils import conv, relu, interpolate, adaptive_cat\n\n\nclass TSE(nn.Module):\n\n    def __init__(self, fc, ic, oc):\n        super().__init__()\n\n        nc = ic + oc\n        self.reduce = nn.Sequential(conv(fc, oc, 1), relu(), conv(oc, oc, 1))\n        self.transform = nn.Sequential(conv(nc, nc, 3), relu(), conv(nc, nc, 3), relu(), conv(nc, oc, 3), relu())\n\n    def forward(self, ft, score, x=None):\n        h = self.reduce(ft)\n        hpool = F.adaptive_avg_pool2d(h, (1, 1)) if x is None else x\n        h = adaptive_cat((h, score), dim=1, ref_tensor=0)\n        h = self.transform(h)\n        return h, hpool\n\n\nclass CAB(nn.Module):\n\n    def __init__(self, oc, deepest):\n        super().__init__()\n\n        self.convreluconv = nn.Sequential(conv(2 * oc, oc, 1), relu(), conv(oc, oc, 1))\n        self.deepest = deepest\n\n    def forward(self, deeper, shallower, att_vec=None):\n\n        shallow_pool = F.adaptive_avg_pool2d(shallower, (1, 1))\n        deeper_pool = deeper if self.deepest else F.adaptive_avg_pool2d(deeper, (1, 1))\n        if att_vec is not None:\n            global_pool = torch.cat([shallow_pool, deeper_pool, att_vec], dim=1)\n        else:\n            global_pool = torch.cat((shallow_pool, deeper_pool), dim=1)\n        conv_1x1 = self.convreluconv(global_pool)\n        inputs = shallower * torch.sigmoid(conv_1x1)\n        out = inputs + interpolate(deeper, inputs.shape[-2:])\n\n        return out\n\n\nclass RRB(nn.Module):\n\n    def __init__(self, oc, use_bn=False):\n        super().__init__()\n        self.conv1x1 = conv(oc, oc, 1)\n        if use_bn:\n            self.bblock = nn.Sequential(conv(oc, oc, 3), nn.BatchNorm2d(oc), relu(), conv(oc, oc, 3, bias=False))\n        else:\n            self.bblock = nn.Sequential(conv(oc, oc, 3), relu(), conv(oc, oc, 3, bias=False))  # Basic block\n\n    def forward(self, x):\n        h = self.conv1x1(x)\n        return F.relu(h + self.bblock(h))\n\n\nclass Upsampler(nn.Module):\n\n    def __init__(self, in_channels=64):\n        super().__init__()\n\n        self.conv1 = conv(in_channels, in_channels // 2, 3)\n        self.conv2 = conv(in_channels // 2, 1, 3)\n\n    def forward(self, x, image_size):\n        print(x.shape)\n        x = F.interpolate(x, (2 * x.shape[-2], 2 * x.shape[-1]), mode='bicubic', align_corners=False)\n        x = F.relu(self.conv1(x))\n        x = F.interpolate(x, image_size[-2:], mode='bicubic', align_corners=False)\n        x = self.conv2(x)\n        return x\n\n\nclass PyrUpBicubic2d(nn.Module):\n\n    def __init__(self, channels):\n        super().__init__()\n\n        self.channels = channels\n\n        def kernel(d):\n            x = d + torch.arange(-1, 3, dtype=torch.float32)\n            x = torch.abs(x)\n            a = -0.75\n            f = (x < 1).float() * ((a + 2) * x * x * x - (a + 3) * x * x + 1) + \\\n                ((x >= 1) * (x < 2)).float() * (a * x * x * x - 5 * a * x * x + 8 * a * x - 4 * a)\n            W = f.reshape(1, 1, 1, len(x)).float()\n            Wt = W.permute(0, 1, 3, 2)\n            return W, Wt\n\n        We, We_t = kernel(-0.25)\n        Wo, Wo_t = kernel(-0.25 - 0.5)\n\n        # Building non-separable filters for now. 
It would make sense to\n        # have separable filters if it proves to be faster.\n\n        # .contiguous() is needed until a bug is fixed in nn.Conv2d.\n        self.W00 = (We_t @ We).expand(channels, 1, 4, 4).contiguous()\n        self.W01 = (We_t @ Wo).expand(channels, 1, 4, 4).contiguous()\n        self.W10 = (Wo_t @ We).expand(channels, 1, 4, 4).contiguous()\n        self.W11 = (Wo_t @ Wo).expand(channels, 1, 4, 4).contiguous()\n\n    def forward(self, input):\n\n        if input.device != self.W00.device:\n            self.W00 = self.W00.to(input.device)\n            self.W01 = self.W01.to(input.device)\n            self.W10 = self.W10.to(input.device)\n            self.W11 = self.W11.to(input.device)\n\n        a = F.pad(input, (2, 2, 2, 2), 'replicate')\n\n        I00 = F.conv2d(a, self.W00, groups=self.channels)\n        I01 = F.conv2d(a, self.W01, groups=self.channels)\n        I10 = F.conv2d(a, self.W10, groups=self.channels)\n        I11 = F.conv2d(a, self.W11, groups=self.channels)\n\n        n, c, h, w = I11.shape\n\n        J0 = torch.stack((I00, I01), dim=-1).view(n, c, h, 2 * w)\n        J1 = torch.stack((I10, I11), dim=-1).view(n, c, h, 2 * w)\n        out = torch.stack((J0, J1), dim=-2).view(n, c, 2 * h, 2 * w)\n\n        out = F.pad(out, (-1, -1, -1, -1))\n        return out\n\n\nclass BackwardCompatibleUpsampler(nn.Module):\n    \"\"\" Upsampler with bicubic interpolation that works with Pytorch 1.0.1 \"\"\"\n\n    def __init__(self, in_channels=64):\n        super().__init__()\n\n        self.conv1 = conv(in_channels, in_channels // 2, 3)\n        self.up1 = PyrUpBicubic2d(in_channels)\n        self.conv2 = conv(in_channels // 2, 1, 3)\n        self.up2 = PyrUpBicubic2d(in_channels // 2)\n\n    def forward(self, x, image_size):\n        x = self.up1(x)\n        x = F.relu(self.conv1(x))\n        x = self.up2(x)\n        x = F.interpolate(x, image_size[-2:], mode='bilinear', align_corners=False)\n        x = self.conv2(x)\n        return x\n\n\nclass SegNetwork(nn.Module):\n\n    def __init__(self, in_channels=1, out_channels=32, ft_channels=None, use_bn=False):\n\n        super().__init__()\n\n        assert ft_channels is not None\n        self.ft_channels = ft_channels\n\n        self.TSE = nn.ModuleDict()\n        self.RRB1 = nn.ModuleDict()\n        self.CAB = nn.ModuleDict()\n        self.RRB2 = nn.ModuleDict()\n\n        ic = in_channels\n        oc = out_channels\n\n        for L, fc in self.ft_channels.items():\n            self.TSE[L] = TSE(fc, ic, oc)\n            self.RRB1[L] = RRB(oc, use_bn=use_bn)\n            self.CAB[L] = CAB(oc, L == 'layer5')\n            self.RRB2[L] = RRB(oc, use_bn=use_bn)\n\n        #if torch.__version__ == '1.0.1'\n        self.project = BackwardCompatibleUpsampler(out_channels)\n        #self.project = Upsampler(out_channels)\n\n    def forward(self, scores, features, image_size):\n\n        num_targets = scores.shape[0]\n        num_fmaps = features[next(iter(self.ft_channels))].shape[0]\n        if num_targets > num_fmaps:\n            multi_targets = True\n        else:\n            multi_targets = False\n\n        x = None\n        for i, L in enumerate(self.ft_channels):\n            ft = features[L]\n            s = interpolate(scores, ft.shape[-2:])  # Resample scores to match features size\n\n            if multi_targets:\n                h, hpool = self.TSE[L](ft.repeat(num_targets, 1, 1, 1), s, x)\n            else:\n                h, hpool = self.TSE[L](ft, s, x)\n\n            h = self.RRB1[L](h)\n            h 
= self.CAB[L](hpool, h)  # fuse pooled global context from the deeper stages\n            x = self.RRB2[L](h)  # refine before passing to the next (shallower) stage\n\n        x = self.project(x, image_size)  # upsample to the input image resolution\n        return x\n
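\n\n# ----- Usage sketch (illustrative, not part of the original file) -----\n# Channel sizes follow the deep-to-shallow dict from resnet_seg.ResNet.get_out_channels().\n#\n#   from collections import OrderedDict\n#   ft_channels = OrderedDict(layer5=2048, layer4=1024, layer3=512, layer2=256)\n#   net = SegNetwork(in_channels=1, out_channels=32, ft_channels=ft_channels)\n#   scores = torch.randn(1, 1, 16, 16)\n#   sizes = {'layer5': 16, 'layer4': 32, 'layer3': 64, 'layer2': 128}\n#   features = {L: torch.randn(1, c, sizes[L], sizes[L]) for L, c in ft_channels.items()}\n#   mask = net(scores, features, image_size=(512, 512))    # -> (1, 1, 512, 512)\n"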
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/head/utils.py",
    "content": "from collections import OrderedDict as odict\n\nimport numpy as np\nimport torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\n\ndef text_bargraph(values):\n    blocks = np.array(('u', ' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█', 'o'))\n    nsteps = len(blocks) - 2 - 1\n    hstep = 1 / (2 * nsteps)\n    values = np.array(values)\n    nans = np.isnan(values)\n    values[nans] = 0  # '░'\n    indices = ((values + hstep) * nsteps + 1).astype(np.int)\n    indices[values < 0] = 0\n    indices[values > 1] = len(blocks) - 1\n    graph = blocks[indices]\n    graph[nans] = '░'\n    graph = str.join('', graph)\n    return graph\n\n\nclass ModuleWrapper:\n    \"\"\" A wrapper for hiding modules from PyTorch, so that the same module can be used in multiple places.\n    and yet saved only once in a checkpoint, or not at all. \"\"\"\n\n    # https://stackoverflow.com/questions/1466676/create-a-wrapper-class-to-call-a-pre-and-post-function-around-existing-functions\n\n    def __init__(self, wrapped_module):\n        self.__wrapped_module__ = wrapped_module\n\n    def __getattr__(self, attr):\n        orig_attr = self.__wrapped_module__.__getattribute__(attr)\n        if callable(orig_attr):\n            def hooked(*args, **kwargs):\n                result = orig_attr(*args, **kwargs)\n                # prevent wrapped_class from becoming unwrapped\n                if result == self.__wrapped_module__:\n                    return self\n                return result\n\n            return hooked\n        else:\n            return orig_attr\n\n    def __call__(self, *args, **kwargs):\n        return self.__wrapped_module__(*args, **kwargs)\n\n\ndef conv(ic, oc, ksize, bias=True, dilation=1, stride=1):\n    return nn.Conv2d(ic, oc, ksize, padding=ksize // 2, bias=bias, dilation=dilation, stride=stride)\n\n\ndef relu(negative_slope=0.0, inplace=False):\n    return nn.LeakyReLU(negative_slope, inplace=inplace)\n\n\ndef interpolate(t, sz):\n    sz = sz.tolist() if torch.is_tensor(sz) else sz\n    return F.interpolate(t, sz, mode='bilinear', align_corners=False) if t.shape[-2:] != sz else t\n\n\ndef adaptive_cat(seq, dim=0, ref_tensor=0):\n    sz = seq[ref_tensor].shape[-2:]\n    t = torch.cat([interpolate(t, sz) for t in seq], dim=dim)\n    return t\n\n\ndef get_out_channels(layer):\n    if hasattr(layer, 'out_channels'):\n        oc = layer.out_channels\n    elif hasattr(layer, '_modules'):\n        oc = get_out_channels(layer._modules)\n    else:\n        ocs = []\n        for key in reversed(layer):\n            ocs.append(get_out_channels(layer[key]))\n\n        oc = 0\n        for elem in ocs:\n            if elem:\n                return elem\n\n    return oc\n\n\ndef is_finite(t):\n    return (torch.isnan(t) + torch.isinf(t)) == 0\n\n\nclass AverageMeter:\n    \"\"\"Computes and stores the average and current value\"\"\"\n\n    def __init__(self):\n        self.val = 0\n        self.avg = 0\n        self.sum = 0\n        self.count = 0\n        self.seq_avg = []\n\n    def reset(self):\n        self.__init__()\n\n    def update(self, val, n=1):\n        if not np.isnan(val):\n            self.val = val\n            self.sum += val * n\n            self.count += n\n            self.avg = self.sum / self.count\n\n    def update_multi(self, val):\n        val = np.array(val)\n        v = val[~np.isnan(val)]\n        n = len(v)\n        self.val = val\n        self.sum += np.nansum(v)\n        self.count += n\n        self.avg = self.sum / self.count\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/layers/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/layers/activation.py",
    "content": "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef softmax_reg(x: torch.Tensor, dim, reg=None):\n    \"\"\"Softmax with optinal denominator regularization.\"\"\"\n    if reg is None:\n        return torch.softmax(x, dim=dim)\n    dim %= x.dim()\n    if isinstance(reg, (float, int)):\n        reg = x.new_tensor([reg])\n    reg = reg.expand([1 if d==dim else x.shape[d] for d in range(x.dim())])\n    x = torch.cat((x, reg), dim=dim)\n    return torch.softmax(x, dim=dim)[[slice(-1) if d==dim else slice(None) for d in range(x.dim())]]\n\n\n\nclass MLU(nn.Module):\n    r\"\"\"MLU activation\n    \"\"\"\n    def __init__(self, min_val, inplace=False):\n        super().__init__()\n        self.min_val = min_val\n        self.inplace = inplace\n\n    def forward(self, input):\n        return F.elu(F.leaky_relu(input, 1/self.min_val, inplace=self.inplace), self.min_val, inplace=self.inplace)\n\n\nclass LeakyReluPar(nn.Module):\n    r\"\"\"LeakyRelu parametric activation\n    \"\"\"\n\n    def forward(self, x, a):\n        return (1.0 - a)/2.0 * torch.abs(x) + (1.0 + a)/2.0 * x\n\nclass LeakyReluParDeriv(nn.Module):\n    r\"\"\"Derivative of the LeakyRelu parametric activation, wrt x.\n    \"\"\"\n\n    def forward(self, x, a):\n        return (1.0 - a)/2.0 * torch.sign(x.detach()) + (1.0 + a)/2.0\n\n\nclass BentIdentPar(nn.Module):\n    r\"\"\"BentIdent parametric activation\n    \"\"\"\n    def __init__(self, b=1.0):\n        super().__init__()\n        self.b = b\n\n    def forward(self, x, a):\n        return (1.0 - a)/2.0 * (torch.sqrt(x*x + 4.0*self.b*self.b) - 2.0*self.b) + (1.0 + a)/2.0 * x\n\n\nclass BentIdentParDeriv(nn.Module):\n    r\"\"\"BentIdent parametric activation deriv\n    \"\"\"\n    def __init__(self, b=1.0):\n        super().__init__()\n        self.b = b\n\n    def forward(self, x, a):\n        return (1.0 - a)/2.0 * (x / torch.sqrt(x*x + 4.0*self.b*self.b)) + (1.0 + a)/2.0\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/layers/blocks.py",
    "content": "from torch import nn\n\n\ndef conv_block(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=True,\n               batch_norm=True, relu=True, padding_mode='zeros'):\n    layers = []\n    assert padding_mode == 'zeros' or padding_mode == 'replicate'\n\n    if padding_mode == 'replicate' and padding > 0:\n        assert isinstance(padding, int)\n        layers.append(nn.ReflectionPad2d(padding))\n        padding = 0\n\n    layers.append(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n                  padding=padding, dilation=dilation, bias=bias))\n    if batch_norm:\n        layers.append(nn.BatchNorm2d(out_planes))\n    if relu:\n        layers.append(nn.ReLU(inplace=True))\n    return nn.Sequential(*layers)\n\n\nclass LinearBlock(nn.Module):\n    def __init__(self, in_planes, out_planes, input_sz, bias=True, batch_norm=True, relu=True):\n        super().__init__()\n        self.linear = nn.Linear(in_planes*input_sz*input_sz, out_planes, bias=bias)\n        self.bn = nn.BatchNorm2d(out_planes) if batch_norm else None\n        self.relu = nn.ReLU(inplace=True) if relu else None\n\n    def forward(self, x):\n        x = self.linear(x.reshape(x.shape[0], -1))\n        if self.bn is not None:\n            x = self.bn(x.reshape(x.shape[0], x.shape[1], 1, 1))\n        if self.relu is not None:\n            x = self.relu(x)\n        return x.reshape(x.shape[0], -1)"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/layers/distance.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass DistanceMap(nn.Module):\n    \"\"\"Generate a distance map from a origin center location.\n    args:\n        num_bins:  Number of bins in the map.\n        bin_displacement:  Displacement of the bins.\n    \"\"\"\n    def __init__(self, num_bins, bin_displacement=1.0):\n        super().__init__()\n        self.num_bins = num_bins\n        self.bin_displacement = bin_displacement\n\n    def forward(self, center, output_sz):\n        \"\"\"Create the distance map.\n        args:\n            center: Torch tensor with (y,x) center position. Dims (batch, 2)\n            output_sz: Size of output distance map. 2-dimensional tuple.\"\"\"\n\n        center = center.view(-1,2)\n\n        bin_centers = torch.arange(self.num_bins, dtype=torch.float32, device=center.device).view(1, -1, 1, 1)\n\n        k0 = torch.arange(output_sz[0], dtype=torch.float32, device=center.device).view(1,1,-1,1)\n        k1 = torch.arange(output_sz[1], dtype=torch.float32, device=center.device).view(1,1,1,-1)\n\n        d0 = k0 - center[:,0].view(-1,1,1,1)\n        d1 = k1 - center[:,1].view(-1,1,1,1)\n\n        dist = torch.sqrt(d0*d0 + d1*d1)\n        bin_diff = dist / self.bin_displacement - bin_centers\n\n        bin_val = torch.cat((F.relu(1.0 - torch.abs(bin_diff[:,:-1,:,:]), inplace=True),\n                             (1.0 + bin_diff[:,-1:,:,:]).clamp(0, 1)), dim=1)\n\n        return bin_val\n\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/layers/filter.py",
    "content": "import torch\nimport torch.nn.functional as F\n\n\ndef apply_filter(feat, filter, dilation_factors=None):\n    \"\"\"Applies the filter on the input features (feat). The number of groups is automatically calculated.\n    args:\n        feat: These are the input features. Must have dimensions (images_in_sequence, sequences, feat_dim, H, W)\n        filter: The filter to apply. Must have dimensions (sequences, feat_dim, fH, fW) or (sequences, filters, feat_dim/groups, fH, fW)\n    output:\n        scores: Output of filtering. Dimensions (images_in_sequence, sequences, yH, yW) or (images_in_sequence, sequences, filters, yH, yW)\n    \"\"\"\n\n    multiple_filters = (filter.dim() == 5)\n\n    padding = (filter.shape[-2] // 2, filter.shape[-1] // 2)\n\n    num_images = feat.shape[0]\n    num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n    num_filters = filter.shape[1] if multiple_filters else 1\n    num_channels = feat.shape[-3]\n    groups = num_channels // filter.shape[-3]\n\n    assert num_filters % groups == 0 and num_channels % groups == 0\n\n    if multiple_filters:\n        if dilation_factors is None:\n            scores = F.conv2d(feat.reshape(num_images, -1, feat.shape[-2], feat.shape[-1]), filter.view(-1, *filter.shape[-3:]),\n                              padding=padding, groups=num_sequences*groups)\n\n            return scores.view(num_images, num_sequences, -1, scores.shape[-2], scores.shape[-1])\n        else:\n            scores_all = []\n            start_id = 0\n\n            for d_factor, num_filters_with_d in dilation_factors.items():\n                f_d = filter[:, start_id:start_id+num_filters_with_d, ...].contiguous()\n\n                padding_d = [p+d_factor-1 for p in padding]\n                scores_d = F.conv2d(feat.reshape(num_images, -1, feat.shape[-2], feat.shape[-1]),\n                                    f_d.view(-1, *f_d.shape[-3:]),\n                                    padding=padding_d, groups=num_sequences * groups,\n                                    dilation=d_factor)\n                scores_d = scores_d.view(num_images, num_sequences, -1, scores_d.shape[-2], scores_d.shape[-1])\n                scores_all.append(scores_d)\n                start_id += num_filters_with_d\n\n            scores = torch.cat(scores_all, dim=2)\n            return scores\n\n    scores = F.conv2d(feat.reshape(num_images, -1, feat.shape[-2], feat.shape[-1]), filter,\n                      padding=padding, groups=num_sequences)\n\n    return scores.view(num_images, num_sequences, scores.shape[-2], scores.shape[-1])\n\n\ndef apply_feat_transpose(feat, input, filter_ksz, training=True, groups=1):\n    \"\"\"Applies the transposed operation off apply_filter w.r.t. filter itself. Can be used to compute the filter gradient.\n    args:\n        feat: These are the input features. Must have dimensions (images_in_sequence, sequences, feat_dim, H, W)\n        input: Input activation (e.g. residuals). Must have dimensions (images_in_sequence, sequences, yH, yW) or\n                (images_in_sequence, sequences, filters, yH, yW)\n        training: Choose the faster implementation whether training or not.\n    output:\n        Output of transposed operation. 
Dimensions (sequences, feat_dim, fH, fW)\n    \"\"\"\n\n    if groups != 1:\n        raise NotImplementedError('Not implemented other values of group.')\n\n    if training or input.dim() == 5:\n        return _apply_feat_transpose_v3(feat, input, filter_ksz)\n    return _apply_feat_transpose_v2(feat, input, filter_ksz)\n\n\ndef _apply_feat_transpose_v1(feat, input, filter_ksz):\n    \"\"\"This one is slow as hell!!!!\"\"\"\n\n    num_images = feat.shape[0]\n    num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n    feat_sz = (feat.shape[-2], feat.shape[-1])\n    if isinstance(filter_ksz, int):\n        filter_ksz = (filter_ksz, filter_ksz)\n\n    # trans_pad = sz + padding - filter_ksz\n    trans_pad = [sz + ksz//2 - ksz for sz, ksz in zip(feat_sz, filter_ksz)]\n\n    filter_grad = F.conv_transpose2d(input.flip((2, 3)).view(1, -1, input.shape[-2], input.shape[-1]),\n                                     feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]),\n                                     padding=trans_pad, groups=num_images * num_sequences)\n\n    return filter_grad.view(num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=0)\n\n\ndef _apply_feat_transpose_v2(feat, input, filter_ksz):\n    \"\"\"Fast forward and slow backward\"\"\"\n\n    multiple_filters = (input.dim() == 5)\n\n    num_images = feat.shape[0]\n    num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n    num_filters = input.shape[2] if multiple_filters else 1\n    if isinstance(filter_ksz, int):\n        filter_ksz = (filter_ksz, filter_ksz)\n\n    trans_pad = [(ksz-1)//2 for ksz in filter_ksz]\n\n    if multiple_filters:\n        filter_grad = F.conv2d(input.reshape(-1, num_filters, input.shape[-2], input.shape[-1]).permute(1,0,2,3),\n                               feat.reshape(-1, 1, feat.shape[-2], feat.shape[-1]),\n                               padding=trans_pad, groups=num_images * num_sequences)\n\n        if num_images == 1:\n            return filter_grad.view(num_filters, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).flip((3,4)).permute(1,0,2,3,4)\n        return filter_grad.view(num_filters, num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=1).flip((3,4)).permute(1,0,2,3,4)\n\n    filter_grad = F.conv2d(input.reshape(1, -1, input.shape[-2], input.shape[-1]),\n                                     feat.reshape(-1, 1, feat.shape[-2], feat.shape[-1]),\n                                     padding=trans_pad, groups=num_images * num_sequences)\n\n    return filter_grad.view(num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=0).flip((2,3))\n\n\ndef _apply_feat_transpose_v3(feat, input, filter_ksz):\n    \"\"\"Slow forward fast backward\"\"\"\n\n    multiple_filters = (input.dim() == 5)\n\n    num_images = feat.shape[0]\n    num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n    num_filters = input.shape[2] if multiple_filters else 1\n    if isinstance(filter_ksz, int):\n        filter_ksz = (filter_ksz, filter_ksz)\n\n    trans_pad = [ksz//2 for  ksz in filter_ksz]\n\n    filter_grad = F.conv2d(feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]).permute(1,0,2,3),\n                           input.reshape(-1, 1, input.shape[-2], input.shape[-1]),\n                           padding=trans_pad, groups=num_images * num_sequences)\n\n    if multiple_filters:\n        if num_images == 1:\n            return filter_grad.view(-1, num_sequences, num_filters, 
filter_grad.shape[-2], filter_grad.shape[-1]).permute(1,2,0,3,4)\n        return filter_grad.view(-1, num_images, num_sequences, num_filters, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=1).permute(1,2,0,3,4)\n\n    if num_images == 1:\n        return filter_grad.permute(1,0,2,3)\n    return filter_grad.view(-1, num_images, num_sequences, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=1).permute(1,0,2,3)\n\n\ndef _apply_feat_transpose_v4(feat, input, filter_ksz):\n    \"\"\"Slow forward, fast backward\"\"\"\n\n    num_images = feat.shape[0]\n    num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n    if isinstance(filter_ksz, int):\n        filter_ksz = (filter_ksz, filter_ksz)\n\n    trans_pad = [ksz//2 for ksz in filter_ksz]\n\n    filter_grad = F.conv2d(feat.permute(2,1,0,3,4).reshape(feat.shape[-3], -1, feat.shape[-2], feat.shape[-1]),\n                           input.permute(1,0,2,3),\n                           padding=trans_pad, groups=num_sequences)\n\n    return filter_grad.permute(1,0,2,3)\n\n\ndef filter_gradient(feat, filter, label=None, training=True):\n    \"\"\"Computes the gradient of the filter for the L2 loss between the filter response on feat and the label.\n    args:\n        feat: These are the input features. Must have dimensions (images_in_sequence, sequences, feat_dim, H, W)\n        filter: The filter to apply. Must have dimensions (sequences, feat_dim, fH, fW)\n        label: Ground truth label in the L2 loss. Dimensions (images_in_sequence, sequences, yH, yW)\n    output:\n        filter_gradient: Dimensions same as input filter (sequences, feat_dim, fH, fW)\n    \"\"\"\n\n    residuals = apply_filter(feat, filter)\n    if label is not None:\n        residuals = residuals - label\n    filter_ksz = (filter.shape[-2], filter.shape[-1])\n    return apply_feat_transpose(feat, residuals, filter_ksz, training=training)\n
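\n\n# ----- Usage sketch (illustrative, not part of the original file) -----\n# Dimension conventions from the docstrings above:\n#\n#   feat = torch.randn(2, 3, 16, 24, 24)   # (images_in_sequence, sequences, feat_dim, H, W)\n#   filt = torch.randn(3, 16, 5, 5)        # (sequences, feat_dim, fH, fW)\n#   scores = apply_filter(feat, filt)      # -> (2, 3, 24, 24)\n#   grad = filter_gradient(feat, filt, label=torch.zeros_like(scores))   # -> (3, 16, 5, 5)\n"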
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/layers/normalization.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass InstanceL2Norm(nn.Module):\n    \"\"\"Instance L2 normalization.\n    \"\"\"\n    def __init__(self, size_average=True, eps=1e-5, scale=1.0):\n        super().__init__()\n        self.size_average = size_average\n        self.eps = eps\n        self.scale = scale\n\n    def forward(self, input):\n        if self.size_average:\n            return input * (self.scale * ((input.shape[1] * input.shape[2] * input.shape[3]) / (\n                        torch.sum((input * input).view(input.shape[0], 1, 1, -1), dim=3, keepdim=True) + self.eps)).sqrt())\n        else:\n            return input * (self.scale / (torch.sum((input * input).view(input.shape[0], 1, 1, -1), dim=3, keepdim=True) + self.eps).sqrt())\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/layers/transform.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\n\ndef interpolate(x, sz):\n    \"\"\"Interpolate 4D tensor x to size sz.\"\"\"\n    sz = sz.tolist() if torch.is_tensor(sz) else sz\n    return F.interpolate(x, sz, mode='bilinear', align_corners=False) if x.shape[-2:] != sz else x\n\n\nclass InterpCat(nn.Module):\n    \"\"\"Interpolate and concatenate features of different resolutions.\"\"\"\n\n    def forward(self, input):\n        if isinstance(input, (dict, OrderedDict)):\n            input = list(input.values())\n\n        output_shape = None\n        for x in input:\n            if output_shape is None or output_shape[0] > x.shape[-2]:\n                output_shape = x.shape[-2:]\n\n        return torch.cat([interpolate(x, output_shape) for x in input], dim=-3)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/loss/__init__.py",
    "content": "from .target_classification import LBHinge\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/loss/kl_regression.py",
    "content": "import math\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\n\nclass KLRegression(nn.Module):\n    \"\"\"KL-divergence loss for probabilistic regression.\n    It is computed using Monte Carlo (MC) samples from an arbitrary distribution.\"\"\"\n\n    def __init__(self, eps=0.0):\n        super().__init__()\n        self.eps = eps\n\n    def forward(self, scores, sample_density, gt_density, mc_dim=-1):\n        \"\"\"Args:\n            scores: predicted score values\n            sample_density: probability density of the sample distribution\n            gt_density: probability density of the ground truth distribution\n            mc_dim: dimension of the MC samples\"\"\"\n\n        exp_val = scores - torch.log(sample_density + self.eps)\n\n        L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim]) - \\\n            torch.mean(scores * (gt_density / (sample_density + self.eps)), dim=mc_dim)\n\n        return L.mean()\n\n\nclass MLRegression(nn.Module):\n    \"\"\"Maximum likelihood loss for probabilistic regression.\n    It is computed using Monte Carlo (MC) samples from an arbitrary distribution.\"\"\"\n\n    def __init__(self, eps=0.0):\n        super().__init__()\n        self.eps = eps\n\n    def forward(self, scores, sample_density, gt_density=None, mc_dim=-1):\n        \"\"\"Args:\n            scores: predicted score values. First sample must be ground-truth\n            sample_density: probability density of the sample distribution\n            gt_density: not used\n            mc_dim: dimension of the MC samples. Only mc_dim=1 supported\"\"\"\n\n        assert mc_dim == 1\n        assert (sample_density[:,0,...] == -1).all()\n\n        exp_val = scores[:, 1:, ...] - torch.log(sample_density[:, 1:, ...] + self.eps)\n\n        L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim] - 1) - scores[:, 0, ...]\n        loss = L.mean()\n        return loss\n\n\nclass KLRegressionGrid(nn.Module):\n    \"\"\"KL-divergence loss for probabilistic regression.\n    It is computed using the grid integration strategy.\"\"\"\n\n    def forward(self, scores, gt_density, grid_dim=-1, grid_scale=1.0):\n        \"\"\"Args:\n            scores: predicted score values\n            gt_density: probability density of the ground truth distribution\n            grid_dim: dimension(s) of the grid\n            grid_scale: area of one grid cell\"\"\"\n\n        score_corr = grid_scale * torch.sum(scores * gt_density, dim=grid_dim)\n\n        L = torch.logsumexp(scores, dim=grid_dim) + math.log(grid_scale) - score_corr\n\n        return L.mean()\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/loss/target_classification.py",
    "content": "import torch.nn as nn\nimport torch\nfrom torch.nn import functional as F\n\n\nclass LBHinge(nn.Module):\n    \"\"\"Loss that uses a 'hinge' on the lower bound.\n    This means that for samples with a label value smaller than the threshold, the loss is zero if the prediction is\n    also smaller than that threshold.\n    args:\n        error_matric:  What base loss to use (MSE by default).\n        threshold:  Threshold to use for the hinge.\n        clip:  Clip the loss if it is above this value.\n    \"\"\"\n    def __init__(self, error_metric=nn.MSELoss(), threshold=None, clip=None):\n        super().__init__()\n        self.error_metric = error_metric\n        self.threshold = threshold if threshold is not None else -100\n        self.clip = clip\n\n    def forward(self, prediction, label, target_bb=None):\n        negative_mask = (label < self.threshold).float()\n        positive_mask = (1.0 - negative_mask)\n\n        prediction = negative_mask * F.relu(prediction) + positive_mask * prediction\n\n        loss = self.error_metric(prediction, positive_mask * label)\n\n        if self.clip is not None:\n            loss = torch.min(loss, torch.tensor([self.clip], device=loss.device))\n        return loss\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/meta/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/meta/steepestdescent.py",
    "content": "import math\nimport torch\nimport torch.nn as nn\nfrom pytracking import TensorList\nfrom ltr.models.layers import activation\n\n\nclass GNSteepestDescent(nn.Module):\n    \"\"\"General module for steepest descent based meta learning.\"\"\"\n    def __init__(self, residual_module, num_iter=1, compute_losses=False, detach_length=float('Inf'),\n                 parameter_batch_dim=0, residual_batch_dim=0, steplength_reg=0.0,\n                 filter_dilation_factors=None):\n        super().__init__()\n\n        self.residual_module = residual_module\n        self.num_iter = num_iter\n        self.compute_losses = compute_losses\n        self.detach_length = detach_length\n        self.steplength_reg = steplength_reg\n        self._parameter_batch_dim = parameter_batch_dim\n        self._residual_batch_dim = residual_batch_dim\n        self.filter_dilation_factors = filter_dilation_factors\n\n    def _sqr_norm(self, x: TensorList, batch_dim=0):\n        sum_keep_batch_dim = lambda e: e.sum(dim=[d for d in range(e.dim()) if d != batch_dim])\n        return sum((x * x).apply(sum_keep_batch_dim))\n\n\n    def _compute_loss(self, res):\n        return sum((res * res).sum()) / sum(res.numel())\n\n\n    def forward(self, meta_parameter: TensorList, num_iter=None, *args, **kwargs):\n        # Make sure grad is enabled\n        torch_grad_enabled = torch.is_grad_enabled()\n        torch.set_grad_enabled(True)\n\n        num_iter = self.num_iter if num_iter is None else num_iter\n\n        meta_parameter_iterates = [meta_parameter]\n        losses = []\n\n        for i in range(num_iter):\n            if i > 0 and i % self.detach_length == 0:\n                meta_parameter = meta_parameter.detach()\n\n            meta_parameter.requires_grad_(True)\n\n            # Compute residual vector\n            r = self.residual_module(meta_parameter, filter_dilation_factors=self.filter_dilation_factors, **kwargs)\n\n            if self.compute_losses:\n                losses.append(self._compute_loss(r))\n\n            # Compute gradient of loss\n            u = r.clone()\n            g = TensorList(torch.autograd.grad(r, meta_parameter, u, create_graph=True))\n\n            # Multiply gradient with Jacobian\n            h = TensorList(torch.autograd.grad(g, u, g, create_graph=True))\n\n            # Compute squared norms\n            ip_gg = self._sqr_norm(g, batch_dim=self._parameter_batch_dim)\n            ip_hh = self._sqr_norm(h, batch_dim=self._residual_batch_dim)\n\n            # Compute step length\n            alpha = ip_gg / (ip_hh + self.steplength_reg * ip_gg).clamp(1e-8)\n\n            # Compute optimization step\n            step = g.apply(lambda e: alpha.reshape([-1 if d==self._parameter_batch_dim else 1 for d in range(e.dim())]) * e)\n\n            # Add step to parameter\n            meta_parameter = meta_parameter - step\n\n            meta_parameter_iterates.append(meta_parameter)\n\n\n        if self.compute_losses:\n            losses.append(self._compute_loss(self.residual_module(meta_parameter,\n                                                                  filter_dilation_factors=self.filter_dilation_factors,\n                                                                  **kwargs)))\n\n        # Reset the grad enabled flag\n        torch.set_grad_enabled(torch_grad_enabled)\n        if not torch_grad_enabled:\n            meta_parameter.detach_()\n            for w in meta_parameter_iterates:\n                w.detach_()\n            for l in losses:\n            
     l.detach_()\n\n        # Iterates and losses were detached above when called in no-grad (tracking) mode\n        return meta_parameter, meta_parameter_iterates, losses\n
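\n\n# ----- Note (illustrative, not part of the original file) -----\n# Each iteration above is one steepest-descent step on the Gauss-Newton\n# least-squares objective L(w) = 0.5 * ||r(w)||^2 :\n#   g = J^T r                                        (torch.autograd.grad of r w.r.t. w)\n#   h = J g                                          (Jacobian-vector product via double backward)\n#   alpha = ||g||^2 / (||J g||^2 + reg * ||g||^2)    (per-sample step length)\n#   w <- w - alpha * g\n"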
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/neck/CorrNL.py",
    "content": "import torch.nn as nn\nimport torch\nfrom ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D\nfrom torch.nn import functional as F\nfrom ltr.models.neck.neck_utils import *\n\nclass CorrNL(nn.Module):\n    \"\"\"Network module for IoU prediction. Refer to the ATOM paper for an illustration of the architecture.\n    It uses two backbone feature layers as input.\n    args:\n        input_dim:  Feature dimensionality of the two input backbone layers.\n        pred_input_dim:  Dimensionality input the the prediction network.\n        pred_inter_dim:  Intermediate dimensionality in the prediction network.\"\"\"\n\n    def __init__(self, pool_size=8, use_NL=True):\n        super().__init__()\n        self.prroi_pool = PrRoIPool2D(pool_size, pool_size, 1/16)\n        num_corr_channel = pool_size*pool_size\n        self.channel_attention = SEModule(num_corr_channel,reduction=4)\n        self.spatial_attention = NONLocalBlock2D(in_channels=num_corr_channel)\n        self.use_NL = use_NL\n    def forward(self, feat1, feat2, bb1):\n        \"\"\"Runs the ATOM IoUNet during training operation.\n        This forward pass is mainly used for training. Call the individual functions during tracking instead.\n        args:\n            feat1:  Features from the reference frames (4 or 5 dims).\n            feat2:  Features from the test frames (4 or 5 dims).\n            bb1:  Target boxes (x,y,w,h) in image coords in the reference samples. Dims (images, sequences, 4).\n            proposals2:  Proposal boxes for which the IoU will be predicted (images, sequences, num_proposals, 4).\"\"\"\n\n        assert bb1.dim() == 3\n        # num_images, num_sequences = bb1.size()[:2] # 1, 64\n\n        # Extract first train sample\n        if len(feat1)==1:\n            feat1 = feat1[0] # size为(64,C,H,W)\n            feat2 = feat2[0] # size为(64,C,H,W)\n            bb1 = bb1[0,...] # (64,4)\n        else:\n            raise ValueError(\"Only support single-layer feature map\")\n        '''get PrRoIPool feature '''\n        # Add batch_index to rois\n        batch_size = bb1.shape[0]\n        batch_index = torch.arange(batch_size, dtype=torch.float32).view(-1, 1).to(bb1.device) # (64,1)\n        # input bb is in format xywh, convert it to x0y0x1y1 format\n        bb1 = bb1.clone()\n        bb1[:, 2:4] = bb1[:, 0:2] + bb1[:, 2:4]\n        roi1 = torch.cat((batch_index, bb1), dim=1) #(64,1),(64,4) ---> (64,5)\n        feat_roi1 = self.prroi_pool(feat1, roi1) # (64,C,H,W)\n        feat_corr,_ = self.corr_fun(feat_roi1, feat2)\n        # print('相关后的特征维度是:',feat_corr.size())#(batch,StxSt,Sr,Sr)\n        '''channel attention: Squeeze and Excitation'''\n        feat_ca = self.channel_attention(feat_corr) # 计算通道注意力特征\n        '''spatial attention: Non-local 2D'''\n        feat_sa = self.spatial_attention(feat_ca)\n        return feat_sa\n\n    def get_ref_kernel(self, feat1, bb1):\n        assert bb1.dim() == 3\n        # num_images, num_sequences = bb1.size()[:2] # 1, 64\n\n        # Extract first train sample\n        if len(feat1) == 1:\n            feat1 = feat1[0]  # size为(64,C,H,W)\n            bb1 = bb1[0, ...]  
# (64,4)\n        else:\n            raise ValueError(\"Only support single-layer feature map\")\n        '''get PrRoIPool feature '''\n        # Add batch_index to rois\n        batch_size = bb1.shape[0]\n        batch_index = torch.arange(batch_size, dtype=torch.float32).view(-1, 1).to(bb1.device)  # (64,1)\n        # input bb is in format xywh, convert it to x0y0x1y1 format\n        bb1 = bb1.clone()\n        bb1[:, 2:4] = bb1[:, 0:2] + bb1[:, 2:4]\n        roi1 = torch.cat((batch_index, bb1), dim=1)  # (64,1),(64,4) ---> (64,5)\n        '''注意: feat1 and roi1 must be cuda tensor'''\n        self.ref_kernel = self.prroi_pool(feat1.float(), roi1)  # (64,C,H,W)\n        # self.ref_kernel.half()\n\n    def fuse_feat(self, feat2):\n        '''fuse features from reference and test branch'''\n        if len(feat2) == 1:\n            feat2 = feat2[0]\n        '''Step1: pixel-wise correlation'''\n        feat_corr,_ = self.corr_fun(self.ref_kernel, feat2)\n        # print('相关后的特征维度是:',feat_corr.size())#(batch,StxSt,Sr,Sr) (batch,64,16,16)\n        '''Step2: channel attention: Squeeze and Excitation'''\n        feat_ca = self.channel_attention(feat_corr) # 计算通道注意力特征\n        if not self.use_NL:\n            # print('not use non-local')\n            return feat_ca\n        else:\n            '''Step3: spatial attention: Non-local 2D'''\n            feat_sa = self.spatial_attention(feat_ca)\n            return feat_sa\n\n\n    def corr_fun(self, Kernel_tmp, Feature, KERs=None):\n        size = Kernel_tmp.size()\n        CORR = []\n        Kernel = []\n        for i in range(len(Feature)):\n            ker = Kernel_tmp[i:i + 1]\n            fea = Feature[i:i + 1]\n            ker = ker.view(size[1], size[2] * size[3]).transpose(0, 1)\n            ker = ker.unsqueeze(2).unsqueeze(3)\n            if not (type(KERs) == type(None)):\n                ker = torch.cat([ker, KERs[i]], 0)\n            co = F.conv2d(fea, ker.contiguous())\n            CORR.append(co)\n            ker = ker.unsqueeze(0)\n            Kernel.append(ker)\n        corr = torch.cat(CORR, 0)\n        Kernel = torch.cat(Kernel, 0)\n        return corr, Kernel\n"
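\n\n# ----- Usage sketch (illustrative, not part of the original file) -----\n# PrRoIPool2D only runs on CUDA tensors, so this is left as a comment. During\n# tracking, get_ref_kernel() is called once and fuse_feat() once per frame:\n#\n#   neck = CorrNL(pool_size=8).cuda()\n#   ref_feat = [torch.randn(1, 256, 16, 16).cuda()]      # single stride-16 feature map\n#   bb = torch.tensor([[[32., 32., 64., 64.]]]).cuda()   # (1, 1, 4), xywh in image coords\n#   neck.get_ref_kernel(ref_feat, bb)\n#   fused = neck.fuse_feat([torch.randn(1, 256, 16, 16).cuda()])   # -> (1, 64, 16, 16)\n"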
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/neck/neck_utils.py",
    "content": "import torch.nn as nn\nimport torch\nfrom torch.nn import functional as F\n\ndef conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):\n    return nn.Sequential(\n            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n                      padding=padding, dilation=dilation, bias=True),\n            nn.BatchNorm2d(out_planes),\n            nn.ReLU(inplace=True))\n\n'''Channel attention module'''\nclass SEModule(nn.Module):\n\n    def __init__(self, channels, reduction=4):\n        super(SEModule, self).__init__()\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,\n                             padding=0)\n        self.relu = nn.ReLU(inplace=True)\n        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,\n                             padding=0)\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n        module_input = x\n        x = self.avg_pool(x)\n        x = self.fc1(x)\n        x = self.relu(x)\n        x = self.fc2(x)\n        x = self.sigmoid(x)\n        return module_input * x\n'''Non-local module'''\nclass _NonLocalBlockND(nn.Module):\n    def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):\n        \"\"\"\n        :param in_channels:\n        :param inter_channels:\n        :param dimension:\n        :param sub_sample:\n        :param bn_layer:\n        \"\"\"\n        super(_NonLocalBlockND, self).__init__()\n\n        assert dimension in [1, 2, 3]\n\n        self.dimension = dimension\n        self.sub_sample = sub_sample\n\n        self.in_channels = in_channels\n        self.inter_channels = inter_channels\n\n        if self.inter_channels is None:\n            self.inter_channels = in_channels // 2\n            if self.inter_channels == 0:\n                self.inter_channels = 1\n\n        if dimension == 3:\n            conv_nd = nn.Conv3d\n            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))\n            bn = nn.BatchNorm3d\n        elif dimension == 2:\n            conv_nd = nn.Conv2d\n            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))\n            bn = nn.BatchNorm2d\n        else:\n            conv_nd = nn.Conv1d\n            max_pool_layer = nn.MaxPool1d(kernel_size=(2))\n            bn = nn.BatchNorm1d\n\n        self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n                         kernel_size=1, stride=1, padding=0)\n\n        if bn_layer:\n            self.W = nn.Sequential(\n                conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,\n                        kernel_size=1, stride=1, padding=0),\n                bn(self.in_channels)\n            )\n            nn.init.constant_(self.W[1].weight, 0)\n            nn.init.constant_(self.W[1].bias, 0)\n        else:\n            self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,\n                             kernel_size=1, stride=1, padding=0)\n            nn.init.constant_(self.W.weight, 0)\n            nn.init.constant_(self.W.bias, 0)\n\n        self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n                             kernel_size=1, stride=1, padding=0)\n        self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n                           kernel_size=1, stride=1, padding=0)\n\n        if sub_sample:\n      
      self.g = nn.Sequential(self.g, max_pool_layer)\n            self.phi = nn.Sequential(self.phi, max_pool_layer)\n\n    def forward(self, x, return_nl_map=False):\n        \"\"\"\n        :param x: (b, c, t, h, w)\n        :param return_nl_map: if True return z, nl_map, else only return z.\n        :return:\n        \"\"\"\n\n        batch_size = x.size(0)\n\n        g_x = self.g(x).view(batch_size, self.inter_channels, -1)\n        g_x = g_x.permute(0, 2, 1)\n\n        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)\n        theta_x = theta_x.permute(0, 2, 1)\n        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)\n        f = torch.matmul(theta_x, phi_x)\n        f_div_C = F.softmax(f, -1)\n\n        y = torch.matmul(f_div_C, g_x)\n        y = y.permute(0, 2, 1).contiguous()\n        y = y.view(batch_size, self.inter_channels, *x.size()[2:])\n        W_y = self.W(y)\n        z = W_y + x\n\n        if return_nl_map:\n            return z, f_div_C\n        return z\n\nclass NONLocalBlock2D(_NonLocalBlockND):\n    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):\n        super(NONLocalBlock2D, self).__init__(in_channels,\n                                              inter_channels=inter_channels,\n                                              dimension=2, sub_sample=sub_sample,\n                                              bn_layer=bn_layer,)\n\n\n\n"
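\n\n# ---------------------------------------------------------------------------\n# Illustrative usage sketch (editor's addition, not part of the original repo).\n# Both attention blocks map (B, C, H, W) -> (B, C, H, W), so they can be chained\n# exactly as CorrNL chains them on the pixel-wise correlation map; all shapes\n# here are hypothetical.\nif __name__ == '__main__':\n    feat = torch.randn(2, 64, 16, 16)       # e.g. a (B, St*St, Sr, Sr) correlation map\n    se = SEModule(64, reduction=4)          # channel attention\n    nl = NONLocalBlock2D(in_channels=64)    # spatial (non-local) attention\n    out = nl(se(feat))\n    assert out.shape == feat.shape\n"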
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/target_classifier/__init__.py",
    "content": "from .linear_filter import LinearFilter\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/target_classifier/features.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision.models.resnet import BasicBlock, Bottleneck\nfrom ltr.models.layers.normalization import InstanceL2Norm\nfrom ltr.models.layers.transform import InterpCat\n\n\ndef residual_basic_block(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,\n                         interp_cat=False, final_relu=False, init_pool=False):\n    \"\"\"Construct a network block based on the BasicBlock used in ResNet 18 and 34.\"\"\"\n    if out_dim is None:\n        out_dim = feature_dim\n    feat_layers = []\n    if interp_cat:\n        feat_layers.append(InterpCat())\n    if init_pool:\n        feat_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\n    for i in range(num_blocks):\n        odim = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim\n        feat_layers.append(BasicBlock(feature_dim, odim))\n    if final_conv:\n        feat_layers.append(nn.Conv2d(feature_dim, out_dim, kernel_size=3, padding=1, bias=False))\n        if final_relu:\n            feat_layers.append(nn.ReLU(inplace=True))\n    if l2norm:\n        feat_layers.append(InstanceL2Norm(scale=norm_scale))\n    return nn.Sequential(*feat_layers)\n\n\ndef residual_basic_block_pool(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,\n                              pool=True):\n    \"\"\"Construct a network block based on the BasicBlock used in ResNet.\"\"\"\n    if out_dim is None:\n        out_dim = feature_dim\n    feat_layers = []\n    for i in range(num_blocks):\n        odim = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim\n        feat_layers.append(BasicBlock(feature_dim, odim))\n    if final_conv:\n        feat_layers.append(nn.Conv2d(feature_dim, out_dim, kernel_size=3, padding=1, bias=False))\n    if pool:\n        feat_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\n    if l2norm:\n        feat_layers.append(InstanceL2Norm(scale=norm_scale))\n\n    return nn.Sequential(*feat_layers)\n\n\ndef residual_bottleneck(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,\n                        interp_cat=False, final_relu=False, final_pool=False):\n    \"\"\"Construct a network block based on the Bottleneck block used in ResNet.\"\"\"\n    if out_dim is None:\n        out_dim = feature_dim\n    feat_layers = []\n    if interp_cat:\n        feat_layers.append(InterpCat())\n    for i in range(num_blocks):\n        planes = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim // 4\n        feat_layers.append(Bottleneck(4*feature_dim, planes))\n    if final_conv:\n        feat_layers.append(nn.Conv2d(4*feature_dim, out_dim, kernel_size=3, padding=1, bias=False))\n        if final_relu:\n            feat_layers.append(nn.ReLU(inplace=True))\n        if final_pool:\n            feat_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\n    if l2norm:\n        feat_layers.append(InstanceL2Norm(scale=norm_scale))\n    return nn.Sequential(*feat_layers)"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/target_classifier/initializer.py",
    "content": "import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D\nfrom ltr.models.layers.blocks import conv_block\nimport math\n\n\nclass FilterPool(nn.Module):\n    \"\"\"Pool the target region in a feature map.\n    args:\n        filter_size:  Size of the filter.\n        feature_stride:  Input feature stride.\n        pool_square:  Do a square pooling instead of pooling the exact target region.\"\"\"\n\n    def __init__(self, filter_size=1, feature_stride=16, pool_square=False):\n        super().__init__()\n        self.prroi_pool = PrRoIPool2D(filter_size, filter_size, 1/feature_stride)\n        self.pool_square = pool_square\n\n    def forward(self, feat, bb):\n        \"\"\"Pool the regions in bb.\n        args:\n            feat:  Input feature maps. Dims (num_samples, feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (num_samples, 4).\n        returns:\n            pooled_feat:  Pooled features. Dims (num_samples, feat_dim, wH, wW).\"\"\"\n\n        # Add batch_index to rois\n        bb = bb.reshape(-1,4)\n        num_images_total = bb.shape[0]\n        batch_index = torch.arange(num_images_total, dtype=torch.float32).reshape(-1, 1).to(bb.device)\n\n        # input bb is in format xywh, convert it to x0y0x1y1 format\n        pool_bb = bb.clone()\n\n        if self.pool_square:\n            bb_sz = pool_bb[:, 2:4].prod(dim=1, keepdim=True).sqrt()\n            pool_bb[:, :2] += pool_bb[:, 2:]/2 - bb_sz/2\n            pool_bb[:, 2:] = bb_sz\n\n        pool_bb[:, 2:4] = pool_bb[:, 0:2] + pool_bb[:, 2:4]\n        roi1 = torch.cat((batch_index, pool_bb), dim=1)\n\n        return self.prroi_pool(feat, roi1)\n\n\n\nclass FilterInitializer(nn.Module):\n    \"\"\"Initializes a target classification filter by applying a number of conv layers before and after pooling the target region.\n    args:\n        filter_size:  Size of the filter.\n        feature_dim:  Input feature dimentionality.\n        feature_stride:  Input feature stride.\n        pool_square:  Do a square pooling instead of pooling the exact target region.\n        filter_norm:  Normalize the output filter with its size in the end.\n        num_filter_pre_convs:  Conv layers before pooling.\n        num_filter_post_convs:  Conv layers after pooling.\"\"\"\n\n    def __init__(self, filter_size=1, feature_dim=256, feature_stride=16, pool_square=False, filter_norm=True,\n                 num_filter_pre_convs=1, num_filter_post_convs=0):\n        super().__init__()\n\n        self.filter_pool = FilterPool(filter_size=filter_size, feature_stride=feature_stride, pool_square=pool_square)\n        self.filter_norm = filter_norm\n\n        # Make pre conv\n        pre_conv_layers = []\n        for i in range(num_filter_pre_convs):\n            pre_conv_layers.append(conv_block(feature_dim, feature_dim, kernel_size=3, padding=1))\n        self.filter_pre_layers = nn.Sequential(*pre_conv_layers) if pre_conv_layers else None\n\n        # Make post conv\n        post_conv_layers = []\n        for i in range(num_filter_post_convs):\n            post_conv_layers.append(conv_block(feature_dim, feature_dim, kernel_size=1, padding=0))\n        post_conv_layers.append(nn.Conv2d(feature_dim, feature_dim, kernel_size=1, padding=0))\n        self.filter_post_layers = nn.Sequential(*post_conv_layers)\n\n        # Init weights\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n         
       n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n\n    def forward(self, feat, bb):\n        \"\"\"Runs the initializer module.\n        Note that [] denotes an optional dimension.\n        args:\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n        returns:\n            weights:  The output weights. Dims (sequences, feat_dim, wH, wW).\"\"\"\n\n        num_images = bb.shape[0] if bb.dim() == 3 else 1\n\n        if self.filter_pre_layers is not None:\n            feat = self.filter_pre_layers(feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]))\n\n        feat_post = self.filter_pool(feat, bb)\n        weights = self.filter_post_layers(feat_post)\n\n        if num_images > 1:\n            weights = torch.mean(weights.reshape(num_images, -1, weights.shape[-3], weights.shape[-2], weights.shape[-1]), dim=0)\n\n        if self.filter_norm:\n            weights = weights / (weights.shape[1] * weights.shape[2] * weights.shape[3])\n\n        return weights\n\n\nclass FilterInitializerLinear(nn.Module):\n    \"\"\"Initializes a target classification filter by applying a linear conv layer and then pooling the target region.\n    args:\n        filter_size:  Size of the filter.\n        feature_dim:  Input feature dimentionality.\n        feature_stride:  Input feature stride.\n        pool_square:  Do a square pooling instead of pooling the exact target region.\n        filter_norm:  Normalize the output filter with its size in the end.\n        conv_ksz:  Kernel size of the conv layer before pooling.\"\"\"\n\n    def __init__(self, filter_size=1, feature_dim=256, feature_stride=16, pool_square=False, filter_norm=True,\n                 conv_ksz=3, init_weights='default'):\n        super().__init__()\n\n        self.filter_conv = nn.Conv2d(feature_dim, feature_dim, kernel_size=conv_ksz, padding=conv_ksz // 2)\n        self.filter_pool = FilterPool(filter_size=filter_size, feature_stride=feature_stride, pool_square=pool_square)\n        self.filter_norm = filter_norm\n\n        # Init weights\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                if init_weights == 'default':\n                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                    m.weight.data.normal_(0, math.sqrt(2. / n))\n                elif init_weights == 'zero':\n                    m.weight.data.zero_()\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n\n    def forward(self, feat, bb):\n        \"\"\"Runs the initializer module.\n        Note that [] denotes an optional dimension.\n        args:\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n        returns:\n            weights:  The output weights. 
Dims (sequences, feat_dim, wH, wW).\"\"\"\n\n        num_images = feat.shape[0]\n\n        feat = self.filter_conv(feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]))\n\n        weights = self.filter_pool(feat, bb)\n\n        # If multiple input images, compute the initial filter as the average filter.\n        if num_images > 1:\n            weights = torch.mean(weights.reshape(num_images, -1, weights.shape[-3], weights.shape[-2], weights.shape[-1]), dim=0)\n\n        if self.filter_norm:\n            weights = weights / (weights.shape[1] * weights.shape[2] * weights.shape[3])\n\n        return weights\n\n\n\nclass FilterInitializerZero(nn.Module):\n    \"\"\"Initializes a target classification filter with zeros.\n    args:\n        filter_size:  Size of the filter.\n        feature_dim:  Input feature dimentionality.\"\"\"\n\n    def __init__(self, filter_size=1, feature_dim=256):\n        super().__init__()\n\n        self.filter_size = (feature_dim, filter_size, filter_size)\n\n    def forward(self, feat, bb):\n        \"\"\"Runs the initializer module.\n        Note that [] denotes an optional dimension.\n        args:\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n        returns:\n            weights:  The output weights. Dims (sequences, feat_dim, wH, wW).\"\"\"\n\n        num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n\n        return feat.new_zeros(num_sequences, self.filter_size[0], self.filter_size[1], self.filter_size[2])\n\n\nclass FilterInitializerSiamese(nn.Module):\n    \"\"\"Initializes a target classification filter by only pooling the target region (similar to Siamese trackers).\n    args:\n        filter_size:  Size of the filter.\n        feature_stride:  Input feature stride.\n        pool_square:  Do a square pooling instead of pooling the exact target region.\n        filter_norm:  Normalize the output filter with its size in the end.\"\"\"\n\n    def __init__(self, filter_size=1, feature_stride=16, pool_square=False, filter_norm=True):\n        super().__init__()\n\n        self.filter_pool = FilterPool(filter_size=filter_size, feature_stride=feature_stride, pool_square=pool_square)\n        self.filter_norm = filter_norm\n\n        # Init weights\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n\n    def forward(self, feat, bb):\n        \"\"\"Runs the initializer module.\n        Note that [] denotes an optional dimension.\n        args:\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n        returns:\n            weights:  The output weights. 
Dims (sequences, feat_dim, wH, wW).\"\"\"\n\n        num_images = feat.shape[0]\n\n        feat = feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1])\n        weights = self.filter_pool(feat, bb)\n\n        if num_images > 1:\n            weights = torch.mean(weights.reshape(num_images, -1, weights.shape[-3], weights.shape[-2], weights.shape[-1]), dim=0)\n\n        if self.filter_norm:\n            weights = weights / (weights.shape[1] * weights.shape[2] * weights.shape[3])\n\n        return weights\n"
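\n\n# ---------------------------------------------------------------------------\n# Illustrative usage sketch (editor's addition, not part of the original repo).\n# FilterInitializerLinear applies one conv, PrRoIPools the target boxes, then\n# averages over the images in each sequence. PrRoIPool is a compiled CUDA op,\n# so a GPU is assumed; all shapes below are hypothetical.\nif __name__ == '__main__':\n    initializer = FilterInitializerLinear(filter_size=4, feature_dim=256, feature_stride=16).cuda()\n    feat = torch.randn(3, 2, 256, 18, 18).cuda()                    # (images, sequences, C, H, W)\n    bb = torch.tensor([10., 12., 48., 48.]).repeat(3, 2, 1).cuda()  # (images, sequences, 4), xywh\n    weights = initializer(feat, bb)\n    print(weights.shape)  # torch.Size([2, 256, 4, 4])\n"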
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/target_classifier/linear_filter.py",
    "content": "import torch.nn as nn\nimport ltr.models.layers.filter as filter_layer\nimport math\n\n\nclass LinearFilter(nn.Module):\n    \"\"\"Target classification filter module.\n    args:\n        filter_size:  Size of filter (int).\n        filter_initialize:  Filter initializer module.\n        filter_optimizer:  Filter optimizer module.\n        feature_extractor:  Feature extractor module applied to the input backbone features.\"\"\"\n\n    def __init__(self, filter_size, filter_initializer, filter_optimizer=None, feature_extractor=None):\n        super().__init__()\n\n        self.filter_size = filter_size\n\n        # Modules\n        self.filter_initializer = filter_initializer\n        self.filter_optimizer = filter_optimizer\n        self.feature_extractor = feature_extractor\n\n        # Init weights\n        for m in self.feature_extractor.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n    def forward(self, train_feat, test_feat, train_bb, *args, **kwargs):\n        \"\"\"Learns a target classification filter based on the train samples and return the resulting classification\n        scores on the test samples.\n        The forward function is ONLY used for training. Call the individual functions during tracking.\n        args:\n            train_feat:  Backbone features for the train samples (4 or 5 dims).\n            test_feat:  Backbone features for the test samples (4 or 5 dims).\n            trian_bb:  Target boxes (x,y,w,h) for the train samples in image coordinates. 
Dims (images, sequences, 4).\n            *args, **kwargs:  These are passed to the optimizer module.\n        returns:\n            test_scores:  Classification scores on the test samples.\"\"\"\n\n        assert train_bb.dim() == 3\n\n        num_sequences = train_bb.shape[1]\n\n        if train_feat.dim() == 5:\n            train_feat = train_feat.reshape(-1, *train_feat.shape[-3:])\n        if test_feat.dim() == 5:\n            test_feat = test_feat.reshape(-1, *test_feat.shape[-3:])\n\n        # Extract features\n        train_feat = self.extract_classification_feat(train_feat, num_sequences)\n        test_feat = self.extract_classification_feat(test_feat, num_sequences)\n\n        # Train filter\n        filter, filter_iter, losses = self.get_filter(train_feat, train_bb, *args, **kwargs)\n\n        # Classify samples using all return filters\n        test_scores = [self.classify(f, test_feat) for f in filter_iter]\n\n        return test_scores\n\n    def extract_classification_feat(self, feat, num_sequences=None):\n        \"\"\"Extract classification features based on the input backbone features.\"\"\"\n        if self.feature_extractor is None:\n            return feat\n        if num_sequences is None:\n            return self.feature_extractor(feat)\n\n        output = self.feature_extractor(feat)\n        return output.reshape(-1, num_sequences, *output.shape[-3:])\n\n    def classify(self, weights, feat):\n        \"\"\"Run classifier (filter) on the features (feat).\"\"\"\n\n        scores = filter_layer.apply_filter(feat, weights)\n\n        return scores\n\n    def get_filter(self, feat, bb, *args, **kwargs):\n        \"\"\"Outputs the learned filter based on the input features (feat) and target boxes (bb) by running the\n        filter initializer and optimizer. Note that [] denotes an optional dimension.\n        args:\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n            *args, **kwargs:  These are passed to the optimizer module.\n        returns:\n            weights:  The final oprimized weights. 
Dims (sequences, feat_dim, wH, wW).\n            weight_iterates:  The weights computed in each iteration (including initial input and final output).\n            losses:  Train losses.\"\"\"\n\n        weights = self.filter_initializer(feat, bb)\n\n        if self.filter_optimizer is not None:\n            weights, weights_iter, losses = self.filter_optimizer(weights, feat=feat, bb=bb, *args, **kwargs)\n        else:\n            weights_iter = [weights]\n            losses = None\n\n        return weights, weights_iter, losses\n\n    def train_classifier(self, backbone_feat, bb):\n        num_sequences = bb.shape[1]\n\n        if backbone_feat.dim() == 5:\n            backbone_feat = backbone_feat.reshape(-1, *backbone_feat.shape[-3:])\n\n        # Extract features\n        train_feat = self.extract_classification_feat(backbone_feat, num_sequences)\n\n        # Get filters from each iteration\n        final_filter, _, train_losses = self.get_filter(train_feat, bb)\n        return final_filter, train_losses\n\n    def track_frame(self, filter_weights, backbone_feat):\n        if backbone_feat.dim() == 5:\n            num_sequences = backbone_feat.shape[1]\n            backbone_feat = backbone_feat.reshape(-1, *backbone_feat.shape[-3:])\n        else:\n            num_sequences = None\n\n        test_feat = self.extract_classification_feat(backbone_feat, num_sequences)\n\n        scores = filter_layer.apply_filter(test_feat, filter_weights)\n\n        return scores"
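\n\n# ---------------------------------------------------------------------------\n# Illustrative usage sketch (editor's addition, not part of the original repo).\n# With no optimizer and an identity feature extractor, LinearFilter reduces to:\n# initialize a filter from the train samples, then correlate it with the test\n# features. FilterInitializerZero comes from the initializer module above;\n# nn.Identity is a hypothetical stand-in for the usual residual-block extractor.\nif __name__ == '__main__':\n    import torch\n    from ltr.models.target_classifier.initializer import FilterInitializerZero\n    clf = LinearFilter(filter_size=4,\n                       filter_initializer=FilterInitializerZero(filter_size=4, feature_dim=256),\n                       feature_extractor=nn.Identity())\n    train_feat = torch.randn(3, 2, 256, 18, 18)  # (images, sequences, C, H, W)\n    test_feat = torch.randn(3, 2, 256, 18, 18)\n    train_bb = torch.rand(3, 2, 4) * 100         # (images, sequences, 4), xywh\n    scores = clf(train_feat, test_feat, train_bb)\n    print(scores[0].shape)  # (images, sequences, H', W') score maps for the single filter iterate\n"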
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/target_classifier/optimizer.py",
    "content": "import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nimport ltr.models.layers.filter as filter_layer\nimport ltr.models.layers.activation as activation\nfrom ltr.models.layers.distance import DistanceMap\nimport math\n\n\n\nclass DiMPSteepestDescentGN(nn.Module):\n    \"\"\"Optimizer module for DiMP.\n    It unrolls the steepest descent with Gauss-Newton iterations to optimize the target filter.\n    Moreover it learns parameters in the loss itself, as described in the DiMP paper.\n    args:\n        num_iter:  Number of default optimization iterations.\n        feat_stride:  The stride of the input feature.\n        init_step_length:  Initial scaling of the step length (which is then learned).\n        init_filter_reg:  Initial filter regularization weight (which is then learned).\n        init_gauss_sigma:  The standard deviation to use for the initialization of the label function.\n        num_dist_bins:  Number of distance bins used for learning the loss label, mask and weight.\n        bin_displacement:  The displacement of the bins (level of discritization).\n        mask_init_factor:  Parameter controlling the initialization of the target mask.\n        score_act:  Type of score activation (target mask computation) to use. The default 'relu' is what is described in the paper.\n        act_param:  Parameter for the score_act.\n        min_filter_reg:  Enforce a minimum value on the regularization (helps stability sometimes).\n        mask_act:  What activation to do on the output of the mask computation ('sigmoid' or 'linear').\n        detach_length:  Detach the filter every n-th iteration. Default is to never detech, i.e. 'Inf'.\n        alpha_eps:  Term in the denominator of the steepest descent that stabalizes learning.\n    \"\"\"\n    def __init__(self, num_iter=1, feat_stride=16, init_step_length=1.0,\n                 init_filter_reg=1e-2, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0, mask_init_factor=4.0,\n                 score_act='relu', act_param=None, min_filter_reg=1e-3, mask_act='sigmoid',\n                 detach_length=float('Inf'), alpha_eps=0):\n        super().__init__()\n\n        self.num_iter = num_iter\n        self.feat_stride = feat_stride\n        self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1))\n        self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))\n        self.distance_map = DistanceMap(num_dist_bins, bin_displacement)\n        self.min_filter_reg = min_filter_reg\n        self.detach_length = detach_length\n        self.alpha_eps = alpha_eps\n\n        # Distance coordinates\n        d = torch.arange(num_dist_bins, dtype=torch.float32).reshape(1,-1,1,1) * bin_displacement\n        if init_gauss_sigma == 0:\n            init_gauss = torch.zeros_like(d)\n            init_gauss[0,0,0,0] = 1\n        else:\n            init_gauss = torch.exp(-1/2 * (d / init_gauss_sigma)**2)\n\n        # Module that predicts the target label function (y in the paper)\n        self.label_map_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)\n        self.label_map_predictor.weight.data = init_gauss - init_gauss.min()\n\n        # Module that predicts the target mask (m in the paper)\n        mask_layers = [nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)]\n        if mask_act == 'sigmoid':\n            mask_layers.append(nn.Sigmoid())\n            init_bias = 0.0\n        elif mask_act == 'linear':\n            init_bias = 0.5\n        else:\n            
raise ValueError('Unknown activation')\n        self.target_mask_predictor = nn.Sequential(*mask_layers)\n        self.target_mask_predictor[0].weight.data = mask_init_factor * torch.tanh(2.0 - d) + init_bias\n\n        # Module that predicts the residual weights (v in the paper)\n        self.spatial_weight_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)\n        self.spatial_weight_predictor.weight.data.fill_(1.0)\n\n        # The score actvation and its derivative\n        if score_act == 'bentpar':\n            self.score_activation = activation.BentIdentPar(act_param)\n            self.score_activation_deriv = activation.BentIdentParDeriv(act_param)\n        elif score_act == 'relu':\n            self.score_activation = activation.LeakyReluPar()\n            self.score_activation_deriv = activation.LeakyReluParDeriv()\n        else:\n            raise ValueError('Unknown score activation')\n\n\n    def forward(self, weights, feat, bb, sample_weight=None, num_iter=None, compute_losses=True):\n        \"\"\"Runs the optimizer module.\n        Note that [] denotes an optional dimension.\n        args:\n            weights:  Initial weights. Dims (sequences, feat_dim, wH, wW).\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n            sample_weight:  Optional weight for each sample. Dims: (images_in_sequence, [sequences]).\n            num_iter:  Number of iterations to run.\n            compute_losses:  Whether to compute the (train) loss in each iteration.\n        returns:\n            weights:  The final oprimized weights.\n            weight_iterates:  The weights computed in each iteration (including initial input and final output).\n            losses:  Train losses.\"\"\"\n\n        # Sizes\n        num_iter = self.num_iter if num_iter is None else num_iter\n        num_images = feat.shape[0]\n        num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n        filter_sz = (weights.shape[-2], weights.shape[-1])\n        output_sz = (feat.shape[-2] + (weights.shape[-2] + 1) % 2, feat.shape[-1] + (weights.shape[-1] + 1) % 2)\n\n        # Get learnable scalars\n        step_length_factor = torch.exp(self.log_step_length)\n        reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2)\n\n        # Compute distance map\n        dmap_offset = (torch.Tensor(filter_sz).to(bb.device) % 2) / 2.0\n        center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).reshape(-1, 2).flip((1,)) - dmap_offset\n        dist_map = self.distance_map(center, output_sz)\n\n        # Compute label map masks and weight\n        label_map = self.label_map_predictor(dist_map).reshape(num_images, num_sequences, *dist_map.shape[-2:])\n        target_mask = self.target_mask_predictor(dist_map).reshape(num_images, num_sequences, *dist_map.shape[-2:])\n        spatial_weight = self.spatial_weight_predictor(dist_map).reshape(num_images, num_sequences, *dist_map.shape[-2:])\n\n        # Get total sample weights\n        if sample_weight is None:\n            sample_weight = math.sqrt(1.0 / num_images) * spatial_weight\n        elif isinstance(sample_weight, torch.Tensor):\n            sample_weight = sample_weight.sqrt().reshape(num_images, num_sequences, 1, 1) * spatial_weight\n\n        backprop_through_learning = (self.detach_length > 0)\n\n        weight_iterates = [weights]\n        losses = 
[]\n\n        for i in range(num_iter):\n            if not backprop_through_learning or (i > 0 and i % self.detach_length == 0):\n                weights = weights.detach()\n\n            # Compute residuals\n            scores = filter_layer.apply_filter(feat, weights)\n            scores_act = self.score_activation(scores, target_mask)\n            score_mask = self.score_activation_deriv(scores, target_mask)\n            residuals = sample_weight * (scores_act - label_map)\n\n            if compute_losses:\n                losses.append(((residuals**2).sum() + reg_weight * (weights**2).sum())/num_sequences)\n\n            # Compute gradient\n            residuals_mapped = score_mask * (sample_weight * residuals)\n            weights_grad = filter_layer.apply_feat_transpose(feat, residuals_mapped, filter_sz, training=self.training) + \\\n                          reg_weight * weights\n\n            # Map the gradient with the Jacobian\n            scores_grad = filter_layer.apply_filter(feat, weights_grad)\n            scores_grad = sample_weight * (score_mask * scores_grad)\n\n            # Compute optimal step length\n            alpha_num = (weights_grad * weights_grad).sum(dim=(1,2,3))\n            alpha_den = ((scores_grad * scores_grad).reshape(num_images, num_sequences, -1).sum(dim=(0,2)) + (reg_weight + self.alpha_eps) * alpha_num).clamp(1e-8)\n            alpha = alpha_num / alpha_den\n\n            # Update filter\n            weights = weights - (step_length_factor * alpha.reshape(-1, 1, 1, 1)) * weights_grad\n\n            # Add the weight iterate\n            weight_iterates.append(weights)\n\n        if compute_losses:\n            scores = filter_layer.apply_filter(feat, weights)\n            scores = self.score_activation(scores, target_mask)\n            losses.append((((sample_weight * (scores - label_map))**2).sum() + reg_weight * (weights**2).sum())/num_sequences)\n\n        return weights, weight_iterates, losses\n\n\n\nclass DiMPL2SteepestDescentGN(nn.Module):\n    \"\"\"A simpler optimizer module that uses L2 loss.\n    args:\n        num_iter:  Number of default optimization iterations.\n        feat_stride:  The stride of the input feature.\n        init_step_length:  Initial scaling of the step length (which is then learned).\n        gauss_sigma:  The standard deviation of the label function.\n        hinge_threshold:  Threshold for the hinge-based loss (see DiMP paper).\n        init_filter_reg:  Initial filter regularization weight (which is then learned).\n        min_filter_reg:  Enforce a minimum value on the regularization (helps stability sometimes).\n        detach_length:  Detach the filter every n-th iteration. Default is to never detech, i.e. 
'Inf'.\n        alpha_eps:  Term in the denominator of the steepest descent that stabalizes learning.\n    \"\"\"\n    def __init__(self, num_iter=1, feat_stride=16, init_step_length=1.0, gauss_sigma=1.0, hinge_threshold=-999,\n                 init_filter_reg=1e-2, min_filter_reg=1e-3, detach_length=float('Inf'), alpha_eps=0.0):\n        super().__init__()\n\n        self.num_iter = num_iter\n        self.feat_stride = feat_stride\n        self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1))\n        self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))\n        self.min_filter_reg = min_filter_reg\n        self.detach_length = detach_length\n        self.hinge_threshold = hinge_threshold\n        self.gauss_sigma = gauss_sigma\n        self.alpha_eps = alpha_eps\n\n    def get_label(self, center, output_sz):\n        center = center.reshape(center.shape[0], -1, center.shape[-1])\n        k0 = torch.arange(output_sz[0], dtype=torch.float32).reshape(1, 1, -1, 1).to(center.device)\n        k1 = torch.arange(output_sz[1], dtype=torch.float32).reshape(1, 1, 1, -1).to(center.device)\n        g0 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * (k0 - center[:,:,0].reshape(*center.shape[:2], 1, 1)) ** 2)\n        g1 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * (k1 - center[:,:,1].reshape(*center.shape[:2], 1, 1)) ** 2)\n        gauss = g0 * g1\n        return gauss\n\n\n    def forward(self, weights, feat, bb, sample_weight=None, num_iter=None, compute_losses=True):\n        \"\"\"Runs the optimizer module.\n        Note that [] denotes an optional dimension.\n        args:\n            weights:  Initial weights. Dims (sequences, feat_dim, wH, wW).\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n            sample_weight:  Optional weight for each sample. 
Dims: (images_in_sequence, [sequences]).\n            num_iter:  Number of iterations to run.\n            compute_losses:  Whether to compute the (train) loss in each iteration.\n        returns:\n            weights:  The final oprimized weights.\n            weight_iterates:  The weights computed in each iteration (including initial input and final output).\n            losses:  Train losses.\"\"\"\n\n        # Sizes\n        num_iter = self.num_iter if num_iter is None else num_iter\n        num_images = feat.shape[0]\n        num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n        filter_sz = (weights.shape[-2], weights.shape[-1])\n        output_sz = (feat.shape[-2] + (weights.shape[-2] + 1) % 2, feat.shape[-1] + (weights.shape[-1] + 1) % 2)\n\n        # Get learnable scalars\n        step_length_factor = torch.exp(self.log_step_length)\n        reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2)\n\n        # Compute distance map\n        dmap_offset = (torch.Tensor(filter_sz).to(bb.device) % 2) / 2.0\n        center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).flip((-1,)) - dmap_offset\n        label_map = self.get_label(center, output_sz)\n        target_mask = (label_map > self.hinge_threshold).float()\n        label_map *= target_mask\n\n        # Get total sample weights\n        if sample_weight is None:\n            sample_weight = math.sqrt(1.0 / num_images)\n        elif isinstance(sample_weight, torch.Tensor):\n            sample_weight = sample_weight.sqrt().reshape(num_images, num_sequences, 1, 1)\n\n        weight_iterates = [weights]\n        losses = []\n\n        for i in range(num_iter):\n            if i > 0 and i % self.detach_length == 0:\n                weights = weights.detach()\n\n            # Compute residuals\n            scores = filter_layer.apply_filter(feat, weights)\n            scores_act = target_mask * scores + (1.0 - target_mask) * F.relu(scores)\n            score_mask = target_mask + (1.0 - target_mask) * (scores.detach() > 0).float()\n            residuals = sample_weight * (scores_act - label_map)\n\n            if compute_losses:\n                losses.append(((residuals**2).sum() + reg_weight * (weights**2).sum())/num_sequences)\n\n            # Compute gradient\n            residuals_mapped = score_mask * (sample_weight * residuals)\n            weights_grad = filter_layer.apply_feat_transpose(feat, residuals_mapped, filter_sz, training=self.training) + \\\n                          reg_weight * weights\n\n            # Map the gradient with the Jacobian\n            scores_grad = filter_layer.apply_filter(feat, weights_grad)\n            scores_grad = sample_weight * (score_mask * scores_grad)\n\n            # Compute optimal step length\n            alpha_num = (weights_grad * weights_grad).sum(dim=(1,2,3))\n            alpha_den = ((scores_grad * scores_grad).reshape(num_images, num_sequences, -1).sum(dim=(0,2)) + (reg_weight + self.alpha_eps) * alpha_num).clamp(1e-8)\n            alpha = alpha_num / alpha_den\n\n            # Update filter\n            weights = weights - (step_length_factor * alpha.reshape(-1, 1, 1, 1)) * weights_grad\n\n            # Add the weight iterate\n            weight_iterates.append(weights)\n\n        if compute_losses:\n            scores = filter_layer.apply_filter(feat, weights)\n            scores = target_mask * scores + (1.0 - target_mask) * F.relu(scores)\n            losses.append((((sample_weight * (scores - label_map))**2).sum() + reg_weight * 
(weights**2).sum())/num_sequences)\n\n        return weights, weight_iterates, losses\n\n\nclass PrDiMPSteepestDescentNewton(nn.Module):\n    \"\"\"Optimizer module for PrDiMP.\n    It unrolls the steepest descent with Newton iterations to optimize the target filter. See the PrDiMP paper.\n    args:\n        num_iter:  Number of default optimization iterations.\n        feat_stride:  The stride of the input feature.\n        init_step_length:  Initial scaling of the step length (which is then learned).\n        init_filter_reg:  Initial filter regularization weight (which is then learned).\n        gauss_sigma:  The standard deviation to use for the label density function.\n        min_filter_reg:  Enforce a minimum value on the regularization (helps stability sometimes).\n        detach_length:  Detach the filter every n-th iteration. Default is to never detech, i.e. 'Inf'.\n        alpha_eps:  Term in the denominator of the steepest descent that stabalizes learning.\n        init_uni_weight:  Weight of uniform label distribution.\n        normalize_label:  Wheter to normalize the label distribution.\n        label_shrink:  How much to shrink to label distribution.\n        softmax_reg:  Regularization in the denominator of the SoftMax.\n        label_threshold:  Threshold probabilities smaller than this.\n    \"\"\"\n    def __init__(self, num_iter=1, feat_stride=16, init_step_length=1.0,\n                 init_filter_reg=1e-2, gauss_sigma=1.0, min_filter_reg=1e-3, detach_length=float('Inf'),\n                 alpha_eps=0.0, init_uni_weight=None, normalize_label=False, label_shrink=0, softmax_reg=None, label_threshold=0.0):\n        super().__init__()\n\n        self.num_iter = num_iter\n        self.feat_stride = feat_stride\n        self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1))\n        self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))\n        self.gauss_sigma = gauss_sigma\n        self.min_filter_reg = min_filter_reg\n        self.detach_length = detach_length\n        self.alpha_eps = alpha_eps\n        self.uni_weight = 0 if init_uni_weight is None else init_uni_weight\n        self.normalize_label = normalize_label\n        self.label_shrink = label_shrink\n        self.softmax_reg = softmax_reg\n        self.label_threshold = label_threshold\n\n    def get_label_density(self, center, output_sz):\n        center = center.reshape(center.shape[0], -1, center.shape[-1])\n        k0 = torch.arange(output_sz[0], dtype=torch.float32).reshape(1, 1, -1, 1).to(center.device)\n        k1 = torch.arange(output_sz[1], dtype=torch.float32).reshape(1, 1, 1, -1).to(center.device)\n        dist0 = (k0 - center[:,:,0].reshape(*center.shape[:2], 1, 1)) ** 2\n        dist1 = (k1 - center[:,:,1].reshape(*center.shape[:2], 1, 1)) ** 2\n        if self.gauss_sigma == 0:\n            dist0_view = dist0.reshape(-1, dist0.shape[-2])\n            dist1_view = dist1.reshape(-1, dist1.shape[-1])\n            one_hot0 = torch.zeros_like(dist0_view)\n            one_hot1 = torch.zeros_like(dist1_view)\n            one_hot0[torch.arange(one_hot0.shape[0]), dist0_view.argmin(dim=-1)] = 1.0\n            one_hot1[torch.arange(one_hot1.shape[0]), dist1_view.argmin(dim=-1)] = 1.0\n            gauss = one_hot0.reshape(dist0.shape) * one_hot1.reshape(dist1.shape)\n        else:\n            g0 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * dist0)\n            g1 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * dist1)\n            gauss = (g0 / 
(2*math.pi*self.gauss_sigma**2)) * g1\n        gauss = gauss * (gauss > self.label_threshold).float()\n        if self.normalize_label:\n            gauss /= (gauss.sum(dim=(-2,-1), keepdim=True) + 1e-8)\n        label_dens = (1.0 - self.label_shrink)*((1.0 - self.uni_weight) * gauss + self.uni_weight / (output_sz[0]*output_sz[1]))\n        return label_dens\n\n    def forward(self, weights, feat, bb, sample_weight=None, num_iter=None, compute_losses=True):\n        \"\"\"Runs the optimizer module.\n        Note that [] denotes an optional dimension.\n        args:\n            weights:  Initial weights. Dims (sequences, feat_dim, wH, wW).\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n            sample_weight:  Optional weight for each sample. Dims: (images_in_sequence, [sequences]).\n            num_iter:  Number of iterations to run.\n            compute_losses:  Whether to compute the (train) loss in each iteration.\n        returns:\n            weights:  The final oprimized weights.\n            weight_iterates:  The weights computed in each iteration (including initial input and final output).\n            losses:  Train losses.\"\"\"\n\n        # Sizes\n        num_iter = self.num_iter if num_iter is None else num_iter\n        num_images = feat.shape[0]\n        num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n        filter_sz = (weights.shape[-2], weights.shape[-1])\n        output_sz = (feat.shape[-2] + (weights.shape[-2] + 1) % 2, feat.shape[-1] + (weights.shape[-1] + 1) % 2)\n\n        # Get learnable scalars\n        step_length_factor = torch.exp(self.log_step_length)\n        reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2)\n\n        # Compute label density\n        offset = (torch.Tensor(filter_sz).to(bb.device) % 2) / 2.0\n        center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).flip((-1,)) - offset\n        label_density = self.get_label_density(center, output_sz)\n\n        # Get total sample weights\n        if sample_weight is None:\n            sample_weight = torch.Tensor([1.0 / num_images]).to(feat.device)\n        elif isinstance(sample_weight, torch.Tensor):\n            sample_weight = sample_weight.reshape(num_images, num_sequences, 1, 1)\n\n        exp_reg = 0 if self.softmax_reg is None else math.exp(self.softmax_reg)\n        def _compute_loss(scores, weights):\n            return torch.sum(sample_weight.reshape(sample_weight.shape[0], -1) *\n                             (torch.log(scores.exp().sum(dim=(-2, -1)) + exp_reg) - (label_density * scores).sum(dim=(-2, -1)))) / num_sequences +\\\n                   reg_weight * (weights ** 2).sum() / num_sequences\n\n        weight_iterates = [weights]\n        losses = []\n\n        for i in range(num_iter):\n            if i > 0 and i % self.detach_length == 0:\n                weights = weights.detach()\n\n            # Compute \"residuals\"\n            scores = filter_layer.apply_filter(feat, weights)\n            scores_softmax = activation.softmax_reg(scores.reshape(num_images, num_sequences, -1), dim=2, reg=self.softmax_reg).reshape(scores.shape)\n            res = sample_weight*(scores_softmax - label_density)\n\n            if compute_losses:\n                losses.append(_compute_loss(scores, weights))\n\n            # Compute gradient\n            weights_grad = 
filter_layer.apply_feat_transpose(feat, res, filter_sz, training=self.training) + \\\n                          reg_weight * weights\n\n            # Map the gradient with the Hessian\n            scores_grad = filter_layer.apply_filter(feat, weights_grad)\n            sm_scores_grad = scores_softmax * scores_grad\n            hes_scores_grad = sm_scores_grad - scores_softmax * torch.sum(sm_scores_grad, dim=(-2,-1), keepdim=True)\n            grad_hes_grad = (scores_grad * hes_scores_grad).reshape(num_images, num_sequences, -1).sum(dim=2).clamp(min=0)\n            grad_hes_grad = (sample_weight.reshape(sample_weight.shape[0], -1) * grad_hes_grad).sum(dim=0)\n\n            # Compute optimal step length\n            alpha_num = (weights_grad * weights_grad).sum(dim=(1,2,3))\n            alpha_den = (grad_hes_grad + (reg_weight + self.alpha_eps) * alpha_num).clamp(1e-8)\n            alpha = alpha_num / alpha_den\n\n            # Update filter\n            weights = weights - (step_length_factor * alpha.reshape(-1, 1, 1, 1)) * weights_grad\n\n            # Add the weight iterate\n            weight_iterates.append(weights)\n\n        if compute_losses:\n            scores = filter_layer.apply_filter(feat, weights)\n            losses.append(_compute_loss(scores, weights))\n\n        return weights, weight_iterates, losses\n"
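\n\n# ---------------------------------------------------------------------------\n# Illustrative usage sketch (editor's addition, not part of the original repo).\n# One unrolled DiMP optimization run on random data: the module learns nothing\n# here, it only shows the expected tensor layout. All shapes are hypothetical;\n# a 1x1 filter keeps the score map the same size as the feature map.\nif __name__ == '__main__':\n    optimizer = DiMPSteepestDescentGN(num_iter=2, feat_stride=16)\n    weights = torch.zeros(2, 256, 1, 1)    # (sequences, feat_dim, wH, wW), e.g. from an initializer\n    feat = torch.randn(3, 2, 256, 18, 18)  # (images, sequences, feat_dim, H, W)\n    bb = torch.rand(3, 2, 4) * 100         # (images, sequences, 4), xywh in image coords\n    new_weights, iterates, losses = optimizer(weights, feat, bb)\n    print(new_weights.shape, len(iterates), len(losses))  # torch.Size([2, 256, 1, 1]) 3 3\n"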
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/target_classifier/residual_modules.py",
    "content": "import torch\nimport torch.nn as nn\nimport math\nimport ltr.models.layers.filter as filter_layer\nimport ltr.models.layers.activation as activation\nfrom ltr.models.layers.distance import DistanceMap\nfrom pytracking import TensorList\n\n\nclass LinearFilterLearnGen(nn.Module):\n    def __init__(self, feat_stride=16, init_filter_reg=1e-2, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0,\n                 mask_init_factor=4.0, score_act='bentpar', act_param=None, mask_act='sigmoid'):\n        super().__init__()\n\n        self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))\n        self.feat_stride = feat_stride\n        self.distance_map = DistanceMap(num_dist_bins, bin_displacement)\n\n        # Distance coordinates\n        d = torch.arange(num_dist_bins, dtype=torch.float32).reshape(1,-1,1,1) * bin_displacement\n        if init_gauss_sigma == 0:\n            init_gauss = torch.zeros_like(d)\n            init_gauss[0,0,0,0] = 1\n        else:\n            init_gauss = torch.exp(-1/2 * (d / init_gauss_sigma)**2)\n\n        self.label_map_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)\n        self.label_map_predictor.weight.data = init_gauss - init_gauss.min()\n\n        mask_layers = [nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)]\n        if mask_act == 'sigmoid':\n            mask_layers.append(nn.Sigmoid())\n            init_bias = 0.0\n        elif mask_act == 'linear':\n            init_bias = 0.5\n        else:\n            raise ValueError('Unknown activation')\n        self.target_mask_predictor = nn.Sequential(*mask_layers)\n        self.target_mask_predictor[0].weight.data = mask_init_factor * torch.tanh(2.0 - d) + init_bias\n\n        self.spatial_weight_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)\n        self.spatial_weight_predictor.weight.data.fill_(1.0)\n\n        if score_act == 'bentpar':\n            self.score_activation = activation.BentIdentPar(act_param)\n        elif score_act == 'relu':\n            self.score_activation = activation.LeakyReluPar()\n        else:\n            raise ValueError('Unknown activation')\n\n\n    def forward(self, meta_parameter: TensorList, feat, bb, sample_weight=None, is_distractor=None):\n        filter = meta_parameter[0]\n\n        num_images = feat.shape[0]\n        num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n        filter_sz = (filter.shape[-2], filter.shape[-1])\n\n        # Compute scores\n        scores = filter_layer.apply_filter(feat, filter)\n\n        # Compute distance map\n        center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).reshape(-1, 2).flip((1,))\n        if is_distractor is not None:\n            center[is_distractor.reshape(-1), :] = 99999\n        dist_map = self.distance_map(center, scores.shape[-2:])\n\n        # Compute label map masks and weight\n        label_map = self.label_map_predictor(dist_map).reshape(num_images, num_sequences, dist_map.shape[-2], dist_map.shape[-1])\n        target_mask = self.target_mask_predictor(dist_map).reshape(num_images, num_sequences, dist_map.shape[-2], dist_map.shape[-1])\n        spatial_weight = self.spatial_weight_predictor(dist_map).reshape(num_images, num_sequences, dist_map.shape[-2], dist_map.shape[-1])\n\n        if sample_weight is None:\n            sample_weight = math.sqrt(1.0 / num_images) * spatial_weight\n        elif isinstance(sample_weight, torch.Tensor):\n            sample_weight = sample_weight.sqrt().reshape(-1, 1, 1, 1) * 
spatial_weight\n\n        # Compute data residual\n        scores_act = self.score_activation(scores, target_mask)\n        data_residual = sample_weight * (scores_act - label_map)\n\n        # Compute regularization residual. Put batch in second dimension\n        reg_residual = self.filter_reg*filter.reshape(1, num_sequences, -1)\n\n        return TensorList([data_residual, reg_residual])\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/tracking/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/models/tracking/dimpnet.py",
    "content": "import math\nimport torch\nimport torch.nn as nn\nfrom collections import OrderedDict\nfrom ltr.models.meta import steepestdescent\nimport ltr.models.target_classifier.linear_filter as target_clf\nimport ltr.models.target_classifier.features as clf_features\nimport ltr.models.target_classifier.initializer as clf_initializer\nimport ltr.models.target_classifier.optimizer as clf_optimizer\nimport ltr.models.bbreg as bbmodels\nimport ltr.models.backbone as backbones\nfrom ltr import model_constructor\n\n\nclass DiMPnet(nn.Module):\n    \"\"\"The DiMP network.\n    args:\n        feature_extractor:  Backbone feature extractor network. Must return a dict of feature maps\n        classifier:  Target classification module.\n        bb_regressor:  Bounding box regression module.\n        classification_layer:  Name of the backbone feature layer to use for classification.\n        bb_regressor_layer:  Names of the backbone layers to use for bounding box regression.\"\"\"\n\n    def __init__(self, feature_extractor, classifier, bb_regressor, classification_layer, bb_regressor_layer):\n        super().__init__()\n\n        self.feature_extractor = feature_extractor\n        self.classifier = classifier\n        self.bb_regressor = bb_regressor\n        self.classification_layer = [classification_layer] if isinstance(classification_layer, str) else classification_layer\n        self.bb_regressor_layer = bb_regressor_layer\n        self.output_layers = sorted(list(set(self.classification_layer + self.bb_regressor_layer)))\n\n\n    def forward(self, train_imgs, test_imgs, train_bb, test_proposals, *args, **kwargs):\n        \"\"\"Runs the DiMP network the way it is applied during training.\n        The forward function is ONLY used for training. Call the individual functions during tracking.\n        args:\n            train_imgs:  Train image samples (images, sequences, 3, H, W).\n            test_imgs:  Test image samples (images, sequences, 3, H, W).\n            trian_bb:  Target boxes (x,y,w,h) for the train images. 
Dims (images, sequences, 4).\n            test_proposals:  Proposal boxes to use for the IoUNet (bb_regressor) module.\n            *args, **kwargs:  These are passed to the classifier module.\n        returns:\n            test_scores:  Classification scores on the test samples.\n            iou_pred:  Predicted IoU scores for the test_proposals.\"\"\"\n\n        assert train_imgs.dim() == 5 and test_imgs.dim() == 5, 'Expect 5 dimensional inputs'\n\n        # Extract backbone features\n        train_feat = self.extract_backbone_features(train_imgs.reshape(-1, *train_imgs.shape[-3:]))\n        test_feat = self.extract_backbone_features(test_imgs.reshape(-1, *test_imgs.shape[-3:]))\n\n        # Classification features\n        train_feat_clf = self.get_backbone_clf_feat(train_feat)\n        test_feat_clf = self.get_backbone_clf_feat(test_feat)\n\n        # Run classifier module\n        target_scores = self.classifier(train_feat_clf, test_feat_clf, train_bb, *args, **kwargs)\n\n        # Get bb_regressor features\n        train_feat_iou = self.get_backbone_bbreg_feat(train_feat)\n        test_feat_iou = self.get_backbone_bbreg_feat(test_feat)\n\n        # Run the IoUNet module\n        iou_pred = self.bb_regressor(train_feat_iou, test_feat_iou, train_bb, test_proposals)\n\n        return target_scores, iou_pred\n\n    def get_backbone_clf_feat(self, backbone_feat):\n        feat = OrderedDict({l: backbone_feat[l] for l in self.classification_layer})\n        if len(self.classification_layer) == 1:\n            return feat[self.classification_layer[0]]\n        return feat\n\n    def get_backbone_bbreg_feat(self, backbone_feat):\n        return [backbone_feat[l] for l in self.bb_regressor_layer]\n\n    def extract_classification_feat(self, backbone_feat):\n        return self.classifier.extract_classification_feat(self.get_backbone_clf_feat(backbone_feat))\n\n    def extract_backbone_features(self, im, layers=None):\n        if layers is None:\n            layers = self.output_layers\n        return self.feature_extractor(im, layers)\n\n    def extract_features(self, im, layers=None):\n        if layers is None:\n            layers = self.bb_regressor_layer + ['classification']\n        if 'classification' not in layers:\n            return self.feature_extractor(im, layers)\n        backbone_layers = sorted(list(set([l for l in layers + self.classification_layer if l != 'classification'])))\n        all_feat = self.feature_extractor(im, backbone_layers)\n        all_feat['classification'] = self.extract_classification_feat(all_feat)\n        return OrderedDict({l: all_feat[l] for l in layers})\n\n\n\n@model_constructor\ndef dimpnet18(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,\n              classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=1,\n              clf_feat_norm=True, init_filter_norm=False, final_conv=True,\n              out_feature_dim=256, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0,\n              mask_init_factor=4.0, iou_input_dim=(256, 256), iou_inter_dim=(256, 256),\n              score_act='relu', act_param=None, target_mask_act='sigmoid',\n              detach_length=float('Inf'), frozen_backbone_layers=()):\n    # Backbone\n    backbone_net = backbones.resnet18(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)\n\n    # Feature normalization\n    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))\n\n    # Classifier features\n    
clf_feature_extractor = clf_features.residual_basic_block(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,\n                                                              final_conv=final_conv, norm_scale=norm_scale,\n                                                              out_dim=out_feature_dim)\n\n    # Initializer for the DiMP classifier\n    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,\n                                                          feature_dim=out_feature_dim)\n\n    # Optimizer for the DiMP classifier\n    optimizer = clf_optimizer.DiMPSteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,\n                                                    init_step_length=optim_init_step,\n                                                    init_filter_reg=optim_init_reg, init_gauss_sigma=init_gauss_sigma,\n                                                    num_dist_bins=num_dist_bins,\n                                                    bin_displacement=bin_displacement,\n                                                    mask_init_factor=mask_init_factor,\n                                                    score_act=score_act, act_param=act_param, mask_act=target_mask_act,\n                                                    detach_length=detach_length)\n\n    # The classifier module\n    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,\n                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)\n\n    # Bounding box regressor\n    bb_regressor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    # DiMP network\n    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,\n                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])\n    return net\n\n\n@model_constructor\ndef dimpnet50(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,\n              classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=0,\n              clf_feat_norm=True, init_filter_norm=False, final_conv=True,\n              out_feature_dim=512, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0,\n              mask_init_factor=4.0, iou_input_dim=(256, 256), iou_inter_dim=(256, 256),\n              score_act='relu', act_param=None, target_mask_act='sigmoid',\n              detach_length=float('Inf'), frozen_backbone_layers=()):\n\n    # Backbone\n    backbone_net = backbones.resnet50(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)\n\n    # Feature normalization\n    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))\n\n    # Classifier features\n    if classification_layer == 'layer3':\n        feature_dim = 256\n    elif classification_layer == 'layer4':\n        feature_dim = 512\n    else:\n        raise ValueError('Unknown classification layer: {}'.format(classification_layer))\n\n    clf_feature_extractor = clf_features.residual_bottleneck(feature_dim=feature_dim,\n                                                             num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,\n                                                             final_conv=final_conv, norm_scale=norm_scale,\n                                                             out_dim=out_feature_dim)\n\n    # Initializer for the DiMP classifier\n    initializer = 
clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,\n                                                          feature_dim=out_feature_dim)\n\n    # Optimizer for the DiMP classifier\n    optimizer = clf_optimizer.DiMPSteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,\n                                                    init_step_length=optim_init_step,\n                                                    init_filter_reg=optim_init_reg, init_gauss_sigma=init_gauss_sigma,\n                                                    num_dist_bins=num_dist_bins,\n                                                    bin_displacement=bin_displacement,\n                                                    mask_init_factor=mask_init_factor,\n                                                    score_act=score_act, act_param=act_param, mask_act=target_mask_act,\n                                                    detach_length=detach_length)\n\n    # The classifier module\n    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,\n                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)\n\n    # Bounding box regressor\n    bb_regressor = bbmodels.AtomIoUNet(input_dim=(4*128,4*256), pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    # DiMP network\n    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,\n                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])\n    return net\n\n\n\n@model_constructor\ndef L2dimpnet18(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,\n              classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=1,\n              clf_feat_norm=True, init_filter_norm=False, final_conv=True,\n              out_feature_dim=256, iou_input_dim=(256, 256), iou_inter_dim=(256, 256),\n              detach_length=float('Inf'), hinge_threshold=-999, gauss_sigma=1.0, alpha_eps=0):\n    # Backbone\n    backbone_net = backbones.resnet18(pretrained=backbone_pretrained)\n\n    # Feature normalization\n    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))\n\n    # Classifier features\n    clf_feature_extractor = clf_features.residual_basic_block(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,\n                                                              final_conv=final_conv, norm_scale=norm_scale,\n                                                              out_dim=out_feature_dim)\n\n    # Initializer for the DiMP classifier\n    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,\n                                                          feature_dim=out_feature_dim)\n\n    # Optimizer for the DiMP classifier\n    optimizer = clf_optimizer.DiMPL2SteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,\n                                                    init_step_length=optim_init_step, hinge_threshold=hinge_threshold,\n                                                    init_filter_reg=optim_init_reg, gauss_sigma=gauss_sigma,\n                                                    detach_length=detach_length, alpha_eps=alpha_eps)\n\n    # The classifier module\n    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,\n                                         
filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)\n\n    # Bounding box regressor\n    bb_regressor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    # DiMP network\n    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,\n                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])\n    return net\n\n\n@model_constructor\ndef klcedimpnet18(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,\n                  classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=1,\n                  clf_feat_norm=True, init_filter_norm=False, final_conv=True,\n                  out_feature_dim=256, gauss_sigma=1.0,\n                  iou_input_dim=(256, 256), iou_inter_dim=(256, 256),\n                  detach_length=float('Inf'), alpha_eps=0.0, train_feature_extractor=True,\n                  init_uni_weight=None, optim_min_reg=1e-3, init_initializer='default', normalize_label=False,\n                  label_shrink=0, softmax_reg=None, label_threshold=0, final_relu=False, init_pool_square=False,\n                  frozen_backbone_layers=()):\n\n    if not train_feature_extractor:\n        frozen_backbone_layers = 'all'\n\n    # Backbone\n    backbone_net = backbones.resnet18(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)\n\n    # Feature normalization\n    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))\n\n    # Classifier features\n    clf_feature_extractor = clf_features.residual_basic_block(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,\n                                                              final_conv=final_conv, norm_scale=norm_scale,\n                                                              out_dim=out_feature_dim, final_relu=final_relu)\n\n    # Initializer for the DiMP classifier\n    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,\n                                                          feature_dim=out_feature_dim, init_weights=init_initializer,\n                                                          pool_square=init_pool_square)\n\n    # Optimizer for the DiMP classifier\n    optimizer = clf_optimizer.PrDiMPSteepestDescentNewton(num_iter=optim_iter, feat_stride=feat_stride,\n                                                          init_step_length=optim_init_step,\n                                                          init_filter_reg=optim_init_reg, gauss_sigma=gauss_sigma,\n                                                          detach_length=detach_length, alpha_eps=alpha_eps,\n                                                          init_uni_weight=init_uni_weight,\n                                                          min_filter_reg=optim_min_reg, normalize_label=normalize_label,\n                                                          label_shrink=label_shrink, softmax_reg=softmax_reg,\n                                                          label_threshold=label_threshold)\n\n    # The classifier module\n    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,\n                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)\n\n    # Bounding box regressor\n    bb_regressor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    # 
DiMP network\n    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,\n                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])\n    return net\n\n\n@model_constructor\ndef klcedimpnet50(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,\n                  classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=0,\n                  clf_feat_norm=True, init_filter_norm=False, final_conv=True,\n                  out_feature_dim=512, gauss_sigma=1.0,\n                  iou_input_dim=(256, 256), iou_inter_dim=(256, 256),\n                  detach_length=float('Inf'), alpha_eps=0.0, train_feature_extractor=True,\n                  init_uni_weight=None, optim_min_reg=1e-3, init_initializer='default', normalize_label=False,\n                  label_shrink=0, softmax_reg=None, label_threshold=0, final_relu=False, frozen_backbone_layers=()):\n\n    if not train_feature_extractor:\n        frozen_backbone_layers = 'all'\n\n    # Backbone\n    backbone_net = backbones.resnet50(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)\n\n    # Feature normalization\n    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))\n\n    # Classifier features\n    clf_feature_extractor = clf_features.residual_bottleneck(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,\n                                                             final_conv=final_conv, norm_scale=norm_scale,\n                                                             out_dim=out_feature_dim, final_relu=final_relu)\n\n    # Initializer for the DiMP classifier\n    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,\n                                                          feature_dim=out_feature_dim, init_weights=init_initializer)\n\n    # Optimizer for the DiMP classifier\n    optimizer = clf_optimizer.PrDiMPSteepestDescentNewton(num_iter=optim_iter, feat_stride=feat_stride,\n                                                          init_step_length=optim_init_step,\n                                                          init_filter_reg=optim_init_reg, gauss_sigma=gauss_sigma,\n                                                          detach_length=detach_length, alpha_eps=alpha_eps,\n                                                          init_uni_weight=init_uni_weight,\n                                                          min_filter_reg=optim_min_reg, normalize_label=normalize_label,\n                                                          label_shrink=label_shrink, softmax_reg=softmax_reg,\n                                                          label_threshold=label_threshold)\n\n    # The classifier module\n    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,\n                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)\n\n    # Bounding box regressor\n    bb_regressor = bbmodels.AtomIoUNet(input_dim=(4*128,4*256), pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    # DiMP network\n    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,\n                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])\n    return net\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/run_training.py",
    "content": "import os\nimport sys\nimport argparse\nimport importlib\nimport multiprocessing\nimport cv2 as cv\nimport torch.backends.cudnn\n\nenv_path = os.path.join(os.path.dirname(__file__), '..')\nif env_path not in sys.path:\n    sys.path.append(env_path)\n\nimport ltr.admin.settings as ws_settings\n\n\ndef run_training(train_module, train_name, cudnn_benchmark=True):\n    \"\"\"Run a train scripts in train_settings.\n    args:\n        train_module: Name of module in the \"train_settings/\" folder.\n        train_name: Name of the train settings file.\n        cudnn_benchmark: Use cudnn benchmark or not (default is True).\n    \"\"\"\n\n    # This is needed to avoid strange crashes related to opencv\n    cv.setNumThreads(0)\n\n    torch.backends.cudnn.benchmark = cudnn_benchmark\n\n    print('Training:  {}  {}'.format(train_module, train_name))\n\n    settings = ws_settings.Settings()\n    settings.module_name = train_module\n    settings.script_name = train_name\n    settings.project_path = 'ltr/{}/{}'.format(train_module, train_name)\n\n    expr_module = importlib.import_module('ltr.train_settings.{}.{}'.format(train_module, train_name))\n    expr_func = getattr(expr_module, 'run')\n\n    expr_func(settings)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Run a train scripts in train_settings.')\n    parser.add_argument('train_module', type=str, help='Name of module in the \"train_settings/\" folder.')\n    parser.add_argument('train_name', type=str, help='Name of the train settings file.')\n    parser.add_argument('--cudnn_benchmark', type=bool, default=True, help='Set cudnn benchmark on (1) or off (0) (default is on).')\n\n    args = parser.parse_args()\n\n    run_training(args.train_module, args.train_name, args.cudnn_benchmark)\n\n\nif __name__ == '__main__':\n    multiprocessing.set_start_method('spawn', force=True)\n    main()\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/train_settings/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/train_settings/bbreg/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/train_settings/bbreg/atom.py",
    "content": "import torch.nn as nn\nimport torch.optim as optim\nfrom ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k\nfrom ltr.data import processing, sampler, LTRLoader\nimport ltr.models.bbreg.atom as atom_models\nfrom ltr import actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\n\n\ndef run(settings):\n    # Most common settings are assigned in the settings struct\n    settings.description = 'ATOM IoUNet with default settings, but additionally using GOT10k for training.'\n    settings.batch_size = 64\n    settings.num_workers = 8\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 0, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n    # The joint augmentation transform, that is applied to the pairs jointly\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    # The augmentation transform applied to the training set (individually to each image in the pair)\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The augmentation transform applied to the validation set (individually to each image in the pair)\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # Data processing to do on the training pairs\n    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}\n    data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      transform=transform_train,\n                                                      joint_transform=transform_joint)\n\n    # Data processing to do on the validation pairs\n    data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,\n                                                    output_sz=settings.output_sz,\n                                                    center_jitter_factor=settings.center_jitter_factor,\n                                                    scale_jitter_factor=settings.scale_jitter_factor,\n                                                    mode='sequence',\n                                                    proposal_params=proposal_params,\n   
                                                 transform=transform_val,\n                                                    joint_transform=transform_joint)\n\n    # The sampler for training\n    dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],\n                                samples_per_epoch=1000*settings.batch_size, max_gap=50, processing=data_processing_train)\n\n    # The loader for training\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # The sampler for validation\n    dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50,\n                                      processing=data_processing_val)\n\n    # The loader for validation\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = atom_models.atom_resnet18(backbone_pretrained=True)\n    objective = nn.MSELoss()\n    actor = actors.AtomActor(net=net, objective=objective)\n\n    # Optimizer\n    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    # Create trainer\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    # Run training (set fail_safe=False if you are debugging)\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/train_settings/bbreg/atom_gmm_sampl.py",
    "content": "import torch.nn as nn\nimport torch.optim as optim\nfrom ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k\nfrom ltr.data import processing, sampler, LTRLoader\nimport ltr.models.bbreg.atom as atom_models\nfrom ltr import actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\n\n\ndef run(settings):\n    # Most common settings are assigned in the settings struct\n    settings.description = 'ATOM IoUNet using the baseline ATOM* settings in [https://arxiv.org/abs/1909.12297].' \\\n                           'Unlike standard ATOM, it employs the GMM-based proposal sampling and minor parameter changes.'\n    settings.batch_size = 64\n    settings.num_workers = 8\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 0, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n    # The joint augmentation transform, that is applied to the pairs jointly\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    # The augmentation transform applied to the training set (individually to each image in the pair)\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The augmentation transform applied to the validation set (individually to each image in the pair)\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # Data processing to do on the training pairs\n    proposal_params = {'proposal_method': 'gmm', 'boxes_per_frame': 128, 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}\n    data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      transform=transform_train,\n                                                      joint_transform=transform_joint)\n\n    # Data processing to do on the validation pairs\n    data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,\n                                                    output_sz=settings.output_sz,\n                                                    center_jitter_factor=settings.center_jitter_factor,\n                                                    scale_jitter_factor=settings.scale_jitter_factor,\n              
                                      mode='sequence',\n                                                    proposal_params=proposal_params,\n                                                    transform=transform_val,\n                                                    joint_transform=transform_joint)\n\n    # The sampler for training\n    dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],\n                                samples_per_epoch=1000*settings.batch_size, max_gap=200, processing=data_processing_train)\n\n    # The loader for training\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # The sampler for validation\n    dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=200,\n                                      processing=data_processing_val)\n\n    # The loader for validation\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = atom_models.atom_resnet18(backbone_pretrained=True)\n    objective = nn.MSELoss()\n    actor = actors.AtomActor(net=net, objective=objective)\n\n    # Optimizer\n    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    # Create trainer\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    # Run training (set fail_safe=False if you are debugging)\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/train_settings/bbreg/atom_paper.py",
    "content": "import torch.nn as nn\nimport torch.optim as optim\nfrom ltr.dataset import Lasot, TrackingNet, MSCOCOSeq\nfrom ltr.data import processing, sampler, LTRLoader\nimport ltr.models.bbreg.atom as atom_models\nfrom ltr import actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\n\n\ndef run(settings):\n    # Most common settings are assigned in the settings struct\n    settings.description = 'ATOM IoUNet with default settings according to the paper.'\n    settings.batch_size = 64\n    settings.num_workers = 8\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 0, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(11)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    trackingnet_val = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(11,12)))\n\n    # The joint augmentation transform, that is applied to the pairs jointly\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    # The augmentation transform applied to the training set (individually to each image in the pair)\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The augmentation transform applied to the validation set (individually to each image in the pair)\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # Data processing to do on the training pairs\n    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}\n    data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      transform=transform_train,\n                                                      joint_transform=transform_joint)\n\n    # Data processing to do on the validation pairs\n    data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,\n                                                    output_sz=settings.output_sz,\n                                                    center_jitter_factor=settings.center_jitter_factor,\n                                                    scale_jitter_factor=settings.scale_jitter_factor,\n                                                    mode='sequence',\n                                                    proposal_params=proposal_params,\n                                                    
transform=transform_val,\n                                                    joint_transform=transform_joint)\n\n    # The sampler for training\n    dataset_train = sampler.ATOMSampler([lasot_train, trackingnet_train, coco_train], [1,1,1],\n                                samples_per_epoch=1000*settings.batch_size, max_gap=50, processing=data_processing_train)\n\n    # The loader for training\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # The sampler for validation\n    dataset_val = sampler.ATOMSampler([trackingnet_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50,\n                              processing=data_processing_val)\n\n    # The loader for validation\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = atom_models.atom_resnet18(backbone_pretrained=True)\n    objective = nn.MSELoss()\n    actor = actors.AtomActor(net=net, objective=objective)\n\n    # Optimizer\n    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    # Create trainer\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    # Run training (set fail_safe=False if you are debugging)\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/train_settings/bbreg/atom_prob_ml.py",
    "content": "import torch.optim as optim\nfrom ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k\nfrom ltr.data import processing, sampler, LTRLoader\nimport ltr.models.bbreg.atom as atom_models\nimport ltr.models.loss.kl_regression as klreg_losses\nimport ltr.actors.bbreg as bbreg_actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\n\n\ndef run(settings):\n    # Most common settings are assigned in the settings struct\n    settings.description = 'ATOM using the probabilistic maximum likelihood trained regression model for bounding-box' \\\n                           'regression presented in [https://arxiv.org/abs/1909.12297].'\n    settings.batch_size = 64\n    settings.num_workers = 8\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 0, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n    # The joint augmentation transform, that is applied to the pairs jointly\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    # The augmentation transform applied to the training set (individually to each image in the pair)\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The augmentation transform applied to the validation set (individually to each image in the pair)\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # Data processing to do on the training pairs\n    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0, 0), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)],\n                       'add_mean_box': True}\n    data_processing_train = processing.KLBBregProcessing(search_area_factor=settings.search_area_factor,\n                                                         output_sz=settings.output_sz,\n                                                         center_jitter_factor=settings.center_jitter_factor,\n                                                         scale_jitter_factor=settings.scale_jitter_factor,\n                                                         mode='sequence',\n                                                         proposal_params=proposal_params,\n                                                         transform=transform_train,\n                                                         joint_transform=transform_joint)\n\n    # Data processing to do on the validation pairs\n    data_processing_val = processing.KLBBregProcessing(search_area_factor=settings.search_area_factor,\n                                                       output_sz=settings.output_sz,\n                                                       center_jitter_factor=settings.center_jitter_factor,\n                         
                              scale_jitter_factor=settings.scale_jitter_factor,\n                                                       mode='sequence',\n                                                       proposal_params=proposal_params,\n                                                       transform=transform_val,\n                                                       joint_transform=transform_joint)\n\n    # The sampler for training\n    dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],\n                                samples_per_epoch=1000*settings.batch_size, max_gap=200, processing=data_processing_train)\n\n    # The loader for training\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # The sampler for validation\n    dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=200,\n                                      processing=data_processing_val)\n\n    # The loader for validation\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = atom_models.atom_resnet18(backbone_pretrained=True)\n    objective = klreg_losses.MLRegression()\n    actor = bbreg_actors.AtomBBKLActor(net=net, objective=objective)\n\n    # Optimizer\n    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    # Create trainer\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    # Run training (set fail_safe=False if you are debugging)\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/train_settings/dimp/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/train_settings/dimp/dimp18.py",
    "content": "import torch.nn as nn\nimport torch.optim as optim\nfrom ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq\nfrom ltr.data import processing, sampler, LTRLoader\nfrom ltr.models.tracking import dimpnet\nimport ltr.models.loss as ltr_losses\nfrom ltr import actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\nfrom ltr import MultiGPU\n\n\ndef run(settings):\n    settings.description = 'Default train settings for DiMP with ResNet18 as backbone.'\n    settings.batch_size = 26\n    settings.num_workers = 8\n    settings.multi_gpu = False\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.output_sigma_factor = 1/4\n    settings.target_filter_sz = 4\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 3, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}\n    settings.hinge_threshold = 0.05\n    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/init_loss', 'ClfTrain/test_loss']\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n\n    # Data transform\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The tracking pairs processing module\n    output_sigma = settings.output_sigma_factor / settings.search_area_factor\n    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 8, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}\n    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}\n    data_processing_train = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      label_function_params=label_params,\n                                                      transform=transform_train,\n                                                      joint_transform=transform_joint)\n\n    data_processing_val = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                    output_sz=settings.output_sz,\n                                                    center_jitter_factor=settings.center_jitter_factor,\n                                                    
scale_jitter_factor=settings.scale_jitter_factor,\n                                                    mode='sequence',\n                                                    proposal_params=proposal_params,\n                                                    label_function_params=label_params,\n                                                    transform=transform_val,\n                                                    joint_transform=transform_joint)\n\n    # Train sampler and loader\n    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],\n                                        samples_per_epoch=26000, max_gap=30, num_test_frames=3, num_train_frames=3,\n                                        processing=data_processing_train)\n\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # Validation samplers and loaders\n    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=30,\n                                      num_test_frames=3, num_train_frames=3,\n                                      processing=data_processing_val)\n\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = dimpnet.dimpnet18(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,\n                            clf_feat_norm=True, final_conv=True, optim_init_step=0.9, optim_init_reg=0.1,\n                            init_gauss_sigma=output_sigma * settings.feature_sz, num_dist_bins=100,\n                            bin_displacement=0.1, mask_init_factor=3.0, target_mask_act='sigmoid', score_act='relu')\n\n    # Wrap the network for multi GPU training\n    if settings.multi_gpu:\n        net = MultiGPU(net, dim=1)\n\n    objective = {'iou': nn.MSELoss(), 'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)}\n\n    loss_weight = {'iou': 1, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}\n\n    actor = actors.DiMPActor(net=net, objective=objective, loss_weight=loss_weight)\n\n    # Optimizer\n    optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-5},\n                            {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 5e-4},\n                            {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-5},\n                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},\n                            {'params': actor.net.feature_extractor.parameters()}],\n                           lr=2e-4)\n\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/train_settings/dimp/dimp50.py",
    "content": "import torch.nn as nn\nimport torch.optim as optim\nfrom ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq\nfrom ltr.data import processing, sampler, LTRLoader\nfrom ltr.models.tracking import dimpnet\nimport ltr.models.loss as ltr_losses\nfrom ltr import actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\nfrom ltr import MultiGPU\n\n\ndef run(settings):\n    settings.description = 'Default train settings for DiMP with ResNet50 as backbone.'\n    settings.batch_size = 10\n    settings.num_workers = 8\n    settings.multi_gpu = False\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.output_sigma_factor = 1/4\n    settings.target_filter_sz = 4\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 3, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}\n    settings.hinge_threshold = 0.05\n    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/clf_ce', 'ClfTrain/test_loss']\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n\n    # Data transform\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The tracking pairs processing module\n    output_sigma = settings.output_sigma_factor / settings.search_area_factor\n    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 8, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}\n    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}\n    data_processing_train = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      label_function_params=label_params,\n                                                      transform=transform_train,\n                                                      joint_transform=transform_joint)\n\n    data_processing_val = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                    output_sz=settings.output_sz,\n                                                    center_jitter_factor=settings.center_jitter_factor,\n                                                    
scale_jitter_factor=settings.scale_jitter_factor,\n                                                    mode='sequence',\n                                                    proposal_params=proposal_params,\n                                                    label_function_params=label_params,\n                                                    transform=transform_val,\n                                                    joint_transform=transform_joint)\n\n    # Train sampler and loader\n    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],\n                                        samples_per_epoch=26000, max_gap=30, num_test_frames=3, num_train_frames=3,\n                                        processing=data_processing_train)\n\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # Validation samplers and loaders\n    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=30,\n                                      num_test_frames=3, num_train_frames=3,\n                                      processing=data_processing_val)\n\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = dimpnet.dimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,\n                            clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512,\n                            optim_init_step=0.9, optim_init_reg=0.1,\n                            init_gauss_sigma=output_sigma * settings.feature_sz, num_dist_bins=100,\n                            bin_displacement=0.1, mask_init_factor=3.0, target_mask_act='sigmoid', score_act='relu')\n\n    # Wrap the network for multi GPU training\n    if settings.multi_gpu:\n        net = MultiGPU(net, dim=1)\n\n    objective = {'iou': nn.MSELoss(), 'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)}\n\n    loss_weight = {'iou': 1, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}\n\n    actor = actors.DiMPActor(net=net, objective=objective, loss_weight=loss_weight)\n\n    # Optimizer\n    optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-5},\n                            {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 5e-4},\n                            {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-5},\n                            {'params': actor.net.bb_regressor.parameters()},\n                            {'params': actor.net.feature_extractor.parameters(), 'lr': 2e-5}],\n                           lr=2e-4)\n\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/train_settings/dimp/prdimp18.py",
    "content": "import torch.optim as optim\nfrom ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq\nfrom ltr.data import processing, sampler, LTRLoader\nfrom ltr.models.tracking import dimpnet\nimport ltr.models.loss as ltr_losses\nimport ltr.models.loss.kl_regression as klreg_losses\nimport ltr.actors.tracking as tracking_actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\nfrom ltr import MultiGPU\n\n\ndef run(settings):\n    settings.description = 'Default train settings for PrDiMP with ResNet18 as backbone.'\n    settings.batch_size = 26\n    settings.num_workers = 8\n    settings.multi_gpu = False\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.output_sigma_factor = 1/4\n    settings.target_filter_sz = 4\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 3, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}\n    settings.hinge_threshold = 0.05\n    settings.print_stats = ['Loss/total', 'Loss/bb_ce', 'ClfTrain/clf_ce']\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n\n    # Data transform\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The tracking pairs processing module\n    output_sigma = settings.output_sigma_factor / settings.search_area_factor\n    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}\n    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}\n    label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz, 'normalize': True}\n\n    data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                        output_sz=settings.output_sz,\n                                                        center_jitter_factor=settings.center_jitter_factor,\n                                                        scale_jitter_factor=settings.scale_jitter_factor,\n                                                        mode='sequence',\n                                                        proposal_params=proposal_params,\n                                                        label_function_params=label_params,\n                                                        label_density_params=label_density_params,\n                                                        transform=transform_train,\n                                                        joint_transform=transform_joint)\n\n    data_processing_val = 
processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      label_function_params=label_params,\n                                                      label_density_params=label_density_params,\n                                                      transform=transform_val,\n                                                      joint_transform=transform_joint)\n\n    # Train sampler and loader\n    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],\n                                        samples_per_epoch=26000, max_gap=200, num_test_frames=3, num_train_frames=3,\n                                        processing=data_processing_train)\n\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # Validation samplers and loaders\n    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=200,\n                                      num_test_frames=3, num_train_frames=3,\n                                      processing=data_processing_val)\n\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = dimpnet.klcedimpnet18(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,\n                            clf_feat_norm=True, final_conv=True, optim_init_step=1.0, optim_init_reg=0.05, optim_min_reg=0.05,\n                            gauss_sigma=output_sigma * settings.feature_sz, alpha_eps=0.05, normalize_label=True, init_initializer='zero')\n\n    # Wrap the network for multi GPU training\n    if settings.multi_gpu:\n        net = MultiGPU(net, dim=1)\n\n    objective = {'bb_ce': klreg_losses.KLRegression(), 'clf_ce': klreg_losses.KLRegressionGrid()}\n\n    loss_weight = {'bb_ce': 0.0025, 'clf_ce': 0.25, 'clf_ce_init': 0.25, 'clf_ce_iter': 1.0}\n\n    actor = tracking_actors.KLDiMPActor(net=net, objective=objective, loss_weight=loss_weight)\n\n    # Optimizer\n    optimizer = optim.Adam([{'params': actor.net.classifier.parameters(), 'lr': 1e-3},\n                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},\n                            {'params': actor.net.feature_extractor.parameters()}],\n                           lr=2e-4)\n\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/train_settings/dimp/prdimp50.py",
    "content": "import torch.optim as optim\nfrom ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq\nfrom ltr.data import processing, sampler, LTRLoader\nfrom ltr.models.tracking import dimpnet\nimport ltr.models.loss as ltr_losses\nimport ltr.models.loss.kl_regression as klreg_losses\nimport ltr.actors.tracking as tracking_actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\nfrom ltr import MultiGPU\n\n\ndef run(settings):\n    settings.description = 'Default train settings for PrDiMP with ResNet50 as backbone.'\n    settings.batch_size = 10\n    settings.num_workers = 8\n    settings.multi_gpu = False\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.output_sigma_factor = 1/4\n    settings.target_filter_sz = 4\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 3, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}\n    settings.hinge_threshold = 0.05\n    settings.print_stats = ['Loss/total', 'Loss/bb_ce', 'ClfTrain/clf_ce']\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n\n    # Data transform\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The tracking pairs processing module\n    output_sigma = settings.output_sigma_factor / settings.search_area_factor\n    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}\n    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}\n    label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz, 'normalize': True}\n\n    data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                        output_sz=settings.output_sz,\n                                                        center_jitter_factor=settings.center_jitter_factor,\n                                                        scale_jitter_factor=settings.scale_jitter_factor,\n                                                        mode='sequence',\n                                                        proposal_params=proposal_params,\n                                                        label_function_params=label_params,\n                                                        label_density_params=label_density_params,\n                                                        transform=transform_train,\n                                                        joint_transform=transform_joint)\n\n    data_processing_val = 
processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      label_function_params=label_params,\n                                                      label_density_params=label_density_params,\n                                                      transform=transform_val,\n                                                      joint_transform=transform_joint)\n\n    # Train sampler and loader\n    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],\n                                        samples_per_epoch=26000, max_gap=200, num_test_frames=3, num_train_frames=3,\n                                        processing=data_processing_train)\n\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # Validation samplers and loaders\n    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=200,\n                                      num_test_frames=3, num_train_frames=3,\n                                      processing=data_processing_val)\n\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = dimpnet.klcedimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,\n                                clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512,\n                                optim_init_step=1.0, optim_init_reg=0.05, optim_min_reg=0.05,\n                                gauss_sigma=output_sigma * settings.feature_sz, alpha_eps=0.05, normalize_label=True, init_initializer='zero')\n\n    # Wrap the network for multi GPU training\n    if settings.multi_gpu:\n        net = MultiGPU(net, dim=1)\n\n    objective = {'bb_ce': klreg_losses.KLRegression(), 'clf_ce': klreg_losses.KLRegressionGrid()}\n\n    loss_weight = {'bb_ce': 0.0025, 'clf_ce': 0.25, 'clf_ce_init': 0.25, 'clf_ce_iter': 1.0}\n\n    actor = tracking_actors.KLDiMPActor(net=net, objective=objective, loss_weight=loss_weight)\n\n    # Optimizer\n    optimizer = optim.Adam([{'params': actor.net.classifier.parameters(), 'lr': 1e-3},\n                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},\n                            {'params': actor.net.feature_extractor.parameters(), 'lr': 2e-5}],\n                           lr=2e-4)\n\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/train_settings/dimp/super_dimp.py",
    "content": "import torch.optim as optim\nfrom ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq\nfrom ltr.data import processing, sampler, LTRLoader\nfrom ltr.models.tracking import dimpnet\nimport ltr.models.loss as ltr_losses\nimport ltr.models.loss.kl_regression as klreg_losses\nimport ltr.actors.tracking as tracking_actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\nfrom ltr import MultiGPU\n\n\ndef run(settings):\n    settings.description = 'SuperDiMP: Combines the DiMP classifier with the PrDiMP bounding box regressor and better' \\\n                           'training settings (larger batch size, inside_major cropping, and flipping augmentation.' \\\n                           'Gives results significantly better than both DiMP-50 and PrDiMP-50.'\n    settings.batch_size = 20\n    settings.num_workers = 8\n    settings.multi_gpu = False\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 6.0\n    settings.output_sigma_factor = 1/4\n    settings.target_filter_sz = 4\n    settings.feature_sz = 22\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 3, 'test': 5.5}\n    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}\n    settings.hinge_threshold = 0.05\n    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/init_loss', 'ClfTrain/test_loss']\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n\n    # Data transform\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05),\n                                    tfm.RandomHorizontalFlip(probability=0.5))\n\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.RandomHorizontalFlip(probability=0.5),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The tracking pairs processing module\n    output_sigma = settings.output_sigma_factor / settings.search_area_factor\n    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}\n    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}\n    label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}\n\n    data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                        output_sz=settings.output_sz,\n                                                        center_jitter_factor=settings.center_jitter_factor,\n                                                        scale_jitter_factor=settings.scale_jitter_factor,\n                                                        crop_type='inside_major',\n                                                        
max_scale_change=1.5,\n                                                        mode='sequence',\n                                                        proposal_params=proposal_params,\n                                                        label_function_params=label_params,\n                                                        label_density_params=label_density_params,\n                                                        transform=transform_train,\n                                                        joint_transform=transform_joint)\n\n    data_processing_val = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      crop_type='inside_major',\n                                                      max_scale_change=1.5,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      label_function_params=label_params,\n                                                      label_density_params=label_density_params,\n                                                      transform=transform_val,\n                                                      joint_transform=transform_joint)\n\n    # Train sampler and loader\n    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],\n                                        samples_per_epoch=40000, max_gap=200, num_test_frames=3, num_train_frames=3,\n                                        processing=data_processing_train)\n\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # Validation samplers and loaders\n    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=10000, max_gap=200,\n                                      num_test_frames=3, num_train_frames=3,\n                                      processing=data_processing_val)\n\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = dimpnet.dimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,\n                            clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512,\n                            optim_init_step=0.9, optim_init_reg=0.1,\n                            init_gauss_sigma=output_sigma * settings.feature_sz, num_dist_bins=100,\n                            bin_displacement=0.1, mask_init_factor=3.0, target_mask_act='sigmoid', score_act='relu',\n                            frozen_backbone_layers=['conv1', 'bn1', 'layer1', 'layer2'])\n\n    # Wrap the network for multi GPU training\n    if settings.multi_gpu:\n        net = MultiGPU(net, dim=1)\n\n    objective = {'bb_ce': klreg_losses.KLRegression(), 'test_clf': 
ltr_losses.LBHinge(threshold=settings.hinge_threshold)}\n\n    loss_weight = {'bb_ce': 0.01, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}\n\n    actor = tracking_actors.KLDiMPActor(net=net, objective=objective, loss_weight=loss_weight)\n\n    # Optimizer\n    optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-5},\n                            {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 5e-4},\n                            {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-5},\n                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},\n                            {'params': actor.net.feature_extractor.layer3.parameters(), 'lr': 2e-5}],\n                           lr=2e-4)\n\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/trainers/__init__.py",
    "content": "from .base_trainer import BaseTrainer\nfrom .ltr_trainer import LTRTrainer"
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/trainers/base_trainer.py",
    "content": "import os\nimport glob\nimport torch\nimport traceback\nfrom ltr.admin import loading, multigpu\n\n\nclass BaseTrainer:\n    \"\"\"Base trainer class. Contains functions for training and saving/loading chackpoints.\n    Trainer classes should inherit from this one and overload the train_epoch function.\"\"\"\n\n    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):\n        \"\"\"\n        args:\n            actor - The actor for training the network\n            loaders - list of dataset loaders, e.g. [train_loader, val_loader]. In each epoch, the trainer runs one\n                        epoch for each loader.\n            optimizer - The optimizer used for training, e.g. Adam\n            settings - Training settings\n            lr_scheduler - Learning rate scheduler\n        \"\"\"\n        self.actor = actor\n        self.optimizer = optimizer\n        self.lr_scheduler = lr_scheduler\n        self.loaders = loaders\n\n        self.update_settings(settings)\n\n        self.epoch = 0\n        self.stats = {}\n\n        self.device = getattr(settings, 'device', None)\n        if self.device is None:\n            self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() and settings.use_gpu else \"cpu\")\n\n        self.actor.to(self.device)\n\n    def update_settings(self, settings=None):\n        \"\"\"Updates the trainer settings. Must be called to update internal settings.\"\"\"\n        if settings is not None:\n            self.settings = settings\n\n        if self.settings.env.workspace_dir is not None:\n            self.settings.env.workspace_dir = os.path.expanduser(self.settings.env.workspace_dir)\n            self._checkpoint_dir = os.path.join(self.settings.env.workspace_dir, 'checkpoints')\n            if not os.path.exists(self._checkpoint_dir):\n                os.makedirs(self._checkpoint_dir)\n        else:\n            self._checkpoint_dir = None\n\n\n    def train(self, max_epochs, load_latest=False, fail_safe=True):\n        \"\"\"Do training for the given number of epochs.\n        args:\n            max_epochs - Max number of training epochs,\n            load_latest - Bool indicating whether to resume from latest epoch.\n            fail_safe - Bool indicating whether the training to automatically restart in case of any crashes.\n        \"\"\"\n\n        epoch = -1\n        num_tries = 10\n        for i in range(num_tries):\n            try:\n                if load_latest:\n                    self.load_checkpoint()\n\n                for epoch in range(self.epoch+1, max_epochs+1):\n                    self.epoch = epoch\n\n                    self.train_epoch()\n\n                    if self.lr_scheduler is not None:\n                        self.lr_scheduler.step()\n\n                    if self._checkpoint_dir:\n                        self.save_checkpoint()\n            except:\n                print('Training crashed at epoch {}'.format(epoch))\n                if fail_safe:\n                    self.epoch -= 1\n                    load_latest = True\n                    print('Traceback for the error!')\n                    print(traceback.format_exc())\n                    print('Restarting training from last epoch ...')\n                else:\n                    raise\n\n        print('Finished training!')\n\n\n    def train_epoch(self):\n        raise NotImplementedError\n\n\n    def save_checkpoint(self):\n        \"\"\"Saves a checkpoint of the network and other variables.\"\"\"\n\n        net = 
self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net\n\n        actor_type = type(self.actor).__name__\n        net_type = type(net).__name__\n        state = {\n            'epoch': self.epoch,\n            'actor_type': actor_type,\n            'net_type': net_type,\n            'net': net.state_dict(),\n            'net_info': getattr(net, 'info', None),\n            'constructor': getattr(net, 'constructor', None),\n            'optimizer': self.optimizer.state_dict(),\n            'stats': self.stats,\n            'settings': self.settings\n        }\n\n\n        directory = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path)\n        if not os.path.exists(directory):\n            os.makedirs(directory)\n\n        # First save as a tmp file\n        tmp_file_path = '{}/{}_ep{:04d}.tmp'.format(directory, net_type, self.epoch)\n        torch.save(state, tmp_file_path)\n\n        file_path = '{}/{}_ep{:04d}.pth.tar'.format(directory, net_type, self.epoch)\n\n        # Now rename to actual checkpoint. os.rename seems to be atomic if files are on same filesystem. Not 100% sure\n        os.rename(tmp_file_path, file_path)\n\n\n    def load_checkpoint(self, checkpoint = None, fields = None, ignore_fields = None, load_constructor = False):\n        \"\"\"Loads a network checkpoint file.\n\n        Can be called in three different ways:\n            load_checkpoint():\n                Loads the latest epoch from the workspace. Use this to continue training.\n            load_checkpoint(epoch_num):\n                Loads the network at the given epoch number (int).\n            load_checkpoint(path_to_checkpoint):\n                Loads the file from the given absolute path (str).\n        \"\"\"\n\n        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net\n\n        actor_type = type(self.actor).__name__\n        net_type = type(net).__name__\n\n        if checkpoint is None:\n            # Load most recent checkpoint\n            checkpoint_list = sorted(glob.glob('{}/{}/{}_ep*.pth.tar'.format(self._checkpoint_dir,\n                                                                             self.settings.project_path, net_type)))\n            if checkpoint_list:\n                checkpoint_path = checkpoint_list[-1]\n            else:\n                print('No matching checkpoint file found')\n                return\n        elif isinstance(checkpoint, int):\n            # Checkpoint is the epoch number\n            checkpoint_path = '{}/{}/{}_ep{:04d}.pth.tar'.format(self._checkpoint_dir, self.settings.project_path,\n                                                                 net_type, checkpoint)\n        elif isinstance(checkpoint, str):\n            # checkpoint is the path\n            if os.path.isdir(checkpoint):\n                checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint)))\n                if checkpoint_list:\n                    checkpoint_path = checkpoint_list[-1]\n                else:\n                    raise Exception('No checkpoint found')\n            else:\n                checkpoint_path = os.path.expanduser(checkpoint)\n        else:\n            raise TypeError\n\n        # Load network\n        checkpoint_dict = loading.torch_load_legacy(checkpoint_path)\n\n        assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'\n\n        if fields is None:\n            fields = checkpoint_dict.keys()\n        if ignore_fields is 
None:\n            ignore_fields = ['settings']\n\n        # Never load the scheduler. It exists in older checkpoints.\n        ignore_fields.extend(['lr_scheduler', 'constructor', 'net_type', 'actor_type', 'net_info'])\n\n        # Load all fields\n        for key in fields:\n            if key in ignore_fields:\n                continue\n            if key == 'net':\n                net.load_state_dict(checkpoint_dict[key])\n            elif key == 'optimizer':\n                self.optimizer.load_state_dict(checkpoint_dict[key])\n            else:\n                setattr(self, key, checkpoint_dict[key])\n\n        # Set the net info\n        if load_constructor and 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None:\n            net.constructor = checkpoint_dict['constructor']\n        if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:\n            net.info = checkpoint_dict['net_info']\n\n        # Update the epoch in lr scheduler (guard against trainers created without a scheduler)\n        if 'epoch' in fields and self.lr_scheduler is not None:\n            self.lr_scheduler.last_epoch = self.epoch\n\n        return True\n
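\n\n# Minimal usage sketch (editor addition, hypothetical): concrete trainers only\n# need to overload train_epoch(); checkpointing, fail-safe restarts and LR\n# stepping are inherited from BaseTrainer. Device transfer and statistics are\n# omitted here; see LTRTrainer for the full version.\nclass _SketchTrainer(BaseTrainer):\n    def train_epoch(self):\n        for loader in self.loaders:\n            for data in loader:\n                loss, stats = self.actor(data)\n                if loader.training:\n                    self.optimizer.zero_grad()\n                    loss.backward()\n                    self.optimizer.step()\n"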
  },
  {
    "path": "artrackv2_mindspore/external/AR/ltr/trainers/ltr_trainer.py",
    "content": "import os\nfrom collections import OrderedDict\nfrom ltr.trainers import BaseTrainer\nfrom ltr.admin.stats import AverageMeter, StatValue\nfrom ltr.admin.tensorboard import TensorboardWriter\nimport torch\nimport time\n\n\nclass LTRTrainer(BaseTrainer):\n    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):\n        \"\"\"\n        args:\n            actor - The actor for training the network\n            loaders - list of dataset loaders, e.g. [train_loader, val_loader]. In each epoch, the trainer runs one\n                        epoch for each loader.\n            optimizer - The optimizer used for training, e.g. Adam\n            settings - Training settings\n            lr_scheduler - Learning rate scheduler\n        \"\"\"\n        super().__init__(actor, loaders, optimizer, settings, lr_scheduler)\n\n        self._set_default_settings()\n\n        # Initialize statistics variables\n        self.stats = OrderedDict({loader.name: None for loader in self.loaders})\n\n        # Initialize tensorboard\n        tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path)\n        self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders])\n\n        self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True)\n\n    def _set_default_settings(self):\n        # Dict of all default values\n        default = {'print_interval': 10,\n                   'print_stats': None,\n                   'description': ''}\n\n        for param, default_value in default.items():\n            if getattr(self.settings, param, None) is None:\n                setattr(self.settings, param, default_value)\n\n    def cycle_dataset(self, loader):\n        \"\"\"Do a cycle of training or validation.\"\"\"\n\n        self.actor.train(loader.training)\n        torch.set_grad_enabled(loader.training)\n\n        self._init_timing()\n\n        for i, data in enumerate(loader, 1):\n            # get inputs\n            if self.move_data_to_gpu:\n                data = data.to(self.device)\n\n            data['epoch'] = self.epoch\n            data['settings'] = self.settings\n\n            # forward pass\n            loss, stats = self.actor(data)\n\n            # backward pass and update weights\n            if loader.training:\n                self.optimizer.zero_grad()\n                loss.backward()\n                self.optimizer.step()\n\n            # update statistics\n            batch_size = data['train_images'].shape[loader.stack_dim]\n            self._update_stats(stats, batch_size, loader)\n\n            # print statistics\n            self._print_stats(i, loader, batch_size)\n\n    def train_epoch(self):\n        \"\"\"Do one epoch for each loader.\"\"\"\n        for loader in self.loaders:\n            if self.epoch % loader.epoch_interval == 0:\n                self.cycle_dataset(loader)\n\n        self._stats_new_epoch()\n        self._write_tensorboard()\n\n    def _init_timing(self):\n        self.num_frames = 0\n        self.start_time = time.time()\n        self.prev_time = self.start_time\n\n    def _update_stats(self, new_stats: OrderedDict, batch_size, loader):\n        # Initialize stats if not initialized yet\n        if loader.name not in self.stats.keys() or self.stats[loader.name] is None:\n            self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()})\n\n        for name, val in new_stats.items():\n            if name not in 
self.stats[loader.name].keys():\n                self.stats[loader.name][name] = AverageMeter()\n            self.stats[loader.name][name].update(val, batch_size)\n\n    def _print_stats(self, i, loader, batch_size):\n        self.num_frames += batch_size\n        current_time = time.time()\n        batch_fps = batch_size / (current_time - self.prev_time)\n        average_fps = self.num_frames / (current_time - self.start_time)\n        self.prev_time = current_time\n        if i % self.settings.print_interval == 0 or i == len(loader):\n            print_str = '[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, len(loader))\n            print_str += 'FPS: %.1f (%.1f)  ,  ' % (average_fps, batch_fps)\n            for name, val in self.stats[loader.name].items():\n                if (self.settings.print_stats is None or name in self.settings.print_stats) and hasattr(val, 'avg'):\n                    print_str += '%s: %.5f  ,  ' % (name, val.avg)\n            print(print_str[:-5])\n\n    def _stats_new_epoch(self):\n        # Record learning rate\n        for loader in self.loaders:\n            if loader.training and self.lr_scheduler is not None:\n                lr_list = self.lr_scheduler.get_last_lr()\n                for i, lr in enumerate(lr_list):\n                    var_name = 'LearningRate/group{}'.format(i)\n                    if var_name not in self.stats[loader.name].keys():\n                        self.stats[loader.name][var_name] = StatValue()\n                    self.stats[loader.name][var_name].update(lr)\n\n        for loader_stats in self.stats.values():\n            if loader_stats is None:\n                continue\n            for stat_value in loader_stats.values():\n                if hasattr(stat_value, 'new_epoch'):\n                    stat_value.new_epoch()\n\n    def _write_tensorboard(self):\n        if self.epoch == 1:\n            self.tensorboard_writer.write_info(self.settings.module_name, self.settings.script_name, self.settings.description)\n\n        self.tensorboard_writer.write_epoch(self.stats, self.epoch)"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/ARcm_seg.py",
    "content": "import os\nimport sys\nimport torch\nimport numpy as np\nimport cv2\nimport torch.nn as nn\nfrom external.AR.pytracking.utils.loading import load_network\nfrom external.AR.ltr.data.processing_utils_SE import sample_target_SE, transform_image_to_crop_SE, map_mask_back\nenv_path = os.path.join(os.path.dirname(__file__), '..')\nif env_path not in sys.path:\n    sys.path.append(env_path)\n\n\ndef mask_torch2numpy(Pmask):\n    Pmask_arr = np.array(Pmask.squeeze().cpu())  # (H,W) (0,1)\n    return Pmask_arr\n\n\nclass ARcm_seg(object):\n    def __init__(self, refine_net_dir, search_factor=2.0, input_sz=256):\n        self.refine_network = self.get_network(refine_net_dir)\n        self.search_factor = search_factor\n        self.input_sz = input_sz\n        self.mean = np.array([0.485, 0.456, 0.406]).reshape((1,1,3))\n        self.std = np.array([0.229, 0.224, 0.225]).reshape((1,1,3))\n\n    def initialize(self, frame1, bbox1):\n        '''\n        :param frame1: cv array (H,W,3)\n        :param bbox1: ndarray (4,)\n        :return:\n        '''\n        '''Step1: get cropped patch(tensor)'''\n        patch1, h_f, w_f = sample_target_SE(frame1, bbox1, self.search_factor, self.input_sz, mode=cv2.BORDER_CONSTANT)\n        patch1_tensor = self.img_preprocess(patch1)\n        '''Step2: get GT's cooridinate on the cropped patch(tensor)'''\n        crop_sz = torch.Tensor((self.input_sz, self.input_sz))\n        bbox1_tensor = self.gt_preprocess(bbox1) # (4,)\n        bbox1_crop_tensor = transform_image_to_crop_SE(bbox1_tensor, bbox1_tensor, h_f, w_f, crop_sz).cuda()\n        '''Step3: forward prop (reference branch)'''\n        with torch.no_grad():\n            self.refine_network.forward_ref(patch1_tensor, bbox1_crop_tensor)\n\n    '''refine'''\n    def get_mask(self, Cframe, Cbbox, dtm=None, vis=False):\n        '''\n        :param Cframe: Current frame(cv2 array)\n        :param Cbbox: Current bbox (ndarray) (x1,y1,w,h)\n        :return: mask\n        '''\n        '''Step1: get cropped patch(tensor)'''\n        Cpatch, h_f, w_f = sample_target_SE(Cframe, Cbbox, self.search_factor, self.input_sz, mode=cv2.BORDER_CONSTANT)\n        Cpatch_tensor = self.img_preprocess(Cpatch)\n\n        '''Step2: forward prop (test branch)'''\n        with torch.no_grad():\n            if dtm is not None:\n                '''2020.4.26 support input dtm'''\n                pred = self.refine_network.forward_test(Cpatch_tensor, dtm, mode='mask')\n            else:\n                pred = self.refine_network.forward_test(Cpatch_tensor,mode='mask')\n            Pmask_arr = mask_torch2numpy(pred)\n            mask_arr = map_mask_back(Cframe, Cbbox, self.search_factor, Pmask_arr,\n                                     mode=cv2.BORDER_CONSTANT)\n            if vis:\n                return mask_arr, Cpatch, Pmask_arr\n            else:\n                return mask_arr\n\n    def get_network(self,checkpoint_dir):\n        network = load_network(checkpoint_dir)\n        network.cuda()\n        network.eval()\n        return network\n\n    def img_preprocess(self,img_arr):\n        '''---> Pytorch tensor(RGB),Normal(-1 to 1,subtract mean, divide std)\n        input img_arr (H,W,3)\n        output (1,1,3,H,W)\n        '''\n        norm_img = ((img_arr/255.0) - self.mean)/(self.std)\n        img_f32 = norm_img.astype(np.float32)\n        img_tensor = torch.from_numpy(img_f32).cuda()\n        img_tensor = img_tensor.permute((2,0,1))\n        return img_tensor.unsqueeze(dim=0).unsqueeze(dim=0)\n\n    def 
gt_preprocess(self,gt_arr):\n        '''\n        :param gt_arr: ndarray (4,)\n        :return: torch tensor (4,)\n        '''\n        return torch.from_numpy(gt_arr.astype(np.float32))\n\n\ndef add_frame_mask(frame, mask, threshold=0.5):\n    mask_new = (mask>threshold)*255 #(H,W)\n    frame_new = frame.copy().astype(np.float64)  # np.float is deprecated; float64 keeps the original behavior\n    frame_new[...,1] += 0.3*mask_new\n    frame_new = frame_new.clip(0,255).astype(np.uint8)\n    return frame_new\n\n\ndef add_frame_bbox(frame, refined_box, color):\n    x1, y1, w, h = refined_box.tolist()\n    cv2.rectangle(frame, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)\n    return frame\n
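\n\n# Usage sketch (editor addition): how ARcm_seg is typically driven per video.\n# 'REFINE_CKPT_DIR' and the frame/box lists are placeholders, not shipped assets.\nif __name__ == '__main__':\n    frames = []  # list of (H,W,3) uint8 frames, e.g. read with cv2.imread\n    boxes = []   # per-frame (x1,y1,w,h) ndarrays from a base tracker\n    if frames:\n        ar = ARcm_seg('REFINE_CKPT_DIR', input_sz=384)\n        ar.initialize(frames[0], boxes[0])\n        masks = [ar.get_mask(f, b) for f, b in zip(frames[1:], boxes[1:])]\n"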
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/VOT/tracker_DiMP.m",
    "content": "% Set path to the python in the pytracking conda environment\npython_path = 'PATH_TO_CONDA_INSTALLATION/envs/pytracking/bin/python';\n\n% Set path to pytracking\npytracking_path = 'PATH_TO_VISIONML/pytracking';\n\n% Set path to trax installation. Check\n% https://trax.readthedocs.io/en/latest/tutorial_compiling.html for\n% compilation information\ntrax_path = 'PATH_TO_VOT_TOOLKIT/native/trax';\n\ntracker_name = 'dimp';          % Name of the tracker to evaluate\nrunfile_name = 'dimp18_vot';    % Name of the parameter file to use\ndebug = 0;\n\n%%\ntracker_label = [tracker_name, '_', runfile_name];\n\n% Generate python command\ntracker_command = sprintf(['%s -c \"import sys; sys.path.append(''%s'');', ...\n                           'sys.path.append(''%s/support/python'');', ...\n                           'import run_vot;', ...\n                           'run_vot.run_vot(''%s'', ''%s'', debug=%d)\"'],...\n                           python_path, pytracking_path, trax_path, ...\n                           tracker_name, runfile_name, debug);\n\n\ntracker_interpreter = python_path;\n\ntracker_linkpath = {[trax_path, '/build'],...\n\t\t[trax_path, '/build/support/client'],...\n\t\t[trax_path, '/build/support/opencv']};\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/VOT/trackers.ini",
    "content": "[DiMP]  # <tracker-name>\nlabel = DiMP\nprotocol = traxpython\n\ncommand = run_vot; run_vot.run_vot2020('dimp', 'dimp50')  # Set the tracker name and the parameter name\n\n# Specify a path to trax python wrapper if it is not visible (separate by ; if using multiple paths)\npaths = PATH_TO_PYTRACKING\n\n# Additional environment paths\n#env_PATH = <additional-env-paths>;${PATH}\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/VOT/vot.py",
    "content": "\"\"\"\n\\file vot.py\n\n@brief Python utility functions for VOT integration\n\n@author Luka Cehovin, Alessio Dore\n\n@date 2016, 2019\n\n\"\"\"\n\nimport sys\nimport copy\nimport collections\n\ntry:\n    import trax\nexcept ImportError:\n    raise Exception('TraX support not found. Please add trax module to Python path.')\n\nRectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])\nPoint = collections.namedtuple('Point', ['x', 'y'])\nPolygon = collections.namedtuple('Polygon', ['points'])\n\nclass VOT(object):\n    \"\"\" Base class for Python VOT integration \"\"\"\n    def __init__(self, region_format, channels=None):\n        \"\"\" Constructor\n\n        Args:\n            region_format: Region format options\n        \"\"\"\n        assert(region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON])\n\n        if channels is None:\n            channels = ['color']\n        elif channels == 'rgbd':\n            channels = ['color', 'depth']\n        elif channels == 'rgbt':\n            channels = ['color', 'ir']\n        elif channels == 'ir':\n            channels = ['ir']\n        else:\n            raise Exception('Illegal configuration {}.'.format(channels))\n\n        self._trax = trax.Server([region_format], [trax.Image.PATH], channels)\n\n        request = self._trax.wait()\n        assert(request.type == 'initialize')\n        if isinstance(request.region, trax.Polygon):\n            self._region = Polygon([Point(x[0], x[1]) for x in request.region])\n        else:\n            self._region = Rectangle(*request.region.bounds())\n        self._image = [str(x) for k, x in request.image.items()]\n        if len(self._image) == 1:\n            self._image = self._image[0]\n        self._trax.status(request.region)\n\n    def region(self):\n        \"\"\"\n        Send configuration message to the client and receive the initialization\n        region and the path of the first image\n\n        Returns:\n            initialization region\n        \"\"\"\n\n        return self._region\n\n    def report(self, region, confidence = None):\n        \"\"\"\n        Report the tracking results to the client\n\n        Arguments:\n            region: region for the frame\n        \"\"\"\n        assert(isinstance(region, Rectangle) or isinstance(region, Polygon))\n        if isinstance(region, Polygon):\n            tregion = trax.Polygon.create([(x.x, x.y) for x in region.points])\n        else:\n            tregion = trax.Rectangle.create(region.x, region.y, region.width, region.height)\n        properties = {}\n        if not confidence is None:\n            properties['confidence'] = confidence\n        self._trax.status(tregion, properties)\n\n    def frame(self):\n        \"\"\"\n        Get a frame (image path) from client\n\n        Returns:\n            absolute path of the image\n        \"\"\"\n        if hasattr(self, \"_image\"):\n            image = self._image\n            del self._image\n            return tuple(image)\n\n        request = self._trax.wait()\n\n        if request.type == 'frame':\n            image = [str(x) for k, x in request.image.items()]\n            if len(image) == 1:\n                image = image[0]\n            return tuple(image)\n        else:\n            return None\n\n\n    def quit(self):\n        if hasattr(self, '_trax'):\n            self._trax.quit()\n\n    def __del__(self):\n        self.quit()\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/VOT2020_super_only_mask_384_HP/dimp_alpha_065.py",
    "content": "from pytracking.VOT2020_super_only_mask_384_HP.dimp_alpha_seg_class import run_vot_exp\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n# run_vot_exp('dimp','dimp50_vot19','SEbcm',0.60,VIS=False)\nrun_vot_exp('dimp','super_dimp','ARcm_coco_seg_only_mask_384',0.65,VIS=False)\n# run_vot_exp('dimp','super_dimp','ARcm_coco_seg_only_mask_384',0.65,VIS=True)"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/VOT2020_super_only_mask_384_HP/dimp_alpha_seg_class.py",
    "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport cv2\nimport torch\nimport vot\nimport sys\nimport time\n\n'''Refine module & Pytracking base trackers'''\nimport os\nfrom pytracking.evaluation import Tracker\nfrom pytracking.ARcm_seg import ARcm_seg\nfrom pytracking.vot20_utils import *\n\n''''''\n'''DiMP-alpha class'''\n\n\nclass DIMP_ALPHA(object):\n    def __init__(self, tracker_name='dimp', para_name='dimp50_vot19',\n                 refine_model_name='ARcm_coco_seg', threshold=0.15):\n        self.THRES = threshold\n        '''create tracker'''\n        '''DIMP'''\n        tracker_info = Tracker(tracker_name, para_name, None)\n        params = tracker_info.get_parameters()\n        params.visualization = False\n        params.debug = False\n        params.visdom_info = {'use_visdom': False, 'server': '127.0.0.1', 'port': 8097}\n        self.dimp = tracker_info.tracker_class(params)\n        '''Alpha-Refine'''\n        project_path = os.path.join(os.path.dirname(__file__), '..', '..')\n        refine_root = os.path.join(project_path, 'ltr/checkpoints/ltr/ARcm_seg/')\n        refine_path = os.path.join(refine_root, refine_model_name)\n        '''2020.4.25 input size: 384x384'''\n        self.alpha = ARcm_seg(refine_path, input_sz=384)\n\n    def initialize(self, img_RGB, mask):\n        region = rect_from_mask(mask)\n        self.H, self.W, _ = img_RGB.shape\n        gt_bbox_np = np.array(region).astype(np.float32)\n        '''Initialize dimp for specific video'''\n        gt_bbox_torch = torch.from_numpy(gt_bbox_np)\n        init_info = {}\n        init_info['init_bbox'] = gt_bbox_torch\n        _ = self.dimp.initialize(img_RGB, init_info)\n        '''initilize refinement module for specific video'''\n        self.alpha.initialize(img_RGB, np.array(gt_bbox_np))\n\n    def track(self, img_RGB):\n        '''TRACK'''\n        '''base tracker'''\n        outputs = self.dimp.track(img_RGB)\n        pred_bbox = outputs['target_bbox']\n        '''Step1: Post-Process'''\n        x1, y1, w, h = pred_bbox\n        # add boundary and min size limit\n        x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (self.H, self.W))\n        w = x2 - x1\n        h = y2 - y1\n        new_pos = torch.from_numpy(np.array([y1 + h / 2, x1 + w / 2]).astype(np.float32))\n        new_target_sz = torch.from_numpy(np.array([h, w]).astype(np.float32))\n        new_scale = torch.sqrt(new_target_sz.prod() / self.dimp.base_target_sz.prod())\n        ##### update\n        self.dimp.pos = new_pos.clone()\n        self.dimp.target_sz = new_target_sz\n        self.dimp.target_scale = new_scale\n        bbox_new = [x1, y1, w, h]\n        '''Step2: Mask report'''\n        pred_mask, search, search_mask = self.alpha.get_mask(img_RGB, np.array(bbox_new), vis=True)\n        final_mask = (pred_mask > self.THRES).astype(np.uint8)\n        search_region = search.astype(np.uint8)\n        search_mask = (search_mask > self.THRES).astype(np.uint8)\n        return bbox_new, final_mask, search_region, search_mask\n\n\ndef run_vot_exp(tracker_name, para_name, refine_model_name, threshold, VIS=False):\n    torch.set_num_threads(1)\n    # torch.cuda.set_device(CUDA_ID)  # set GPU id\n    save_root = os.path.join('<SAVE_DIR>', para_name)\n    if VIS and (not os.path.exists(save_root)):\n        os.mkdir(save_root)\n    tracker = DIMP_ALPHA(tracker_name=tracker_name, para_name=para_name,\n                         
refine_model_name=refine_model_name, threshold=threshold)\n    handle = vot.VOT(\"mask\")\n    selection = handle.region()\n    imagefile = handle.frame()\n    if not imagefile:\n        sys.exit(0)\n    if VIS:\n        '''for vis'''\n        seq_name = imagefile.split('/')[-3]\n        save_v_dir = os.path.join(save_root, seq_name)\n        if not os.path.exists(save_v_dir):\n            os.mkdir(save_v_dir)\n        cur_time = int(time.time() % 10000)\n        save_dir = os.path.join(save_v_dir, str(cur_time))\n        if not os.path.exists(save_dir):\n            os.makedirs(save_dir)\n\n    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right\n    # mask given by the toolkit ends with the target (zero-padding to the right and down is needed)\n    mask = make_full_size(selection, (image.shape[1], image.shape[0]))\n    tracker.initialize(image, mask)\n\n    while True:\n        imagefile = handle.frame()\n        if not imagefile:\n            break\n        image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right\n        b1, m, search, search_m = tracker.track(image)\n        handle.report(m)\n        if VIS:\n            '''Visualization'''\n            # original image\n            image_ori = image[:, :, ::-1].copy()  # RGB --> BGR\n            image_name = imagefile.split('/')[-1]\n            save_path = os.path.join(save_dir, image_name)\n            cv2.imwrite(save_path, image_ori)\n            # dimp box\n            image_b = image_ori.copy()\n            cv2.rectangle(image_b, (int(b1[0]), int(b1[1])),\n                          (int(b1[0] + b1[2]), int(b1[1] + b1[3])), (0, 0, 255), 2)\n            image_b_name = image_name.replace('.jpg', '_bbox.jpg')\n            save_path = os.path.join(save_dir, image_b_name)\n            cv2.imwrite(save_path, image_b)\n            # search region\n            search_bgr = search[:, :, ::-1].copy()\n            search_name = image_name.replace('.jpg', '_search.jpg')\n            save_path = os.path.join(save_dir, search_name)\n            cv2.imwrite(save_path, search_bgr)\n            # search region mask\n            search_bgr_m = search_bgr.astype(np.float32)\n            search_bgr_m[:, :, 1] += 127.0 * search_m\n            search_bgr_m[:, :, 2] += 127.0 * search_m\n            contours, _ = cv2.findContours(search_m, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n            search_bgr_m = cv2.drawContours(search_bgr_m, contours, -1, (0, 255, 255), 4)\n            search_bgr_m = search_bgr_m.clip(0, 255).astype(np.uint8)\n            search_name_m = image_name.replace('.jpg', '_search_mask.jpg')\n            save_path = os.path.join(save_dir, search_name_m)\n            cv2.imwrite(save_path, search_bgr_m)\n            # original image + mask\n            image_m = image_ori.copy().astype(np.float32)\n            image_m[:, :, 1] += 127.0 * m\n            image_m[:, :, 2] += 127.0 * m\n            contours, _ = cv2.findContours(m, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n            image_m = cv2.drawContours(image_m, contours, -1, (0, 255, 255), 2)\n            image_m = image_m.clip(0, 255).astype(np.uint8)\n            image_mask_name_m = image_name.replace('.jpg', '_mask.jpg')\n            save_path = os.path.join(save_dir, image_mask_name_m)\n            cv2.imwrite(save_path, image_m)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/VOT2020_super_only_mask_384_HP/mixformer_alpha_seg_class.py",
    "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport cv2\nimport torch\nimport vot\nimport sys\nimport time\nimport os\nimport numpy as np\nfrom lib.test.tracker.mixformer_online import MixFormerOnline\nfrom pytracking.ARcm_seg import ARcm_seg\nfrom pytracking.vot20_utils import *\n\nimport lib.test.parameter.mixformer_online as vot_params\n\nclass MIXFORMER_ALPHA_SEG(object):\n    def __init__(self, tracker,\n                 refine_model_name='ARcm_coco_seg', threshold=0.6):\n        self.THRES = threshold\n        self.tracker = tracker\n        '''create tracker'''\n        '''Alpha-Refine'''\n        project_path = os.path.join(os.path.dirname(__file__), '..', '..')\n        refine_root = os.path.join(project_path, 'ltr/checkpoints/ltr/ARcm_seg/')\n        refine_path = os.path.join(refine_root, refine_model_name)\n        '''2020.4.25 input size: 384x384'''\n        self.alpha = ARcm_seg(refine_path, input_sz=384)\n\n    def initialize(self, image, mask):\n        region = rect_from_mask(mask)\n        # init_info = {'init_bbox': region}\n        # self.tracker.initialize(image, init_info)\n\n        self.H, self.W, _ = image.shape\n        gt_bbox_np = np.array(region).astype(np.float32)\n        '''Initialize STARK for specific video'''\n        init_info = {'init_bbox': list(gt_bbox_np)}\n        self.tracker.initialize(image, init_info)\n        '''initilize refinement module for specific video'''\n        self.alpha.initialize(image, np.array(gt_bbox_np))\n\n    def track(self, img_RGB):\n        '''TRACK'''\n        '''base tracker'''\n        outputs = self.tracker.track(img_RGB)\n        pred_bbox = outputs['target_bbox']\n        '''Step2: Mask report'''\n        pred_mask, search, search_mask = self.alpha.get_mask(img_RGB, np.array(pred_bbox), vis=True)\n        final_mask = (pred_mask > self.THRES).astype(np.uint8)\n        return final_mask, 1\n\n\ndef make_full_size(x, output_sz):\n    '''\n    zero-pad input x (right and down) to match output_sz\n    x: numpy array e.g., binary mask\n    output_sz: size of the output [width, height]\n    '''\n    if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]:\n        return x\n    pad_x = output_sz[0] - x.shape[1]\n    if pad_x < 0:\n        x = x[:, :x.shape[1] + pad_x]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_x = 0\n    pad_y = output_sz[1] - x.shape[0]\n    if pad_y < 0:\n        x = x[:x.shape[0] + pad_y, :]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_y = 0\n    return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0)\n\n\n\nrefine_model_name = 'ARcm_coco_seg_only_mask_384'\nparams = vot_params.parameters(\"baseline\", model=\"mixformer_online_22k.pth.tar\")\n# params = vot_params.parameters(\"baseline\")\nmixformer = MixFormerOnline(params, \"VOT20\")\ntracker = MIXFORMER_ALPHA_SEG(tracker=mixformer, refine_model_name=refine_model_name)\nhandle = vot.VOT(\"mask\")\nselection = handle.region()\nimagefile = handle.frame()\n\nif not imagefile:\n    sys.exit(0)\n\nimage = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right\n# mask given by the toolkit ends with the target (zero-padding to the right and down is needed)\nmask = make_full_size(selection, (image.shape[1], image.shape[0]))\n\ntracker.H = image.shape[0]\ntracker.W = image.shape[1]\n\ntracker.initialize(image, mask)\n\nwhile 
True:\n    imagefile = handle.frame()\n    if not imagefile:\n        break\n    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right\n    region, confidence = tracker.track(image)\n    handle.report(region, confidence)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/VOT2020_super_only_mask_384_HP/mixformer_large_alpha_seg_class.py",
    "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport cv2\nimport torch\nimport vot\nimport sys\nimport time\nimport os\nimport numpy as np\nfrom lib.test.tracker.mixformer_online import MixFormerOnline\nfrom pytracking.ARcm_seg import ARcm_seg\nfrom pytracking.vot20_utils import *\n\nimport lib.test.parameter.mixformer_online as vot_params\n\nclass MIXFORMER_ALPHA_SEG(object):\n    def __init__(self, tracker,\n                 refine_model_name='ARcm_coco_seg', threshold=0.6):\n        self.THRES = threshold\n        self.tracker = tracker\n        '''create tracker'''\n        '''Alpha-Refine'''\n        project_path = os.path.join(os.path.dirname(__file__), '..', '..')\n        refine_root = os.path.join(project_path, 'ltr/checkpoints/ltr/ARcm_seg/')\n        refine_path = os.path.join(refine_root, refine_model_name)\n        '''2020.4.25 input size: 384x384'''\n        self.alpha = ARcm_seg(refine_path, input_sz=384)\n\n    def initialize(self, image, mask):\n        region = rect_from_mask(mask)\n        # init_info = {'init_bbox': region}\n        # self.tracker.initialize(image, init_info)\n\n        self.H, self.W, _ = image.shape\n        gt_bbox_np = np.array(region).astype(np.float32)\n        '''Initialize STARK for specific video'''\n        init_info = {'init_bbox': list(gt_bbox_np)}\n        self.tracker.initialize(image, init_info)\n        '''initilize refinement module for specific video'''\n        self.alpha.initialize(image, np.array(gt_bbox_np))\n\n    def track(self, img_RGB):\n        '''TRACK'''\n        '''base tracker'''\n        outputs = self.tracker.track(img_RGB)\n        pred_bbox = outputs['target_bbox']\n        '''Step2: Mask report'''\n        pred_mask, search, search_mask = self.alpha.get_mask(img_RGB, np.array(pred_bbox), vis=True)\n        final_mask = (pred_mask > self.THRES).astype(np.uint8)\n        return final_mask, 1\n\n\ndef make_full_size(x, output_sz):\n    '''\n    zero-pad input x (right and down) to match output_sz\n    x: numpy array e.g., binary mask\n    output_sz: size of the output [width, height]\n    '''\n    if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]:\n        return x\n    pad_x = output_sz[0] - x.shape[1]\n    if pad_x < 0:\n        x = x[:, :x.shape[1] + pad_x]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_x = 0\n    pad_y = output_sz[1] - x.shape[0]\n    if pad_y < 0:\n        x = x[:x.shape[0] + pad_y, :]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_y = 0\n    return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0)\n\n\n\nrefine_model_name = 'ARcm_coco_seg_only_mask_384'\n# params = vot_params.parameters(\"baseline_large\")\nparams = vot_params.parameters(\"baseline_large\", model=\"mixformerL_online_22k.pth.tar\")\nmixformer = MixFormerOnline(params, \"VOT20\")\ntracker = MIXFORMER_ALPHA_SEG(tracker=mixformer, refine_model_name=refine_model_name)\nhandle = vot.VOT(\"mask\")\nselection = handle.region()\nimagefile = handle.frame()\n\nif not imagefile:\n    sys.exit(0)\n\nimage = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right\n# mask given by the toolkit ends with the target (zero-padding to the right and down is needed)\nmask = make_full_size(selection, (image.shape[1], image.shape[0]))\n\ntracker.H = image.shape[0]\ntracker.W = image.shape[1]\n\ntracker.initialize(image, 
mask)\n\nwhile True:\n    imagefile = handle.frame()\n    if not imagefile:\n        break\n    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right\n    region, confidence = tracker.track(image)\n    handle.report(region, confidence)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/VOT2020_super_only_mask_384_HP/vot.py",
    "content": "\"\"\"\n\\file vot.py\n@brief Python utility functions for VOT integration\n@author Luka Cehovin, Alessio Dore\n@date 2016\n\"\"\"\n\nimport sys\nimport copy\nimport collections\nimport numpy as np\n\ntry:\n    import trax\nexcept ImportError:\n    raise Exception('TraX support not found. Please add trax module to Python path.')\n\nRectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])\nPoint = collections.namedtuple('Point', ['x', 'y'])\nPolygon = collections.namedtuple('Polygon', ['points'])\n\nclass VOT(object):\n    \"\"\" Base class for Python VOT integration \"\"\"\n    def __init__(self, region_format, channels=None):\n        \"\"\" Constructor\n        Args:\n            region_format: Region format options\n        \"\"\"\n        assert(region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON, trax.Region.MASK])\n\n        if channels is None:\n            channels = ['color']\n        elif channels == 'rgbd':\n            channels = ['color', 'depth']\n        elif channels == 'rgbt':\n            channels = ['color', 'ir']\n        elif channels == 'ir':\n            channels = ['ir']\n        else:\n            raise Exception('Illegal configuration {}.'.format(channels))\n\n        self._trax = trax.Server([region_format], [trax.Image.PATH], channels, customMetadata=dict(vot=\"python\"))\n\n        request = self._trax.wait()\n        assert(request.type == 'initialize')\n        if isinstance(request.region, trax.Polygon):\n            self._region = Polygon([Point(x[0], x[1]) for x in request.region])\n        elif isinstance(request.region, trax.Mask):\n            self._region = request.region.array(True)\n        else:\n            self._region = Rectangle(*request.region.bounds())\n        self._image = [x.path() for k, x in request.image.items()]\n        if len(self._image) == 1:\n            self._image = self._image[0]\n\n        self._trax.status(request.region)\n\n    def region(self):\n        \"\"\"\n        Send configuration message to the client and receive the initialization\n        region and the path of the first image\n        Returns:\n            initialization region\n        \"\"\"\n\n        return self._region\n\n    def report(self, region, confidence = None):\n        \"\"\"\n        Report the tracking results to the client\n        Arguments:\n            region: region for the frame\n        \"\"\"\n        assert(isinstance(region, (Rectangle, Polygon, np.ndarray)))\n        if isinstance(region, Polygon):\n            tregion = trax.Polygon.create([(x.x, x.y) for x in region.points])\n        elif isinstance(region, np.ndarray):\n            tregion = trax.Mask.create(region)\n        else:\n            tregion = trax.Rectangle.create(region.x, region.y, region.width, region.height)\n        properties = {}\n        if not confidence is None:\n            properties['confidence'] = confidence\n        self._trax.status(tregion, properties)\n\n    def frame(self):\n        \"\"\"\n        Get a frame (image path) from client\n        Returns:\n            absolute path of the image\n        \"\"\"\n        if hasattr(self, \"_image\"):\n            image = self._image\n            del self._image\n            return image\n\n        request = self._trax.wait()\n\n        if request.type == 'frame':\n            image = [x.path() for k, x in request.image.items()]\n            if len(image) == 1:\n                return image[0]\n            return image\n        else:\n            return None\n\n\n 
   def quit(self):\n        if hasattr(self, '_trax'):\n            self._trax.quit()\n\n    def __del__(self):\n        self.quit()\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/__init__.py",
    "content": "from pytracking.libs import TensorList, TensorDict\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/analysis/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/analysis/evaluate_vos.py",
    "content": "import os\nimport numpy as np\nimport torch\nimport pandas as pd\nfrom collections import OrderedDict\nfrom ltr.data.image_loader import imread_indexed\nfrom pytracking.evaluation import get_dataset\nfrom pathlib import Path\nfrom pytracking.analysis.plot_results import generate_formatted_report\n\nimport pytracking.analysis.vos_utils as utils\n\n# Originally db_eval_sequence() in the davis challenge toolkit:\ndef evaluate_sequence(seq_name, segmentations, annotations, object_info, measure='J'):\n    \"\"\"\n    Evaluate video sequence results.\n\n      Arguments:\n          segmentations (dict of ndarray): segmentation labels.\n          annotations   (dict of ndarray): ground-truth labels.\n          object_info   dict: {object_id: first_frame_index}\n\n      measure       evaluation metric (J,F)\n    \"\"\"\n\n    results = dict(raw=OrderedDict())\n\n    _measures = {'J': utils.davis_jaccard_measure, 'F': utils.davis_f_measure}\n    _statistics = {'decay': utils.decay, 'mean': utils.mean, 'recall': utils.recall, 'std': utils.std}\n\n    for obj_id, first_frame in object_info.items():\n\n        r = np.ones((len(annotations))) * np.nan\n\n        for i, (an, sg) in enumerate(zip(annotations, segmentations)):\n            if list(annotations.keys()).index(first_frame) < i < len(annotations) - 1:\n                r[i] = _measures[measure](annotations[an] == obj_id, segmentations[sg] == obj_id)\n\n        results['raw'][obj_id] = r\n\n    for stat, stat_fn in _statistics.items():\n        results[stat] = [float(stat_fn(r)) for r in results['raw'].values()]\n\n    return results\n\n\ndef evaluate_dataset(results_path, dset_name, measure='J', to_file=True, scores=False, sequences=None, quiet=False):\n    dset = get_dataset(dset_name)\n    results = OrderedDict()\n    dset_scores = []\n    dset_decay = []\n    dset_recall = []\n\n    if to_file:\n        f = open(results_path / (\"evaluation-%s.txt\" % measure), \"w\")\n\n    def _print(msg):\n        if not quiet:\n            print(msg)\n        if to_file:\n            print(msg, file=f)\n\n    if sequences is not None:\n        sequences = [sequences] if not isinstance(sequences, (list, tuple)) else sequences\n\n    target_names = []\n    for j, sequence in enumerate(dset):\n        if (sequences is not None) and (sequence.name not in sequences):\n            continue\n\n        # Load all frames\n        frames = sequence.ground_truth_seg\n\n        annotations = OrderedDict()\n        segmentations = OrderedDict()\n\n        for f in frames:\n            if f is None:\n                continue\n\n            file = Path(f)\n            annotations[file.name] = imread_indexed(file)\n            if not scores:\n                segmentations[file.name] = imread_indexed(os.path.join(results_path, sequence.name, file.name))\n            else:\n                raise NotImplementedError\n        # Find object ids and starting frames\n\n        object_info = dict()\n\n        for f_id, d in sequence.init_data.items():\n            for obj_id in d['object_ids']:\n                object_info[int(obj_id)] = Path(d['mask']).name\n\n        if 0 in object_info:  # Remove background\n            object_info.pop(0)\n\n        # Evaluate\n        n_seqs = len(dset)\n        n_objs = len(object_info)\n        seq_name = sequence.name\n\n        _print(\"%d/%d: %s: %d object%s\" % (j + 1, n_seqs, seq_name, n_objs, \"s\" if n_objs > 1 else \"\"))\n        r = evaluate_sequence(seq_name, segmentations, annotations, object_info, 
measure=measure)\n        results[seq_name] = r\n\n        # Print scores, per frame and object, ignoring NaNs\n\n        per_obj_score = []  # Per-object accuracies, averaged over the sequence\n        per_frame_score = []  # Per-frame accuracies, averaged over the objects\n\n        for obj_id, score in r['raw'].items():\n            target_names.append('{}_{}'.format(seq_name, obj_id))\n            per_frame_score.append(score)\n            s = utils.mean(score)  # Sequence average for one object\n            per_obj_score.append(s)\n            if n_objs > 1:\n                _print(\"joint {obj}: acc {score:.3f} ┊{apf}┊\".format(obj=obj_id, score=s, apf=utils.text_bargraph(score)))\n\n        # Print mean object score per frame and final score\n        dset_decay.extend(r['decay'])\n        dset_recall.extend(r['recall'])\n        dset_scores.extend(per_obj_score)\n\n        seq_score = utils.mean(per_obj_score)  # Final score\n        seq_mean_score = utils.nanmean(np.array(per_frame_score), axis=0)  # Mean object score per frame\n\n        # Print sequence results\n        _print(\"final  : acc {seq:.3f} ({dset:.3f}) ┊{apf}┊\".format(\n            seq=seq_score, dset=np.mean(dset_scores), apf=utils.text_bargraph(seq_mean_score)))\n\n    _print(\"%s: %.3f, recall: %.3f, decay: %.3f\" % (measure, utils.mean(dset_scores), utils.mean(dset_recall), utils.mean(dset_decay)))\n\n    if to_file:\n        f.close()\n\n    return target_names, dset_scores, dset_recall, dset_decay\n\n\ndef evaluate_vos(trackers, dataset='yt2019_jjval', force=False):\n    \"\"\" evaluate a list of trackers on a vos dataset.\n\n    args:\n        trackers - list of trackers to evaluate\n        dataset - name of the dataset\n        force - Force re-evaluation. If False, the pre-computed results are loaded if available\n    \"\"\"\n    csv_name_global = f'{dataset}_global_results.csv'\n    csv_name_per_sequence = f'{dataset}_per-sequence_results.csv'\n\n    table_g_all = []\n    table_seq_all = []\n    scores = {'J-Mean': [], 'J-Recall': [], 'J-Decay': []}\n    display_names = []\n    for t in trackers:\n        if t.display_name is not None:\n            disp_name = t.display_name\n        elif t.run_id is not None:\n            disp_name = '{} {}_{:03d}'.format(t.name, t.parameter_name, t.run_id)\n        else:\n            disp_name = '{} {}'.format(t.name, t.parameter_name)\n\n        display_names.append(disp_name)\n        results_path = t.segmentation_dir\n\n        csv_name_global_path = os.path.join(results_path, csv_name_global)\n        csv_name_per_sequence_path = os.path.join(results_path, csv_name_per_sequence)\n        if os.path.exists(csv_name_global_path) and os.path.exists(csv_name_per_sequence_path) and not force:\n            table_g = pd.read_csv(csv_name_global_path)\n            table_seq = pd.read_csv(csv_name_per_sequence_path)\n        else:\n            seq_names, dset_scores, dset_recall, dset_decay = evaluate_dataset(results_path, dataset, measure='J',\n                                                                               to_file=False, scores=False,\n                                                                               sequences=None)\n            g_measures = ['J-Mean', 'J-Recall', 'J-Decay']\n            g_res = np.array([utils.mean(dset_scores), utils.mean(dset_recall), utils.mean(dset_decay)])\n            g_res = np.reshape(g_res, [1, len(g_res)])\n\n            table_g = pd.DataFrame(data=g_res, columns=g_measures)\n            with open(csv_name_global_path, 
'w') as f:\n                table_g.to_csv(f, index=False, float_format=\"%.3f\")\n\n            seq_measures = ['Sequence', 'J-Mean', 'J-Recall', 'J-Decay']\n\n            table_seq = pd.DataFrame(data=list(zip(seq_names, dset_scores, dset_recall, dset_decay)), columns=seq_measures)\n            with open(csv_name_per_sequence_path, 'w') as f:\n                table_seq.to_csv(f, index=False, float_format=\"%.3f\")\n\n        scores['J-Mean'].append(table_g['J-Mean'].values[0]*100)\n        scores['J-Recall'].append(table_g['J-Recall'].values[0]*100)\n        scores['J-Decay'].append(table_g['J-Decay'].values[0]*100)\n\n        table_g_all.append(table_g)\n        table_seq_all.append(table_seq)\n\n    report = generate_formatted_report(display_names, scores)\n    print(report)\n\n    return table_g_all, table_seq_all\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/analysis/extract_results.py",
    "content": "import os\nimport sys\nimport importlib\nimport numpy as np\nfrom pytracking.utils.load_text import load_text\nimport torch\nimport pickle\nfrom tqdm import tqdm\n\nenv_path = os.path.join(os.path.dirname(__file__), '../..')\nif env_path not in sys.path:\n    sys.path.append(env_path)\n\nfrom pytracking.evaluation.environment import env_settings\n\n\ndef calc_err_center(pred_bb, anno_bb, normalized=False):\n    pred_center = pred_bb[:, :2] + 0.5 * (pred_bb[:, 2:] - 1.0)\n    anno_center = anno_bb[:, :2] + 0.5 * (anno_bb[:, 2:] - 1.0)\n\n    if normalized:\n        pred_center = pred_center / anno_bb[:, 2:]\n        anno_center = anno_center / anno_bb[:, 2:]\n\n    err_center = ((pred_center - anno_center)**2).sum(1).sqrt()\n    return err_center\n\n\ndef calc_iou_overlap(pred_bb, anno_bb):\n    tl = torch.max(pred_bb[:, :2], anno_bb[:, :2])\n    br = torch.min(pred_bb[:, :2] + pred_bb[:, 2:] - 1.0, anno_bb[:, :2] + anno_bb[:, 2:] - 1.0)\n    sz = (br - tl + 1.0).clamp(0)\n\n    # Area\n    intersection = sz.prod(dim=1)\n    union = pred_bb[:, 2:].prod(dim=1) + anno_bb[:, 2:].prod(dim=1) - intersection\n\n    return intersection / union\n\n\ndef calc_seq_err_robust(pred_bb, anno_bb, dataset, target_visible=None):\n    pred_bb = pred_bb.clone()\n\n    # Check if invalid values are present\n    if torch.isnan(pred_bb).any() or (pred_bb[:, 2:] < 0.0).any():\n        raise Exception('Error: Invalid results')\n\n    if torch.isnan(anno_bb).any():\n        if dataset == 'uav':\n            pass\n        else:\n            raise Exception('Warning: NaNs in annotation')\n\n    if (pred_bb[:, 2:] == 0.0).any():\n        for i in range(1, pred_bb.shape[0]):\n            if (pred_bb[i, 2:] == 0.0).any() and not torch.isnan(anno_bb[i, :]).any():\n                pred_bb[i, :] = pred_bb[i-1, :]\n\n    if pred_bb.shape[0] != anno_bb.shape[0]:\n        if dataset == 'lasot':\n            if pred_bb.shape[0] > anno_bb.shape[0]:\n                # For monkey-17, there is a mismatch for some trackers.\n                pred_bb = pred_bb[:anno_bb.shape[0], :]\n            else:\n                raise Exception('Mis-match in tracker prediction and GT lengths')\n        else:\n            # print('Warning: Mis-match in tracker prediction and GT lengths')\n            if pred_bb.shape[0] > anno_bb.shape[0]:\n                pred_bb = pred_bb[:anno_bb.shape[0], :]\n            else:\n                pad = torch.zeros((anno_bb.shape[0] - pred_bb.shape[0], 4)).type_as(pred_bb)\n                pred_bb = torch.cat((pred_bb, pad), dim=0)\n\n    pred_bb[0, :] = anno_bb[0, :]\n\n    if target_visible is not None:\n        target_visible = target_visible.bool()\n        valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2) & target_visible\n    else:\n        valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2)\n\n    err_center = calc_err_center(pred_bb, anno_bb)\n    err_center_normalized = calc_err_center(pred_bb, anno_bb, normalized=True)\n    err_overlap = calc_iou_overlap(pred_bb, anno_bb)\n\n    # handle invalid anno cases\n    if dataset in ['uav']:\n        err_center[~valid] = -1.0\n    else:\n        err_center[~valid] = float(\"Inf\")\n    err_center_normalized[~valid] = -1.0\n    err_overlap[~valid] = -1.0\n\n    if dataset == 'lasot':\n        err_center_normalized[~target_visible] = float(\"Inf\")\n        err_center[~target_visible] = float(\"Inf\")\n\n    if torch.isnan(err_overlap).any():\n        raise Exception('Nans in calculated overlap')\n    return err_overlap, err_center, err_center_normalized, 
valid\n\n\ndef extract_results(trackers, dataset, report_name, skip_missing_seq=False, plot_bin_gap=0.05,\n                    exclude_invalid_frames=False):\n    settings = env_settings()\n    eps = 1e-16\n\n    result_plot_path = os.path.join(settings.result_plot_path, report_name)\n\n    if not os.path.exists(result_plot_path):\n        os.makedirs(result_plot_path)\n\n    threshold_set_overlap = torch.arange(0.0, 1.0 + plot_bin_gap, plot_bin_gap, dtype=torch.float64)\n    threshold_set_center = torch.arange(0, 51, dtype=torch.float64)\n    threshold_set_center_norm = torch.arange(0, 51, dtype=torch.float64) / 100.0\n\n    avg_overlap_all = torch.zeros((len(dataset), len(trackers)), dtype=torch.float64)\n    ave_success_rate_plot_overlap = torch.zeros((len(dataset), len(trackers), threshold_set_overlap.numel()),\n                                                dtype=torch.float32)\n    ave_success_rate_plot_center = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),\n                                               dtype=torch.float32)\n    ave_success_rate_plot_center_norm = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),\n                                                    dtype=torch.float32)\n\n    valid_sequence = torch.ones(len(dataset), dtype=torch.uint8)\n\n    for seq_id, seq in enumerate(tqdm(dataset)):\n        # Load anno\n        anno_bb = torch.tensor(seq.ground_truth_rect)\n        target_visible = torch.tensor(seq.target_visible, dtype=torch.uint8) if seq.target_visible is not None else None\n        for trk_id, trk in enumerate(trackers):\n            # Load results\n            base_results_path = '{}/{}'.format(trk.results_dir, seq.name)\n            results_path = '{}.txt'.format(base_results_path)\n\n            if os.path.isfile(results_path):\n                pred_bb = torch.tensor(load_text(str(results_path), delimiter=('\\t', ','), dtype=np.float64))\n            else:\n                if skip_missing_seq:\n                    valid_sequence[seq_id] = 0\n                    break\n                else:\n                    raise Exception('Result not found. 
{}'.format(results_path))\n\n            # Calculate measures\n            err_overlap, err_center, err_center_normalized, valid_frame = calc_seq_err_robust(\n                pred_bb, anno_bb, seq.dataset, target_visible)\n\n            avg_overlap_all[seq_id, trk_id] = err_overlap[valid_frame].mean()\n\n            if exclude_invalid_frames:\n                seq_length = valid_frame.long().sum()\n            else:\n                seq_length = anno_bb.shape[0]\n\n            if seq_length <= 0:\n                raise Exception('Seq length zero')\n\n            ave_success_rate_plot_overlap[seq_id, trk_id, :] = (err_overlap.view(-1, 1) > threshold_set_overlap.view(1, -1)).sum(0).float() / seq_length\n            ave_success_rate_plot_center[seq_id, trk_id, :] = (err_center.view(-1, 1) <= threshold_set_center.view(1, -1)).sum(0).float() / seq_length\n            ave_success_rate_plot_center_norm[seq_id, trk_id, :] = (err_center_normalized.view(-1, 1) <= threshold_set_center_norm.view(1, -1)).sum(0).float() / seq_length\n\n    print('\\n\\nComputed results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))\n\n    # Prepare dictionary for saving data\n    seq_names = [s.name for s in dataset]\n    tracker_names = [{'name': t.name, 'param': t.parameter_name, 'run_id': t.run_id, 'disp_name': t.display_name}\n                     for t in trackers]\n\n    eval_data = {'sequences': seq_names, 'trackers': tracker_names,\n                 'valid_sequence': valid_sequence.tolist(),\n                 'ave_success_rate_plot_overlap': ave_success_rate_plot_overlap.tolist(),\n                 'ave_success_rate_plot_center': ave_success_rate_plot_center.tolist(),\n                 'ave_success_rate_plot_center_norm': ave_success_rate_plot_center_norm.tolist(),\n                 'avg_overlap_all': avg_overlap_all.tolist(),\n                 'threshold_set_overlap': threshold_set_overlap.tolist(),\n                 'threshold_set_center': threshold_set_center.tolist(),\n                 'threshold_set_center_norm': threshold_set_center_norm.tolist()}\n\n    with open(result_plot_path + '/eval_data.pkl', 'wb') as fh:\n        pickle.dump(eval_data, fh)\n\n    return eval_data\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/analysis/playback_results.py",
    "content": "import os\nimport sys\nimport importlib\nimport numpy as np\nimport torch\nimport time\nimport matplotlib.patches as patches\nimport cv2 as cv\nimport matplotlib.pyplot as plt\nfrom pytracking.analysis.plot_results import get_plot_draw_styles\nfrom pytracking.utils.plotting import draw_figure\nfrom pytracking.evaluation import get_dataset, trackerlist\n\nenv_path = os.path.join(os.path.dirname(__file__), '../..')\nif env_path not in sys.path:\n    sys.path.append(env_path)\n\n\nclass Display:\n    def __init__(self, sequence_length, plot_draw_styles, sequence_name):\n        self.active = True\n        self.frame_number = 0\n        self.pause_mode = True\n        self.step_size = 0\n        self.step_direction = 'forward'\n        self.fig, self.ax = plt.subplots(1)\n        self.fig.canvas.mpl_connect('key_press_event', self.key_callback_fn)\n        plt.tight_layout()\n\n        self.sequence_length = sequence_length\n        self.sequence_name = sequence_name\n        self.plot_draw_styles = plot_draw_styles\n\n    def key_callback_fn(self, event):\n        if event.key == ' ':\n            self.pause_mode = not self.pause_mode\n            self.step_size = 0\n            self.step_direction = 'forward'\n        elif event.key == 'right':\n            if self.pause_mode:\n                self.frame_number += 1\n\n                if self.frame_number >= self.sequence_length:\n                    self.frame_number = self.sequence_length - 1\n            elif self.step_direction == 'stop':\n                self.step_direction = 'forward'\n                self.step_size = 0\n            elif self.step_direction == 'backward' and self.step_size == 0:\n                self.step_direction = 'stop'\n            else:\n                self.step_size += 1\n        elif event.key == 'left':\n            if self.pause_mode:\n                self.frame_number -= 1\n\n                if self.frame_number < 0:\n                    self.frame_number = 0\n            elif self.step_direction == 'stop':\n                self.step_direction = 'backward'\n                self.step_size = 0\n            elif self.step_direction == 'forward' and self.step_size == 0:\n                self.step_direction = 'stop'\n            else:\n                self.step_size -= 1\n        elif event.key == 'escape' or event.key == 'q':\n            self.active = False\n\n    def _get_speed(self):\n        delta = 0\n        if self.step_direction == 'forward':\n            delta = 2 ** abs(self.step_size)\n        elif self.step_direction == 'backward':\n            delta = -1 * 2 ** abs(self.step_size)\n\n        return delta\n\n    def step(self):\n        delta = self._get_speed()\n\n        self.frame_number += delta\n        if self.frame_number < 0:\n            self.frame_number = 0\n        elif self.frame_number >= self.sequence_length:\n            self.frame_number = self.sequence_length - 1\n\n    def show(self, image, bb_list, trackers, gt=None):\n        self.ax.cla()\n        self.ax.imshow(image)\n\n        # Draw rects\n        rect_handles = []\n        for i, bb in enumerate(bb_list):\n            rect = patches.Rectangle((bb[0], bb[1]), bb[2], bb[3], linewidth=1,\n                                     edgecolor=self.plot_draw_styles[i]['color'], facecolor='none')\n            self.ax.add_patch(rect)\n\n            rect_handles.append(patches.Rectangle((bb[0], bb[1]), bb[2], bb[3], linewidth=1,\n                                     edgecolor=self.plot_draw_styles[i]['color'],\n           
                          facecolor=self.plot_draw_styles[i]['color'],\n                                     label=trackers[i]))\n\n        if gt is not None:\n            rect = patches.Rectangle((gt[0], gt[1]), gt[2], gt[3], linewidth=2, edgecolor='g',\n                                     facecolor='none')\n            self.ax.add_patch(rect)\n            rect_handles.append(rect)\n\n        self.ax.set_axis_off()\n        self.ax.axis('equal')\n        plt.legend(handles=rect_handles, loc=4, borderaxespad=0.)\n        mode = 'manual' if self.pause_mode else 'auto     '\n        speed = self._get_speed()\n        self.fig.suptitle('Sequence: {}    Mode: {}    Speed: {:d}x'.format(self.sequence_name, mode, speed),\n                          fontsize=14)\n        draw_figure(self.fig)\n\n\ndef read_image(image_file: str):\n    im = cv.imread(image_file)\n    return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n\n\ndef _get_display_name(tracker):\n    if tracker.display_name is None:\n        if tracker.run_id is not None:\n            return '{}_{}_{:03d}'.format(tracker.name, tracker.parameter_name, tracker.run_id)\n        else:\n            return '{}_{}'.format(tracker.name, tracker.parameter_name)\n    else:\n        return tracker.display_name\n\n\ndef playback_results(trackers, sequence):\n    \"\"\"\n    Play back the saved results of the input trackers for a particular sequence. Navigate the sequence using the\n    left/right arrow keys. Press the space bar to switch to 'auto' mode, in which the sequence is replayed at a set\n    speed; in 'auto' mode the playback speed is controlled with the left/right arrow keys. Exit using the escape or\n    q keys.\n    \"\"\"\n    plot_draw_styles = get_plot_draw_styles()\n\n    tracker_results = []\n    # Load results\n    for trk_id, trk in enumerate(trackers):\n        base_results_path = '{}/{}'.format(trk.results_dir, sequence.name)\n        results_path = '{}.txt'.format(base_results_path)\n\n        if os.path.isfile(results_path):\n            try:\n                pred_bb = torch.tensor(np.loadtxt(str(results_path), dtype=np.float64))\n            except ValueError:\n                # Fall back to comma-separated result files\n                pred_bb = torch.tensor(np.loadtxt(str(results_path), delimiter=',', dtype=np.float64))\n        else:\n            raise Exception('Result not found. {}'.format(results_path))\n\n        tracker_results.append(pred_bb)\n\n    # Convert to list of shape seq_length * num_trackers * 4\n    tracker_results = torch.stack(tracker_results, dim=1).tolist()\n    tracker_names = [_get_display_name(t) for t in trackers]\n\n    display = Display(len(tracker_results), plot_draw_styles, sequence.name)\n\n    while display.active:\n        frame_number = display.frame_number\n        image = read_image(sequence.frames[frame_number])\n\n        display.show(image, tracker_results[frame_number], tracker_names)\n\n        time.sleep(0.01)\n        if display.pause_mode and display.frame_number == frame_number:\n            time.sleep(0.1)\n        elif not display.pause_mode:\n            display.step()\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/analysis/plot_results.py",
    "content": "import tikzplotlib\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport os\nimport torch\nimport pickle\nimport json\nfrom pytracking.evaluation.environment import env_settings\nfrom pytracking.analysis.extract_results import extract_results\n\n\ndef get_plot_draw_styles():\n    plot_draw_style = [{'color': (1.0, 0.0, 0.0), 'line_style': '-'},\n                       {'color': (0.0, 1.0, 0.0), 'line_style': '-'},\n                       {'color': (0.0, 0.0, 1.0), 'line_style': '-'},\n                       {'color': (0.0, 0.0, 0.0), 'line_style': '-'},\n                       {'color': (1.0, 0.0, 1.0), 'line_style': '-'},\n                       {'color': (0.0, 1.0, 1.0), 'line_style': '-'},\n                       {'color': (0.5, 0.5, 0.5), 'line_style': '-'},\n                       {'color': (136.0 / 255.0, 0.0, 21.0 / 255.0), 'line_style': '-'},\n                       {'color': (1.0, 127.0 / 255.0, 39.0 / 255.0), 'line_style': '-'},\n                       {'color': (0.0, 162.0 / 255.0, 232.0 / 255.0), 'line_style': '-'},\n                       {'color': (0.0, 0.5, 0.0), 'line_style': '-'},\n                       {'color': (1.0, 0.5, 0.2), 'line_style': '-'},\n                       {'color': (0.1, 0.4, 0.0), 'line_style': '-'},\n                       {'color': (0.6, 0.3, 0.9), 'line_style': '-'},\n                       {'color': (0.4, 0.7, 0.1), 'line_style': '-'},\n                       {'color': (0.2, 0.1, 0.7), 'line_style': '-'},\n                       {'color': (0.7, 0.6, 0.2), 'line_style': '-'}]\n\n    return plot_draw_style\n\n\ndef check_eval_data_is_valid(eval_data, trackers, dataset):\n    \"\"\" Checks if the pre-computed results are valid\"\"\"\n    seq_names = [s.name for s in dataset]\n    seq_names_saved = eval_data['sequences']\n\n    tracker_names_f = [(t.name, t.parameter_name, t.run_id) for t in trackers]\n    tracker_names_f_saved = [(t['name'], t['param'], t['run_id']) for t in eval_data['trackers']]\n\n    return seq_names == seq_names_saved and tracker_names_f == tracker_names_f_saved\n\n\ndef merge_multiple_runs(eval_data):\n    new_tracker_names = []\n    ave_success_rate_plot_overlap_merged = []\n    ave_success_rate_plot_center_merged = []\n    ave_success_rate_plot_center_norm_merged = []\n    avg_overlap_all_merged = []\n\n    ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap'])\n    ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center'])\n    ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm'])\n    avg_overlap_all = torch.tensor(eval_data['avg_overlap_all'])\n\n    trackers = eval_data['trackers']\n    merged = torch.zeros(len(trackers), dtype=torch.uint8)\n    for i in range(len(trackers)):\n        if merged[i]:\n            continue\n        base_tracker = trackers[i]\n        new_tracker_names.append(base_tracker)\n\n        match = [t['name'] == base_tracker['name'] and t['param'] == base_tracker['param'] for t in trackers]\n        match = torch.tensor(match)\n\n        ave_success_rate_plot_overlap_merged.append(ave_success_rate_plot_overlap[:, match, :].mean(1))\n        ave_success_rate_plot_center_merged.append(ave_success_rate_plot_center[:, match, :].mean(1))\n        ave_success_rate_plot_center_norm_merged.append(ave_success_rate_plot_center_norm[:, match, :].mean(1))\n        avg_overlap_all_merged.append(avg_overlap_all[:, match].mean(1))\n\n        merged[match] = 1\n\n    
ave_success_rate_plot_overlap_merged = torch.stack(ave_success_rate_plot_overlap_merged, dim=1)\n    ave_success_rate_plot_center_merged = torch.stack(ave_success_rate_plot_center_merged, dim=1)\n    ave_success_rate_plot_center_norm_merged = torch.stack(ave_success_rate_plot_center_norm_merged, dim=1)\n    avg_overlap_all_merged = torch.stack(avg_overlap_all_merged, dim=1)\n\n    eval_data['trackers'] = new_tracker_names\n    eval_data['ave_success_rate_plot_overlap'] = ave_success_rate_plot_overlap_merged.tolist()\n    eval_data['ave_success_rate_plot_center'] = ave_success_rate_plot_center_merged.tolist()\n    eval_data['ave_success_rate_plot_center_norm'] = ave_success_rate_plot_center_norm_merged.tolist()\n    eval_data['avg_overlap_all'] = avg_overlap_all_merged.tolist()\n\n    return eval_data\n\n\ndef get_tracker_display_name(tracker):\n    if tracker['disp_name'] is None:\n        if tracker['run_id'] is None:\n            disp_name = '{}_{}'.format(tracker['name'], tracker['param'])\n        else:\n            disp_name = '{}_{}_{:03d}'.format(tracker['name'], tracker['param'],\n                                              tracker['run_id'])\n    else:\n        disp_name = tracker['disp_name']\n\n    return  disp_name\n\n\ndef plot_draw_save(y, x, scores, trackers, plot_draw_styles, result_plot_path, plot_opts):\n    # Plot settings\n    font_size = plot_opts.get('font_size', 12)\n    font_size_axis = plot_opts.get('font_size_axis', 13)\n    line_width = plot_opts.get('line_width', 2)\n    font_size_legend = plot_opts.get('font_size_legend', 13)\n\n    plot_type = plot_opts['plot_type']\n    legend_loc = plot_opts['legend_loc']\n\n    xlabel = plot_opts['xlabel']\n    ylabel = plot_opts['ylabel']\n    xlim = plot_opts['xlim']\n    ylim = plot_opts['ylim']\n\n    title = plot_opts['title']\n\n    matplotlib.rcParams.update({'font.size': font_size})\n    matplotlib.rcParams.update({'axes.titlesize': font_size_axis})\n    matplotlib.rcParams.update({'axes.titleweight': 'black'})\n    matplotlib.rcParams.update({'axes.labelsize': font_size_axis})\n\n    fig, ax = plt.subplots()\n\n    index_sort = scores.argsort(descending=False)\n\n    plotted_lines = []\n    legend_text = []\n\n    for id, id_sort in enumerate(index_sort):\n        line = ax.plot(x.tolist(), y[id_sort, :].tolist(),\n                       linewidth=line_width,\n                       color=plot_draw_styles[index_sort.numel() - id - 1]['color'],\n                       linestyle=plot_draw_styles[index_sort.numel() - id - 1]['line_style'])\n\n        plotted_lines.append(line[0])\n\n        tracker = trackers[id_sort]\n        disp_name = get_tracker_display_name(tracker)\n\n        legend_text.append('{} [{:.1f}]'.format(disp_name, scores[id_sort]))\n\n    ax.legend(plotted_lines[::-1], legend_text[::-1], loc=legend_loc, fancybox=False, edgecolor='black',\n              fontsize=font_size_legend, framealpha=1.0)\n\n    ax.set(xlabel=xlabel,\n           ylabel=ylabel,\n           xlim=xlim, ylim=ylim,\n           title=title)\n\n    ax.grid(True, linestyle='-.')\n    fig.tight_layout()\n\n    tikzplotlib.save('{}/{}_plot.tex'.format(result_plot_path, plot_type))\n    fig.savefig('{}/{}_plot.pdf'.format(result_plot_path, plot_type), dpi=300, format='pdf', transparent=True)\n    plt.draw()\n\n\ndef check_and_load_precomputed_results(trackers, dataset, report_name, force_evaluation=False, **kwargs):\n    # Load data\n    settings = env_settings()\n\n    # Load pre-computed results\n    result_plot_path = 
os.path.join(settings.result_plot_path, report_name)\n    eval_data_path = os.path.join(result_plot_path, 'eval_data.pkl')\n\n    if os.path.isfile(eval_data_path) and not force_evaluation:\n        with open(eval_data_path, 'rb') as fh:\n            eval_data = pickle.load(fh)\n    else:\n        # print('Pre-computed evaluation data not found. Computing results!')\n        eval_data = extract_results(trackers, dataset, report_name, **kwargs)\n\n    if not check_eval_data_is_valid(eval_data, trackers, dataset):\n        # print('Pre-computed evaluation data invalid. Re-computing results!')\n        eval_data = extract_results(trackers, dataset, report_name, **kwargs)\n    else:\n        # Update display names\n        tracker_names = [{'name': t.name, 'param': t.parameter_name, 'run_id': t.run_id, 'disp_name': t.display_name}\n                         for t in trackers]\n        eval_data['trackers'] = tracker_names\n\n    return eval_data\n\n\ndef get_auc_curve(ave_success_rate_plot_overlap, valid_sequence):\n    ave_success_rate_plot_overlap = ave_success_rate_plot_overlap[valid_sequence, :, :]\n    auc_curve = ave_success_rate_plot_overlap.mean(0) * 100.0\n    auc = auc_curve.mean(-1)\n\n    return auc_curve, auc\n\n\ndef get_prec_curve(ave_success_rate_plot_center, valid_sequence):\n    ave_success_rate_plot_center = ave_success_rate_plot_center[valid_sequence, :, :]\n    prec_curve = ave_success_rate_plot_center.mean(0) * 100.0\n    prec_score = prec_curve[:, 20]\n\n    return prec_curve, prec_score\n\n\ndef plot_results(trackers, dataset, report_name, merge_results=False,\n                 plot_types=('success',), force_evaluation=False, **kwargs):\n    \"\"\"\n    Plot results for the given trackers\n\n    args:\n        trackers - List of trackers to evaluate\n        dataset - List of sequences to evaluate\n        report_name - Name of the folder in env_settings.result_plot_path where the computed results and plots are saved\n        merge_results - If True, multiple random runs of non-deterministic trackers are averaged\n        plot_types - List of scores to display. 
Can contain 'success',\n                    'prec' (precision), and 'norm_prec' (normalized precision)\n    \"\"\"\n    # Load data\n    settings = env_settings()\n\n    plot_draw_styles = get_plot_draw_styles()\n\n    # Load pre-computed results\n    result_plot_path = os.path.join(settings.result_plot_path, report_name)\n    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, force_evaluation, **kwargs)\n\n    # Merge results from multiple runs\n    if merge_results:\n        eval_data = merge_multiple_runs(eval_data)\n\n    tracker_names = eval_data['trackers']\n\n    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)\n\n    print('\\nPlotting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))\n\n    print('\\nGenerating plots for: {}'.format(report_name))\n\n    # ********************************  Success Plot **************************************\n    if 'success' in plot_types:\n        ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap'])\n\n        # Index out valid sequences\n        auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence)\n        threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap'])\n\n        success_plot_opts = {'plot_type': 'success', 'legend_loc': 'lower left', 'xlabel': 'Overlap threshold',\n                             'ylabel': 'Overlap Precision [%]', 'xlim': (0, 1.0), 'ylim': (0, 100), 'title': 'Success plot'}\n        plot_draw_save(auc_curve, threshold_set_overlap, auc, tracker_names, plot_draw_styles, result_plot_path, success_plot_opts)\n\n    # ********************************  Precision Plot **************************************\n    if 'prec' in plot_types:\n        ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center'])\n\n        # Index out valid sequences\n        prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence)\n        threshold_set_center = torch.tensor(eval_data['threshold_set_center'])\n\n        precision_plot_opts = {'plot_type': 'precision', 'legend_loc': 'lower right',\n                               'xlabel': 'Location error threshold [pixels]', 'ylabel': 'Distance Precision [%]',\n                               'xlim': (0, 50), 'ylim': (0, 100), 'title': 'Precision plot'}\n        plot_draw_save(prec_curve, threshold_set_center, prec_score, tracker_names, plot_draw_styles, result_plot_path,\n                       precision_plot_opts)\n\n    # ********************************  Norm Precision Plot **************************************\n    if 'norm_prec' in plot_types:\n        ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm'])\n\n        # Index out valid sequences\n        prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center_norm, valid_sequence)\n        threshold_set_center_norm = torch.tensor(eval_data['threshold_set_center_norm'])\n\n        norm_precision_plot_opts = {'plot_type': 'norm_precision', 'legend_loc': 'lower right',\n                                    'xlabel': 'Location error threshold', 'ylabel': 'Distance Precision [%]',\n                                    'xlim': (0, 0.5), 'ylim': (0, 100), 'title': 'Normalized Precision plot'}\n        plot_draw_save(prec_curve, threshold_set_center_norm, prec_score, tracker_names, plot_draw_styles, result_plot_path,\n                       norm_precision_plot_opts)\n\n 
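   # plot_draw_save() above already wrote a .tex and a .pdf file for each\n    # requested plot type; plt.show() additionally renders the figures on screen.\n 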
   plt.show()\n\n\ndef generate_formatted_report(row_labels, scores, table_name=''):\n    name_width = max([len(d) for d in row_labels] + [len(table_name)]) + 5\n    min_score_width = 10\n\n    report_text = '\\n{label: <{width}} |'.format(label=table_name, width=name_width)\n\n    score_widths = [max(min_score_width, len(k) + 3) for k in scores.keys()]\n\n    for s, s_w in zip(scores.keys(), score_widths):\n        report_text = '{prev} {s: <{width}} |'.format(prev=report_text, s=s, width=s_w)\n\n    report_text = '{prev}\\n'.format(prev=report_text)\n\n    for trk_id, d_name in enumerate(row_labels):\n        # display name\n        report_text = '{prev}{tracker: <{width}} |'.format(prev=report_text, tracker=d_name,\n                                                           width=name_width)\n        for (score_type, score_value), s_w in zip(scores.items(), score_widths):\n            report_text = '{prev} {score: <{width}} |'.format(prev=report_text,\n                                                              score='{:0.2f}'.format(score_value[trk_id].item()),\n                                                              width=s_w)\n        report_text = '{prev}\\n'.format(prev=report_text)\n\n    return report_text\n\n\ndef print_results(trackers, dataset, report_name, merge_results=False,\n                  plot_types=('success',), **kwargs):\n    \"\"\" Print the results for the given trackers in a formatted table\n    args:\n        trackers - List of trackers to evaluate\n        dataset - List of sequences to evaluate\n        report_name - Name of the folder in env_settings.result_plot_path where the computed results and plots are saved\n        merge_results - If True, multiple random runs of non-deterministic trackers are averaged\n        plot_types - List of scores to display. 
Can contain 'success' (prints AUC, OP50, and OP75 scores),\n                    'prec' (prints precision score), and 'norm_prec' (prints normalized precision score)\n    \"\"\"\n    # Load pre-computed results\n    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs)\n\n    # Merge results from multiple runs\n    if merge_results:\n        eval_data = merge_multiple_runs(eval_data)\n\n    tracker_names = eval_data['trackers']\n    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)\n\n    print('\\nReporting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))\n\n    scores = {}\n\n    # ********************************  Success Plot **************************************\n    if 'success' in plot_types:\n        threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap'])\n        ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap'])\n\n        # Index out valid sequences\n        auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence)\n        scores['AUC'] = auc\n        scores['OP50'] = auc_curve[:, threshold_set_overlap == 0.50]\n        scores['OP75'] = auc_curve[:, threshold_set_overlap == 0.75]\n\n    # ********************************  Precision Plot **************************************\n    if 'prec' in plot_types:\n        ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center'])\n\n        # Index out valid sequences\n        prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence)\n        scores['Precision'] = prec_score\n\n    # ********************************  Norm Precision Plot *********************************\n    if 'norm_prec' in plot_types:\n        ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm'])\n\n        # Index out valid sequences\n        norm_prec_curve, norm_prec_score = get_prec_curve(ave_success_rate_plot_center_norm, valid_sequence)\n        scores['Norm Precision'] = norm_prec_score\n\n    # Print\n    tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names]\n    report_text = generate_formatted_report(tracker_disp_names, scores, table_name=report_name)\n    print(report_text)\n\n\ndef plot_got_success(trackers, report_name):\n    \"\"\" Plot success plot for GOT-10k dataset using the json reports.\n    Save the json reports from http://got-10k.aitestunion.com/leaderboard in the directory set to\n    env_settings.got_reports_path\n\n    The tracker name in the experiment file should be set to the name of the report file for that tracker,\n    e.g. 
DiMP50_report_2019_09_02_15_44_25 if the report is named DiMP50_report_2019_09_02_15_44_25.json\n\n    args:\n        trackers - List of trackers to evaluate\n        report_name - Name of the folder in env_settings.result_plot_path where the computed results and plots are saved\n    \"\"\"\n    # Load data\n    settings = env_settings()\n    plot_draw_styles = get_plot_draw_styles()\n\n    result_plot_path = os.path.join(settings.result_plot_path, report_name)\n\n    auc_curve = torch.zeros((len(trackers), 101))\n    scores = torch.zeros(len(trackers))\n\n    # Load results\n    tracker_names = []\n    for trk_id, trk in enumerate(trackers):\n        json_path = '{}/{}.json'.format(settings.got_reports_path, trk.name)\n\n        if os.path.isfile(json_path):\n            with open(json_path, 'r') as f:\n                eval_data = json.load(f)\n        else:\n            raise Exception('Report not found {}'.format(json_path))\n\n        if len(eval_data.keys()) > 1:\n            raise Exception('Invalid JSON file {}: expected a single tracker entry'.format(json_path))\n\n        # First field is the tracker name. Index it out\n        eval_data = eval_data[list(eval_data.keys())[0]]\n        if 'succ_curve' in eval_data.keys():\n            curve = eval_data['succ_curve']\n            ao = eval_data['ao']\n        elif 'overall' in eval_data.keys() and 'succ_curve' in eval_data['overall'].keys():\n            curve = eval_data['overall']['succ_curve']\n            ao = eval_data['overall']['ao']\n        else:\n            raise Exception('Invalid JSON file {}'.format(json_path))\n\n        auc_curve[trk_id, :] = torch.tensor(curve) * 100.0\n        scores[trk_id] = ao * 100.0\n\n        tracker_names.append({'name': trk.name, 'param': trk.parameter_name, 'run_id': trk.run_id,\n                              'disp_name': trk.display_name})\n\n    threshold_set_overlap = torch.arange(0.0, 1.01, 0.01, dtype=torch.float64)\n\n    success_plot_opts = {'plot_type': 'success', 'legend_loc': 'lower left', 'xlabel': 'Overlap threshold',\n                         'ylabel': 'Overlap Precision [%]', 'xlim': (0, 1.0), 'ylim': (0, 100), 'title': 'Success plot'}\n    plot_draw_save(auc_curve, threshold_set_overlap, scores, tracker_names, plot_draw_styles, result_plot_path,\n                   success_plot_opts)\n    plt.show()\n\n\ndef print_per_sequence_results(trackers, dataset, report_name, merge_results=False,\n                               filter_criteria=None, **kwargs):\n    \"\"\" Print per-sequence results for the given trackers. Additionally, the sequences to list can be filtered using\n    the filter criteria.\n\n    args:\n        trackers - List of trackers to evaluate\n        dataset - List of sequences to evaluate\n        report_name - Name of the folder in env_settings.result_plot_path where the computed results and plots are saved\n        merge_results - If True, multiple random runs of non-deterministic trackers are averaged\n        filter_criteria - Filter the sequence results which are reported. The following modes are supported:\n                        None: No filtering. Display results for all sequences in dataset\n                        'ao_min': Only display sequences for which the minimum average overlap (AO) score over the\n                                  trackers is less than a threshold filter_criteria['threshold']. 
This mode can\n                                  be used to select sequences where at least one tracker performs poorly.\n                        'ao_max': Only display sequences for which the maximum average overlap (AO) score over the\n                                  trackers is less than a threshold filter_criteria['threshold']. This mode can\n                                  be used to select sequences where all trackers perform poorly.\n                        'delta_ao': Only display sequences for which the performance of the different trackers varies by at\n                                    least filter_criteria['threshold'] in average overlap (AO) score. This mode can\n                                    be used to select sequences where the behaviour of the trackers differs greatly\n                                    from each other.\n    \"\"\"\n    # Load pre-computed results\n    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs)\n\n    # Merge results from multiple runs\n    if merge_results:\n        eval_data = merge_multiple_runs(eval_data)\n\n    tracker_names = eval_data['trackers']\n    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)\n    sequence_names = eval_data['sequences']\n    avg_overlap_all = torch.tensor(eval_data['avg_overlap_all']) * 100.0\n\n    # Filter sequences\n    if filter_criteria is not None:\n        if filter_criteria['mode'] == 'ao_min':\n            min_ao = avg_overlap_all.min(dim=1)[0]\n            valid_sequence = valid_sequence & (min_ao < filter_criteria['threshold'])\n        elif filter_criteria['mode'] == 'ao_max':\n            max_ao = avg_overlap_all.max(dim=1)[0]\n            valid_sequence = valid_sequence & (max_ao < filter_criteria['threshold'])\n        elif filter_criteria['mode'] == 'delta_ao':\n            min_ao = avg_overlap_all.min(dim=1)[0]\n            max_ao = avg_overlap_all.max(dim=1)[0]\n            valid_sequence = valid_sequence & ((max_ao - min_ao) > filter_criteria['threshold'])\n        else:\n            raise Exception('Unknown filter mode {}'.format(filter_criteria['mode']))\n\n    avg_overlap_all = avg_overlap_all[valid_sequence, :]\n    sequence_names = [s + ' (ID={})'.format(i) for i, (s, v) in enumerate(zip(sequence_names, valid_sequence.tolist())) if v]\n\n    tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names]\n\n    scores_per_tracker = {k: avg_overlap_all[:, i] for i, k in enumerate(tracker_disp_names)}\n    report_text = generate_formatted_report(sequence_names, scores_per_tracker)\n\n    print(report_text)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/analysis/vos_utils.py",
    "content": "import warnings\nimport numpy as np\nfrom skimage.morphology import binary_dilation, disk\nfrom math import floor\n\n\ndef text_bargraph(values):\n\n    blocks = np.array(('u', ' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█', 'o'))\n    nsteps = len(blocks)-2-1\n    hstep = 1 / (2*nsteps)\n    values = np.array(values)\n    nans = np.isnan(values)\n    values[nans] = 0  # '░'\n    indices = ((values + hstep) * nsteps + 1).astype(np.int)\n    indices[values < 0] = 0\n    indices[values > 1] = len(blocks)-1\n    graph = blocks[indices]\n    graph[nans] = '░'\n    graph = str.join('', graph)\n    return graph\n\n\n# ----------------------------------------------------------------------------\n# The 2017 DAVIS Challenge on Video Object Segmentation\n# -----------------------------------------------------------------------------\n# Copyright (c) 2017 Federico Perazzi\n# Licensed under the BSD License [see LICENSE for details]\n# Written by Federico Perazzi (federico@disneyresearch.com)\n# Adapted from DAVIS 2016 (Federico Perazzi)\n# ----------------------------------------------------------------------------\n\n# Originally db_eval_iou() in the davis challenge toolkit:\ndef davis_jaccard_measure(fg_mask, gt_mask):\n    \"\"\" Compute region similarity as the Jaccard Index.\n\n    :param fg_mask: (ndarray): binary segmentation map.\n    :param gt_mask: (ndarray): binary annotation map.\n    :return: jaccard (float): region similarity\n    \"\"\"\n\n    gt_mask = gt_mask.astype(np.bool)\n    fg_mask = fg_mask.astype(np.bool)\n\n    if np.isclose(np.sum(gt_mask), 0) and np.isclose(np.sum(fg_mask), 0):\n        return 1\n    else:\n        return np.sum((gt_mask & fg_mask)) / \\\n               np.sum((gt_mask | fg_mask), dtype=np.float32)\n\n\ndef davis_jaccard_measure_torch(fg_mask, gt_mask):\n    \"\"\" Compute region similarity as the Jaccard Index.\n\n    :param fg_mask: (ndarray): binary segmentation map.\n    :param gt_mask: (ndarray): binary annotation map.\n    :return: jaccard (float): region similarity\n    \"\"\"\n\n    #gt_mask = gt_mask.astype(np.bool)\n    #fg_mask = fg_mask.astype(np.bool)\n\n    if gt_mask.sum() == 0 and fg_mask.sum() == 0:\n        return 1\n    else:\n        return (gt_mask & fg_mask).sum() / \\\n               (gt_mask | fg_mask).sum().float()\n\n# Originally db_eval_boundary() in the davis challenge toolkit:\ndef davis_f_measure(foreground_mask, gt_mask, bound_th=0.008):\n    \"\"\"\n    Compute mean,recall and decay from per-frame evaluation.\n    Calculates precision/recall for boundaries between foreground_mask and\n    gt_mask using morphological operators to speed it up.\n\n    Arguments:\n        foreground_mask (ndarray): binary segmentation image.\n        gt_mask         (ndarray): binary annotated image.\n\n    Returns:\n        F (float): boundaries F-measure\n        P (float): boundaries precision\n        R (float): boundaries recall\n    \"\"\"\n    assert np.atleast_3d(foreground_mask).shape[2] == 1\n\n    bound_pix = bound_th if bound_th >= 1 else \\\n        np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))\n\n    # Get the pixel boundaries of both masks\n    fg_boundary = seg2bmap(foreground_mask)\n    gt_boundary = seg2bmap(gt_mask)\n\n    fg_dil = binary_dilation(fg_boundary, disk(bound_pix))\n    gt_dil = binary_dilation(gt_boundary, disk(bound_pix))\n\n    # Get the intersection\n    gt_match = gt_boundary * fg_dil\n    fg_match = fg_boundary * gt_dil\n\n    # Area of the intersection\n    n_fg = 
np.sum(fg_boundary)\n    n_gt = np.sum(gt_boundary)\n\n    # Compute precision and recall\n    if n_fg == 0 and n_gt > 0:\n        precision = 1\n        recall = 0\n    elif n_fg > 0 and n_gt == 0:\n        precision = 0\n        recall = 1\n    elif n_fg == 0 and n_gt == 0:\n        precision = 1\n        recall = 1\n    else:\n        precision = np.sum(fg_match) / float(n_fg)\n        recall = np.sum(gt_match) / float(n_gt)\n\n    # Compute F measure\n    if precision + recall == 0:\n        F = 0\n    else:\n        F = 2 * precision * recall / (precision + recall)\n\n    return F\n\n\ndef seg2bmap(seg, width=None, height=None):\n    \"\"\"\n    From a segmentation, compute a binary boundary map with 1 pixel wide\n    boundaries.  The boundary pixels are offset by 1/2 pixel towards the\n    origin from the actual segment boundary.\n\n    Arguments:\n        seg     : Segments labeled from 1..k.\n        width   : Width of desired bmap  <= seg.shape[1]\n        height  : Height of desired bmap <= seg.shape[0]\n\n    Returns:\n        bmap (ndarray): Binary boundary map.\n\n     David Martin <dmartin@eecs.berkeley.edu>\n     January 2003\n    \"\"\"\n\n    seg = seg.astype(bool)\n\n    assert np.atleast_3d(seg).shape[2] == 1\n\n    width = seg.shape[1] if width is None else width\n    height = seg.shape[0] if height is None else height\n\n    h, w = seg.shape[:2]\n\n    ar1 = float(width) / float(height)\n    ar2 = float(w) / float(h)\n\n    assert not (width > w or height > h or abs(ar1 - ar2) > 0.01), \\\n        \"Can't convert %dx%d seg to %dx%d bmap.\" % (w, h, width, height)\n\n    e = np.zeros_like(seg)\n    s = np.zeros_like(seg)\n    se = np.zeros_like(seg)\n\n    e[:, :-1] = seg[:, 1:]\n    s[:-1, :] = seg[1:, :]\n    se[:-1, :-1] = seg[1:, 1:]\n\n    b = seg ^ e | seg ^ s | seg ^ se\n    b[-1, :] = seg[-1, :] ^ e[-1, :]\n    b[:, -1] = seg[:, -1] ^ s[:, -1]\n    b[-1, -1] = 0\n\n    if w == width and h == height:\n        bmap = b\n    else:\n        bmap = np.zeros((height, width))\n        for x in range(w):\n            for y in range(h):\n                if b[y, x]:\n                    # Map the boundary pixel to the nearest location in the resized map\n                    j = 1 + floor((y - 1) * height / h)\n                    i = 1 + floor((x - 1) * width / w)\n                    bmap[j, i] = 1\n\n    return bmap\n\n\ndef nanmean(*args, **kwargs):\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n        return np.nanmean(*args, **kwargs)\n\n\ndef mean(X):\n    \"\"\"\n    Compute average ignoring NaN values.\n    \"\"\"\n    return np.nanmean(X)\n\n\ndef recall(X, threshold=0.5):\n    \"\"\"\n    Fraction of values of X scoring higher than 'threshold'\n    \"\"\"\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n        x = X[~np.isnan(X)]\n        x = mean(x > threshold)\n    return x\n\n\ndef decay(X, n_bins=4):\n    \"\"\"\n    Performance loss over time: mean of the first bin minus mean of the last.\n    \"\"\"\n    X = X[~np.isnan(X)]\n    ids = np.round(np.linspace(1, len(X), n_bins + 1) + 1e-10) - 1\n    ids = ids.astype(int)\n\n    D_bins = [X[ids[i]:ids[i + 1] + 1] for i in range(n_bins)]\n\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n        D = np.nanmean(D_bins[0]) - np.nanmean(D_bins[-1])\n    return D\n\n\ndef std(X):\n    \"\"\"\n    Compute standard deviation ignoring NaN values.\n    \"\"\"\n    return np.nanstd(X)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/__init__.py",
    "content": "from .data import Sequence\nfrom .tracker import Tracker, trackerlist\nfrom .datasets import get_dataset"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/data.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.environment import env_settings\nfrom ltr.data.image_loader import imread_indexed\nfrom collections import OrderedDict\n\n\nclass BaseDataset:\n    \"\"\"Base class for all datasets.\"\"\"\n    def __init__(self):\n        self.env_settings = env_settings()\n\n    def __len__(self):\n        \"\"\"Overload this function in your dataset. This should return number of sequences in the dataset.\"\"\"\n        raise NotImplementedError\n\n    def get_sequence_list(self):\n        \"\"\"Overload this in your dataset. Should return the list of sequences in the dataset.\"\"\"\n        raise NotImplementedError\n\n\nclass Sequence:\n    \"\"\"Class for the sequence in an evaluation.\"\"\"\n    def __init__(self, name, frames, dataset, ground_truth_rect, ground_truth_seg=None, init_data=None,\n                 object_class=None, target_visible=None, object_ids=None, multiobj_mode=False):\n        self.name = name\n        self.frames = frames\n        self.dataset = dataset\n        self.ground_truth_rect = ground_truth_rect\n        self.ground_truth_seg = ground_truth_seg\n        self.object_class = object_class\n        self.target_visible = target_visible\n        self.object_ids = object_ids\n        self.multiobj_mode = multiobj_mode\n        self.init_data = self._construct_init_data(init_data)\n        self._ensure_start_frame()\n\n    def _ensure_start_frame(self):\n        # Ensure start frame is 0\n        start_frame = min(list(self.init_data.keys()))\n        if start_frame > 0:\n            self.frames = self.frames[start_frame:]\n            if self.ground_truth_rect is not None:\n                if isinstance(self.ground_truth_rect, (dict, OrderedDict)):\n                    for obj_id, gt in self.ground_truth_rect.items():\n                        self.ground_truth_rect[obj_id] = gt[start_frame:,:]\n                else:\n                    self.ground_truth_rect = self.ground_truth_rect[start_frame:,:]\n            if self.ground_truth_seg is not None:\n                self.ground_truth_seg = self.ground_truth_seg[start_frame:]\n                assert len(self.frames) == len(self.ground_truth_seg)\n\n            if self.target_visible is not None:\n                self.target_visible = self.target_visible[start_frame:]\n            self.init_data = {frame-start_frame: val for frame, val in self.init_data.items()}\n\n    def _construct_init_data(self, init_data):\n        if init_data is not None:\n            if not self.multiobj_mode:\n                assert self.object_ids is None or len(self.object_ids) == 1\n                for frame, init_val in init_data.items():\n                    if 'bbox' in init_val and isinstance(init_val['bbox'], (dict, OrderedDict)):\n                        init_val['bbox'] = init_val['bbox'][self.object_ids[0]]\n            # convert to list\n            for frame, init_val in init_data.items():\n                if 'bbox' in init_val:\n                    if isinstance(init_val['bbox'], (dict, OrderedDict)):\n                        init_val['bbox'] = OrderedDict({obj_id: list(init) for obj_id, init in init_val['bbox'].items()})\n                    else:\n                        init_val['bbox'] = list(init_val['bbox'])\n        else:\n            init_data = {0: dict()}     # Assume start from frame 0\n\n            if self.object_ids is not None:\n                init_data[0]['object_ids'] = self.object_ids\n\n            if self.ground_truth_rect is not None:\n                if 
self.multiobj_mode:\n                    assert isinstance(self.ground_truth_rect, (dict, OrderedDict))\n                    init_data[0]['bbox'] = OrderedDict({obj_id: list(gt[0,:]) for obj_id, gt in self.ground_truth_rect.items()})\n                else:\n                    assert self.object_ids is None or len(self.object_ids) == 1\n                    if isinstance(self.ground_truth_rect, (dict, OrderedDict)):\n                        init_data[0]['bbox'] = list(self.ground_truth_rect[self.object_ids[0]][0, :])\n                    else:\n                        init_data[0]['bbox'] = list(self.ground_truth_rect[0,:])\n\n            if self.ground_truth_seg is not None:\n                init_data[0]['mask'] = self.ground_truth_seg[0]\n\n        return init_data\n\n    def init_info(self):\n        info = self.frame_info(frame_num=0)\n        return info\n\n    def frame_info(self, frame_num):\n        info = self.object_init_data(frame_num=frame_num)\n        return info\n\n    def init_bbox(self, frame_num=0):\n        return self.object_init_data(frame_num=frame_num).get('init_bbox')\n\n    def init_mask(self, frame_num=0):\n        return self.object_init_data(frame_num=frame_num).get('init_mask')\n\n    def get_info(self, keys, frame_num=None):\n        info = dict()\n        for k in keys:\n            val = self.get(k, frame_num=frame_num)\n            if val is not None:\n                info[k] = val\n        return info\n\n    def object_init_data(self, frame_num=None) -> dict:\n        if frame_num is None:\n            frame_num = 0\n        if frame_num not in self.init_data:\n            return dict()\n\n        init_data = dict()\n        for key, val in self.init_data[frame_num].items():\n            if val is None:\n                continue\n            init_data['init_'+key] = val\n\n        if 'init_mask' in init_data and init_data['init_mask'] is not None:\n            anno = imread_indexed(init_data['init_mask'])\n            if not self.multiobj_mode and self.object_ids is not None:\n                assert len(self.object_ids) == 1\n                anno = (anno == int(self.object_ids[0])).astype(np.uint8)\n            init_data['init_mask'] = anno\n\n        if self.object_ids is not None:\n            init_data['object_ids'] = self.object_ids\n            init_data['sequence_object_ids'] = self.object_ids\n\n        return init_data\n\n    def target_class(self, frame_num=None):\n        return self.object_class\n\n    def get(self, name, frame_num=None):\n        return getattr(self, name)(frame_num)\n\n    def __repr__(self):\n        return \"{self.__class__.__name__} {self.name}, length={len} frames\".format(self=self, len=len(self.frames))\n\n\n\nclass SequenceList(list):\n    \"\"\"List of sequences. 
Supports the addition operator to concatenate sequence lists.\"\"\"\n    def __getitem__(self, item):\n        if isinstance(item, str):\n            for seq in self:\n                if seq.name == item:\n                    return seq\n            raise IndexError('Sequence name not in the dataset.')\n        elif isinstance(item, int):\n            return super(SequenceList, self).__getitem__(item)\n        elif isinstance(item, (tuple, list)):\n            return SequenceList([super(SequenceList, self).__getitem__(i) for i in item])\n        else:\n            return SequenceList(super(SequenceList, self).__getitem__(item))\n\n    def __add__(self, other):\n        return SequenceList(super(SequenceList, self).__add__(other))\n\n    def copy(self):\n        return SequenceList(super(SequenceList, self).copy())"
  },
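  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/_example_sequence_usage.py",
    "content": "# NOTE: hypothetical usage sketch, not part of the original repository.\n# It illustrates how Sequence (pytracking/evaluation/data.py) derives its init info\n# from the first ground-truth box when no explicit init_data is supplied.\n# The frame file names below are placeholders.\nimport numpy as np\n\nfrom pytracking.evaluation.data import Sequence\n\n# Two frames with (x, y, w, h) ground-truth boxes.\ngt = np.array([[10.0, 20.0, 50.0, 80.0],\n               [12.0, 21.0, 50.0, 80.0]])\nseq = Sequence('toy_seq', ['00000001.jpg', '00000002.jpg'], 'toy_dataset', gt)\n\n# With init_data=None, frame 0 is assumed to be the start frame and its\n# ground-truth box becomes the init bbox.\nprint(seq.init_info())    # {'init_bbox': [10.0, 20.0, 50.0, 80.0]}\nprint(len(seq.frames))    # 2\n"
  },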
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/datasets.py",
    "content": "from collections import namedtuple\nimport importlib\nfrom pytracking.evaluation.data import SequenceList\n\nDatasetInfo = namedtuple('DatasetInfo', ['module', 'class_name', 'kwargs'])\n\npt = \"pytracking.evaluation.%sdataset\"  # Useful abbreviations to reduce the clutter\n\ndataset_dict = dict(\n    otb=DatasetInfo(module=pt % \"otb\", class_name=\"OTBDataset\", kwargs=dict()),\n    nfs=DatasetInfo(module=pt % \"nfs\", class_name=\"NFSDataset\", kwargs=dict()),\n    uav=DatasetInfo(module=pt % \"uav\", class_name=\"UAVDataset\", kwargs=dict()),\n    tpl=DatasetInfo(module=pt % \"tpl\", class_name=\"TPLDataset\", kwargs=dict()),\n    tpl_nootb=DatasetInfo(module=pt % \"tpl\", class_name=\"TPLDataset\", kwargs=dict(exclude_otb=True)),\n    vot=DatasetInfo(module=pt % \"vot\", class_name=\"VOTDataset\", kwargs=dict()),\n    trackingnet=DatasetInfo(module=pt % \"trackingnet\", class_name=\"TrackingNetDataset\", kwargs=dict()),\n    got10k_test=DatasetInfo(module=pt % \"got10k\", class_name=\"GOT10KDataset\", kwargs=dict(split='test')),\n    got10k_val=DatasetInfo(module=pt % \"got10k\", class_name=\"GOT10KDataset\", kwargs=dict(split='val')),\n    got10k_ltrval=DatasetInfo(module=pt % \"got10k\", class_name=\"GOT10KDataset\", kwargs=dict(split='ltrval')),\n    lasot=DatasetInfo(module=pt % \"lasot\", class_name=\"LaSOTDataset\", kwargs=dict()),\n    dv2017_val=DatasetInfo(module=\"ltr.dataset.davis\", class_name=\"Davis\", kwargs=dict(version='2017', split='val')),\n    dv2016_val=DatasetInfo(module=\"ltr.dataset.davis\", class_name=\"Davis\", kwargs=dict(version='2016', split='val')),\n    dv2017_test_dev=DatasetInfo(module=\"ltr.dataset.davis\", class_name=\"Davis\",\n                                kwargs=dict(version='2017', split='test-dev')),\n    dv2017_test_chal=DatasetInfo(module=\"ltr.dataset.davis\", class_name=\"Davis\",\n                                 kwargs=dict(version='2017', split='test-challenge')),\n    yt2019_test=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                            kwargs=dict(version='2019', split='test')),\n    yt2019_valid=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                             kwargs=dict(version='2019', split='valid')),\n    yt2019_valid_all=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                                 kwargs=dict(version='2019', split='valid', all_frames=True)),\n    yt2018_valid_all=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                                 kwargs=dict(version='2018', split='valid', all_frames=True)),\n    yt2018_jjval=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                             kwargs=dict(version='2018', split='jjvalid')),\n    yt2019_jjval=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                             kwargs=dict(version='2019', split='jjvalid', cleanup=['starts'])),\n    yt2019_jjval_all=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                                 kwargs=dict(version='2019', split='jjvalid', all_frames=True, cleanup=['starts'])),\n)\n\n\ndef load_dataset(name: str):\n    \"\"\" Import and load a single dataset.\"\"\"\n    name = name.lower()\n    dset_info = dataset_dict.get(name)\n    if dset_info is None:\n        raise ValueError('Unknown dataset \\'%s\\'' % name)\n\n    m = importlib.import_module(dset_info.module)\n    
dataset = getattr(m, dset_info.class_name)(**dset_info.kwargs)  # Call the constructor\n    return dataset.get_sequence_list()\n\n\ndef get_dataset(*args):\n    \"\"\" Get a single dataset or a set of datasets.\"\"\"\n    dset = SequenceList()\n    for name in args:\n        dset.extend(load_dataset(name))\n    return dset"
  },
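  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/_example_datasets_usage.py",
    "content": "# NOTE: hypothetical usage sketch, not part of the original repository.\n# It shows the entry points of pytracking/evaluation/datasets.py: load_dataset()\n# for one benchmark and get_dataset() for several. The names must be keys of\n# dataset_dict, and the corresponding dataset paths must be set in local.py first.\nfrom pytracking.evaluation.datasets import get_dataset, load_dataset\n\notb = load_dataset('otb')                      # SequenceList for a single benchmark\ncombined = get_dataset('got10k_val', 'lasot')  # concatenated SequenceList\n\nfor seq in combined:\n    print(seq.name, seq.dataset, len(seq.frames))\n"
  },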
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/environment.py",
    "content": "import importlib\nimport os\n\n\nclass EnvSettings:\n    def __init__(self):\n        pytracking_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n\n        self.results_path = '{}/tracking_results/'.format(pytracking_path)\n        self.segmentation_path = '{}/segmentation_results/'.format(pytracking_path)\n        self.network_path = '{}/networks/'.format(pytracking_path)\n        self.result_plot_path = '{}/result_plots/'.format(pytracking_path)\n        self.otb_path = ''\n        self.nfs_path = ''\n        self.uav_path = ''\n        self.tpl_path = ''\n        self.vot_path = ''\n        self.got10k_path = ''\n        self.lasot_path = ''\n        self.trackingnet_path = ''\n        self.davis_dir = ''\n        self.youtubevos_dir = ''\n\n        self.got_packed_results_path = ''\n        self.got_reports_path = ''\n        self.tn_packed_results_path = ''\n\n\ndef create_default_local_file():\n    comment = {'results_path': 'Where to store tracking results',\n               'network_path': 'Where tracking networks are stored.'}\n\n    path = os.path.join(os.path.dirname(__file__), 'local.py')\n    with open(path, 'w') as f:\n        settings = EnvSettings()\n\n        f.write('from pytracking.evaluation.environment import EnvSettings\\n\\n')\n        f.write('def local_env_settings():\\n')\n        f.write('    settings = EnvSettings()\\n\\n')\n        f.write('    # Set your local paths here.\\n\\n')\n\n        for attr in dir(settings):\n            comment_str = None\n            if attr in comment:\n                comment_str = comment[attr]\n            attr_val = getattr(settings, attr)\n            if not attr.startswith('__') and not callable(attr_val):\n                if comment_str is None:\n                    f.write('    settings.{} = \\'{}\\'\\n'.format(attr, attr_val))\n                else:\n                    f.write('    settings.{} = \\'{}\\'    # {}\\n'.format(attr, attr_val, comment_str))\n        f.write('\\n    return settings\\n\\n')\n\n\ndef env_settings():\n    env_module_name = 'pytracking.evaluation.local'\n    try:\n        env_module = importlib.import_module(env_module_name)\n        return env_module.local_env_settings()\n    except:\n        env_file = os.path.join(os.path.dirname(__file__), 'local.py')\n\n        # Create a default file\n        create_default_local_file()\n        raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\\n Go to \"{}\" and set all the paths you need. '\n                           'Then try to run again.'.format(env_file))"
  },
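  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/_example_local.py",
    "content": "# NOTE: hypothetical example of the local.py that create_default_local_file()\n# in pytracking/evaluation/environment.py writes, and that env_settings() imports\n# as pytracking.evaluation.local. All paths below are placeholders; fill in only\n# the ones you need.\nfrom pytracking.evaluation.environment import EnvSettings\n\n\ndef local_env_settings():\n    settings = EnvSettings()\n\n    # Set your local paths here.\n    settings.results_path = '/workspace/pytracking/tracking_results/'    # Where to store tracking results\n    settings.network_path = '/workspace/pytracking/networks/'    # Where tracking networks are stored.\n    settings.got10k_path = '/data/got10k'\n    settings.lasot_path = '/data/lasot'\n\n    return settings\n"
  },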
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/got10kdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom pytracking.utils.load_text import load_text\nimport os\n\n\nclass GOT10KDataset(BaseDataset):\n    \"\"\" GOT-10k dataset.\n\n    Publication:\n        GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n        Lianghua Huang, Xin Zhao, and Kaiqi Huang\n        arXiv:1810.11981, 2018\n        https://arxiv.org/pdf/1810.11981.pdf\n\n    Download dataset from http://got-10k.aitestunion.com/downloads\n    \"\"\"\n    def __init__(self, split):\n        super().__init__()\n        # Split can be test, val, or ltrval (a validation split consisting of videos from the official train set)\n        if split == 'test' or split == 'val':\n            self.base_path = os.path.join(self.env_settings.got10k_path, split)\n        else:\n            self.base_path = os.path.join(self.env_settings.got10k_path, 'train')\n\n        self.sequence_list = self._get_sequence_list(split)\n        self.split = split\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n    def _construct_sequence(self, sequence_name):\n        anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64)\n\n        frames_path = '{}/{}'.format(self.base_path, sequence_name)\n        frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(\".jpg\")]\n        frame_list.sort(key=lambda f: int(f[:-4]))\n        frames_list = [os.path.join(frames_path, frame) for frame in frame_list]\n\n        return Sequence(sequence_name, frames_list, 'got10k', ground_truth_rect.reshape(-1, 4))\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _get_sequence_list(self, split):\n        with open('{}/list.txt'.format(self.base_path)) as f:\n            sequence_list = f.read().splitlines()\n\n        if split == 'ltrval':\n            with open('{}/got10k_val_split.txt'.format(self.env_settings.dataspec_path)) as f:\n                seq_ids = f.read().splitlines()\n\n            sequence_list = [sequence_list[int(x)] for x in seq_ids]\n        return sequence_list\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/lasotdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom pytracking.utils.load_text import load_text\n\n\nclass LaSOTDataset(BaseDataset):\n    \"\"\"\n    LaSOT test set consisting of 280 videos (see Protocol-II in the LaSOT paper)\n\n    Publication:\n        LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n        Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n        CVPR, 2019\n        https://arxiv.org/pdf/1809.07845.pdf\n\n    Download the dataset from https://cis.temple.edu/lasot/download.html\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.lasot_path\n        self.sequence_list = self._get_sequence_list()\n        self.clean_list = self.clean_seq_list()\n\n    def clean_seq_list(self):\n        clean_lst = []\n        for i in range(len(self.sequence_list)):\n            cls, _ = self.sequence_list[i].split('-')\n            clean_lst.append(cls)\n        return  clean_lst\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n    def _construct_sequence(self, sequence_name):\n        class_name = sequence_name.split('-')[0]\n        anno_path = '{}/{}/{}/groundtruth.txt'.format(self.base_path, class_name, sequence_name)\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64)\n\n        occlusion_label_path = '{}/{}/{}/full_occlusion.txt'.format(self.base_path, class_name, sequence_name)\n\n        # NOTE: pandas backed seems super super slow for loading occlusion/oov masks\n        full_occlusion = load_text(str(occlusion_label_path), delimiter=',', dtype=np.float64, backend='numpy')\n\n        out_of_view_label_path = '{}/{}/{}/out_of_view.txt'.format(self.base_path, class_name, sequence_name)\n        out_of_view = load_text(str(out_of_view_label_path), delimiter=',', dtype=np.float64, backend='numpy')\n\n        target_visible = np.logical_and(full_occlusion == 0, out_of_view == 0)\n\n        frames_path = '{}/{}/{}/img'.format(self.base_path, class_name, sequence_name)\n\n        frames_list = ['{}/{:08d}.jpg'.format(frames_path, frame_number) for frame_number in range(1, ground_truth_rect.shape[0] + 1)]\n\n        target_class = class_name\n        return Sequence(sequence_name, frames_list, 'lasot', ground_truth_rect.reshape(-1, 4),\n                        object_class=target_class, target_visible=target_visible)\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _get_sequence_list(self):\n        sequence_list = ['airplane-1',\n                         'airplane-9',\n                         'airplane-13',\n                         'airplane-15',\n                         'basketball-1',\n                         'basketball-6',\n                         'basketball-7',\n                         'basketball-11',\n                         'bear-2',\n                         'bear-4',\n                         'bear-6',\n                         'bear-17',\n                         'bicycle-2',\n                         'bicycle-7',\n                         'bicycle-9',\n                         'bicycle-18',\n                         'bird-2',\n                         'bird-3',\n                         'bird-15',\n                         'bird-17',\n                         'boat-3',\n                         'boat-4',\n               
          'boat-12',\n                         'boat-17',\n                         'book-3',\n                         'book-10',\n                         'book-11',\n                         'book-19',\n                         'bottle-1',\n                         'bottle-12',\n                         'bottle-14',\n                         'bottle-18',\n                         'bus-2',\n                         'bus-5',\n                         'bus-17',\n                         'bus-19',\n                         'car-2',\n                         'car-6',\n                         'car-9',\n                         'car-17',\n                         'cat-1',\n                         'cat-3',\n                         'cat-18',\n                         'cat-20',\n                         'cattle-2',\n                         'cattle-7',\n                         'cattle-12',\n                         'cattle-13',\n                         'spider-14',\n                         'spider-16',\n                         'spider-18',\n                         'spider-20',\n                         'coin-3',\n                         'coin-6',\n                         'coin-7',\n                         'coin-18',\n                         'crab-3',\n                         'crab-6',\n                         'crab-12',\n                         'crab-18',\n                         'surfboard-12',\n                         'surfboard-4',\n                         'surfboard-5',\n                         'surfboard-8',\n                         'cup-1',\n                         'cup-4',\n                         'cup-7',\n                         'cup-17',\n                         'deer-4',\n                         'deer-8',\n                         'deer-10',\n                         'deer-14',\n                         'dog-1',\n                         'dog-7',\n                         'dog-15',\n                         'dog-19',\n                         'guitar-3',\n                         'guitar-8',\n                         'guitar-10',\n                         'guitar-16',\n                         'person-1',\n                         'person-5',\n                         'person-10',\n                         'person-12',\n                         'pig-2',\n                         'pig-10',\n                         'pig-13',\n                         'pig-18',\n                         'rubicCube-1',\n                         'rubicCube-6',\n                         'rubicCube-14',\n                         'rubicCube-19',\n                         'swing-10',\n                         'swing-14',\n                         'swing-17',\n                         'swing-20',\n                         'drone-13',\n                         'drone-15',\n                         'drone-2',\n                         'drone-7',\n                         'pool-12',\n                         'pool-15',\n                         'pool-3',\n                         'pool-7',\n                         'rabbit-10',\n                         'rabbit-13',\n                         'rabbit-17',\n                         'rabbit-19',\n                         'racing-10',\n                         'racing-15',\n                         'racing-16',\n                         'racing-20',\n                         'robot-1',\n                         'robot-19',\n                         'robot-5',\n                         'robot-8',\n                         'sepia-13',\n          
               'sepia-16',\n                         'sepia-6',\n                         'sepia-8',\n                         'sheep-3',\n                         'sheep-5',\n                         'sheep-7',\n                         'sheep-9',\n                         'skateboard-16',\n                         'skateboard-19',\n                         'skateboard-3',\n                         'skateboard-8',\n                         'tank-14',\n                         'tank-16',\n                         'tank-6',\n                         'tank-9',\n                         'tiger-12',\n                         'tiger-18',\n                         'tiger-4',\n                         'tiger-6',\n                         'train-1',\n                         'train-11',\n                         'train-20',\n                         'train-7',\n                         'truck-16',\n                         'truck-3',\n                         'truck-6',\n                         'truck-7',\n                         'turtle-16',\n                         'turtle-5',\n                         'turtle-8',\n                         'turtle-9',\n                         'umbrella-17',\n                         'umbrella-19',\n                         'umbrella-2',\n                         'umbrella-9',\n                         'yoyo-15',\n                         'yoyo-17',\n                         'yoyo-19',\n                         'yoyo-7',\n                         'zebra-10',\n                         'zebra-14',\n                         'zebra-16',\n                         'zebra-17',\n                         'elephant-1',\n                         'elephant-12',\n                         'elephant-16',\n                         'elephant-18',\n                         'goldfish-3',\n                         'goldfish-7',\n                         'goldfish-8',\n                         'goldfish-10',\n                         'hat-1',\n                         'hat-2',\n                         'hat-5',\n                         'hat-18',\n                         'kite-4',\n                         'kite-6',\n                         'kite-10',\n                         'kite-15',\n                         'motorcycle-1',\n                         'motorcycle-3',\n                         'motorcycle-9',\n                         'motorcycle-18',\n                         'mouse-1',\n                         'mouse-8',\n                         'mouse-9',\n                         'mouse-17',\n                         'flag-3',\n                         'flag-9',\n                         'flag-5',\n                         'flag-2',\n                         'frog-3',\n                         'frog-4',\n                         'frog-20',\n                         'frog-9',\n                         'gametarget-1',\n                         'gametarget-2',\n                         'gametarget-7',\n                         'gametarget-13',\n                         'hand-2',\n                         'hand-3',\n                         'hand-9',\n                         'hand-16',\n                         'helmet-5',\n                         'helmet-11',\n                         'helmet-19',\n                         'helmet-13',\n                         'licenseplate-6',\n                         'licenseplate-12',\n                         'licenseplate-13',\n                         'licenseplate-15',\n                         'electricfan-1',\n                 
        'electricfan-10',\n                         'electricfan-18',\n                         'electricfan-20',\n                         'chameleon-3',\n                         'chameleon-6',\n                         'chameleon-11',\n                         'chameleon-20',\n                         'crocodile-3',\n                         'crocodile-4',\n                         'crocodile-10',\n                         'crocodile-14',\n                         'gecko-1',\n                         'gecko-5',\n                         'gecko-16',\n                         'gecko-19',\n                         'fox-2',\n                         'fox-3',\n                         'fox-5',\n                         'fox-20',\n                         'giraffe-2',\n                         'giraffe-10',\n                         'giraffe-13',\n                         'giraffe-15',\n                         'gorilla-4',\n                         'gorilla-6',\n                         'gorilla-9',\n                         'gorilla-13',\n                         'hippo-1',\n                         'hippo-7',\n                         'hippo-9',\n                         'hippo-20',\n                         'horse-1',\n                         'horse-4',\n                         'horse-12',\n                         'horse-15',\n                         'kangaroo-2',\n                         'kangaroo-5',\n                         'kangaroo-11',\n                         'kangaroo-14',\n                         'leopard-1',\n                         'leopard-7',\n                         'leopard-16',\n                         'leopard-20',\n                         'lion-1',\n                         'lion-5',\n                         'lion-12',\n                         'lion-20',\n                         'lizard-1',\n                         'lizard-3',\n                         'lizard-6',\n                         'lizard-13',\n                         'microphone-2',\n                         'microphone-6',\n                         'microphone-14',\n                         'microphone-16',\n                         'monkey-3',\n                         'monkey-4',\n                         'monkey-9',\n                         'monkey-17',\n                         'shark-2',\n                         'shark-3',\n                         'shark-5',\n                         'shark-6',\n                         'squirrel-8',\n                         'squirrel-11',\n                         'squirrel-13',\n                         'squirrel-19',\n                         'volleyball-1',\n                         'volleyball-13',\n                         'volleyball-18',\n                         'volleyball-19']\n        return sequence_list\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/mobifacedataset.py",
    "content": "from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nimport glob\nimport numpy as np\nimport os.path as osp\nfrom collections import OrderedDict\nimport pandas as pd\n\n\nclass MobifaceDataset(BaseDataset):\n    \"\"\" Mobiface dataset.\n        Publication:\n            MobiFace: A Novel Dataset for Mobile Face Tracking in the Wild\n            Yiming Lin, Shiyang Cheng, Jie Shen, Maja Pantic\n            arXiv:1805.09749, 2018\n            https://arxiv.org/pdf/1805.09749v2\n\n        Download dataset from https://mobiface.github.io/\n    \"\"\"\n    def __init__(self, split):\n        \"\"\"\n        args:\n            split - Split to use. Can be i) 'train': official training set, ii) 'test': official test set, iii) 'all': whole dataset.\n        \"\"\"\n        super().__init__()\n        self.base_path = self.env_settings.mobiface_path\n        self.sequence_list = self._get_sequence_list(split)\n        self.split = split\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n    def _get_sequence_list(self, split):\n\n        self.train_meta_fn = osp.join(self.base_path, 'train.meta.csv')\n        self.test_meta_fn = osp.join(self.base_path, 'test.meta.csv')\n        self.train_meta = pd.read_csv(self.train_meta_fn,index_col=0).transpose().to_dict()\n        self.test_meta = pd.read_csv(self.test_meta_fn,index_col=0).transpose().to_dict()\n        if split == 'train':\n            self.meta = self.train_meta\n        elif split == 'test':\n            self.meta = self.test_meta\n        else:\n            self.meta = {**self.train_meta, **self.test_meta} # In Python 3.5 or greater\n        self.meta = OrderedDict(sorted(self.meta.items(), key=lambda t: t[0]))\n        self.anno_files = []\n        for k,v in self.meta.items():\n            if k in self.train_meta.keys():\n                self.anno_files.append(osp.abspath(osp.join(self.base_path,'train', k+'.annot.csv')))\n            else:\n                self.anno_files.append(osp.abspath(osp.join(self.base_path,'test', k+'.annot.csv')))\n        self.seq_names = sorted(list(self.meta.keys()))\n        self.seq_dirs = [fn[:-len('.annot.csv')] for fn in self.anno_files]\n        return self.seq_names\n\n    def _construct_sequence(self, sequence_name):\n        index = self.seq_names.index(sequence_name)\n        img_files = sorted(glob.glob(self.seq_dirs[index]+'/*.jpg'))\n        if len(img_files) == 0:\n            img_files = sorted(glob.glob(self.seq_dirs[index]+'.png'))\n        with open(self.anno_files[index], 'r') as f:\n            anno = np.loadtxt(f, delimiter=',', skiprows=1, dtype=int)\n        anno = anno[:,1:]\n        assert anno.shape[1] == 4\n\n        return Sequence(sequence_name, img_files, anno.reshape(-1, 4))\n\n    def __len__(self):\n        return len(self.sequence_list)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/multi_object_wrapper.py",
    "content": "import numpy as np\nfrom collections import OrderedDict\nimport time\nimport copy\n\n\nclass MultiObjectWrapper:\n    def __init__(self, base_tracker_class, params, visdom=None, fast_load=False):\n        self.base_tracker_class = base_tracker_class\n        self.params = params\n        self.visdom = visdom\n\n        self.initialized_ids = []\n        self.trackers = OrderedDict()\n\n        self.fast_load = fast_load\n        if self.fast_load:\n            self.tracker_copy = self.base_tracker_class(self.params)\n            if hasattr(self.tracker_copy, 'initialize_features'):\n                self.tracker_copy.initialize_features()\n\n    def create_tracker(self):\n        tracker = None\n        if self.fast_load:\n            try:\n                tracker = copy.deepcopy(self.tracker_copy)\n            except:\n                pass\n        if tracker is None:\n            tracker = self.base_tracker_class(self.params)\n        tracker.visdom = self.visdom\n        return tracker\n\n    def _split_info(self, info):\n        info_split = OrderedDict()\n        init_other = OrderedDict()              # Init other contains init info for all other objects\n        for obj_id in info['init_object_ids']:\n            info_split[obj_id] = dict()\n            init_other[obj_id] = dict()\n            info_split[obj_id]['object_ids'] = [obj_id]\n            info_split[obj_id]['sequence_object_ids'] = info['sequence_object_ids']\n            if 'init_bbox' in info:\n                info_split[obj_id]['init_bbox'] = info['init_bbox'][obj_id]\n                init_other[obj_id]['init_bbox'] = info['init_bbox'][obj_id]\n            if 'init_mask' in info:\n                info_split[obj_id]['init_mask'] = (info['init_mask'] == int(obj_id)).astype(np.uint8)\n                init_other[obj_id]['init_mask'] = info_split[obj_id]['init_mask']\n        for obj_info in info_split.values():\n            obj_info['init_other'] = init_other\n        return info_split\n\n    def _set_defaults(self, tracker_out: dict, defaults=None):\n        defaults = {} if defaults is None else defaults\n\n        for key, val in defaults.items():\n            if tracker_out.get(key) is None:\n                tracker_out[key] = val\n\n        return tracker_out\n\n    def default_merge(self, out_all):\n        out_merged = OrderedDict()\n\n        out_first = list(out_all.values())[0]\n        out_types = out_first.keys()\n\n        # Merge segmentation mask\n        if 'segmentation' in out_types and out_first['segmentation'] is not None:\n            # Stack all masks\n            # If a tracker outputs soft segmentation mask, use that. 
Else use the binary segmentation\n            segmentation_maps = [out.get('segmentation_soft', out['segmentation']) for out in out_all.values()]\n            segmentation_maps = np.stack(segmentation_maps)\n\n            obj_ids = np.array([0, *map(int, out_all.keys())], dtype=np.uint8)\n            segm_threshold = getattr(self.params, 'segmentation_threshold', 0.5)\n            merged_segmentation = obj_ids[np.where(segmentation_maps.max(axis=0) > segm_threshold,\n                                                   segmentation_maps.argmax(axis=0) + 1, 0)]\n\n            out_merged['segmentation'] = merged_segmentation\n\n        # Merge other fields\n        for key in out_types:\n            if key == 'segmentation':\n                pass\n            else:\n                out_merged[key] = {obj_id: out[key] for obj_id, out in out_all.items()}\n\n        return out_merged\n\n    def merge_outputs(self, out_all):\n        if hasattr(self.base_tracker_class, 'merge_results'):\n            out_merged = self.base_tracker_class.merge_results(out_all)\n        else:\n            out_merged = self.default_merge(out_all)\n\n        return out_merged\n\n    def initialize(self, image, info: dict) -> dict:\n        self.initialized_ids = []\n        self.trackers = OrderedDict()\n\n        if len(info['init_object_ids']) == 0:\n            return None\n\n        object_ids = info['object_ids']\n\n        init_info_split = self._split_info(info)\n        self.trackers = OrderedDict({obj_id: self.create_tracker() for obj_id in object_ids})\n\n        out_all = OrderedDict()\n        # Run individual trackers for each object\n        for obj_id in info['init_object_ids']:\n            start_time = time.time()\n            out = self.trackers[obj_id].initialize(image, init_info_split[obj_id])\n            if out is None:\n                out = {}\n\n            init_default = {'target_bbox': init_info_split[obj_id].get('init_bbox'),\n                            'time': time.time() - start_time,\n                            'segmentation': init_info_split[obj_id].get('init_mask')}\n\n            out = self._set_defaults(out, init_default)\n            out_all[obj_id] = out\n\n        # Merge results\n        out_merged = self.merge_outputs(out_all)\n\n        self.initialized_ids = info['init_object_ids'].copy()\n        return out_merged\n\n    def track(self, image, info: dict = None) -> dict:\n        if info is None:\n            info = {}\n\n        prev_output = info.get('previous_output', OrderedDict())\n\n        if info.get('init_object_ids', False):\n            init_info_split = self._split_info(info)\n            for obj_init_info in init_info_split.values():\n                obj_init_info['previous_output'] = prev_output\n\n            info['init_other'] = list(init_info_split.values())[0]['init_other']\n\n        out_all = OrderedDict()\n        for obj_id in self.initialized_ids:\n            start_time = time.time()\n\n            out = self.trackers[obj_id].track(image, info)\n\n            default = {'time': time.time() - start_time}\n            out = self._set_defaults(out, default)\n            out_all[obj_id] = out\n\n        # Initialize new\n        if info.get('init_object_ids', False):\n            for obj_id in info['init_object_ids']:\n                if not obj_id in self.trackers:\n                    self.trackers[obj_id] = self.create_tracker()\n\n                start_time = time.time()\n                out = self.trackers[obj_id].initialize(image, 
init_info_split[obj_id])\n                if out is None:\n                    out = {}\n\n                init_default = {'target_bbox': init_info_split[obj_id].get('init_bbox'),\n                                'time': time.time() - start_time,\n                                'segmentation': init_info_split[obj_id].get('init_mask')}\n\n                out = self._set_defaults(out, init_default)\n                out_all[obj_id] = out\n\n            self.initialized_ids.extend(info['init_object_ids'])\n\n        # Merge results\n        out_merged = self.merge_outputs(out_all)\n\n        return out_merged\n\n    def visdom_draw_tracking(self, image, box, segmentation):\n        if isinstance(box, (OrderedDict, dict)):\n            box = [v for k, v in box.items()]\n        else:\n            box = (box,)\n        if segmentation is None:\n            self.visdom.register((image, *box), 'Tracking', 1, 'Tracking')\n        else:\n            self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking')\n"
  },
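  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/_example_multi_object_wrapper_usage.py",
    "content": "# NOTE: hypothetical usage sketch, not part of the original repository.\n# It shows the tracker protocol MultiObjectWrapper relies on (initialize()/track()\n# returning dicts) and how per-object outputs come back merged. DummyTracker and\n# the boxes are placeholders; a real tracker would estimate 'target_bbox' itself.\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom pytracking.evaluation.multi_object_wrapper import MultiObjectWrapper\n\n\nclass DummyTracker:\n    \"\"\"Minimal tracker satisfying the interface MultiObjectWrapper expects.\"\"\"\n    def __init__(self, params):\n        self.params = params\n\n    def initialize(self, image, info):\n        self.box = info['init_bbox']      # single-object bbox after _split_info()\n        return {'target_bbox': self.box}\n\n    def track(self, image, info=None):\n        return {'target_bbox': self.box}  # a real tracker would update the box\n\n\nimage = np.zeros((240, 320, 3), dtype=np.uint8)\ninfo = {'object_ids': ['1', '2'],\n        'init_object_ids': ['1', '2'],\n        'sequence_object_ids': ['1', '2'],\n        'init_bbox': OrderedDict([('1', [10, 10, 50, 80]), ('2', [100, 40, 30, 60])])}\n\nwrapper = MultiObjectWrapper(DummyTracker, params=None)\nout = wrapper.initialize(image, info)\nprint(out['target_bbox'])   # {'1': [10, 10, 50, 80], '2': [100, 40, 30, 60]}\nout = wrapper.track(image)\nprint(out['time'])          # per-object runtimes in seconds\n"
  },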
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/nfsdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom pytracking.utils.load_text import load_text\n\n\nclass NFSDataset(BaseDataset):\n    \"\"\" NFS dataset.\n\n    Publication:\n        Need for Speed: A Benchmark for Higher Frame Rate Object Tracking\n        H. Kiani Galoogahi, A. Fagg, C. Huang, D. Ramanan, and S.Lucey\n        ICCV, 2017\n        http://openaccess.thecvf.com/content_ICCV_2017/papers/Galoogahi_Need_for_Speed_ICCV_2017_paper.pdf\n\n    Download the dataset from http://ci2cv.net/nfs/index.html\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.nfs_path\n        self.sequence_info_list = self._get_sequence_info_list()\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])\n\n    def _construct_sequence(self, sequence_info):\n        sequence_path = sequence_info['path']\n        nz = sequence_info['nz']\n        ext = sequence_info['ext']\n        start_frame = sequence_info['startFrame']\n        end_frame = sequence_info['endFrame']\n\n        init_omit = 0\n        if 'initOmit' in sequence_info:\n            init_omit = sequence_info['initOmit']\n\n        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, \n        sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]\n\n        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])\n\n        ground_truth_rect = load_text(str(anno_path), delimiter='\\t', dtype=np.float64)\n\n        return Sequence(sequence_info['name'], frames, 'nfs', ground_truth_rect[init_omit:,:],\n                        object_class=sequence_info['object_class'])\n\n    def __len__(self):\n        return len(self.sequence_info_list)\n\n    def _get_sequence_info_list(self):\n        sequence_info_list = [\n            {\"name\": \"nfs_Gymnastics\", \"path\": \"sequences/Gymnastics\", \"startFrame\": 1, \"endFrame\": 368, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_Gymnastics.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_MachLoop_jet\", \"path\": \"sequences/MachLoop_jet\", \"startFrame\": 1, \"endFrame\": 99, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_MachLoop_jet.txt\", \"object_class\": \"aircraft\", 'occlusion': False},\n            {\"name\": \"nfs_Skiing_red\", \"path\": \"sequences/Skiing_red\", \"startFrame\": 1, \"endFrame\": 69, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_Skiing_red.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_Skydiving\", \"path\": \"sequences/Skydiving\", \"startFrame\": 1, \"endFrame\": 196, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_Skydiving.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_airboard_1\", \"path\": \"sequences/airboard_1\", \"startFrame\": 1, \"endFrame\": 425, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_airboard_1.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_airplane_landing\", \"path\": \"sequences/airplane_landing\", \"startFrame\": 1, \"endFrame\": 81, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_airplane_landing.txt\", \"object_class\": \"aircraft\", 'occlusion': False},\n            {\"name\": \"nfs_airtable_3\", \"path\": \"sequences/airtable_3\", 
\"startFrame\": 1, \"endFrame\": 482, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_airtable_3.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_1\", \"path\": \"sequences/basketball_1\", \"startFrame\": 1, \"endFrame\": 282, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_1.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_2\", \"path\": \"sequences/basketball_2\", \"startFrame\": 1, \"endFrame\": 102, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_2.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_3\", \"path\": \"sequences/basketball_3\", \"startFrame\": 1, \"endFrame\": 421, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_3.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_6\", \"path\": \"sequences/basketball_6\", \"startFrame\": 1, \"endFrame\": 224, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_6.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_7\", \"path\": \"sequences/basketball_7\", \"startFrame\": 1, \"endFrame\": 240, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_7.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_basketball_player\", \"path\": \"sequences/basketball_player\", \"startFrame\": 1, \"endFrame\": 369, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_player.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_basketball_player_2\", \"path\": \"sequences/basketball_player_2\", \"startFrame\": 1, \"endFrame\": 437, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_player_2.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_beach_flipback_person\", \"path\": \"sequences/beach_flipback_person\", \"startFrame\": 1, \"endFrame\": 61, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_beach_flipback_person.txt\", \"object_class\": \"person head\", 'occlusion': False},\n            {\"name\": \"nfs_bee\", \"path\": \"sequences/bee\", \"startFrame\": 1, \"endFrame\": 45, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bee.txt\", \"object_class\": \"insect\", 'occlusion': False},\n            {\"name\": \"nfs_biker_acrobat\", \"path\": \"sequences/biker_acrobat\", \"startFrame\": 1, \"endFrame\": 128, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_acrobat.txt\", \"object_class\": \"bicycle\", 'occlusion': False},\n            {\"name\": \"nfs_biker_all_1\", \"path\": \"sequences/biker_all_1\", \"startFrame\": 1, \"endFrame\": 113, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_all_1.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_biker_head_2\", \"path\": \"sequences/biker_head_2\", \"startFrame\": 1, \"endFrame\": 132, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_head_2.txt\", \"object_class\": \"person head\", 'occlusion': False},\n            {\"name\": \"nfs_biker_head_3\", \"path\": \"sequences/biker_head_3\", \"startFrame\": 1, \"endFrame\": 254, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_head_3.txt\", \"object_class\": \"person head\", 'occlusion': False},\n            {\"name\": \"nfs_biker_upper_body\", \"path\": \"sequences/biker_upper_body\", \"startFrame\": 1, \"endFrame\": 194, 
\"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_upper_body.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_biker_whole_body\", \"path\": \"sequences/biker_whole_body\", \"startFrame\": 1, \"endFrame\": 572, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_whole_body.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_billiard_2\", \"path\": \"sequences/billiard_2\", \"startFrame\": 1, \"endFrame\": 604, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_2.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_billiard_3\", \"path\": \"sequences/billiard_3\", \"startFrame\": 1, \"endFrame\": 698, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_3.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_billiard_6\", \"path\": \"sequences/billiard_6\", \"startFrame\": 1, \"endFrame\": 771, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_6.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_billiard_7\", \"path\": \"sequences/billiard_7\", \"startFrame\": 1, \"endFrame\": 724, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_7.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_billiard_8\", \"path\": \"sequences/billiard_8\", \"startFrame\": 1, \"endFrame\": 778, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_8.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_bird_2\", \"path\": \"sequences/bird_2\", \"startFrame\": 1, \"endFrame\": 476, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bird_2.txt\", \"object_class\": \"bird\", 'occlusion': False},\n            {\"name\": \"nfs_book\", \"path\": \"sequences/book\", \"startFrame\": 1, \"endFrame\": 288, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_book.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_bottle\", \"path\": \"sequences/bottle\", \"startFrame\": 1, \"endFrame\": 2103, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bottle.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_bowling_1\", \"path\": \"sequences/bowling_1\", \"startFrame\": 1, \"endFrame\": 303, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_1.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_bowling_2\", \"path\": \"sequences/bowling_2\", \"startFrame\": 1, \"endFrame\": 710, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_2.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_bowling_3\", \"path\": \"sequences/bowling_3\", \"startFrame\": 1, \"endFrame\": 271, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_3.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_bowling_6\", \"path\": \"sequences/bowling_6\", \"startFrame\": 1, \"endFrame\": 260, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_6.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_bowling_ball\", \"path\": \"sequences/bowling_ball\", \"startFrame\": 1, \"endFrame\": 275, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_ball.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_bunny\", \"path\": \"sequences/bunny\", \"startFrame\": 1, 
\"endFrame\": 705, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bunny.txt\", \"object_class\": \"mammal\", 'occlusion': False},\n            {\"name\": \"nfs_car\", \"path\": \"sequences/car\", \"startFrame\": 1, \"endFrame\": 2020, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car.txt\", \"object_class\": \"car\", 'occlusion': True},\n            {\"name\": \"nfs_car_camaro\", \"path\": \"sequences/car_camaro\", \"startFrame\": 1, \"endFrame\": 36, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_camaro.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_drifting\", \"path\": \"sequences/car_drifting\", \"startFrame\": 1, \"endFrame\": 173, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_drifting.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_jumping\", \"path\": \"sequences/car_jumping\", \"startFrame\": 1, \"endFrame\": 22, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_jumping.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_rc_rolling\", \"path\": \"sequences/car_rc_rolling\", \"startFrame\": 1, \"endFrame\": 62, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_rc_rolling.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_rc_rotating\", \"path\": \"sequences/car_rc_rotating\", \"startFrame\": 1, \"endFrame\": 80, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_rc_rotating.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_side\", \"path\": \"sequences/car_side\", \"startFrame\": 1, \"endFrame\": 108, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_side.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_white\", \"path\": \"sequences/car_white\", \"startFrame\": 1, \"endFrame\": 2063, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_white.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_cheetah\", \"path\": \"sequences/cheetah\", \"startFrame\": 1, \"endFrame\": 167, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_cheetah.txt\", \"object_class\": \"mammal\", 'occlusion': True},\n            {\"name\": \"nfs_cup\", \"path\": \"sequences/cup\", \"startFrame\": 1, \"endFrame\": 1281, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_cup.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_cup_2\", \"path\": \"sequences/cup_2\", \"startFrame\": 1, \"endFrame\": 182, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_cup_2.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_dog\", \"path\": \"sequences/dog\", \"startFrame\": 1, \"endFrame\": 1030, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dog.txt\", \"object_class\": \"dog\", 'occlusion': True},\n            {\"name\": \"nfs_dog_1\", \"path\": \"sequences/dog_1\", \"startFrame\": 1, \"endFrame\": 168, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dog_1.txt\", \"object_class\": \"dog\", 'occlusion': False},\n            {\"name\": \"nfs_dog_2\", \"path\": \"sequences/dog_2\", \"startFrame\": 1, \"endFrame\": 594, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dog_2.txt\", \"object_class\": \"dog\", 'occlusion': True},\n            {\"name\": \"nfs_dog_3\", \"path\": \"sequences/dog_3\", \"startFrame\": 1, \"endFrame\": 200, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": 
\"anno/nfs_dog_3.txt\", \"object_class\": \"dog\", 'occlusion': False},\n            {\"name\": \"nfs_dogs\", \"path\": \"sequences/dogs\", \"startFrame\": 1, \"endFrame\": 198, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dogs.txt\", \"object_class\": \"dog\", 'occlusion': True},\n            {\"name\": \"nfs_dollar\", \"path\": \"sequences/dollar\", \"startFrame\": 1, \"endFrame\": 1426, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dollar.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_drone\", \"path\": \"sequences/drone\", \"startFrame\": 1, \"endFrame\": 70, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_drone.txt\", \"object_class\": \"aircraft\", 'occlusion': False},\n            {\"name\": \"nfs_ducks_lake\", \"path\": \"sequences/ducks_lake\", \"startFrame\": 1, \"endFrame\": 107, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_ducks_lake.txt\", \"object_class\": \"bird\", 'occlusion': False},\n            {\"name\": \"nfs_exit\", \"path\": \"sequences/exit\", \"startFrame\": 1, \"endFrame\": 359, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_exit.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_first\", \"path\": \"sequences/first\", \"startFrame\": 1, \"endFrame\": 435, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_first.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_flower\", \"path\": \"sequences/flower\", \"startFrame\": 1, \"endFrame\": 448, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_flower.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_footbal_skill\", \"path\": \"sequences/footbal_skill\", \"startFrame\": 1, \"endFrame\": 131, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_footbal_skill.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_helicopter\", \"path\": \"sequences/helicopter\", \"startFrame\": 1, \"endFrame\": 310, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_helicopter.txt\", \"object_class\": \"aircraft\", 'occlusion': False},\n            {\"name\": \"nfs_horse_jumping\", \"path\": \"sequences/horse_jumping\", \"startFrame\": 1, \"endFrame\": 117, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_horse_jumping.txt\", \"object_class\": \"horse\", 'occlusion': True},\n            {\"name\": \"nfs_horse_running\", \"path\": \"sequences/horse_running\", \"startFrame\": 1, \"endFrame\": 139, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_horse_running.txt\", \"object_class\": \"horse\", 'occlusion': False},\n            {\"name\": \"nfs_iceskating_6\", \"path\": \"sequences/iceskating_6\", \"startFrame\": 1, \"endFrame\": 603, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_iceskating_6.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_jellyfish_5\", \"path\": \"sequences/jellyfish_5\", \"startFrame\": 1, \"endFrame\": 746, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_jellyfish_5.txt\", \"object_class\": \"invertebrate\", 'occlusion': False},\n            {\"name\": \"nfs_kid_swing\", \"path\": \"sequences/kid_swing\", \"startFrame\": 1, \"endFrame\": 169, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_kid_swing.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_motorcross\", \"path\": \"sequences/motorcross\", \"startFrame\": 1, \"endFrame\": 39, \"nz\": 5, \"ext\": \"jpg\", 
\"anno_path\": \"anno/nfs_motorcross.txt\", \"object_class\": \"vehicle\", 'occlusion': True},\n            {\"name\": \"nfs_motorcross_kawasaki\", \"path\": \"sequences/motorcross_kawasaki\", \"startFrame\": 1, \"endFrame\": 65, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_motorcross_kawasaki.txt\", \"object_class\": \"vehicle\", 'occlusion': False},\n            {\"name\": \"nfs_parkour\", \"path\": \"sequences/parkour\", \"startFrame\": 1, \"endFrame\": 58, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_parkour.txt\", \"object_class\": \"person head\", 'occlusion': False},\n            {\"name\": \"nfs_person_scooter\", \"path\": \"sequences/person_scooter\", \"startFrame\": 1, \"endFrame\": 413, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_person_scooter.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_pingpong_2\", \"path\": \"sequences/pingpong_2\", \"startFrame\": 1, \"endFrame\": 1277, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_pingpong_2.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_pingpong_7\", \"path\": \"sequences/pingpong_7\", \"startFrame\": 1, \"endFrame\": 1290, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_pingpong_7.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_pingpong_8\", \"path\": \"sequences/pingpong_8\", \"startFrame\": 1, \"endFrame\": 296, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_pingpong_8.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_purse\", \"path\": \"sequences/purse\", \"startFrame\": 1, \"endFrame\": 968, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_purse.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_rubber\", \"path\": \"sequences/rubber\", \"startFrame\": 1, \"endFrame\": 1328, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_rubber.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_running\", \"path\": \"sequences/running\", \"startFrame\": 1, \"endFrame\": 677, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_running.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_running_100_m\", \"path\": \"sequences/running_100_m\", \"startFrame\": 1, \"endFrame\": 313, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_running_100_m.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_running_100_m_2\", \"path\": \"sequences/running_100_m_2\", \"startFrame\": 1, \"endFrame\": 337, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_running_100_m_2.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_running_2\", \"path\": \"sequences/running_2\", \"startFrame\": 1, \"endFrame\": 363, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_running_2.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_shuffleboard_1\", \"path\": \"sequences/shuffleboard_1\", \"startFrame\": 1, \"endFrame\": 42, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_1.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffleboard_2\", \"path\": \"sequences/shuffleboard_2\", \"startFrame\": 1, \"endFrame\": 41, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_2.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": 
\"nfs_shuffleboard_4\", \"path\": \"sequences/shuffleboard_4\", \"startFrame\": 1, \"endFrame\": 62, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_4.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffleboard_5\", \"path\": \"sequences/shuffleboard_5\", \"startFrame\": 1, \"endFrame\": 32, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_5.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffleboard_6\", \"path\": \"sequences/shuffleboard_6\", \"startFrame\": 1, \"endFrame\": 52, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_6.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffletable_2\", \"path\": \"sequences/shuffletable_2\", \"startFrame\": 1, \"endFrame\": 372, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffletable_2.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffletable_3\", \"path\": \"sequences/shuffletable_3\", \"startFrame\": 1, \"endFrame\": 368, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffletable_3.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffletable_4\", \"path\": \"sequences/shuffletable_4\", \"startFrame\": 1, \"endFrame\": 101, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffletable_4.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_ski_long\", \"path\": \"sequences/ski_long\", \"startFrame\": 1, \"endFrame\": 274, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_ski_long.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_ball\", \"path\": \"sequences/soccer_ball\", \"startFrame\": 1, \"endFrame\": 163, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_ball.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_ball_2\", \"path\": \"sequences/soccer_ball_2\", \"startFrame\": 1, \"endFrame\": 1934, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_ball_2.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_ball_3\", \"path\": \"sequences/soccer_ball_3\", \"startFrame\": 1, \"endFrame\": 1381, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_ball_3.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_player_2\", \"path\": \"sequences/soccer_player_2\", \"startFrame\": 1, \"endFrame\": 475, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_player_2.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_player_3\", \"path\": \"sequences/soccer_player_3\", \"startFrame\": 1, \"endFrame\": 319, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_player_3.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_stop_sign\", \"path\": \"sequences/stop_sign\", \"startFrame\": 1, \"endFrame\": 302, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_stop_sign.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_suv\", \"path\": \"sequences/suv\", \"startFrame\": 1, \"endFrame\": 2584, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_suv.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_tiger\", \"path\": \"sequences/tiger\", \"startFrame\": 1, \"endFrame\": 1556, 
\"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_tiger.txt\", \"object_class\": \"mammal\", 'occlusion': False},\n            {\"name\": \"nfs_walking\", \"path\": \"sequences/walking\", \"startFrame\": 1, \"endFrame\": 555, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_walking.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_walking_3\", \"path\": \"sequences/walking_3\", \"startFrame\": 1, \"endFrame\": 1427, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_walking_3.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_water_ski_2\", \"path\": \"sequences/water_ski_2\", \"startFrame\": 1, \"endFrame\": 47, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_water_ski_2.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_yoyo\", \"path\": \"sequences/yoyo\", \"startFrame\": 1, \"endFrame\": 67, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_yoyo.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_zebra_fish\", \"path\": \"sequences/zebra_fish\", \"startFrame\": 1, \"endFrame\": 671, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_zebra_fish.txt\", \"object_class\": \"fish\", 'occlusion': False},\n        ]\n\n        return sequence_info_list\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/otbdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom pytracking.utils.load_text import load_text\n\n\nclass OTBDataset(BaseDataset):\n    \"\"\" OTB-2015 dataset\n\n    Publication:\n        Object Tracking Benchmark\n        Wu, Yi, Jongwoo Lim, and Ming-hsuan Yan\n        TPAMI, 2015\n        http://faculty.ucmerced.edu/mhyang/papers/pami15_tracking_benchmark.pdf\n\n    Download the dataset from http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.otb_path\n        self.sequence_info_list = self._get_sequence_info_list()\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])\n\n    def _construct_sequence(self, sequence_info):\n        sequence_path = sequence_info['path']\n        nz = sequence_info['nz']\n        ext = sequence_info['ext']\n        start_frame = sequence_info['startFrame']\n        end_frame = sequence_info['endFrame']\n\n        init_omit = 0\n        if 'initOmit' in sequence_info:\n            init_omit = sequence_info['initOmit']\n\n        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, \n        sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]\n\n        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])\n\n        # NOTE: OTB has some weird annos which panda cannot handle\n        ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy')\n\n        return Sequence(sequence_info['name'], frames, 'otb', ground_truth_rect[init_omit:,:],\n                        object_class=sequence_info['object_class'])\n\n    def __len__(self):\n        return len(self.sequence_info_list)\n\n    def _get_sequence_info_list(self):\n        sequence_info_list = [\n            {\"name\": \"Basketball\", \"path\": \"Basketball/img\", \"startFrame\": 1, \"endFrame\": 725, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Basketball/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Biker\", \"path\": \"Biker/img\", \"startFrame\": 1, \"endFrame\": 142, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Biker/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Bird1\", \"path\": \"Bird1/img\", \"startFrame\": 1, \"endFrame\": 408, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Bird1/groundtruth_rect.txt\",\n             \"object_class\": \"bird\"},\n            {\"name\": \"Bird2\", \"path\": \"Bird2/img\", \"startFrame\": 1, \"endFrame\": 99, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Bird2/groundtruth_rect.txt\",\n             \"object_class\": \"bird\"},\n            {\"name\": \"BlurBody\", \"path\": \"BlurBody/img\", \"startFrame\": 1, \"endFrame\": 334, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurBody/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"BlurCar1\", \"path\": \"BlurCar1/img\", \"startFrame\": 247, \"endFrame\": 988, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurCar1/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"BlurCar2\", \"path\": \"BlurCar2/img\", \"startFrame\": 1, \"endFrame\": 585, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": 
\"BlurCar2/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"BlurCar3\", \"path\": \"BlurCar3/img\", \"startFrame\": 3, \"endFrame\": 359, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurCar3/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"BlurCar4\", \"path\": \"BlurCar4/img\", \"startFrame\": 18, \"endFrame\": 397, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurCar4/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"BlurFace\", \"path\": \"BlurFace/img\", \"startFrame\": 1, \"endFrame\": 493, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurFace/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"BlurOwl\", \"path\": \"BlurOwl/img\", \"startFrame\": 1, \"endFrame\": 631, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurOwl/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Board\", \"path\": \"Board/img\", \"startFrame\": 1, \"endFrame\": 698, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"Board/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Bolt\", \"path\": \"Bolt/img\", \"startFrame\": 1, \"endFrame\": 350, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Bolt/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Bolt2\", \"path\": \"Bolt2/img\", \"startFrame\": 1, \"endFrame\": 293, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Bolt2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Box\", \"path\": \"Box/img\", \"startFrame\": 1, \"endFrame\": 1161, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Box/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Boy\", \"path\": \"Boy/img\", \"startFrame\": 1, \"endFrame\": 602, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Boy/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Car1\", \"path\": \"Car1/img\", \"startFrame\": 1, \"endFrame\": 1020, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Car1/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"Car2\", \"path\": \"Car2/img\", \"startFrame\": 1, \"endFrame\": 913, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Car2/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"Car24\", \"path\": \"Car24/img\", \"startFrame\": 1, \"endFrame\": 3059, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Car24/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"Car4\", \"path\": \"Car4/img\", \"startFrame\": 1, \"endFrame\": 659, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Car4/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"CarDark\", \"path\": \"CarDark/img\", \"startFrame\": 1, \"endFrame\": 393, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"CarDark/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"CarScale\", \"path\": \"CarScale/img\", \"startFrame\": 1, \"endFrame\": 252, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"CarScale/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"ClifBar\", \"path\": \"ClifBar/img\", \"startFrame\": 1, \"endFrame\": 472, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"ClifBar/groundtruth_rect.txt\",\n             
\"object_class\": \"other\"},\n            {\"name\": \"Coke\", \"path\": \"Coke/img\", \"startFrame\": 1, \"endFrame\": 291, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Coke/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Couple\", \"path\": \"Couple/img\", \"startFrame\": 1, \"endFrame\": 140, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Couple/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Coupon\", \"path\": \"Coupon/img\", \"startFrame\": 1, \"endFrame\": 327, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Coupon/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Crossing\", \"path\": \"Crossing/img\", \"startFrame\": 1, \"endFrame\": 120, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Crossing/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Crowds\", \"path\": \"Crowds/img\", \"startFrame\": 1, \"endFrame\": 347, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Crowds/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Dancer\", \"path\": \"Dancer/img\", \"startFrame\": 1, \"endFrame\": 225, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dancer/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Dancer2\", \"path\": \"Dancer2/img\", \"startFrame\": 1, \"endFrame\": 150, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dancer2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"David\", \"path\": \"David/img\", \"startFrame\": 300, \"endFrame\": 770, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"David/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"David2\", \"path\": \"David2/img\", \"startFrame\": 1, \"endFrame\": 537, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"David2/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"David3\", \"path\": \"David3/img\", \"startFrame\": 1, \"endFrame\": 252, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"David3/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Deer\", \"path\": \"Deer/img\", \"startFrame\": 1, \"endFrame\": 71, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Deer/groundtruth_rect.txt\",\n             \"object_class\": \"mammal\"},\n            {\"name\": \"Diving\", \"path\": \"Diving/img\", \"startFrame\": 1, \"endFrame\": 215, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Diving/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Dog\", \"path\": \"Dog/img\", \"startFrame\": 1, \"endFrame\": 127, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dog/groundtruth_rect.txt\",\n             \"object_class\": \"dog\"},\n            {\"name\": \"Dog1\", \"path\": \"Dog1/img\", \"startFrame\": 1, \"endFrame\": 1350, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dog1/groundtruth_rect.txt\",\n             \"object_class\": \"dog\"},\n            {\"name\": \"Doll\", \"path\": \"Doll/img\", \"startFrame\": 1, \"endFrame\": 3872, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Doll/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"DragonBaby\", \"path\": \"DragonBaby/img\", \"startFrame\": 1, \"endFrame\": 113, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"DragonBaby/groundtruth_rect.txt\",\n             \"object_class\": 
\"face\"},\n            {\"name\": \"Dudek\", \"path\": \"Dudek/img\", \"startFrame\": 1, \"endFrame\": 1145, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dudek/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"FaceOcc1\", \"path\": \"FaceOcc1/img\", \"startFrame\": 1, \"endFrame\": 892, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"FaceOcc1/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"FaceOcc2\", \"path\": \"FaceOcc2/img\", \"startFrame\": 1, \"endFrame\": 812, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"FaceOcc2/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Fish\", \"path\": \"Fish/img\", \"startFrame\": 1, \"endFrame\": 476, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Fish/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"FleetFace\", \"path\": \"FleetFace/img\", \"startFrame\": 1, \"endFrame\": 707, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"FleetFace/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Football\", \"path\": \"Football/img\", \"startFrame\": 1, \"endFrame\": 362, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Football/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Football1\", \"path\": \"Football1/img\", \"startFrame\": 1, \"endFrame\": 74, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Football1/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Freeman1\", \"path\": \"Freeman1/img\", \"startFrame\": 1, \"endFrame\": 326, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Freeman1/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Freeman3\", \"path\": \"Freeman3/img\", \"startFrame\": 1, \"endFrame\": 460, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Freeman3/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Freeman4\", \"path\": \"Freeman4/img\", \"startFrame\": 1, \"endFrame\": 283, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Freeman4/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Girl\", \"path\": \"Girl/img\", \"startFrame\": 1, \"endFrame\": 500, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Girl/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Girl2\", \"path\": \"Girl2/img\", \"startFrame\": 1, \"endFrame\": 1500, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Girl2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Gym\", \"path\": \"Gym/img\", \"startFrame\": 1, \"endFrame\": 767, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Gym/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human2\", \"path\": \"Human2/img\", \"startFrame\": 1, \"endFrame\": 1128, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human3\", \"path\": \"Human3/img\", \"startFrame\": 1, \"endFrame\": 1698, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human3/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human4_2\", \"path\": \"Human4/img\", \"startFrame\": 1, \"endFrame\": 667, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human4/groundtruth_rect.2.txt\",\n             
\"object_class\": \"person\"},\n            {\"name\": \"Human5\", \"path\": \"Human5/img\", \"startFrame\": 1, \"endFrame\": 713, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human5/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human6\", \"path\": \"Human6/img\", \"startFrame\": 1, \"endFrame\": 792, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human6/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human7\", \"path\": \"Human7/img\", \"startFrame\": 1, \"endFrame\": 250, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human7/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human8\", \"path\": \"Human8/img\", \"startFrame\": 1, \"endFrame\": 128, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human8/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human9\", \"path\": \"Human9/img\", \"startFrame\": 1, \"endFrame\": 305, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human9/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Ironman\", \"path\": \"Ironman/img\", \"startFrame\": 1, \"endFrame\": 166, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Ironman/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Jogging_1\", \"path\": \"Jogging/img\", \"startFrame\": 1, \"endFrame\": 307, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Jogging/groundtruth_rect.1.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Jogging_2\", \"path\": \"Jogging/img\", \"startFrame\": 1, \"endFrame\": 307, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Jogging/groundtruth_rect.2.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Jump\", \"path\": \"Jump/img\", \"startFrame\": 1, \"endFrame\": 122, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Jump/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Jumping\", \"path\": \"Jumping/img\", \"startFrame\": 1, \"endFrame\": 313, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Jumping/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"KiteSurf\", \"path\": \"KiteSurf/img\", \"startFrame\": 1, \"endFrame\": 84, \"nz\": 4, \"ext\": \"png\", \"anno_path\": \"KiteSurf/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Lemming\", \"path\": \"Lemming/img\", \"startFrame\": 1, \"endFrame\": 1336, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Lemming/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Liquor\", \"path\": \"Liquor/img\", \"startFrame\": 1, \"endFrame\": 1741, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Liquor/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Man\", \"path\": \"Man/img\", \"startFrame\": 1, \"endFrame\": 134, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Man/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Matrix\", \"path\": \"Matrix/img\", \"startFrame\": 1, \"endFrame\": 100, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Matrix/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Mhyang\", \"path\": \"Mhyang/img\", \"startFrame\": 1, \"endFrame\": 1490, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": 
\"Mhyang/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"MotorRolling\", \"path\": \"MotorRolling/img\", \"startFrame\": 1, \"endFrame\": 164, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"MotorRolling/groundtruth_rect.txt\",\n             \"object_class\": \"vehicle\"},\n            {\"name\": \"MountainBike\", \"path\": \"MountainBike/img\", \"startFrame\": 1, \"endFrame\": 228, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"MountainBike/groundtruth_rect.txt\",\n             \"object_class\": \"bicycle\"},\n            {\"name\": \"Panda\", \"path\": \"Panda/img\", \"startFrame\": 1, \"endFrame\": 1000, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Panda/groundtruth_rect.txt\",\n             \"object_class\": \"mammal\"},\n            {\"name\": \"RedTeam\", \"path\": \"RedTeam/img\", \"startFrame\": 1, \"endFrame\": 1918, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"RedTeam/groundtruth_rect.txt\",\n             \"object_class\": \"vehicle\"},\n            {\"name\": \"Rubik\", \"path\": \"Rubik/img\", \"startFrame\": 1, \"endFrame\": 1997, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Rubik/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Shaking\", \"path\": \"Shaking/img\", \"startFrame\": 1, \"endFrame\": 365, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Shaking/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Singer1\", \"path\": \"Singer1/img\", \"startFrame\": 1, \"endFrame\": 351, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Singer1/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Singer2\", \"path\": \"Singer2/img\", \"startFrame\": 1, \"endFrame\": 366, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Singer2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skater\", \"path\": \"Skater/img\", \"startFrame\": 1, \"endFrame\": 160, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skater/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skater2\", \"path\": \"Skater2/img\", \"startFrame\": 1, \"endFrame\": 435, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skater2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skating1\", \"path\": \"Skating1/img\", \"startFrame\": 1, \"endFrame\": 400, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skating1/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skating2_1\", \"path\": \"Skating2/img\", \"startFrame\": 1, \"endFrame\": 473, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skating2/groundtruth_rect.1.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skating2_2\", \"path\": \"Skating2/img\", \"startFrame\": 1, \"endFrame\": 473, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skating2/groundtruth_rect.2.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skiing\", \"path\": \"Skiing/img\", \"startFrame\": 1, \"endFrame\": 81, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skiing/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Soccer\", \"path\": \"Soccer/img\", \"startFrame\": 1, \"endFrame\": 392, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Soccer/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Subway\", \"path\": \"Subway/img\", \"startFrame\": 
1, \"endFrame\": 175, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Subway/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Surfer\", \"path\": \"Surfer/img\", \"startFrame\": 1, \"endFrame\": 376, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Surfer/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Suv\", \"path\": \"Suv/img\", \"startFrame\": 1, \"endFrame\": 945, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Suv/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"Sylvester\", \"path\": \"Sylvester/img\", \"startFrame\": 1, \"endFrame\": 1345, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Sylvester/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Tiger1\", \"path\": \"Tiger1/img\", \"startFrame\": 1, \"endFrame\": 354, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Tiger1/groundtruth_rect.txt\", \"initOmit\": 5,\n             \"object_class\": \"other\"},\n            {\"name\": \"Tiger2\", \"path\": \"Tiger2/img\", \"startFrame\": 1, \"endFrame\": 365, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Tiger2/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Toy\", \"path\": \"Toy/img\", \"startFrame\": 1, \"endFrame\": 271, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Toy/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Trans\", \"path\": \"Trans/img\", \"startFrame\": 1, \"endFrame\": 124, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Trans/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Trellis\", \"path\": \"Trellis/img\", \"startFrame\": 1, \"endFrame\": 569, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Trellis/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Twinnings\", \"path\": \"Twinnings/img\", \"startFrame\": 1, \"endFrame\": 472, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Twinnings/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Vase\", \"path\": \"Vase/img\", \"startFrame\": 1, \"endFrame\": 271, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Vase/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Walking\", \"path\": \"Walking/img\", \"startFrame\": 1, \"endFrame\": 412, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Walking/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Walking2\", \"path\": \"Walking2/img\", \"startFrame\": 1, \"endFrame\": 500, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Walking2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Woman\", \"path\": \"Woman/img\", \"startFrame\": 1, \"endFrame\": 597, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Woman/groundtruth_rect.txt\",\n             \"object_class\": \"person\"}\n        ]\n    \n        return sequence_info_list\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/running.py",
    "content": "import numpy as np\nimport multiprocessing\nimport os\nimport sys\nfrom itertools import product\nfrom collections import OrderedDict\nfrom pytracking.evaluation import Sequence, Tracker\nfrom ltr.data.image_loader import imwrite_indexed\n\n\ndef _save_tracker_output(seq: Sequence, tracker: Tracker, output: dict):\n    \"\"\"Saves the output of the tracker.\"\"\"\n\n    if not os.path.exists(tracker.results_dir):\n        os.makedirs(tracker.results_dir)\n\n    base_results_path = os.path.join(tracker.results_dir, seq.name)\n    segmentation_path = os.path.join(tracker.segmentation_dir, seq.name)\n\n    frame_names = [os.path.splitext(os.path.basename(f))[0] for f in seq.frames]\n\n    def save_bb(file, data):\n        tracked_bb = np.array(data).astype(int)\n        np.savetxt(file, tracked_bb, delimiter='\\t', fmt='%d')\n\n    def save_time(file, data):\n        exec_times = np.array(data).astype(float)\n        np.savetxt(file, exec_times, delimiter='\\t', fmt='%f')\n\n    def _convert_dict(input_dict):\n        data_dict = {}\n        for elem in input_dict:\n            for k, v in elem.items():\n                if k in data_dict.keys():\n                    data_dict[k].append(v)\n                else:\n                    data_dict[k] = [v, ]\n        return data_dict\n\n    for key, data in output.items():\n        # If data is empty\n        if not data:\n            continue\n\n        if key == 'target_bbox':\n            if isinstance(data[0], (dict, OrderedDict)):\n                data_dict = _convert_dict(data)\n\n                for obj_id, d in data_dict.items():\n                    bbox_file = '{}_{}.txt'.format(base_results_path, obj_id)\n                    save_bb(bbox_file, d)\n            else:\n                # Single-object mode\n                bbox_file = '{}.txt'.format(base_results_path)\n                save_bb(bbox_file, data)\n\n        elif key == 'time':\n            if isinstance(data[0], dict):\n                data_dict = _convert_dict(data)\n\n                for obj_id, d in data_dict.items():\n                    timings_file = '{}_{}_time.txt'.format(base_results_path, obj_id)\n                    save_time(timings_file, d)\n            else:\n                timings_file = '{}_time.txt'.format(base_results_path)\n                save_time(timings_file, data)\n\n        elif key == 'segmentation':\n            assert len(frame_names) == len(data)\n            if not os.path.exists(segmentation_path):\n                os.makedirs(segmentation_path)\n            for frame_name, frame_seg in zip(frame_names, data):\n                imwrite_indexed(os.path.join(segmentation_path, '{}.png'.format(frame_name)), frame_seg)\n\n\ndef run_sequence(seq: Sequence, tracker: Tracker, debug=False, visdom_info=None):\n    \"\"\"Runs a tracker on a sequence.\"\"\"\n\n    def _results_exist():\n        if seq.object_ids is None:\n            bbox_file = '{}/{}.txt'.format(tracker.results_dir, seq.name)\n            return os.path.isfile(bbox_file)\n        else:\n            bbox_files = ['{}/{}_{}.txt'.format(tracker.results_dir, seq.name, obj_id) for obj_id in seq.object_ids]\n            missing = [not os.path.isfile(f) for f in bbox_files]\n            return sum(missing) == 0\n\n    visdom_info = {} if visdom_info is None else visdom_info\n\n    if _results_exist() and not debug:\n        return\n\n    print('Tracker: {} {} {} ,  Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name))\n\n    if debug:\n        
output = tracker.run_sequence(seq, debug=debug, visdom_info=visdom_info)\n    else:\n        try:\n            output = tracker.run_sequence(seq, debug=debug, visdom_info=visdom_info)\n        except Exception as e:\n            print(e)\n            return\n\n    sys.stdout.flush()\n\n    if isinstance(output['time'][0], (dict, OrderedDict)):\n        exec_time = sum([sum(times.values()) for times in output['time']])\n    else:\n        exec_time = sum(output['time'])\n    num_frames = len(output['time'])\n\n    print('FPS: {}'.format(num_frames / exec_time))\n\n    if not debug:\n        _save_tracker_output(seq, tracker, output)\n\n\ndef run_dataset(dataset, trackers, debug=False, threads=0, visdom_info=None):\n    \"\"\"Runs a list of trackers on a dataset.\n    args:\n        dataset: List of Sequence instances, forming a dataset.\n        trackers: List of Tracker instances.\n        debug: Debug level.\n        threads: Number of threads to use (default 0).\n        visdom_info: Dict containing information about the server for visdom\n    \"\"\"\n    multiprocessing.set_start_method('spawn', force=True)\n\n    print('Evaluating {:4d} trackers on {:5d} sequences'.format(len(trackers), len(dataset)))\n\n    visdom_info = {} if visdom_info is None else visdom_info\n\n    if threads == 0:\n        mode = 'sequential'\n    else:\n        mode = 'parallel'\n\n    if mode == 'sequential':\n        for seq in dataset:\n            for tracker_info in trackers:\n                run_sequence(seq, tracker_info, debug=debug, visdom_info=visdom_info)\n    elif mode == 'parallel':\n        param_list = [(seq, tracker_info, debug, visdom_info) for seq, tracker_info in product(dataset, trackers)]\n        with multiprocessing.Pool(processes=threads) as pool:\n            pool.starmap(run_sequence, param_list)\n    print('Done')\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/tpldataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom pytracking.utils.load_text import load_text\n\n\nclass TPLDataset(BaseDataset):\n    \"\"\"\n    Temple Color 128 dataset\n\n    Publication:\n        Encoding Color Information for Visual Tracking: Algorithms and Benchmark\n        P. Liang, E. Blasch, and H. Ling\n        TIP, 2015\n        http://www.dabi.temple.edu/~hbling/publication/TColor-128.pdf\n\n    Download the dataset from http://www.dabi.temple.edu/~hbling/data/TColor-128/TColor-128.html\n    \"\"\"\n    def __init__(self, exclude_otb=False):\n        \"\"\"\n        args:\n            exclude_otb (bool) - If True, sequences overlapping with the OTB dataset are excluded\n        \"\"\"\n        super().__init__()\n        self.base_path = self.env_settings.tpl_path\n        self.sequence_info_list = self._get_sequence_info_list(exclude_otb)\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])\n\n    def _construct_sequence(self, sequence_info):\n        sequence_path = sequence_info['path']\n        nz = sequence_info['nz']\n        ext = sequence_info['ext']\n        start_frame = sequence_info['startFrame']\n        end_frame = sequence_info['endFrame']\n\n        init_omit = 0\n        if 'initOmit' in sequence_info:\n            init_omit = sequence_info['initOmit']\n\n        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, \n        sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]\n\n        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy')\n\n        return Sequence(sequence_info['name'], frames, 'tpl', ground_truth_rect[init_omit:,:])\n\n    def __len__(self):\n        return len(self.sequence_info_list)\n\n    def _get_sequence_info_list(self, exclude_otb=False):\n        sequence_info_list = [\n            {\"name\": \"tpl_Skating2\", \"path\": \"tpl_Skating2/img\", \"startFrame\": 1, \"endFrame\": 707, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Skating2/Skating2_gt.txt\"},\n            {\"name\": \"tpl_Pool_ce3\", \"path\": \"tpl_Pool_ce3/img\", \"startFrame\": 1, \"endFrame\": 124, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Pool_ce3/Pool_ce3_gt.txt\"},\n            {\"name\": \"tpl_Microphone_ce1\", \"path\": \"tpl_Microphone_ce1/img\", \"startFrame\": 1, \"endFrame\": 204, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Microphone_ce1/Microphone_ce1_gt.txt\"},\n            {\"name\": \"tpl_Torus\", \"path\": \"tpl_Torus/img\", \"startFrame\": 1, \"endFrame\": 264, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Torus/Torus_gt.txt\"},\n            {\"name\": \"tpl_Lemming\", \"path\": \"tpl_Lemming/img\", \"startFrame\": 1, \"endFrame\": 1336, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Lemming/Lemming_gt.txt\"},\n            {\"name\": \"tpl_Eagle_ce\", \"path\": \"tpl_Eagle_ce/img\", \"startFrame\": 1, \"endFrame\": 112, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Eagle_ce/Eagle_ce_gt.txt\"},\n            {\"name\": \"tpl_Skating_ce2\", \"path\": \"tpl_Skating_ce2/img\", \"startFrame\": 1, \"endFrame\": 497, \"nz\": 4,\n             \"ext\": \"jpg\", 
\"anno_path\": \"tpl_Skating_ce2/Skating_ce2_gt.txt\"},\n            {\"name\": \"tpl_Yo_yos_ce3\", \"path\": \"tpl_Yo_yos_ce3/img\", \"startFrame\": 1, \"endFrame\": 201, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Yo_yos_ce3/Yo-yos_ce3_gt.txt\"},\n            {\"name\": \"tpl_Board\", \"path\": \"tpl_Board/img\", \"startFrame\": 1, \"endFrame\": 598, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Board/Board_gt.txt\"},\n            {\"name\": \"tpl_Tennis_ce3\", \"path\": \"tpl_Tennis_ce3/img\", \"startFrame\": 1, \"endFrame\": 204, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Tennis_ce3/Tennis_ce3_gt.txt\"},\n            {\"name\": \"tpl_SuperMario_ce\", \"path\": \"tpl_SuperMario_ce/img\", \"startFrame\": 1, \"endFrame\": 146, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_SuperMario_ce/SuperMario_ce_gt.txt\"},\n            {\"name\": \"tpl_Yo_yos_ce1\", \"path\": \"tpl_Yo_yos_ce1/img\", \"startFrame\": 1, \"endFrame\": 235, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Yo_yos_ce1/Yo-yos_ce1_gt.txt\"},\n            {\"name\": \"tpl_Soccer\", \"path\": \"tpl_Soccer/img\", \"startFrame\": 1, \"endFrame\": 392, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Soccer/Soccer_gt.txt\"},\n            {\"name\": \"tpl_Fish_ce2\", \"path\": \"tpl_Fish_ce2/img\", \"startFrame\": 1, \"endFrame\": 573, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Fish_ce2/Fish_ce2_gt.txt\"},\n            {\"name\": \"tpl_Liquor\", \"path\": \"tpl_Liquor/img\", \"startFrame\": 1, \"endFrame\": 1741, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Liquor/Liquor_gt.txt\"},\n            {\"name\": \"tpl_Plane_ce2\", \"path\": \"tpl_Plane_ce2/img\", \"startFrame\": 1, \"endFrame\": 653, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Plane_ce2/Plane_ce2_gt.txt\"},\n            {\"name\": \"tpl_Couple\", \"path\": \"tpl_Couple/img\", \"startFrame\": 1, \"endFrame\": 140, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Couple/Couple_gt.txt\"},\n            {\"name\": \"tpl_Logo_ce\", \"path\": \"tpl_Logo_ce/img\", \"startFrame\": 1, \"endFrame\": 610, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Logo_ce/Logo_ce_gt.txt\"},\n            {\"name\": \"tpl_Hand_ce2\", \"path\": \"tpl_Hand_ce2/img\", \"startFrame\": 1, \"endFrame\": 251, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Hand_ce2/Hand_ce2_gt.txt\"},\n            {\"name\": \"tpl_Kite_ce2\", \"path\": \"tpl_Kite_ce2/img\", \"startFrame\": 1, \"endFrame\": 658, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Kite_ce2/Kite_ce2_gt.txt\"},\n            {\"name\": \"tpl_Walking\", \"path\": \"tpl_Walking/img\", \"startFrame\": 1, \"endFrame\": 412, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Walking/Walking_gt.txt\"},\n            {\"name\": \"tpl_David\", \"path\": \"tpl_David/img\", \"startFrame\": 300, \"endFrame\": 770, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_David/David_gt.txt\"},\n            {\"name\": \"tpl_Boat_ce1\", \"path\": \"tpl_Boat_ce1/img\", \"startFrame\": 1, \"endFrame\": 377, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Boat_ce1/Boat_ce1_gt.txt\"},\n            {\"name\": \"tpl_Airport_ce\", \"path\": \"tpl_Airport_ce/img\", \"startFrame\": 1, \"endFrame\": 148, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Airport_ce/Airport_ce_gt.txt\"},\n          
  {\"name\": \"tpl_Tiger2\", \"path\": \"tpl_Tiger2/img\", \"startFrame\": 1, \"endFrame\": 365, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Tiger2/Tiger2_gt.txt\"},\n            {\"name\": \"tpl_Suitcase_ce\", \"path\": \"tpl_Suitcase_ce/img\", \"startFrame\": 1, \"endFrame\": 184, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Suitcase_ce/Suitcase_ce_gt.txt\"},\n            {\"name\": \"tpl_TennisBall_ce\", \"path\": \"tpl_TennisBall_ce/img\", \"startFrame\": 1, \"endFrame\": 288, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_TennisBall_ce/TennisBall_ce_gt.txt\"},\n            {\"name\": \"tpl_Singer_ce1\", \"path\": \"tpl_Singer_ce1/img\", \"startFrame\": 1, \"endFrame\": 214, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Singer_ce1/Singer_ce1_gt.txt\"},\n            {\"name\": \"tpl_Pool_ce2\", \"path\": \"tpl_Pool_ce2/img\", \"startFrame\": 1, \"endFrame\": 133, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Pool_ce2/Pool_ce2_gt.txt\"},\n            {\"name\": \"tpl_Surf_ce3\", \"path\": \"tpl_Surf_ce3/img\", \"startFrame\": 1, \"endFrame\": 279, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Surf_ce3/Surf_ce3_gt.txt\"},\n            {\"name\": \"tpl_Bird\", \"path\": \"tpl_Bird/img\", \"startFrame\": 1, \"endFrame\": 99, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Bird/Bird_gt.txt\"},\n            {\"name\": \"tpl_Crossing\", \"path\": \"tpl_Crossing/img\", \"startFrame\": 1, \"endFrame\": 120, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Crossing/Crossing_gt.txt\"},\n            {\"name\": \"tpl_Plate_ce1\", \"path\": \"tpl_Plate_ce1/img\", \"startFrame\": 1, \"endFrame\": 142, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Plate_ce1/Plate_ce1_gt.txt\"},\n            {\"name\": \"tpl_Cup\", \"path\": \"tpl_Cup/img\", \"startFrame\": 1, \"endFrame\": 303, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Cup/Cup_gt.txt\"},\n            {\"name\": \"tpl_Surf_ce2\", \"path\": \"tpl_Surf_ce2/img\", \"startFrame\": 1, \"endFrame\": 391, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Surf_ce2/Surf_ce2_gt.txt\"},\n            {\"name\": \"tpl_Busstation_ce2\", \"path\": \"tpl_Busstation_ce2/img\", \"startFrame\": 6, \"endFrame\": 400, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Busstation_ce2/Busstation_ce2_gt.txt\"},\n            {\"name\": \"tpl_Charger_ce\", \"path\": \"tpl_Charger_ce/img\", \"startFrame\": 1, \"endFrame\": 298, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Charger_ce/Charger_ce_gt.txt\"},\n            {\"name\": \"tpl_Pool_ce1\", \"path\": \"tpl_Pool_ce1/img\", \"startFrame\": 1, \"endFrame\": 166, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Pool_ce1/Pool_ce1_gt.txt\"},\n            {\"name\": \"tpl_MountainBike\", \"path\": \"tpl_MountainBike/img\", \"startFrame\": 1, \"endFrame\": 228, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_MountainBike/MountainBike_gt.txt\"},\n            {\"name\": \"tpl_Guitar_ce1\", \"path\": \"tpl_Guitar_ce1/img\", \"startFrame\": 1, \"endFrame\": 268, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Guitar_ce1/Guitar_ce1_gt.txt\"},\n            {\"name\": \"tpl_Busstation_ce1\", \"path\": \"tpl_Busstation_ce1/img\", \"startFrame\": 1, \"endFrame\": 363, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Busstation_ce1/Busstation_ce1_gt.txt\"},\n 
           {\"name\": \"tpl_Diving\", \"path\": \"tpl_Diving/img\", \"startFrame\": 1, \"endFrame\": 231, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Diving/Diving_gt.txt\"},\n            {\"name\": \"tpl_Skating_ce1\", \"path\": \"tpl_Skating_ce1/img\", \"startFrame\": 1, \"endFrame\": 409, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Skating_ce1/Skating_ce1_gt.txt\"},\n            {\"name\": \"tpl_Hurdle_ce2\", \"path\": \"tpl_Hurdle_ce2/img\", \"startFrame\": 27, \"endFrame\": 330, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Hurdle_ce2/Hurdle_ce2_gt.txt\"},\n            {\"name\": \"tpl_Plate_ce2\", \"path\": \"tpl_Plate_ce2/img\", \"startFrame\": 1, \"endFrame\": 181, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Plate_ce2/Plate_ce2_gt.txt\"},\n            {\"name\": \"tpl_CarDark\", \"path\": \"tpl_CarDark/img\", \"startFrame\": 1, \"endFrame\": 393, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_CarDark/CarDark_gt.txt\"},\n            {\"name\": \"tpl_Singer_ce2\", \"path\": \"tpl_Singer_ce2/img\", \"startFrame\": 1, \"endFrame\": 999, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Singer_ce2/Singer_ce2_gt.txt\"},\n            {\"name\": \"tpl_Shaking\", \"path\": \"tpl_Shaking/img\", \"startFrame\": 1, \"endFrame\": 365, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Shaking/Shaking_gt.txt\"},\n            {\"name\": \"tpl_Iceskater\", \"path\": \"tpl_Iceskater/img\", \"startFrame\": 1, \"endFrame\": 500, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Iceskater/Iceskater_gt.txt\"},\n            {\"name\": \"tpl_Badminton_ce2\", \"path\": \"tpl_Badminton_ce2/img\", \"startFrame\": 1, \"endFrame\": 705, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Badminton_ce2/Badminton_ce2_gt.txt\"},\n            {\"name\": \"tpl_Spiderman_ce\", \"path\": \"tpl_Spiderman_ce/img\", \"startFrame\": 1, \"endFrame\": 351, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Spiderman_ce/Spiderman_ce_gt.txt\"},\n            {\"name\": \"tpl_Kite_ce1\", \"path\": \"tpl_Kite_ce1/img\", \"startFrame\": 1, \"endFrame\": 484, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Kite_ce1/Kite_ce1_gt.txt\"},\n            {\"name\": \"tpl_Skyjumping_ce\", \"path\": \"tpl_Skyjumping_ce/img\", \"startFrame\": 1, \"endFrame\": 938, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Skyjumping_ce/Skyjumping_ce_gt.txt\"},\n            {\"name\": \"tpl_Ball_ce1\", \"path\": \"tpl_Ball_ce1/img\", \"startFrame\": 1, \"endFrame\": 391, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Ball_ce1/Ball_ce1_gt.txt\"},\n            {\"name\": \"tpl_Yo_yos_ce2\", \"path\": \"tpl_Yo_yos_ce2/img\", \"startFrame\": 1, \"endFrame\": 454, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Yo_yos_ce2/Yo-yos_ce2_gt.txt\"},\n            {\"name\": \"tpl_Ironman\", \"path\": \"tpl_Ironman/img\", \"startFrame\": 1, \"endFrame\": 166, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Ironman/Ironman_gt.txt\"},\n            {\"name\": \"tpl_FaceOcc1\", \"path\": \"tpl_FaceOcc1/img\", \"startFrame\": 1, \"endFrame\": 892, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_FaceOcc1/FaceOcc1_gt.txt\"},\n            {\"name\": \"tpl_Surf_ce1\", \"path\": \"tpl_Surf_ce1/img\", \"startFrame\": 1, \"endFrame\": 404, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": 
\"tpl_Surf_ce1/Surf_ce1_gt.txt\"},\n            {\"name\": \"tpl_Ring_ce\", \"path\": \"tpl_Ring_ce/img\", \"startFrame\": 1, \"endFrame\": 201, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Ring_ce/Ring_ce_gt.txt\"},\n            {\"name\": \"tpl_Surf_ce4\", \"path\": \"tpl_Surf_ce4/img\", \"startFrame\": 1, \"endFrame\": 135, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Surf_ce4/Surf_ce4_gt.txt\"},\n            {\"name\": \"tpl_Ball_ce4\", \"path\": \"tpl_Ball_ce4/img\", \"startFrame\": 1, \"endFrame\": 538, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Ball_ce4/Ball_ce4_gt.txt\"},\n            {\"name\": \"tpl_Bikeshow_ce\", \"path\": \"tpl_Bikeshow_ce/img\", \"startFrame\": 1, \"endFrame\": 361, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Bikeshow_ce/Bikeshow_ce_gt.txt\"},\n            {\"name\": \"tpl_Kobe_ce\", \"path\": \"tpl_Kobe_ce/img\", \"startFrame\": 1, \"endFrame\": 582, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Kobe_ce/Kobe_ce_gt.txt\"},\n            {\"name\": \"tpl_Tiger1\", \"path\": \"tpl_Tiger1/img\", \"startFrame\": 1, \"endFrame\": 354, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Tiger1/Tiger1_gt.txt\"},\n            {\"name\": \"tpl_Skiing\", \"path\": \"tpl_Skiing/img\", \"startFrame\": 1, \"endFrame\": 81, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Skiing/Skiing_gt.txt\"},\n            {\"name\": \"tpl_Tennis_ce1\", \"path\": \"tpl_Tennis_ce1/img\", \"startFrame\": 1, \"endFrame\": 454, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Tennis_ce1/Tennis_ce1_gt.txt\"},\n            {\"name\": \"tpl_Carchasing_ce4\", \"path\": \"tpl_Carchasing_ce4/img\", \"startFrame\": 1, \"endFrame\": 442, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Carchasing_ce4/Carchasing_ce4_gt.txt\"},\n            {\"name\": \"tpl_Walking2\", \"path\": \"tpl_Walking2/img\", \"startFrame\": 1, \"endFrame\": 500, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Walking2/Walking2_gt.txt\"},\n            {\"name\": \"tpl_Sailor_ce\", \"path\": \"tpl_Sailor_ce/img\", \"startFrame\": 1, \"endFrame\": 402, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Sailor_ce/Sailor_ce_gt.txt\"},\n            {\"name\": \"tpl_Railwaystation_ce\", \"path\": \"tpl_Railwaystation_ce/img\", \"startFrame\": 1, \"endFrame\": 413,\n             \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"tpl_Railwaystation_ce/Railwaystation_ce_gt.txt\"},\n            {\"name\": \"tpl_Bee_ce\", \"path\": \"tpl_Bee_ce/img\", \"startFrame\": 1, \"endFrame\": 90, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Bee_ce/Bee_ce_gt.txt\"},\n            {\"name\": \"tpl_Girl\", \"path\": \"tpl_Girl/img\", \"startFrame\": 1, \"endFrame\": 500, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Girl/Girl_gt.txt\"},\n            {\"name\": \"tpl_Subway\", \"path\": \"tpl_Subway/img\", \"startFrame\": 1, \"endFrame\": 175, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Subway/Subway_gt.txt\"},\n            {\"name\": \"tpl_David3\", \"path\": \"tpl_David3/img\", \"startFrame\": 1, \"endFrame\": 252, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_David3/David3_gt.txt\"},\n            {\"name\": \"tpl_Electricalbike_ce\", \"path\": \"tpl_Electricalbike_ce/img\", \"startFrame\": 1, \"endFrame\": 818,\n             \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": 
\"tpl_Electricalbike_ce/Electricalbike_ce_gt.txt\"},\n            {\"name\": \"tpl_Michaeljackson_ce\", \"path\": \"tpl_Michaeljackson_ce/img\", \"startFrame\": 1, \"endFrame\": 393,\n             \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"tpl_Michaeljackson_ce/Michaeljackson_ce_gt.txt\"},\n            {\"name\": \"tpl_Woman\", \"path\": \"tpl_Woman/img\", \"startFrame\": 1, \"endFrame\": 597, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Woman/Woman_gt.txt\"},\n            {\"name\": \"tpl_TableTennis_ce\", \"path\": \"tpl_TableTennis_ce/img\", \"startFrame\": 1, \"endFrame\": 198, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_TableTennis_ce/TableTennis_ce_gt.txt\"},\n            {\"name\": \"tpl_Motorbike_ce\", \"path\": \"tpl_Motorbike_ce/img\", \"startFrame\": 1, \"endFrame\": 563, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Motorbike_ce/Motorbike_ce_gt.txt\"},\n            {\"name\": \"tpl_Baby_ce\", \"path\": \"tpl_Baby_ce/img\", \"startFrame\": 1, \"endFrame\": 296, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Baby_ce/Baby_ce_gt.txt\"},\n            {\"name\": \"tpl_Gym\", \"path\": \"tpl_Gym/img\", \"startFrame\": 1, \"endFrame\": 766, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Gym/Gym_gt.txt\"},\n            {\"name\": \"tpl_Matrix\", \"path\": \"tpl_Matrix/img\", \"startFrame\": 1, \"endFrame\": 100, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Matrix/Matrix_gt.txt\"},\n            {\"name\": \"tpl_Kite_ce3\", \"path\": \"tpl_Kite_ce3/img\", \"startFrame\": 1, \"endFrame\": 528, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Kite_ce3/Kite_ce3_gt.txt\"},\n            {\"name\": \"tpl_Fish_ce1\", \"path\": \"tpl_Fish_ce1/img\", \"startFrame\": 1, \"endFrame\": 401, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Fish_ce1/Fish_ce1_gt.txt\"},\n            {\"name\": \"tpl_Hand_ce1\", \"path\": \"tpl_Hand_ce1/img\", \"startFrame\": 1, \"endFrame\": 401, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Hand_ce1/Hand_ce1_gt.txt\"},\n            {\"name\": \"tpl_Doll\", \"path\": \"tpl_Doll/img\", \"startFrame\": 1, \"endFrame\": 3872, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Doll/Doll_gt.txt\"},\n            {\"name\": \"tpl_Carchasing_ce3\", \"path\": \"tpl_Carchasing_ce3/img\", \"startFrame\": 1, \"endFrame\": 572, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Carchasing_ce3/Carchasing_ce3_gt.txt\"},\n            {\"name\": \"tpl_Thunder_ce\", \"path\": \"tpl_Thunder_ce/img\", \"startFrame\": 1, \"endFrame\": 375, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Thunder_ce/Thunder_ce_gt.txt\"},\n            {\"name\": \"tpl_Singer2\", \"path\": \"tpl_Singer2/img\", \"startFrame\": 1, \"endFrame\": 366, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Singer2/Singer2_gt.txt\"},\n            {\"name\": \"tpl_Basketball\", \"path\": \"tpl_Basketball/img\", \"startFrame\": 1, \"endFrame\": 725, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Basketball/Basketball_gt.txt\"},\n            {\"name\": \"tpl_Hand\", \"path\": \"tpl_Hand/img\", \"startFrame\": 1, \"endFrame\": 244, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Hand/Hand_gt.txt\"},\n            {\"name\": \"tpl_Cup_ce\", \"path\": \"tpl_Cup_ce/img\", \"startFrame\": 1, \"endFrame\": 338, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": 
\"tpl_Cup_ce/Cup_ce_gt.txt\"},\n            {\"name\": \"tpl_MotorRolling\", \"path\": \"tpl_MotorRolling/img\", \"startFrame\": 1, \"endFrame\": 164, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_MotorRolling/MotorRolling_gt.txt\"},\n            {\"name\": \"tpl_Boat_ce2\", \"path\": \"tpl_Boat_ce2/img\", \"startFrame\": 1, \"endFrame\": 412, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Boat_ce2/Boat_ce2_gt.txt\"},\n            {\"name\": \"tpl_CarScale\", \"path\": \"tpl_CarScale/img\", \"startFrame\": 1, \"endFrame\": 252, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_CarScale/CarScale_gt.txt\"},\n            {\"name\": \"tpl_Sunshade\", \"path\": \"tpl_Sunshade/img\", \"startFrame\": 1, \"endFrame\": 172, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Sunshade/Sunshade_gt.txt\"},\n            {\"name\": \"tpl_Football1\", \"path\": \"tpl_Football1/img\", \"startFrame\": 1, \"endFrame\": 74, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Football1/Football1_gt.txt\"},\n            {\"name\": \"tpl_Singer1\", \"path\": \"tpl_Singer1/img\", \"startFrame\": 1, \"endFrame\": 351, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Singer1/Singer1_gt.txt\"},\n            {\"name\": \"tpl_Hurdle_ce1\", \"path\": \"tpl_Hurdle_ce1/img\", \"startFrame\": 1, \"endFrame\": 300, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Hurdle_ce1/Hurdle_ce1_gt.txt\"},\n            {\"name\": \"tpl_Basketball_ce3\", \"path\": \"tpl_Basketball_ce3/img\", \"startFrame\": 1, \"endFrame\": 441, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Basketball_ce3/Basketball_ce3_gt.txt\"},\n            {\"name\": \"tpl_Toyplane_ce\", \"path\": \"tpl_Toyplane_ce/img\", \"startFrame\": 1, \"endFrame\": 405, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Toyplane_ce/Toyplane_ce_gt.txt\"},\n            {\"name\": \"tpl_Skating1\", \"path\": \"tpl_Skating1/img\", \"startFrame\": 1, \"endFrame\": 400, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Skating1/Skating1_gt.txt\"},\n            {\"name\": \"tpl_Juice\", \"path\": \"tpl_Juice/img\", \"startFrame\": 1, \"endFrame\": 404, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Juice/Juice_gt.txt\"},\n            {\"name\": \"tpl_Biker\", \"path\": \"tpl_Biker/img\", \"startFrame\": 1, \"endFrame\": 180, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Biker/Biker_gt.txt\"},\n            {\"name\": \"tpl_Boy\", \"path\": \"tpl_Boy/img\", \"startFrame\": 1, \"endFrame\": 602, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Boy/Boy_gt.txt\"},\n            {\"name\": \"tpl_Jogging1\", \"path\": \"tpl_Jogging1/img\", \"startFrame\": 1, \"endFrame\": 307, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Jogging1/Jogging1_gt.txt\"},\n            {\"name\": \"tpl_Deer\", \"path\": \"tpl_Deer/img\", \"startFrame\": 1, \"endFrame\": 71, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Deer/Deer_gt.txt\"},\n            {\"name\": \"tpl_Panda\", \"path\": \"tpl_Panda/img\", \"startFrame\": 1, \"endFrame\": 241, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Panda/Panda_gt.txt\"},\n            {\"name\": \"tpl_Coke\", \"path\": \"tpl_Coke/img\", \"startFrame\": 1, \"endFrame\": 291, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Coke/Coke_gt.txt\"},\n            {\"name\": \"tpl_Carchasing_ce1\", \"path\": 
\"tpl_Carchasing_ce1/img\", \"startFrame\": 1, \"endFrame\": 501, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Carchasing_ce1/Carchasing_ce1_gt.txt\"},\n            {\"name\": \"tpl_Badminton_ce1\", \"path\": \"tpl_Badminton_ce1/img\", \"startFrame\": 1, \"endFrame\": 579, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Badminton_ce1/Badminton_ce1_gt.txt\"},\n            {\"name\": \"tpl_Trellis\", \"path\": \"tpl_Trellis/img\", \"startFrame\": 1, \"endFrame\": 569, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Trellis/Trellis_gt.txt\"},\n            {\"name\": \"tpl_Face_ce2\", \"path\": \"tpl_Face_ce2/img\", \"startFrame\": 1, \"endFrame\": 148, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Face_ce2/Face_ce2_gt.txt\"},\n            {\"name\": \"tpl_Ball_ce2\", \"path\": \"tpl_Ball_ce2/img\", \"startFrame\": 1, \"endFrame\": 603, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Ball_ce2/Ball_ce2_gt.txt\"},\n            {\"name\": \"tpl_Skiing_ce\", \"path\": \"tpl_Skiing_ce/img\", \"startFrame\": 1, \"endFrame\": 511, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Skiing_ce/Skiing_ce_gt.txt\"},\n            {\"name\": \"tpl_Jogging2\", \"path\": \"tpl_Jogging2/img\", \"startFrame\": 1, \"endFrame\": 307, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Jogging2/Jogging2_gt.txt\"},\n            {\"name\": \"tpl_Bike_ce1\", \"path\": \"tpl_Bike_ce1/img\", \"startFrame\": 1, \"endFrame\": 801, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Bike_ce1/Bike_ce1_gt.txt\"},\n            {\"name\": \"tpl_Bike_ce2\", \"path\": \"tpl_Bike_ce2/img\", \"startFrame\": 1, \"endFrame\": 812, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Bike_ce2/Bike_ce2_gt.txt\"},\n            {\"name\": \"tpl_Ball_ce3\", \"path\": \"tpl_Ball_ce3/img\", \"startFrame\": 1, \"endFrame\": 273, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Ball_ce3/Ball_ce3_gt.txt\"},\n            {\"name\": \"tpl_Girlmov\", \"path\": \"tpl_Girlmov/img\", \"startFrame\": 1, \"endFrame\": 1500, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Girlmov/Girlmov_gt.txt\"},\n            {\"name\": \"tpl_Bolt\", \"path\": \"tpl_Bolt/img\", \"startFrame\": 1, \"endFrame\": 350, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Bolt/Bolt_gt.txt\"},\n            {\"name\": \"tpl_Basketball_ce2\", \"path\": \"tpl_Basketball_ce2/img\", \"startFrame\": 1, \"endFrame\": 455, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Basketball_ce2/Basketball_ce2_gt.txt\"},\n            {\"name\": \"tpl_Bicycle\", \"path\": \"tpl_Bicycle/img\", \"startFrame\": 1, \"endFrame\": 271, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Bicycle/Bicycle_gt.txt\"},\n            {\"name\": \"tpl_Face_ce\", \"path\": \"tpl_Face_ce/img\", \"startFrame\": 1, \"endFrame\": 620, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Face_ce/Face_ce_gt.txt\"},\n            {\"name\": \"tpl_Basketball_ce1\", \"path\": \"tpl_Basketball_ce1/img\", \"startFrame\": 1, \"endFrame\": 496, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Basketball_ce1/Basketball_ce1_gt.txt\"},\n            {\"name\": \"tpl_Messi_ce\", \"path\": \"tpl_Messi_ce/img\", \"startFrame\": 1, \"endFrame\": 272, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Messi_ce/Messi_ce_gt.txt\"},\n            {\"name\": \"tpl_Tennis_ce2\", \"path\": 
\"tpl_Tennis_ce2/img\", \"startFrame\": 1, \"endFrame\": 305, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Tennis_ce2/Tennis_ce2_gt.txt\"},\n            {\"name\": \"tpl_Microphone_ce2\", \"path\": \"tpl_Microphone_ce2/img\", \"startFrame\": 1, \"endFrame\": 103, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Microphone_ce2/Microphone_ce2_gt.txt\"},\n            {\"name\": \"tpl_Guitar_ce2\", \"path\": \"tpl_Guitar_ce2/img\", \"startFrame\": 1, \"endFrame\": 313, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Guitar_ce2/Guitar_ce2_gt.txt\"}\n\n        ]\n\n        otb_sequences = ['tpl_Skating2', 'tpl_Lemming', 'tpl_Board', 'tpl_Soccer', 'tpl_Liquor', 'tpl_Couple', 'tpl_Walking', 'tpl_David', 'tpl_Tiger2', 'tpl_Bird', 'tpl_Crossing', 'tpl_MountainBike',\n                         'tpl_Diving', 'tpl_CarDark', 'tpl_Shaking', 'tpl_Ironman', 'tpl_FaceOcc1', 'tpl_Tiger1', 'tpl_Skiing', 'tpl_Walking2', 'tpl_Girl', 'tpl_Girlmov', 'tpl_Subway', 'tpl_David3', 'tpl_Woman',\n                         'tpl_Gym', 'tpl_Matrix', 'tpl_Doll', 'tpl_Singer2', 'tpl_Basketball', 'tpl_MotorRolling', 'tpl_CarScale', 'tpl_Football1', 'tpl_Singer1', 'tpl_Skating1', 'tpl_Biker',\n                         'tpl_Boy', 'tpl_Jogging1', 'tpl_Deer', 'tpl_Panda', 'tpl_Coke', 'tpl_Trellis', 'tpl_Jogging2', 'tpl_Bolt', ]\n        if exclude_otb:\n            sequence_info_list_nootb = []\n            for seq in sequence_info_list:\n                if seq['name'] not in otb_sequences:\n                    sequence_info_list_nootb.append(seq)\n\n            sequence_info_list = sequence_info_list_nootb\n\n        return sequence_info_list\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/tracker.py",
    "content": "import importlib\nimport os\nimport numpy as np\nfrom collections import OrderedDict\nfrom pytracking.evaluation.environment import env_settings\nimport time\nimport cv2 as cv\nfrom pytracking.utils.visdom import Visdom\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom pytracking.utils.plotting import draw_figure, overlay_mask\nfrom pytracking.utils.convert_vot_anno_to_rect import convert_vot_anno_to_rect\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\nfrom pytracking.evaluation.multi_object_wrapper import MultiObjectWrapper\nimport torch\n\n\n_tracker_disp_colors = {1: (0, 255, 0), 2: (0, 0, 255), 3: (255, 0, 0),\n                        4: (255, 255, 255), 5: (0, 0, 0), 6: (0, 255, 128),\n                        7: (123, 123, 123), 8: (255, 128, 0), 9: (128, 0, 255)}\n\n\ndef trackerlist(name: str, parameter_name: str, run_ids = None, display_name: str = None):\n    \"\"\"Generate list of trackers.\n    args:\n        name: Name of tracking method.\n        parameter_name: Name of parameter file.\n        run_ids: A single or list of run_ids.\n        display_name: Name to be displayed in the result plots.\n    \"\"\"\n    if run_ids is None or isinstance(run_ids, int):\n        run_ids = [run_ids]\n    return [Tracker(name, parameter_name, run_id, display_name) for run_id in run_ids]\n\n\nclass Tracker:\n    \"\"\"Wraps the tracker for evaluation and running purposes.\n    args:\n        name: Name of tracking method.\n        parameter_name: Name of parameter file.\n        run_id: The run id.\n        display_name: Name to be displayed in the result plots.\n    \"\"\"\n\n    def __init__(self, name: str, parameter_name: str, run_id: int = None, display_name: str = None):\n        assert run_id is None or isinstance(run_id, int)\n\n        self.name = name\n        self.parameter_name = parameter_name\n        self.run_id = run_id\n        self.display_name = display_name\n\n        env = env_settings()\n        if self.run_id is None:\n            self.results_dir = '{}/{}/{}'.format(env.results_path, self.name, self.parameter_name)\n            self.segmentation_dir = '{}/{}/{}'.format(env.segmentation_path, self.name, self.parameter_name)\n        else:\n            self.results_dir = '{}/{}/{}_{:03d}'.format(env.results_path, self.name, self.parameter_name, self.run_id)\n            self.segmentation_dir = '{}/{}/{}_{:03d}'.format(env.segmentation_path, self.name, self.parameter_name, self.run_id)\n\n        tracker_module_abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'tracker', self.name))\n        if os.path.isdir(tracker_module_abspath):\n            tracker_module = importlib.import_module('pytracking.tracker.{}'.format(self.name))\n            self.tracker_class = tracker_module.get_tracker_class()\n        else:\n            self.tracker_class = None\n\n        self.visdom = None\n\n\n    def _init_visdom(self, visdom_info, debug):\n        visdom_info = {} if visdom_info is None else visdom_info\n        self.pause_mode = False\n        self.step = False\n        if debug > 0 and visdom_info.get('use_visdom', True):\n            try:\n                self.visdom = Visdom(debug, {'handler': self._visdom_ui_handler, 'win_id': 'Tracking'},\n                                     visdom_info=visdom_info)\n\n                # Show help\n                help_text = 'You can pause/unpause the tracker by pressing ''space'' with the ''Tracking'' window ' \\\n                            'selected. 
During paused mode, you can track for one frame by pressing the right arrow key. ' \\\n                            'To enable/disable plotting of a data block, tick/untick the corresponding entry in ' \\\n                            'block list.'\n                self.visdom.register(help_text, 'text', 1, 'Help')\n            except Exception:\n                time.sleep(0.5)\n                print('!!! WARNING: Visdom could not start, so using matplotlib visualization instead !!!\\n'\n                      '!!! Start Visdom in a separate terminal window by typing \\'visdom\\' !!!')\n\n    def _visdom_ui_handler(self, data):\n        if data['event_type'] == 'KeyPress':\n            if data['key'] == ' ':\n                self.pause_mode = not self.pause_mode\n\n            elif data['key'] == 'ArrowRight' and self.pause_mode:\n                self.step = True\n\n\n    def create_tracker(self, params):\n        tracker = self.tracker_class(params)\n        tracker.visdom = self.visdom\n        return tracker\n\n    def run_sequence(self, seq, visualization=None, debug=None, visdom_info=None, multiobj_mode=None):\n        \"\"\"Run tracker on sequence.\n        args:\n            seq: Sequence to run the tracker on.\n            visualization: Set visualization flag (None means default value specified in the parameters).\n            debug: Set debug level (None means default value specified in the parameters).\n            visdom_info: Visdom info.\n            multiobj_mode: Which mode to use for multiple objects.\n        \"\"\"\n        params = self.get_parameters()\n        visualization_ = visualization\n\n        debug_ = debug\n        if debug is None:\n            debug_ = getattr(params, 'debug', 0)\n        if visualization is None:\n            if debug is None:\n                visualization_ = getattr(params, 'visualization', False)\n            else:\n                visualization_ = True if debug else False\n\n        params.visualization = visualization_\n        params.debug = debug_\n\n        self._init_visdom(visdom_info, debug_)\n        if visualization_ and self.visdom is None:\n            self.init_visualization()\n\n        # Get init information\n        init_info = seq.init_info()\n        is_single_object = not seq.multiobj_mode\n\n        if multiobj_mode is None:\n            multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default'))\n\n        if multiobj_mode == 'default' or is_single_object:\n            tracker = self.create_tracker(params)\n        elif multiobj_mode == 'parallel':\n            tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom)\n        else:\n            raise ValueError('Unknown multi object mode {}'.format(multiobj_mode))\n\n        output = self._track_sequence(tracker, seq, init_info)\n        return output\n\n    def _track_sequence(self, tracker, seq, init_info):\n        # Define outputs\n        # Each field in output is a list containing tracker prediction for each frame.\n\n        # In case of single object tracking mode:\n        # target_bbox[i] is the predicted bounding box for frame i\n        # time[i] is the processing time for frame i\n        # segmentation[i] is the segmentation mask for frame i (numpy array)\n\n        # In case of multi object tracking mode:\n        # target_bbox[i] is an OrderedDict, where target_bbox[i][obj_id] is the predicted box for target obj_id in\n        # frame i\n        # time[i] is either the processing time for frame i, or an 
OrderedDict containing processing times for each\n        # object in frame i\n        # segmentation[i] is the multi-label segmentation mask for frame i (numpy array)\n\n        output = {'target_bbox': [],\n                  'time': [],\n                  'segmentation': []}\n\n        def _store_outputs(tracker_out: dict, defaults=None):\n            defaults = {} if defaults is None else defaults\n            for key in output.keys():\n                val = tracker_out.get(key, defaults.get(key, None))\n                if key in tracker_out or val is not None:\n                    output[key].append(val)\n\n        # Initialize\n        image = self._read_image(seq.frames[0])\n\n        if tracker.params.visualization and self.visdom is None:\n            self.visualize(image, init_info.get('init_bbox'))\n\n        start_time = time.time()\n        out = tracker.initialize(image, init_info)\n        if out is None:\n            out = {}\n\n        prev_output = OrderedDict(out)\n\n        init_default = {'target_bbox': init_info.get('init_bbox'),\n                        'time': time.time() - start_time,\n                        'segmentation': init_info.get('init_mask')}\n\n        _store_outputs(out, init_default)\n\n        for frame_num, frame_path in enumerate(seq.frames[1:], start=1):\n            while True:\n                if not self.pause_mode:\n                    break\n                elif self.step:\n                    self.step = False\n                    break\n                else:\n                    time.sleep(0.1)\n\n            image = self._read_image(frame_path)\n\n            start_time = time.time()\n\n            info = seq.frame_info(frame_num)\n            info['previous_output'] = prev_output\n\n            out = tracker.track(image, info)\n            prev_output = OrderedDict(out)\n            _store_outputs(out, {'time': time.time() - start_time})\n\n            segmentation = out['segmentation'] if 'segmentation' in out else None\n            if self.visdom is not None:\n                tracker.visdom_draw_tracking(image, out['target_bbox'], segmentation)\n            elif tracker.params.visualization:\n                self.visualize(image, out['target_bbox'], segmentation)\n\n        for key in ['target_bbox', 'segmentation']:\n            if key in output and len(output[key]) <= 1:\n                output.pop(key)\n\n        return output\n\n    def run_video(self, videofilepath, optional_box=None, debug=None, visdom_info=None):\n        \"\"\"Run the tracker on a video file.\n        args:\n            videofilepath: Path to the video file.\n            optional_box: Optional initial box [x, y, w, h]. If None, the target is selected interactively.\n            debug: Debug level.\n            visdom_info: Visdom info.\n        \"\"\"\n\n        params = self.get_parameters()\n\n        debug_ = debug\n        if debug is None:\n            debug_ = getattr(params, 'debug', 0)\n        params.debug = debug_\n\n        params.tracker_name = self.name\n        params.param_name = self.parameter_name\n        self._init_visdom(visdom_info, debug_)\n\n        multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default'))\n\n        if multiobj_mode == 'default':\n            tracker = self.create_tracker(params)\n            if hasattr(tracker, 'initialize_features'):\n                tracker.initialize_features()\n\n        elif multiobj_mode == 'parallel':\n            tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom, fast_load=True)\n        else:\n            raise ValueError('Unknown multi object mode {}'.format(multiobj_mode))\n\n        assert os.path.isfile(videofilepath), 
\"Invalid param {}\".format(videofilepath)\n        \", videofilepath must be a valid videofile\"\n\n        cap = cv.VideoCapture(videofilepath)\n        display_name = 'Display: ' + tracker.params.tracker_name\n        cv.namedWindow(display_name, cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO)\n        cv.resizeWindow(display_name, 960, 720)\n        success, frame = cap.read()\n        cv.imshow(display_name, frame)\n\n        def _build_init_info(box):\n            return {'init_bbox': OrderedDict({1: box}), 'init_object_ids': [1, ], 'object_ids': [1, ],\n                    'sequence_object_ids': [1, ]}\n\n        if success is not True:\n            print(\"Read frame from {} failed.\".format(videofilepath))\n            exit(-1)\n        if optional_box is not None:\n            assert isinstance(optional_box, list, tuple)\n            assert len(optional_box) == 4, \"valid box's foramt is [x,y,w,h]\"\n            tracker.initialize(frame, _build_init_info(optional_box))\n        else:\n            while True:\n                # cv.waitKey()\n                frame_disp = frame.copy()\n\n                cv.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL,\n                           1.5, (0, 0, 0), 1)\n\n                x, y, w, h = cv.selectROI(display_name, frame_disp, fromCenter=False)\n                init_state = [x, y, w, h]\n                tracker.initialize(frame, _build_init_info(init_state))\n                break\n\n        while True:\n            ret, frame = cap.read()\n\n            if frame is None:\n                return\n\n            frame_disp = frame.copy()\n\n            # Draw box\n            out = tracker.track(frame)\n            state = [int(s) for s in out['target_bbox'][1]]\n            cv.rectangle(frame_disp, (state[0], state[1]), (state[2] + state[0], state[3] + state[1]),\n                         (0, 255, 0), 5)\n\n            font_color = (0, 0, 0)\n            cv.putText(frame_disp, 'Tracking!', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n            cv.putText(frame_disp, 'Press r to reset', (20, 55), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n            cv.putText(frame_disp, 'Press q to quit', (20, 80), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n\n            # Display the resulting frame\n            cv.imshow(display_name, frame_disp)\n            key = cv.waitKey(1)\n            if key == ord('q'):\n                break\n            elif key == ord('r'):\n                ret, frame = cap.read()\n                frame_disp = frame.copy()\n\n                cv.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1.5,\n                           (0, 0, 0), 1)\n\n                cv.imshow(display_name, frame_disp)\n                x, y, w, h = cv.selectROI(display_name, frame_disp, fromCenter=False)\n                init_state = [x, y, w, h]\n                tracker.initialize(frame, _build_init_info(init_state))\n\n        # When everything done, release the capture\n        cap.release()\n        cv.destroyAllWindows()\n\n\n    def run_webcam(self, debug=None, visdom_info=None):\n        \"\"\"Run the tracker with the webcam.\n        args:\n            debug: Debug level.\n        \"\"\"\n\n        params = self.get_parameters()\n\n        debug_ = debug\n        if debug is None:\n            debug_ = getattr(params, 'debug', 0)\n        params.debug = 
debug_\n\n        params.tracker_name = self.name\n        params.param_name = self.parameter_name\n\n        self._init_visdom(visdom_info, debug_)\n\n        multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default'))\n\n        if multiobj_mode == 'default':\n            tracker = self.create_tracker(params)\n        elif multiobj_mode == 'parallel':\n            tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom, fast_load=True)\n        else:\n            raise ValueError('Unknown multi object mode {}'.format(multiobj_mode))\n\n        class UIControl:\n            def __init__(self):\n                self.mode = 'init'  # init, select, track\n                self.target_tl = (-1, -1)\n                self.target_br = (-1, -1)\n                self.new_init = False\n\n            def mouse_callback(self, event, x, y, flags, param):\n                if event == cv.EVENT_LBUTTONDOWN and self.mode == 'init':\n                    self.target_tl = (x, y)\n                    self.target_br = (x, y)\n                    self.mode = 'select'\n                elif event == cv.EVENT_MOUSEMOVE and self.mode == 'select':\n                    self.target_br = (x, y)\n                elif event == cv.EVENT_LBUTTONDOWN and self.mode == 'select':\n                    self.target_br = (x, y)\n                    self.mode = 'init'\n                    self.new_init = True\n\n            def get_tl(self):\n                return self.target_tl if self.target_tl[0] < self.target_br[0] else self.target_br\n\n            def get_br(self):\n                return self.target_br if self.target_tl[0] < self.target_br[0] else self.target_tl\n\n            def get_bb(self):\n                tl = self.get_tl()\n                br = self.get_br()\n\n                bb = [min(tl[0], br[0]), min(tl[1], br[1]), abs(br[0] - tl[0]), abs(br[1] - tl[1])]\n                return bb\n\n        ui_control = UIControl()\n        cap = cv.VideoCapture(0)\n        display_name = 'Display: ' + self.name\n        cv.namedWindow(display_name, cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO)\n        cv.resizeWindow(display_name, 960, 720)\n        cv.setMouseCallback(display_name, ui_control.mouse_callback)\n\n        next_object_id = 1\n        sequence_object_ids = []\n        prev_output = OrderedDict()\n        while True:\n            # Capture frame-by-frame\n            ret, frame = cap.read()\n            frame_disp = frame.copy()\n\n            info = OrderedDict()\n            info['previous_output'] = prev_output\n\n            if ui_control.new_init:\n                ui_control.new_init = False\n                init_state = ui_control.get_bb()\n\n                info['init_object_ids'] = [next_object_id, ]\n                info['init_bbox'] = OrderedDict({next_object_id: init_state})\n                sequence_object_ids.append(next_object_id)\n\n                next_object_id += 1\n\n            # Draw box\n            if ui_control.mode == 'select':\n                cv.rectangle(frame_disp, ui_control.get_tl(), ui_control.get_br(), (255, 0, 0), 2)\n\n            if len(sequence_object_ids) > 0:\n                info['sequence_object_ids'] = sequence_object_ids\n                out = tracker.track(frame, info)\n                prev_output = OrderedDict(out)\n\n                if 'segmentation' in out:\n                    frame_disp = overlay_mask(frame_disp, out['segmentation'])\n\n                if 'target_bbox' in out:\n                    for obj_id, state 
in out['target_bbox'].items():\n                        state = [int(s) for s in state]\n                        cv.rectangle(frame_disp, (state[0], state[1]), (state[2] + state[0], state[3] + state[1]),\n                                     _tracker_disp_colors[obj_id], 5)\n\n            # Put text\n            font_color = (0, 0, 0)\n            cv.putText(frame_disp, 'Select target', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1)\n            cv.putText(frame_disp, 'Press r to reset', (20, 55), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n            cv.putText(frame_disp, 'Press q to quit', (20, 85), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n\n            # Display the resulting frame\n            cv.imshow(display_name, frame_disp)\n            key = cv.waitKey(1)\n            if key == ord('q'):\n                break\n            elif key == ord('r'):\n                next_object_id = 1\n                sequence_object_ids = []\n                prev_output = OrderedDict()\n\n                info = OrderedDict()\n\n                info['object_ids'] = []\n                info['init_object_ids'] = []\n                info['init_bbox'] = OrderedDict()\n                tracker.initialize(frame, info)\n                ui_control.mode = 'init'\n\n        # When everything done, release the capture\n        cap.release()\n        cv.destroyAllWindows()\n\n    def run_vot2020(self, debug=None, visdom_info=None):\n        params = self.get_parameters()\n        params.tracker_name = self.name\n        params.param_name = self.parameter_name\n        params.run_id = self.run_id\n\n        debug_ = debug\n        if debug is None:\n            debug_ = getattr(params, 'debug', 0)\n\n        if debug is None:\n            visualization_ = getattr(params, 'visualization', False)\n        else:\n            visualization_ = True if debug else False\n\n        params.visualization = visualization_\n        params.debug = debug_\n\n        self._init_visdom(visdom_info, debug_)\n\n        tracker = self.create_tracker(params)\n        tracker.initialize_features()\n\n        output_segmentation = tracker.predicts_segmentation_mask()\n\n        import pytracking.evaluation.vot2020 as vot\n\n        def _convert_anno_to_list(vot_anno):\n            vot_anno = [vot_anno[0], vot_anno[1], vot_anno[2], vot_anno[3]]\n            return vot_anno\n\n        def _convert_image_path(image_path):\n            return image_path\n\n        \"\"\"Run tracker on VOT.\"\"\"\n\n        if output_segmentation:\n            handle = vot.VOT(\"mask\")\n        else:\n            handle = vot.VOT(\"rectangle\")\n\n        vot_anno = handle.region()\n\n        image_path = handle.frame()\n        if not image_path:\n            return\n        image_path = _convert_image_path(image_path)\n\n        image = self._read_image(image_path)\n\n        if output_segmentation:\n            vot_anno_mask = vot.make_full_size(vot_anno, (image.shape[1], image.shape[0]))\n            bbox = masks_to_bboxes(torch.from_numpy(vot_anno_mask), fmt='t').squeeze().tolist()\n        else:\n            bbox = _convert_anno_to_list(vot_anno)\n            vot_anno_mask = None\n\n        out = tracker.initialize(image, {'init_mask': vot_anno_mask, 'init_bbox': bbox})\n\n        if out is None:\n            out = {}\n        prev_output = OrderedDict(out)\n\n        # Track\n        while True:\n            image_path = handle.frame()\n            if not image_path:\n       
         break\n            image_path = _convert_image_path(image_path)\n\n            image = self._read_image(image_path)\n\n            info = OrderedDict()\n            info['previous_output'] = prev_output\n\n            out = tracker.track(image, info)\n            prev_output = OrderedDict(out)\n\n            if output_segmentation:\n                pred = out['segmentation'].astype(np.uint8)\n            else:\n                state = out['target_bbox']\n                pred = vot.Rectangle(*state)\n            handle.report(pred, 1.0)\n\n            segmentation = out['segmentation'] if 'segmentation' in out else None\n            if self.visdom is not None:\n                tracker.visdom_draw_tracking(image, out['target_bbox'], segmentation)\n            elif tracker.params.visualization:\n                self.visualize(image, out['target_bbox'], segmentation)\n\n\n    def run_vot(self, debug=None, visdom_info=None):\n        params = self.get_parameters()\n        params.tracker_name = self.name\n        params.param_name = self.parameter_name\n        params.run_id = self.run_id\n\n        debug_ = debug\n        if debug is None:\n            debug_ = getattr(params, 'debug', 0)\n\n        if debug is None:\n            visualization_ = getattr(params, 'visualization', False)\n        else:\n            visualization_ = True if debug else False\n\n        params.visualization = visualization_\n        params.debug = debug_\n\n        self._init_visdom(visdom_info, debug_)\n\n        tracker = self.create_tracker(params)\n        tracker.initialize_features()\n\n        import pytracking.evaluation.vot as vot\n\n        def _convert_anno_to_list(vot_anno):\n            vot_anno = [vot_anno[0][0][0], vot_anno[0][0][1], vot_anno[0][1][0], vot_anno[0][1][1],\n                        vot_anno[0][2][0], vot_anno[0][2][1], vot_anno[0][3][0], vot_anno[0][3][1]]\n            return vot_anno\n\n        def _convert_image_path(image_path):\n            image_path_new = image_path[20:- 2]\n            return \"\".join(image_path_new)\n\n        \"\"\"Run tracker on VOT.\"\"\"\n\n        handle = vot.VOT(\"polygon\")\n\n        vot_anno_polygon = handle.region()\n        vot_anno_polygon = _convert_anno_to_list(vot_anno_polygon)\n\n        init_state = convert_vot_anno_to_rect(vot_anno_polygon, tracker.params.vot_anno_conversion_type)\n\n        image_path = handle.frame()\n        if not image_path:\n            return\n        image_path = _convert_image_path(image_path)\n\n        image = self._read_image(image_path)\n        tracker.initialize(image, {'init_bbox': init_state})\n\n        # Track\n        while True:\n            image_path = handle.frame()\n            if not image_path:\n                break\n            image_path = _convert_image_path(image_path)\n\n            image = self._read_image(image_path)\n            out = tracker.track(image)\n            state = out['target_bbox']\n\n            handle.report(vot.Rectangle(state[0], state[1], state[2], state[3]))\n\n            segmentation = out['segmentation'] if 'segmentation' in out else None\n            if self.visdom is not None:\n                tracker.visdom_draw_tracking(image, out['target_bbox'], segmentation)\n            elif tracker.params.visualization:\n                self.visualize(image, out['target_bbox'], segmentation)\n\n    def get_parameters(self):\n        \"\"\"Get parameters.\"\"\"\n        param_module = importlib.import_module('pytracking.parameter.{}.{}'.format(self.name, 
self.parameter_name))\n        params = param_module.parameters()\n        return params\n\n\n    def init_visualization(self):\n        self.pause_mode = False\n        self.fig, self.ax = plt.subplots(1)\n        self.fig.canvas.mpl_connect('key_press_event', self.press)\n        plt.tight_layout()\n\n\n    def visualize(self, image, state, segmentation=None):\n        self.ax.cla()\n        self.ax.imshow(image)\n        if segmentation is not None:\n            self.ax.imshow(segmentation, alpha=0.5)\n\n        if isinstance(state, (OrderedDict, dict)):\n            boxes = [v for k, v in state.items()]\n        else:\n            boxes = (state,)\n\n        for i, box in enumerate(boxes, start=1):\n            col = _tracker_disp_colors[i]\n            col = [float(c) / 255.0 for c in col]\n            rect = patches.Rectangle((box[0], box[1]), box[2], box[3], linewidth=1, edgecolor=col, facecolor='none')\n            self.ax.add_patch(rect)\n\n        if getattr(self, 'gt_state', None) is not None:\n            gt_state = self.gt_state\n            rect = patches.Rectangle((gt_state[0], gt_state[1]), gt_state[2], gt_state[3], linewidth=1, edgecolor='g', facecolor='none')\n            self.ax.add_patch(rect)\n        self.ax.set_axis_off()\n        self.ax.axis('equal')\n        draw_figure(self.fig)\n\n        if self.pause_mode:\n            keypress = False\n            while not keypress:\n                keypress = plt.waitforbuttonpress()\n\n    def reset_tracker(self):\n        pass\n\n    def press(self, event):\n        if event.key == 'p':\n            self.pause_mode = not self.pause_mode\n            print(\"Switching pause mode!\")\n        elif event.key == 'r':\n            self.reset_tracker()\n            print(\"Resetting target pos to gt!\")\n\n    def _read_image(self, image_file: str):\n        im = cv.imread(image_file)\n        return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n\n\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/trackingnetdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nimport os\nfrom pytracking.utils.load_text import load_text\n\n\nclass TrackingNetDataset(BaseDataset):\n    \"\"\" TrackingNet test set.\n\n    Publication:\n        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n        Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n        ECCV, 2018\n        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.trackingnet_path\n\n        sets = 'TEST'\n        if not isinstance(sets, (list, tuple)):\n            if sets == 'TEST':\n                sets = ['TEST']\n            elif sets == 'TRAIN':\n                sets = ['TRAIN_{}'.format(i) for i in range(5)]\n\n        self.sequence_list = self._list_sequences(self.base_path, sets)\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(set, seq_name) for set, seq_name in self.sequence_list])\n\n    def _construct_sequence(self, set, sequence_name):\n        anno_path = '{}/{}/anno/{}.txt'.format(self.base_path, set, sequence_name)\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy')\n\n        frames_path = '{}/{}/frames/{}'.format(self.base_path, set, sequence_name)\n        frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(\".jpg\")]\n        frame_list.sort(key=lambda f: int(f[:-4]))\n        frames_list = [os.path.join(frames_path, frame) for frame in frame_list]\n\n        return Sequence(sequence_name, frames_list, 'trackingnet', ground_truth_rect.reshape(-1, 4))\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _list_sequences(self, root, set_ids):\n        sequence_list = []\n\n        for s in set_ids:\n            anno_dir = os.path.join(root, s, \"anno\")\n            sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')]\n\n            sequence_list += sequences_cur_set\n\n        return sequence_list\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/uavdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom pytracking.utils.load_text import load_text\n\n\nclass UAVDataset(BaseDataset):\n    \"\"\" UAV123 dataset.\n\n    Publication:\n        A Benchmark and Simulator for UAV Tracking.\n        Matthias Mueller, Neil Smith and Bernard Ghanem\n        ECCV, 2016\n        https://ivul.kaust.edu.sa/Documents/Publications/2016/A%20Benchmark%20and%20Simulator%20for%20UAV%20Tracking.pdf\n\n    Download the dataset from https://ivul.kaust.edu.sa/Pages/pub-benchmark-simulator-uav.aspx\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.uav_path\n        self.sequence_info_list = self._get_sequence_info_list()\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])\n\n    def _construct_sequence(self, sequence_info):\n        sequence_path = sequence_info['path']\n        nz = sequence_info['nz']\n        ext = sequence_info['ext']\n        start_frame = sequence_info['startFrame']\n        end_frame = sequence_info['endFrame']\n\n        init_omit = 0\n        if 'initOmit' in sequence_info:\n            init_omit = sequence_info['initOmit']\n\n        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, \n        sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]\n\n        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy')\n\n        return Sequence(sequence_info['name'], frames, 'uav', ground_truth_rect[init_omit:,:],\n                        object_class=sequence_info['object_class'])\n\n    def __len__(self):\n        return len(self.sequence_info_list)\n\n    def _get_sequence_info_list(self):\n        sequence_info_list = [\n            {\"name\": \"uav_bike1\", \"path\": \"data_seq/UAV123/bike1\", \"startFrame\": 1, \"endFrame\": 3085, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bike1.txt\", \"object_class\": \"vehicle\"},\n            {\"name\": \"uav_bike2\", \"path\": \"data_seq/UAV123/bike2\", \"startFrame\": 1, \"endFrame\": 553, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bike2.txt\", \"object_class\": \"vehicle\"},\n            {\"name\": \"uav_bike3\", \"path\": \"data_seq/UAV123/bike3\", \"startFrame\": 1, \"endFrame\": 433, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bike3.txt\", \"object_class\": \"vehicle\"},\n            {\"name\": \"uav_bird1_1\", \"path\": \"data_seq/UAV123/bird1\", \"startFrame\": 1, \"endFrame\": 253, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bird1_1.txt\", \"object_class\": \"bird\"},\n            {\"name\": \"uav_bird1_2\", \"path\": \"data_seq/UAV123/bird1\", \"startFrame\": 775, \"endFrame\": 1477, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bird1_2.txt\", \"object_class\": \"bird\"},\n            {\"name\": \"uav_bird1_3\", \"path\": \"data_seq/UAV123/bird1\", \"startFrame\": 1573, \"endFrame\": 2437, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bird1_3.txt\", \"object_class\": \"bird\"},\n            {\"name\": \"uav_boat1\", \"path\": \"data_seq/UAV123/boat1\", \"startFrame\": 1, \"endFrame\": 901, \"nz\": 6,\n  
           \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat1.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat2\", \"path\": \"data_seq/UAV123/boat2\", \"startFrame\": 1, \"endFrame\": 799, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat2.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat3\", \"path\": \"data_seq/UAV123/boat3\", \"startFrame\": 1, \"endFrame\": 901, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat3.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat4\", \"path\": \"data_seq/UAV123/boat4\", \"startFrame\": 1, \"endFrame\": 553, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat4.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat5\", \"path\": \"data_seq/UAV123/boat5\", \"startFrame\": 1, \"endFrame\": 505, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat5.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat6\", \"path\": \"data_seq/UAV123/boat6\", \"startFrame\": 1, \"endFrame\": 805, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat6.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat7\", \"path\": \"data_seq/UAV123/boat7\", \"startFrame\": 1, \"endFrame\": 535, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat7.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat8\", \"path\": \"data_seq/UAV123/boat8\", \"startFrame\": 1, \"endFrame\": 685, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat8.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat9\", \"path\": \"data_seq/UAV123/boat9\", \"startFrame\": 1, \"endFrame\": 1399, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat9.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_building1\", \"path\": \"data_seq/UAV123/building1\", \"startFrame\": 1, \"endFrame\": 469, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building1.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_building2\", \"path\": \"data_seq/UAV123/building2\", \"startFrame\": 1, \"endFrame\": 577, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building2.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_building3\", \"path\": \"data_seq/UAV123/building3\", \"startFrame\": 1, \"endFrame\": 829, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building3.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_building4\", \"path\": \"data_seq/UAV123/building4\", \"startFrame\": 1, \"endFrame\": 787, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building4.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_building5\", \"path\": \"data_seq/UAV123/building5\", \"startFrame\": 1, \"endFrame\": 481, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building5.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_car1_1\", \"path\": \"data_seq/UAV123/car1\", \"startFrame\": 1, \"endFrame\": 751, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car1_1.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car1_2\", \"path\": \"data_seq/UAV123/car1\", \"startFrame\": 751, \"endFrame\": 1627, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": 
\"anno/UAV123/car1_2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car1_3\", \"path\": \"data_seq/UAV123/car1\", \"startFrame\": 1627, \"endFrame\": 2629, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car1_3.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car10\", \"path\": \"data_seq/UAV123/car10\", \"startFrame\": 1, \"endFrame\": 1405, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car10.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car11\", \"path\": \"data_seq/UAV123/car11\", \"startFrame\": 1, \"endFrame\": 337, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car11.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car12\", \"path\": \"data_seq/UAV123/car12\", \"startFrame\": 1, \"endFrame\": 499, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car12.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car13\", \"path\": \"data_seq/UAV123/car13\", \"startFrame\": 1, \"endFrame\": 415, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car13.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car14\", \"path\": \"data_seq/UAV123/car14\", \"startFrame\": 1, \"endFrame\": 1327, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car14.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car15\", \"path\": \"data_seq/UAV123/car15\", \"startFrame\": 1, \"endFrame\": 469, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car15.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car16_1\", \"path\": \"data_seq/UAV123/car16\", \"startFrame\": 1, \"endFrame\": 415, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car16_1.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car16_2\", \"path\": \"data_seq/UAV123/car16\", \"startFrame\": 415, \"endFrame\": 1993, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car16_2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car17\", \"path\": \"data_seq/UAV123/car17\", \"startFrame\": 1, \"endFrame\": 1057, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car17.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car18\", \"path\": \"data_seq/UAV123/car18\", \"startFrame\": 1, \"endFrame\": 1207, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car18.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car1_s\", \"path\": \"data_seq/UAV123/car1_s\", \"startFrame\": 1, \"endFrame\": 1475, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car1_s.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car2\", \"path\": \"data_seq/UAV123/car2\", \"startFrame\": 1, \"endFrame\": 1321, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car2_s\", \"path\": \"data_seq/UAV123/car2_s\", \"startFrame\": 1, \"endFrame\": 320, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car2_s.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car3\", \"path\": \"data_seq/UAV123/car3\", \"startFrame\": 1, \"endFrame\": 1717, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car3.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car3_s\", \"path\": 
\"data_seq/UAV123/car3_s\", \"startFrame\": 1, \"endFrame\": 1300, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car3_s.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car4\", \"path\": \"data_seq/UAV123/car4\", \"startFrame\": 1, \"endFrame\": 1345, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car4.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car4_s\", \"path\": \"data_seq/UAV123/car4_s\", \"startFrame\": 1, \"endFrame\": 830, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car4_s.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car5\", \"path\": \"data_seq/UAV123/car5\", \"startFrame\": 1, \"endFrame\": 745, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car5.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_1\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 1, \"endFrame\": 487, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_1.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_2\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 487, \"endFrame\": 1807, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_3\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 1807, \"endFrame\": 2953, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_3.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_4\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 2953, \"endFrame\": 3925, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_4.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_5\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 3925, \"endFrame\": 4861, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_5.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car7\", \"path\": \"data_seq/UAV123/car7\", \"startFrame\": 1, \"endFrame\": 1033, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car7.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car8_1\", \"path\": \"data_seq/UAV123/car8\", \"startFrame\": 1, \"endFrame\": 1357, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car8_1.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car8_2\", \"path\": \"data_seq/UAV123/car8\", \"startFrame\": 1357, \"endFrame\": 2575, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car8_2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car9\", \"path\": \"data_seq/UAV123/car9\", \"startFrame\": 1, \"endFrame\": 1879, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car9.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_group1_1\", \"path\": \"data_seq/UAV123/group1\", \"startFrame\": 1, \"endFrame\": 1333, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group1_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group1_2\", \"path\": \"data_seq/UAV123/group1\", \"startFrame\": 1333, \"endFrame\": 2515, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group1_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group1_3\", \"path\": \"data_seq/UAV123/group1\", \"startFrame\": 2515, \"endFrame\": 3925, \"nz\": 6,\n             
\"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group1_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group1_4\", \"path\": \"data_seq/UAV123/group1\", \"startFrame\": 3925, \"endFrame\": 4873, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group1_4.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group2_1\", \"path\": \"data_seq/UAV123/group2\", \"startFrame\": 1, \"endFrame\": 907, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group2_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group2_2\", \"path\": \"data_seq/UAV123/group2\", \"startFrame\": 907, \"endFrame\": 1771, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group2_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group2_3\", \"path\": \"data_seq/UAV123/group2\", \"startFrame\": 1771, \"endFrame\": 2683, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group2_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group3_1\", \"path\": \"data_seq/UAV123/group3\", \"startFrame\": 1, \"endFrame\": 1567, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group3_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group3_2\", \"path\": \"data_seq/UAV123/group3\", \"startFrame\": 1567, \"endFrame\": 2827, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group3_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group3_3\", \"path\": \"data_seq/UAV123/group3\", \"startFrame\": 2827, \"endFrame\": 4369, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group3_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group3_4\", \"path\": \"data_seq/UAV123/group3\", \"startFrame\": 4369, \"endFrame\": 5527, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group3_4.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person1\", \"path\": \"data_seq/UAV123/person1\", \"startFrame\": 1, \"endFrame\": 799, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person10\", \"path\": \"data_seq/UAV123/person10\", \"startFrame\": 1, \"endFrame\": 1021, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person10.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person11\", \"path\": \"data_seq/UAV123/person11\", \"startFrame\": 1, \"endFrame\": 721, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person11.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person12_1\", \"path\": \"data_seq/UAV123/person12\", \"startFrame\": 1, \"endFrame\": 601, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person12_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person12_2\", \"path\": \"data_seq/UAV123/person12\", \"startFrame\": 601, \"endFrame\": 1621, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person12_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person13\", \"path\": \"data_seq/UAV123/person13\", \"startFrame\": 1, \"endFrame\": 883, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person13.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person14_1\", \"path\": \"data_seq/UAV123/person14\", 
\"startFrame\": 1, \"endFrame\": 847, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person14_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person14_2\", \"path\": \"data_seq/UAV123/person14\", \"startFrame\": 847, \"endFrame\": 1813, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person14_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person14_3\", \"path\": \"data_seq/UAV123/person14\", \"startFrame\": 1813, \"endFrame\": 2923,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person14_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person15\", \"path\": \"data_seq/UAV123/person15\", \"startFrame\": 1, \"endFrame\": 1339, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person15.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person16\", \"path\": \"data_seq/UAV123/person16\", \"startFrame\": 1, \"endFrame\": 1147, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person16.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person17_1\", \"path\": \"data_seq/UAV123/person17\", \"startFrame\": 1, \"endFrame\": 1501, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person17_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person17_2\", \"path\": \"data_seq/UAV123/person17\", \"startFrame\": 1501, \"endFrame\": 2347,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person17_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person18\", \"path\": \"data_seq/UAV123/person18\", \"startFrame\": 1, \"endFrame\": 1393, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person18.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person19_1\", \"path\": \"data_seq/UAV123/person19\", \"startFrame\": 1, \"endFrame\": 1243, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person19_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person19_2\", \"path\": \"data_seq/UAV123/person19\", \"startFrame\": 1243, \"endFrame\": 2791,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person19_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person19_3\", \"path\": \"data_seq/UAV123/person19\", \"startFrame\": 2791, \"endFrame\": 4357,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person19_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person1_s\", \"path\": \"data_seq/UAV123/person1_s\", \"startFrame\": 1, \"endFrame\": 1600, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person1_s.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person2_1\", \"path\": \"data_seq/UAV123/person2\", \"startFrame\": 1, \"endFrame\": 1189, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person2_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person2_2\", \"path\": \"data_seq/UAV123/person2\", \"startFrame\": 1189, \"endFrame\": 2623, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person2_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person20\", \"path\": \"data_seq/UAV123/person20\", \"startFrame\": 1, \"endFrame\": 1783, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person20.txt\", 
\"object_class\": \"person\"},\n            {\"name\": \"uav_person21\", \"path\": \"data_seq/UAV123/person21\", \"startFrame\": 1, \"endFrame\": 487, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person21.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person22\", \"path\": \"data_seq/UAV123/person22\", \"startFrame\": 1, \"endFrame\": 199, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person22.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person23\", \"path\": \"data_seq/UAV123/person23\", \"startFrame\": 1, \"endFrame\": 397, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person23.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person2_s\", \"path\": \"data_seq/UAV123/person2_s\", \"startFrame\": 1, \"endFrame\": 250, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person2_s.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person3\", \"path\": \"data_seq/UAV123/person3\", \"startFrame\": 1, \"endFrame\": 643, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person3_s\", \"path\": \"data_seq/UAV123/person3_s\", \"startFrame\": 1, \"endFrame\": 505, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person3_s.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person4_1\", \"path\": \"data_seq/UAV123/person4\", \"startFrame\": 1, \"endFrame\": 1501, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person4_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person4_2\", \"path\": \"data_seq/UAV123/person4\", \"startFrame\": 1501, \"endFrame\": 2743, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person4_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person5_1\", \"path\": \"data_seq/UAV123/person5\", \"startFrame\": 1, \"endFrame\": 877, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person5_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person5_2\", \"path\": \"data_seq/UAV123/person5\", \"startFrame\": 877, \"endFrame\": 2101, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person5_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person6\", \"path\": \"data_seq/UAV123/person6\", \"startFrame\": 1, \"endFrame\": 901, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person6.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person7_1\", \"path\": \"data_seq/UAV123/person7\", \"startFrame\": 1, \"endFrame\": 1249, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person7_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person7_2\", \"path\": \"data_seq/UAV123/person7\", \"startFrame\": 1249, \"endFrame\": 2065, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person7_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person8_1\", \"path\": \"data_seq/UAV123/person8\", \"startFrame\": 1, \"endFrame\": 1075, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person8_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person8_2\", \"path\": \"data_seq/UAV123/person8\", \"startFrame\": 1075, \"endFrame\": 1525, \"nz\": 6,\n             
\"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person8_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person9\", \"path\": \"data_seq/UAV123/person9\", \"startFrame\": 1, \"endFrame\": 661, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person9.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_truck1\", \"path\": \"data_seq/UAV123/truck1\", \"startFrame\": 1, \"endFrame\": 463, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck1.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_truck2\", \"path\": \"data_seq/UAV123/truck2\", \"startFrame\": 1, \"endFrame\": 385, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck2.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_truck3\", \"path\": \"data_seq/UAV123/truck3\", \"startFrame\": 1, \"endFrame\": 535, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck3.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_truck4_1\", \"path\": \"data_seq/UAV123/truck4\", \"startFrame\": 1, \"endFrame\": 577, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck4_1.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_truck4_2\", \"path\": \"data_seq/UAV123/truck4\", \"startFrame\": 577, \"endFrame\": 1261, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck4_2.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_uav1_1\", \"path\": \"data_seq/UAV123/uav1\", \"startFrame\": 1, \"endFrame\": 1555, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav1_1.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav1_2\", \"path\": \"data_seq/UAV123/uav1\", \"startFrame\": 1555, \"endFrame\": 2377, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav1_2.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav1_3\", \"path\": \"data_seq/UAV123/uav1\", \"startFrame\": 2473, \"endFrame\": 3469, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav1_3.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav2\", \"path\": \"data_seq/UAV123/uav2\", \"startFrame\": 1, \"endFrame\": 133, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav2.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav3\", \"path\": \"data_seq/UAV123/uav3\", \"startFrame\": 1, \"endFrame\": 265, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav3.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav4\", \"path\": \"data_seq/UAV123/uav4\", \"startFrame\": 1, \"endFrame\": 157, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav4.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav5\", \"path\": \"data_seq/UAV123/uav5\", \"startFrame\": 1, \"endFrame\": 139, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav5.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav6\", \"path\": \"data_seq/UAV123/uav6\", \"startFrame\": 1, \"endFrame\": 109, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav6.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav7\", \"path\": \"data_seq/UAV123/uav7\", \"startFrame\": 1, \"endFrame\": 373, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav7.txt\", 
\"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav8\", \"path\": \"data_seq/UAV123/uav8\", \"startFrame\": 1, \"endFrame\": 301, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav8.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_wakeboard1\", \"path\": \"data_seq/UAV123/wakeboard1\", \"startFrame\": 1, \"endFrame\": 421, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard10\", \"path\": \"data_seq/UAV123/wakeboard10\", \"startFrame\": 1, \"endFrame\": 469,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard10.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard2\", \"path\": \"data_seq/UAV123/wakeboard2\", \"startFrame\": 1, \"endFrame\": 733, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard3\", \"path\": \"data_seq/UAV123/wakeboard3\", \"startFrame\": 1, \"endFrame\": 823, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard4\", \"path\": \"data_seq/UAV123/wakeboard4\", \"startFrame\": 1, \"endFrame\": 697, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard4.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard5\", \"path\": \"data_seq/UAV123/wakeboard5\", \"startFrame\": 1, \"endFrame\": 1675, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard5.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard6\", \"path\": \"data_seq/UAV123/wakeboard6\", \"startFrame\": 1, \"endFrame\": 1165, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard6.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard7\", \"path\": \"data_seq/UAV123/wakeboard7\", \"startFrame\": 1, \"endFrame\": 199, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard7.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard8\", \"path\": \"data_seq/UAV123/wakeboard8\", \"startFrame\": 1, \"endFrame\": 1543, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard8.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard9\", \"path\": \"data_seq/UAV123/wakeboard9\", \"startFrame\": 1, \"endFrame\": 355, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard9.txt\", \"object_class\": \"person\"}\n        ]\n\n        return sequence_info_list\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/vot.py",
    "content": "\"\"\"\n\\file vot.py\n\n@brief Python utility functions for VOT integration\n\n@author Luka Cehovin, Alessio Dore\n\n@date 2016\n\n\"\"\"\n\nimport sys\nimport copy\nimport collections\n\ntry:\n    import trax\n    import trax.server\n    TRAX = True\nexcept ImportError:\n    TRAX = False\n\nRectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])\nPoint = collections.namedtuple('Point', ['x', 'y'])\nPolygon = collections.namedtuple('Polygon', ['points'])\n\ndef parse_region(string):\n    tokens = map(float, string.split(','))\n    if len(tokens) == 4:\n        return Rectangle(tokens[0], tokens[1], tokens[2], tokens[3])\n    elif len(tokens) % 2 == 0 and len(tokens) > 4:\n        return Polygon([Point(tokens[i],tokens[i+1]) for i in xrange(0,len(tokens),2)])\n    return None\n\ndef encode_region(region):\n    if isinstance(region, Polygon):\n        return ','.join(['{},{}'.format(p.x,p.y) for p in region.points])\n    elif isinstance(region, Rectangle):\n        return '{},{},{},{}'.format(region.x, region.y, region.width, region.height)\n    else:\n        return \"\"\n\ndef convert_region(region, to):\n\n    if to == 'rectangle':\n\n        if isinstance(region, Rectangle):\n            return copy.copy(region)\n        elif isinstance(region, Polygon):\n            top = sys.float_info.max\n            bottom = sys.float_info.min\n            left = sys.float_info.max\n            right = sys.float_info.min\n\n            for point in region.points:\n                top = min(top, point.y)\n                bottom = max(bottom, point.y)\n                left = min(left, point.x)\n                right = max(right, point.x)\n\n            return Rectangle(left, top, right - left, bottom - top)\n\n        else:\n            return None\n    if to == 'polygon':\n\n        if isinstance(region, Rectangle):\n            points = []\n            points.append((region.x, region.y))\n            points.append((region.x + region.width, region.y))\n            points.append((region.x + region.width, region.y + region.height))\n            points.append((region.x, region.y + region.height))\n            return Polygon(points)\n\n        elif isinstance(region, Polygon):\n            return copy.copy(region)\n        else:\n            return None\n\n    return None\n\nclass VOT(object):\n    \"\"\" Base class for Python VOT integration \"\"\"\n    def __init__(self, region_format):\n        \"\"\" Constructor\n\n        Args:\n            region_format: Region format options\n        \"\"\"\n        assert(region_format in ['rectangle', 'polygon'])\n        if TRAX:\n            options = trax.server.ServerOptions(region_format, trax.image.PATH)\n            self._trax = trax.server.Server(options)\n\n            request = self._trax.wait()\n            assert(request.type == 'initialize')\n            if request.region.type == 'polygon':\n                self._region = Polygon([Point(x[0], x[1]) for x in request.region.points])\n            else:\n                self._region = Rectangle(request.region.x, request.region.y, request.region.width, request.region.height)\n            self._image = str(request.image)\n            self._trax.status(request.region)\n        else:\n            self._files = [x.strip('\\n') for x in open('images.txt', 'r').readlines()]\n            self._frame = 0\n            self._region = convert_region(parse_region(open('region.txt', 'r').readline()), region_format)\n            self._result = []\n\n    def region(self):\n       
 \"\"\"\n        Send configuration message to the client and receive the initialization\n        region and the path of the first image\n\n        Returns:\n            initialization region\n        \"\"\"\n\n        return self._region\n\n    def report(self, region, confidence = 0):\n        \"\"\"\n        Report the tracking results to the client\n\n        Arguments:\n            region: region for the frame\n        \"\"\"\n        assert(isinstance(region, Rectangle) or isinstance(region, Polygon))\n        if TRAX:\n            if isinstance(region, Polygon):\n                tregion = trax.region.Polygon([(x.x, x.y) for x in region.points])\n            else:\n                tregion = trax.region.Rectangle(region.x, region.y, region.width, region.height)\n            self._trax.status(tregion, {\"confidence\" : confidence})\n        else:\n            self._result.append(region)\n            self._frame += 1\n\n    def frame(self):\n        \"\"\"\n        Get a frame (image path) from client\n\n        Returns:\n            absolute path of the image\n        \"\"\"\n        if TRAX:\n            if hasattr(self, \"_image\"):\n                image = str(self._image)\n                del self._image\n                return image\n\n            request = self._trax.wait()\n\n            if request.type == 'frame':\n                return str(request.image)\n            else:\n                return None\n\n        else:\n            if self._frame >= len(self._files):\n                return None\n            return self._files[self._frame]\n\n    def quit(self):\n        if TRAX:\n            self._trax.quit()\n        elif hasattr(self, '_result'):\n            with open('output.txt', 'w') as f:\n                for r in self._result:\n                    f.write(encode_region(r))\n                    f.write('\\n')\n\n    def __del__(self):\n        self.quit()\n\n"
  },
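For reference, the legacy `VOT` class above falls back to a file-based protocol when `trax` is not importable: it reads `images.txt` and `region.txt` from the working directory and writes the reported regions to `output.txt`. A minimal driver sketch follows; the `StaticTracker` stand-in is invented for illustration, and only the `region`/`frame`/`report` handshake comes from the class above.

```python
# Minimal sketch of the file-based VOT loop (non-TraX path) implemented above.
# StaticTracker is a toy stand-in that just repeats the initial box, so the
# snippet exercises only the region/frame/report handshake of the VOT class.
from vot import VOT, Rectangle  # assumes vot.py is importable as `vot`


class StaticTracker:
    def __init__(self, region):
        self.region = region

    def update(self, image_path):
        return self.region       # a real tracker would load and process the frame


handle = VOT('rectangle')        # reads region.txt / images.txt from the cwd
tracker = StaticTracker(handle.region())

imagefile = handle.frame()       # path of the first frame
while imagefile is not None:
    handle.report(tracker.update(imagefile))   # advances the frame counter
    imagefile = handle.frame()   # None once images.txt is exhausted

handle.quit()                    # writes the reported regions to output.txt
```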
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/vot2020.py",
    "content": "\"\"\"\n\\file vot.py\n\n@brief Python utility functions for VOT integration\n\n@author Luka Cehovin, Alessio Dore\n\n@date 2016\n\n\"\"\"\n\nimport sys\nimport copy\nimport collections\nimport numpy as np\n\ntry:\n    import trax\nexcept ImportError:\n    raise Exception('TraX support not found. Please add trax module to Python path.')\n\n\ndef make_full_size(x, output_sz):\n    '''\n    zero-pad input x (right and down) to match output_sz\n    x: numpy array e.g., binary mask\n    output_sz: size of the output [width, height]\n    '''\n    if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]:\n        return x\n    pad_x = output_sz[0] - x.shape[1]\n    if pad_x < 0:\n        x = x[:, :x.shape[1] + pad_x]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_x = 0\n    pad_y = output_sz[1] - x.shape[0]\n    if pad_y < 0:\n        x = x[:x.shape[0] + pad_y, :]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_y = 0\n    return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0)\n\n\nRectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])\nPoint = collections.namedtuple('Point', ['x', 'y'])\nPolygon = collections.namedtuple('Polygon', ['points'])\n\nclass VOT(object):\n    \"\"\" Base class for Python VOT integration \"\"\"\n    def __init__(self, region_format, channels=None):\n        \"\"\" Constructor\n\n        Args:\n            region_format: Region format options\n        \"\"\"\n        assert(region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON, trax.Region.MASK])\n\n        if channels is None:\n            channels = ['color']\n        elif channels == 'rgbd':\n            channels = ['color', 'depth']\n        elif channels == 'rgbt':\n            channels = ['color', 'ir']\n        elif channels == 'ir':\n            channels = ['ir']\n        else:\n            raise Exception('Illegal configuration {}.'.format(channels))\n\n        self._trax = trax.Server([region_format], [trax.Image.PATH], channels, customMetadata=dict(vot=\"python\"))\n\n        request = self._trax.wait()\n        assert(request.type == 'initialize')\n        if isinstance(request.region, trax.Polygon):\n            self._region = Polygon([Point(x[0], x[1]) for x in request.region])\n        if isinstance(request.region, trax.Mask):\n            self._region = request.region.array(True)\n        else:\n            self._region = Rectangle(*request.region.bounds())\n        self._image = [x.path() for k, x in request.image.items()]\n        if len(self._image) == 1:\n            self._image = self._image[0]\n\n        self._trax.status(request.region)\n\n    def region(self):\n        \"\"\"\n        Send configuration message to the client and receive the initialization\n        region and the path of the first image\n\n        Returns:\n            initialization region\n        \"\"\"\n\n        return self._region\n\n    def report(self, region, confidence = None):\n        \"\"\"\n        Report the tracking results to the client\n\n        Arguments:\n            region: region for the frame\n        \"\"\"\n        assert(isinstance(region, (Rectangle, Polygon, np.ndarray)))\n        if isinstance(region, Polygon):\n            tregion = trax.Polygon.create([(x.x, x.y) for x in region.points])\n        if isinstance(region, np.ndarray):\n            tregion = trax.Mask.create(region)\n        else:\n            tregion = trax.Rectangle.create(region.x, region.y, 
region.width, region.height)\n        properties = {}\n        if not confidence is None:\n            properties['confidence'] = confidence\n        self._trax.status(tregion, properties)\n\n    def frame(self):\n        \"\"\"\n        Get a frame (image path) from client\n\n        Returns:\n            absolute path of the image\n        \"\"\"\n        if hasattr(self, \"_image\"):\n            image = self._image\n            del self._image\n            return image\n\n        request = self._trax.wait()\n\n        if request.type == 'frame':\n            image = [x.path() for k, x in request.image.items()]\n            if len(image) == 1:\n                return image[0]\n            return image\n        else:\n            return None\n\n\n    def quit(self):\n        if hasattr(self, '_trax'):\n            self._trax.quit()\n\n    def __del__(self):\n        self.quit()\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/evaluation/votdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\n\n\nclass VOTDataset(BaseDataset):\n    \"\"\"\n    VOT2018 dataset\n\n    Publication:\n        The sixth Visual Object Tracking VOT2018 challenge results.\n        Matej Kristan, Ales Leonardis, Jiri Matas, Michael Felsberg, Roman Pfugfelder, Luka Cehovin Zajc, Tomas Vojir,\n        Goutam Bhat, Alan Lukezic et al.\n        ECCV, 2018\n        https://prints.vicos.si/publications/365\n\n    Download the dataset from http://www.votchallenge.net/vot2018/dataset.html\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.vot_path\n        self.sequence_list = self._get_sequence_list()\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n    def _construct_sequence(self, sequence_name):\n        sequence_path = sequence_name\n        nz = 8\n        ext = 'jpg'\n        start_frame = 1\n\n        anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)\n        try:\n            ground_truth_rect = np.loadtxt(str(anno_path), dtype=np.float64)\n        except:\n            ground_truth_rect = np.loadtxt(str(anno_path), delimiter=',', dtype=np.float64)\n\n        end_frame = ground_truth_rect.shape[0]\n\n        frames = ['{base_path}/{sequence_path}/color/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,\n                  sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext)\n                  for frame_num in range(start_frame, end_frame+1)]\n\n        # Convert gt\n        if ground_truth_rect.shape[1] > 4:\n            gt_x_all = ground_truth_rect[:, [0, 2, 4, 6]]\n            gt_y_all = ground_truth_rect[:, [1, 3, 5, 7]]\n\n            x1 = np.amin(gt_x_all, 1).reshape(-1,1)\n            y1 = np.amin(gt_y_all, 1).reshape(-1,1)\n            x2 = np.amax(gt_x_all, 1).reshape(-1,1)\n            y2 = np.amax(gt_y_all, 1).reshape(-1,1)\n\n            ground_truth_rect = np.concatenate((x1, y1, x2-x1, y2-y1), 1)\n        return Sequence(sequence_name, frames, 'vot', ground_truth_rect)\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _get_sequence_list(self):\n        sequence_list= ['ants1',\n                        'ants3',\n                        'bag',\n                        'ball1',\n                        'ball2',\n                        'basketball',\n                        'birds1',\n                        'blanket',\n                        'bmx',\n                        'bolt1',\n                        'bolt2',\n                        'book',\n                        'butterfly',\n                        'car1',\n                        'conduction1',\n                        'crabs1',\n                        'crossing',\n                        'dinosaur',\n                        'drone_across',\n                        'drone_flip',\n                        'drone1',\n                        'fernando',\n                        'fish1',\n                        'fish2',\n                        'fish3',\n                        'flamingo1',\n                        'frisbee',\n                        'girl',\n                        'glove',\n                        'godfather',\n                        'graduate',\n                        'gymnastics1',\n                        'gymnastics2',\n                        'gymnastics3',\n                        'hand',\n              
          'handball1',\n                        'handball2',\n                        'helicopter',\n                        'iceskater1',\n                        'iceskater2',\n                        'leaves',\n                        'matrix',\n                        'motocross1',\n                        'motocross2',\n                        'nature',\n                        'pedestrian1',\n                        'rabbit',\n                        'racing',\n                        'road',\n                        'shaking',\n                        'sheep',\n                        'singer2',\n                        'singer3',\n                        'soccer1',\n                        'soccer2',\n                        'soldier',\n                        'tiger',\n                        'traffic',\n                        'wiper',\n                        'zebrafish1']\n\n        return sequence_list\n"
  },
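`_construct_sequence` above collapses VOT's eight-value rotated-polygon annotations into axis-aligned `(x, y, w, h)` boxes by taking per-row min/max over the x and y coordinates. A worked example of that conversion, with an invented polygon row:

```python
# Worked example of the polygon-to-box conversion in _construct_sequence above:
# one 8-value VOT polygon row (x1, y1, ..., x4, y4) is reduced to an
# axis-aligned (x, y, w, h) rectangle. Values are illustrative only.
import numpy as np

gt = np.array([[100., 50., 160., 60., 150., 120., 90., 110.]])  # one polygon row

gt_x = gt[:, [0, 2, 4, 6]]
gt_y = gt[:, [1, 3, 5, 7]]
x1, y1 = gt_x.min(1, keepdims=True), gt_y.min(1, keepdims=True)
x2, y2 = gt_x.max(1, keepdims=True), gt_y.max(1, keepdims=True)

rect = np.concatenate((x1, y1, x2 - x1, y2 - y1), 1)
print(rect)  # [[90. 50. 70. 70.]] -> x=90, y=50, w=70, h=70
```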
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/experiments/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/experiments/myexperiments.py",
    "content": "from pytracking.evaluation import Tracker, get_dataset, trackerlist\n\n\ndef atom_nfs_uav():\n    # Run three runs of ATOM on NFS and UAV datasets\n    trackers = trackerlist('atom', 'default', range(3))\n\n    dataset = get_dataset('nfs', 'uav')\n    return trackers, dataset\n\n\ndef uav_test():\n    # Run DiMP18, ATOM and ECO on the UAV dataset\n    trackers = trackerlist('dimp', 'dimp18', range(1)) + \\\n               trackerlist('atom', 'default', range(1)) + \\\n               trackerlist('eco', 'default', range(1))\n\n    dataset = get_dataset('uav')\n    return trackers, dataset\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/features/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/features/augmentation.py",
    "content": "import numpy as np\nimport math\nimport torch\nimport torch.nn.functional as F\nimport cv2 as cv\nimport random\nfrom pytracking.features.preprocessing import numpy_to_torch, torch_to_numpy\n\n\nclass Transform:\n    \"\"\"Base data augmentation transform class.\"\"\"\n\n    def __init__(self, output_sz = None, shift = None):\n        self.output_sz = output_sz\n        self.shift = (0,0) if shift is None else shift\n\n    def __call__(self, image, is_mask=False):\n        raise NotImplementedError\n\n    def crop_to_output(self, image):\n        if isinstance(image, torch.Tensor):\n            imsz = image.shape[2:]\n            if self.output_sz is None:\n                pad_h = 0\n                pad_w = 0\n            else:\n                pad_h = (self.output_sz[0] - imsz[0]) / 2\n                pad_w = (self.output_sz[1] - imsz[1]) / 2\n\n            pad_left = math.floor(pad_w) + self.shift[1]\n            pad_right = math.ceil(pad_w) - self.shift[1]\n            pad_top = math.floor(pad_h) + self.shift[0]\n            pad_bottom = math.ceil(pad_h) - self.shift[0]\n\n            return F.pad(image, (pad_left, pad_right, pad_top, pad_bottom), 'replicate')\n        else:\n            raise NotImplementedError\n\nclass Identity(Transform):\n    \"\"\"Identity transformation.\"\"\"\n    def __call__(self, image, is_mask=False):\n        return self.crop_to_output(image)\n\nclass FlipHorizontal(Transform):\n    \"\"\"Flip along horizontal axis.\"\"\"\n    def __call__(self, image, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            return self.crop_to_output(image.flip((3,)))\n        else:\n            return np.fliplr(image)\n\nclass FlipVertical(Transform):\n    \"\"\"Flip along vertical axis.\"\"\"\n    def __call__(self, image: torch.Tensor, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            return self.crop_to_output(image.flip((2,)))\n        else:\n            return np.flipud(image)\n\nclass Translation(Transform):\n    \"\"\"Translate.\"\"\"\n    def __init__(self, translation, output_sz = None, shift = None):\n        super().__init__(output_sz, shift)\n        self.shift = (self.shift[0] + translation[0], self.shift[1] + translation[1])\n\n    def __call__(self, image, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            return self.crop_to_output(image)\n        else:\n            raise NotImplementedError\n\nclass Scale(Transform):\n    \"\"\"Scale.\"\"\"\n    def __init__(self, scale_factor, output_sz = None, shift = None):\n        super().__init__(output_sz, shift)\n        self.scale_factor = scale_factor\n\n    def __call__(self, image, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            # Calculate new size. 
Ensure that it is even so that crop/pad becomes easier\n            h_orig, w_orig = image.shape[2:]\n\n            if h_orig != w_orig:\n                raise NotImplementedError\n\n            h_new = round(h_orig /self.scale_factor)\n            h_new += (h_new - h_orig) % 2\n            w_new = round(w_orig /self.scale_factor)\n            w_new += (w_new - w_orig) % 2\n\n            image_resized = F.interpolate(image, [h_new, w_new], mode='bilinear')\n\n            return self.crop_to_output(image_resized)\n        else:\n            raise NotImplementedError\n\n\nclass Affine(Transform):\n    \"\"\"Affine transformation.\"\"\"\n    def __init__(self, transform_matrix, output_sz = None, shift = None):\n        super().__init__(output_sz, shift)\n        self.transform_matrix = transform_matrix\n\n    def __call__(self, image, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            return self.crop_to_output(numpy_to_torch(self(torch_to_numpy(image))))\n        else:\n            return cv.warpAffine(image, self.transform_matrix, image.shape[1::-1], borderMode=cv.BORDER_REPLICATE)\n\n\nclass Rotate(Transform):\n    \"\"\"Rotate with given angle.\"\"\"\n    def __init__(self, angle, output_sz = None, shift = None):\n        super().__init__(output_sz, shift)\n        self.angle = math.pi * angle/180\n\n    def __call__(self, image, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            return self.crop_to_output(numpy_to_torch(self(torch_to_numpy(image))))\n        else:\n            c = (np.expand_dims(np.array(image.shape[:2]),1)-1)/2\n            R = np.array([[math.cos(self.angle), math.sin(self.angle)],\n                          [-math.sin(self.angle), math.cos(self.angle)]])\n            H =np.concatenate([R, c - R @ c], 1)\n            return cv.warpAffine(image, H, image.shape[1::-1], borderMode=cv.BORDER_REPLICATE)\n\n\nclass Blur(Transform):\n    \"\"\"Blur with given sigma (can be axis dependent).\"\"\"\n    def __init__(self, sigma, output_sz = None, shift = None):\n        super().__init__(output_sz, shift)\n        if isinstance(sigma, (float, int)):\n            sigma = (sigma, sigma)\n        self.sigma = sigma\n        self.filter_size = [math.ceil(2*s) for s in self.sigma]\n        x_coord = [torch.arange(-sz, sz+1, dtype=torch.float32) for sz in self.filter_size]\n        self.filter = [torch.exp(-(x**2)/(2*s**2)) for x, s in zip(x_coord, self.sigma)]\n        self.filter[0] = self.filter[0].view(1,1,-1,1) / self.filter[0].sum()\n        self.filter[1] = self.filter[1].view(1,1,1,-1) / self.filter[1].sum()\n\n    def __call__(self, image, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            sz = image.shape[2:]\n            im1 = F.conv2d(image.view(-1,1,sz[0],sz[1]), self.filter[0], padding=(self.filter_size[0],0))\n            return self.crop_to_output(F.conv2d(im1, self.filter[1], padding=(0,self.filter_size[1])).view(1,-1,sz[0],sz[1]))\n        else:\n            raise NotImplementedError\n\n\nclass RandomAffine(Transform):\n    \"\"\"Affine transformation.\"\"\"\n    def __init__(self, p_flip=0.0, max_rotation=0.0, max_shear=0.0, max_scale=0.0, max_ar_factor=0.0,\n                 border_mode='constant', output_sz = None, shift = None):\n        super().__init__(output_sz, shift)\n        self.p_flip = p_flip\n        self.max_rotation = max_rotation\n        self.max_shear = max_shear\n        self.max_scale = max_scale\n        self.max_ar_factor = max_ar_factor\n\n        self.pad_amount = 0\n        
if border_mode == 'constant':\n            self.border_flag = cv.BORDER_CONSTANT\n        elif border_mode == 'replicate':\n            self.border_flag = cv.BORDER_REPLICATE\n        else:\n            raise Exception('Unknown border mode {}.'.format(border_mode))\n\n        self.roll_values = self.roll()\n\n    def roll(self):\n        do_flip = random.random() < self.p_flip\n        theta = random.uniform(-self.max_rotation, self.max_rotation)\n\n        shear_x = random.uniform(-self.max_shear, self.max_shear)\n        shear_y = random.uniform(-self.max_shear, self.max_shear)\n\n        ar_factor = np.exp(random.uniform(-self.max_ar_factor, self.max_ar_factor))\n        scale_factor = np.exp(random.uniform(-self.max_scale, self.max_scale))\n\n        return do_flip, theta, (shear_x, shear_y), (scale_factor, scale_factor * ar_factor)\n\n    def _construct_t_mat(self, image_shape, do_flip, theta, shear_values, scale_factors):\n        im_h, im_w = image_shape\n        t_mat = np.identity(3)\n\n        if do_flip:\n            t_mat[0, 0] = -1.0\n            t_mat[0, 2] = im_w\n\n        t_rot = cv.getRotationMatrix2D((im_w * 0.5, im_h * 0.5), theta, 1.0)\n        t_rot = np.concatenate((t_rot, np.array([0.0, 0.0, 1.0]).reshape(1, 3)))\n\n        t_shear = np.array([[1.0, shear_values[0], -shear_values[0] * 0.5 * im_w],\n                            [shear_values[1], 1.0, -shear_values[1] * 0.5 * im_h],\n                            [0.0, 0.0, 1.0]])\n\n        t_scale = np.array([[scale_factors[0], 0.0, (1.0 - scale_factors[0]) * 0.5 * im_w],\n                            [0.0, scale_factors[1], (1.0 - scale_factors[1]) * 0.5 * im_h],\n                            [0.0, 0.0, 1.0]])\n\n        t_mat = t_scale @ t_rot @ t_shear @ t_mat\n\n        t_mat[0, 2] += self.pad_amount\n        t_mat[1, 2] += self.pad_amount\n\n        t_mat = t_mat[:2, :]\n\n        return t_mat\n\n    def __call__(self, image, is_mask=False):\n        input_tensor = torch.is_tensor(image)\n        if input_tensor:\n            image = torch_to_numpy(image)\n\n        do_flip, theta, shear_values, scale_factors = self.roll_values\n        t_mat = self._construct_t_mat(image.shape[:2], do_flip, theta, shear_values, scale_factors)\n        output_sz = (image.shape[1] + 2*self.pad_amount, image.shape[0] + 2*self.pad_amount)\n\n        if not is_mask:\n            image_t = cv.warpAffine(image, t_mat, output_sz, flags=cv.INTER_LINEAR,\n                                    borderMode=self.border_flag)\n        else:\n            image_t = cv.warpAffine(image, t_mat, output_sz, flags=cv.INTER_NEAREST,\n                                    borderMode=self.border_flag)\n            image_t = image_t.reshape(image.shape)\n\n        if input_tensor:\n            image_t = numpy_to_torch(image_t)\n\n        return self.crop_to_output(image_t)\n"
  },
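A sketch of how the transforms above are typically composed for test-time augmentation: each `Transform` pads or crops its result to a common `output_sz`, so the outputs can be concatenated along the batch dimension. The import path assumes the repo layout shown here; sizes and values are illustrative.

```python
# Sketch of composing the augmentation transforms defined above.
# Each transform maps a (1, C, H, W) patch to a tensor of the shared
# output size, so the results stack cleanly along dim 0.
import torch
from pytracking.features.augmentation import Identity, FlipHorizontal, Blur

output_sz = (64, 64)
transforms = [Identity(output_sz),
              FlipHorizontal(output_sz),
              Blur(sigma=1.5, output_sz=output_sz)]

patch = torch.rand(1, 3, 64, 64)                 # one RGB patch, NCHW
augmented = torch.cat([T(patch) for T in transforms])
print(augmented.shape)                           # torch.Size([3, 3, 64, 64])
```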
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/features/color.py",
    "content": "import torch\nfrom pytracking.features.featurebase import FeatureBase\n\n\nclass RGB(FeatureBase):\n    \"\"\"RGB feature normalized to [-0.5, 0.5].\"\"\"\n    def dim(self):\n        return 3\n\n    def stride(self):\n        return self.pool_stride\n\n    def extract(self, im: torch.Tensor):\n        return im/255 - 0.5\n\n\nclass Grayscale(FeatureBase):\n    \"\"\"Grayscale feature normalized to [-0.5, 0.5].\"\"\"\n    def dim(self):\n        return 1\n\n    def stride(self):\n        return self.pool_stride\n\n    def extract(self, im: torch.Tensor):\n        return torch.mean(im/255 - 0.5, 1, keepdim=True)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/features/deep.py",
    "content": "from pytracking.features.featurebase import FeatureBase, MultiFeatureBase\nimport torch\nimport torchvision\nfrom pytracking import TensorList\nfrom pytracking.evaluation.environment import env_settings\nimport os\nfrom pytracking.utils.loading import load_network\nfrom ltr.models.backbone.resnet18_vggm import resnet18_vggmconv1\n\nnormalize = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                                             std=[0.229, 0.224, 0.225])\n\n\nclass ResNet18m1(MultiFeatureBase):\n    \"\"\"ResNet18 feature together with the VGG-m conv1 layer.\n    args:\n        output_layers: List of layers to output.\n        net_path: Relative or absolute net path (default should be fine).\n        use_gpu: Use GPU or CPU.\n    \"\"\"\n\n    def __init__(self, output_layers, net_path=None, use_gpu=True, *args, **kwargs):\n        super(ResNet18m1, self).__init__(*args, **kwargs)\n\n        for l in output_layers:\n            if l not in ['vggconv1', 'conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer')\n\n        self.output_layers = list(output_layers)\n        self.use_gpu = use_gpu\n        self.net_path = 'resnet18_vggmconv1/resnet18_vggmconv1.pth' if net_path is None else net_path\n\n    def initialize(self):\n\n        if isinstance(self.pool_stride, int) and self.pool_stride == 1:\n            self.pool_stride = [1] * len(self.output_layers)\n\n        self.layer_stride = {'vggconv1': 2, 'conv1': 2, 'layer1': 4, 'layer2': 8, 'layer3': 16, 'layer4': 32,\n                             'fc': None}\n        self.layer_dim = {'vggconv1': 96, 'conv1': 64, 'layer1': 64, 'layer2': 128, 'layer3': 256, 'layer4': 512,\n                          'fc': None}\n\n        self.mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)\n        self.std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)\n\n        if os.path.isabs(self.net_path):\n            net_path_full = [self.net_path]\n        else:\n            root_paths = env_settings().network_path\n            if isinstance(root_paths, str):\n                root_paths = [root_paths]\n            net_path_full = [os.path.join(root, self.net_path) for root in root_paths]\n\n        self.net = None\n        for net_path in net_path_full:\n            try:\n                self.net = resnet18_vggmconv1(self.output_layers, path=net_path)\n                break\n            except:\n                pass\n        if self.net is None:\n            raise Exception('Did not find network file {}'.format(self.net_path))\n\n        if self.use_gpu:\n            self.net.cuda()\n        self.net.eval()\n\n    def dim(self):\n        return TensorList([self.layer_dim[l] for l in self.output_layers])\n\n    def stride(self):\n        return TensorList([s * self.layer_stride[l] for l, s in zip(self.output_layers, self.pool_stride)])\n\n    def extract(self, im: torch.Tensor):\n        im = im / 255\n        im -= self.mean\n        im /= self.std\n\n        if self.use_gpu:\n            im = im.cuda()\n\n        with torch.no_grad():\n            return TensorList(self.net(im).values())\n\n\nclass ATOMResNet18(MultiFeatureBase):\n    \"\"\"ResNet18 feature with the ATOM IoUNet.\n    args:\n        output_layers: List of layers to output.\n        net_path: Relative or absolute net path (default should be fine).\n        use_gpu: Use GPU or CPU.\n    \"\"\"\n\n    def __init__(self, output_layers=('layer3',), net_path='atom_iou', use_gpu=True, *args, 
**kwargs):\n        super().__init__(*args, **kwargs)\n\n        self.output_layers = list(output_layers)\n        self.use_gpu = use_gpu\n        self.net_path = net_path\n\n    def initialize(self):\n        self.net = load_network(self.net_path)\n\n        if self.use_gpu:\n            self.net.cuda()\n        self.net.eval()\n\n        self.iou_predictor = self.net.bb_regressor\n\n        self.layer_stride = {'conv1': 2, 'layer1': 4, 'layer2': 8, 'layer3': 16, 'layer4': 32, 'classification': 16,\n                             'fc': None}\n        self.layer_dim = {'conv1': 64, 'layer1': 64, 'layer2': 128, 'layer3': 256, 'layer4': 512, 'classification': 256,\n                          'fc': None}\n\n        self.iounet_feature_layers = self.net.bb_regressor_layer\n\n        if isinstance(self.pool_stride, int) and self.pool_stride == 1:\n            self.pool_stride = [1] * len(self.output_layers)\n\n        self.feature_layers = sorted(list(set(self.output_layers + self.iounet_feature_layers)))\n\n        self.mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)\n        self.std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)\n\n    def dim(self):\n        return TensorList([self.layer_dim[l] for l in self.output_layers])\n\n    def stride(self):\n        return TensorList([s * self.layer_stride[l] for l, s in zip(self.output_layers, self.pool_stride)])\n\n    def extract(self, im: torch.Tensor):\n        im = im / 255\n        im -= self.mean\n        im /= self.std\n\n        if self.use_gpu:\n            im = im.cuda()\n\n        with torch.no_grad():\n            output_features = self.net.extract_features(im, self.feature_layers)\n\n        # Store the raw resnet features which are input to iounet\n        self.iounet_backbone_features = TensorList(\n            [output_features[layer].clone() for layer in self.iounet_feature_layers])\n\n        # Store the processed features from iounet, just before pooling\n        with torch.no_grad():\n            self.iounet_features = TensorList(self.iou_predictor.get_iou_feat(self.iounet_backbone_features))\n\n        return TensorList([output_features[layer] for layer in self.output_layers])\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/features/extractor.py",
    "content": "import torch\nfrom pytracking.features.preprocessing import sample_patch\nfrom pytracking import TensorList\n\nclass ExtractorBase:\n    \"\"\"Base feature extractor class.\n    args:\n        features: List of features.\n    \"\"\"\n    def __init__(self, features):\n        self.features = features\n\n    def initialize(self):\n        for f in self.features:\n            f.initialize()\n\n\nclass SingleResolutionExtractor(ExtractorBase):\n    \"\"\"Single resolution feature extractor.\n    args:\n        features: List of features.\n    \"\"\"\n    def __init__(self, features):\n        super().__init__(features)\n\n        self.feature_stride = self.features[0].stride()\n        if isinstance(self.feature_stride, (list, TensorList)):\n            self.feature_stride = self.feature_stride[0]\n\n    def stride(self):\n        return self.feature_stride\n\n    def size(self, input_sz):\n        return input_sz // self.stride()\n\n    def extract(self, im, pos, scales, image_sz):\n        if isinstance(scales, (int, float)):\n            scales = [scales]\n\n        # Get image patches\n        im_patches = torch.cat([sample_patch(im, pos, s*image_sz, image_sz) for s in scales])\n\n        # Compute features\n        feature_map = torch.cat(TensorList([f.get_feature(im_patches) for f in self.features]).unroll(), dim=1)\n\n        return feature_map\n\n\nclass MultiResolutionExtractor(ExtractorBase):\n    \"\"\"Multi-resolution feature extractor.\n    args:\n        features: List of features.\n    \"\"\"\n    def __init__(self, features, patch_mode='replicate', max_scale_change=None):\n        super().__init__(features)\n        self.patch_mode = patch_mode\n        self.max_scale_change = max_scale_change\n        self.is_color = None\n\n    def stride(self):\n        return torch.Tensor(TensorList([f.stride() for f in self.features if self._return_feature(f)]).unroll().list())\n\n    def size(self, input_sz):\n        return TensorList([f.size(input_sz) for f in self.features if self._return_feature(f)]).unroll()\n\n    def dim(self):\n        return TensorList([f.dim() for f in self.features if self._return_feature(f)]).unroll()\n\n    def get_fparams(self, name: str = None):\n        if name is None:\n            return [f.fparams for f in self.features if self._return_feature(f)]\n        return TensorList([getattr(f.fparams, name) for f in self.features if self._return_feature(f)]).unroll()\n\n    def get_attribute(self, name: str, ignore_missing: bool = False):\n        if ignore_missing:\n            return TensorList([getattr(f, name) for f in self.features if self._return_feature(f) and hasattr(f, name)])\n        else:\n            return TensorList([getattr(f, name, None) for f in self.features if self._return_feature(f)])\n\n    def get_unique_attribute(self, name: str):\n        feat = None\n        for f in self.features:\n            if self._return_feature(f) and hasattr(f, name):\n                if feat is not None:\n                    raise RuntimeError('The attribute was not unique.')\n                feat = f\n        if feat is None:\n            raise RuntimeError('The attribute did not exist')\n        return getattr(feat, name)\n\n    def _return_feature(self, f):\n        return self.is_color is None or self.is_color and f.use_for_color or not self.is_color and f.use_for_gray\n\n    def set_is_color(self, is_color: bool):\n        self.is_color = is_color\n\n    def extract(self, im, pos, scales, image_sz, return_patches=False):\n        
\"\"\"Extract features.\n        args:\n            im: Image.\n            pos: Center position for extraction.\n            scales: Image scales to extract features from.\n            image_sz: Size to resize the image samples to before extraction.\n        \"\"\"\n        if isinstance(scales, (int, float)):\n            scales = [scales]\n\n        # Get image patches\n        patch_iter, coord_iter = zip(*(sample_patch(im, pos, s*image_sz, image_sz, mode=self.patch_mode,\n                                                    max_scale_change=self.max_scale_change) for s in scales))\n        im_patches = torch.cat(list(patch_iter))\n        patch_coords = torch.cat(list(coord_iter))\n\n        # im_patches = torch.cat([sample_patch(im, pos, s*image_sz, image_sz) for s in scales])\n\n        # Compute features\n        feature_map = TensorList([f.get_feature(im_patches) for f in self.features]).unroll()\n\n        if return_patches:\n            return feature_map, patch_coords, im_patches\n        else:\n            return feature_map, patch_coords\n\n    def extract_transformed(self, im, pos, scale, image_sz, transforms):\n        \"\"\"Extract features from a set of transformed image samples.\n        args:\n            im: Image.\n            pos: Center position for extraction.\n            scale: Image scale to extract features from.\n            image_sz: Size to resize the image samples to before extraction.\n            transforms: A set of image transforms to apply.\n        \"\"\"\n\n        # Get image patche\n        im_patch, _ = sample_patch(im, pos, scale*image_sz, image_sz)\n\n        # Apply transforms\n        im_patches = torch.cat([T(im_patch) for T in transforms])\n\n        # Compute features\n        feature_map = TensorList([f.get_feature(im_patches) for f in self.features]).unroll()\n\n        return feature_map \n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/features/featurebase.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom pytracking import TensorList\n\n\nclass FeatureBase:\n    \"\"\"Base feature class.\n    args:\n        fparams: Feature specific parameters.\n        pool_stride: Amount of average pooling to apply do downsample the feature map.\n        output_size: Alternatively, specify the output size of the feature map. Adaptive average pooling will be applied.\n        normalize_power: The power exponent for the normalization. None means no normalization (default).\n        use_for_color: Use this feature for color images.\n        use_for_gray: Use this feature for grayscale images.\n    \"\"\"\n    def __init__(self, fparams = None, pool_stride = None, output_size = None, normalize_power = None, use_for_color = True, use_for_gray = True):\n        self.fparams = fparams\n        self.pool_stride = 1 if pool_stride is None else pool_stride\n        self.output_size = output_size\n        self.normalize_power = normalize_power\n        self.use_for_color = use_for_color\n        self.use_for_gray = use_for_gray\n\n    def initialize(self):\n        pass\n\n    def dim(self):\n        raise NotImplementedError\n\n    def stride(self):\n        raise NotImplementedError\n\n    def size(self, im_sz):\n        if self.output_size is None:\n            return im_sz // self.stride()\n        if isinstance(im_sz, torch.Tensor):\n            return torch.Tensor([self.output_size[0], self.output_size[1]])\n        return self.output_size\n\n    def extract(self, im):\n        \"\"\"Performs feature extraction.\"\"\"\n        raise NotImplementedError\n\n    def get_feature(self, im: torch.Tensor):\n        \"\"\"Get the feature. Generally, call this function.\n        args:\n            im: image patch as a torch.Tensor.\n        \"\"\"\n\n        # Return empty tensor if it should not be used\n        is_color = im.shape[1] == 3\n        if is_color and not self.use_for_color or not is_color and not self.use_for_gray:\n            return torch.Tensor([])\n\n        # Extract feature\n        feat = self.extract(im)\n\n        # Pool/downsample\n        if self.output_size is not None:\n            feat = F.adaptive_avg_pool2d(feat, self.output_size)\n        elif self.pool_stride != 1:\n            feat = F.avg_pool2d(feat, self.pool_stride, self.pool_stride)\n\n        # Normalize\n        if self.normalize_power is not None:\n            feat /= (torch.sum(feat.abs().view(feat.shape[0],1,1,-1)**self.normalize_power, dim=3, keepdim=True) /\n                     (feat.shape[1]*feat.shape[2]*feat.shape[3]) + 1e-10)**(1/self.normalize_power)\n\n        return feat\n\n\nclass MultiFeatureBase(FeatureBase):\n    \"\"\"Base class for features potentially having multiple feature blocks as output (like CNNs).\n    See FeatureBase for more info.\n    \"\"\"\n    def size(self, im_sz):\n        if self.output_size is None:\n            return TensorList([im_sz // s for s in self.stride()])\n        if isinstance(im_sz, torch.Tensor):\n            return TensorList([im_sz // s if sz is None else torch.Tensor([sz[0], sz[1]]) for sz, s in zip(self.output_size, self.stride())])\n\n    def get_feature(self, im: torch.Tensor):\n        \"\"\"Get the feature. 
Generally, call this function.\n        args:\n            im: image patch as a torch.Tensor.\n        \"\"\"\n\n        # Return empty tensor if it should not be used\n        is_color = im.shape[1] == 3\n        if is_color and not self.use_for_color or not is_color and not self.use_for_gray:\n            return torch.Tensor([])\n\n        feat_list = self.extract(im)\n\n        output_sz = [None]*len(feat_list) if self.output_size is None else self.output_size\n\n        # Pool/downsample\n        for i, (sz, s) in enumerate(zip(output_sz, self.pool_stride)):\n            if sz is not None:\n                feat_list[i] = F.adaptive_avg_pool2d(feat_list[i], sz)\n            elif s != 1:\n                feat_list[i] = F.avg_pool2d(feat_list[i], s, s)\n\n        # Normalize\n        if self.normalize_power is not None:\n            for feat in feat_list:\n                feat /= (torch.sum(feat.abs().view(feat.shape[0],1,1,-1)**self.normalize_power, dim=3, keepdim=True) /\n                         (feat.shape[1]*feat.shape[2]*feat.shape[3]) + 1e-10)**(1/self.normalize_power)\n\n        return feat_list"
  },
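The `normalize_power` branch of `get_feature` above divides the features by the mean p-th power of their magnitudes, raised to `1/p` (plus a small epsilon for stability). A numeric illustration with `p = 2`, using invented values:

```python
# Numeric illustration of the normalize_power branch in FeatureBase.get_feature.
import torch

p = 2
feat = torch.tensor([[[[3.0, 4.0]]]])        # shape (1, 1, 1, 2)

norm = (torch.sum(feat.abs().view(feat.shape[0], 1, 1, -1) ** p, dim=3, keepdim=True) /
        (feat.shape[1] * feat.shape[2] * feat.shape[3]) + 1e-10) ** (1 / p)
print(norm)            # sqrt((9 + 16) / 2) = sqrt(12.5) ~= 3.5355
print(feat / norm)     # tensor([[[[0.8485, 1.1314]]]])
```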
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/features/net_wrappers.py",
    "content": "import torch\nfrom pytracking.utils.loading import load_network\n\n\nclass NetWrapper:\n    \"\"\"Used for wrapping networks in pytracking.\n    Network modules and functions can be accessed directly as if they were members of this class.\"\"\"\n    _rec_iter=0\n    def __init__(self, net_path, use_gpu=True, initialize=False, **kwargs):\n        self.net_path = net_path\n        self.use_gpu = use_gpu\n        self.net = None\n        self.net_kwargs = kwargs\n        if initialize:\n            self.initialize()\n\n    def __getattr__(self, name):\n        if self._rec_iter > 0:\n            self._rec_iter = 0\n            return None\n        self._rec_iter += 1\n        try:\n            ret_val = getattr(self.net, name)\n        except Exception as e:\n            self._rec_iter = 0\n            raise e\n        self._rec_iter = 0\n        return ret_val\n\n    def load_network(self):\n        self.net = load_network(self.net_path, **self.net_kwargs)\n        if self.use_gpu:\n            self.cuda()\n        self.eval()\n\n    def initialize(self):\n        self.load_network()\n\n\nclass NetWithBackbone(NetWrapper):\n    \"\"\"Wraps a network with a common backbone.\n    Assumes the network have a 'extract_backbone_features(image)' function.\"\"\"\n\n    def __init__(self, net_path, use_gpu=True, initialize=False, image_format='rgb',\n                 mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), **kwargs):\n        super().__init__(net_path, use_gpu, initialize, **kwargs)\n\n        self.image_format = image_format\n        self._mean = torch.Tensor(mean).view(1, -1, 1, 1)\n        self._std = torch.Tensor(std).view(1, -1, 1, 1)\n\n    def initialize(self, image_format='rgb', mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):\n        super().initialize()\n\n    def preprocess_image(self, im: torch.Tensor):\n        \"\"\"Normalize the image with the mean and standard deviation used by the network.\"\"\"\n\n        if self.image_format in ['rgb', 'bgr']:\n            im = im/255\n\n        if self.image_format in ['bgr', 'bgr255']:\n            im = im[:, [2, 1, 0], :, :]\n        im -= self._mean\n        im /= self._std\n\n        if self.use_gpu:\n            im = im.cuda()\n\n        return im\n\n    def extract_backbone(self, im: torch.Tensor):\n        \"\"\"Extract backbone features from the network.\n        Expects a float tensor image with pixel range [0, 255].\"\"\"\n        im = self.preprocess_image(im)\n        return self.net.extract_backbone_features(im)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/features/preprocessing.py",
    "content": "import torch\nimport torch.nn.functional as F\nimport numpy as np\n\n\ndef numpy_to_torch(a: np.ndarray):\n    return torch.from_numpy(a).float().permute(2, 0, 1).unsqueeze(0)\n\n\ndef torch_to_numpy(a: torch.Tensor):\n    return a.squeeze(0).permute(1,2,0).numpy()\n\n\ndef sample_patch_transformed(im, pos, scale, image_sz, transforms, is_mask=False):\n    \"\"\"Extract transformed image samples.\n    args:\n        im: Image.\n        pos: Center position for extraction.\n        scale: Image scale to extract features from.\n        image_sz: Size to resize the image samples to before extraction.\n        transforms: A set of image transforms to apply.\n    \"\"\"\n\n    # Get image patche\n    im_patch, _ = sample_patch(im, pos, scale*image_sz, image_sz, is_mask=is_mask)\n\n    # Apply transforms\n    im_patches = torch.cat([T(im_patch, is_mask=is_mask) for T in transforms])\n\n    return im_patches\n\n\ndef sample_patch_multiscale(im, pos, scales, image_sz, mode: str='replicate', max_scale_change=None):\n    \"\"\"Extract image patches at multiple scales.\n    args:\n        im: Image.\n        pos: Center position for extraction.\n        scales: Image scales to extract image patches from.\n        image_sz: Size to resize the image samples to\n        mode: how to treat image borders: 'replicate' (default), 'inside' or 'inside_major'\n        max_scale_change: maximum allowed scale change when using 'inside' and 'inside_major' mode\n    \"\"\"\n    if isinstance(scales, (int, float)):\n        scales = [scales]\n\n    # Get image patches\n    patch_iter, coord_iter = zip(*(sample_patch(im, pos, s*image_sz, image_sz, mode=mode,\n                                                max_scale_change=max_scale_change) for s in scales))\n    im_patches = torch.cat(list(patch_iter))\n    patch_coords = torch.cat(list(coord_iter))\n\n    return  im_patches, patch_coords\n\n\ndef sample_patch(im: torch.Tensor, pos: torch.Tensor, sample_sz: torch.Tensor, output_sz: torch.Tensor = None,\n                 mode: str = 'replicate', max_scale_change=None, is_mask=False):\n    \"\"\"Sample an image patch.\n\n    args:\n        im: Image\n        pos: center position of crop\n        sample_sz: size to crop\n        output_sz: size to resize to\n        mode: how to treat image borders: 'replicate' (default), 'inside' or 'inside_major'\n        max_scale_change: maximum allowed scale change when using 'inside' and 'inside_major' mode\n    \"\"\"\n\n    # if mode not in ['replicate', 'inside']:\n    #     raise ValueError('Unknown border mode \\'{}\\'.'.format(mode))\n\n    # copy and convert\n    posl = pos.long().clone()\n\n    pad_mode = mode\n\n    # Get new sample size if forced inside the image\n    if mode == 'inside' or mode == 'inside_major':\n        pad_mode = 'replicate'\n        im_sz = torch.Tensor([im.shape[2], im.shape[3]])\n        shrink_factor = (sample_sz.float() / im_sz)\n        if mode == 'inside':\n            shrink_factor = shrink_factor.max()\n        elif mode == 'inside_major':\n            shrink_factor = shrink_factor.min()\n        shrink_factor.clamp_(min=1, max=max_scale_change)\n        sample_sz = (sample_sz.float() / shrink_factor).long()\n\n    # Compute pre-downsampling factor\n    if output_sz is not None:\n        resize_factor = torch.min(sample_sz.float() / output_sz.float()).item()\n        df = int(max(int(resize_factor - 0.1), 1))\n    else:\n        df = int(1)\n\n    sz = sample_sz.float() / df     # new size\n\n    # Do downsampling\n    if 
df > 1:\n        os = posl % df              # offset\n        posl = (posl - os) / df     # new position\n        im2 = im[..., os[0].item()::df, os[1].item()::df]   # downsample\n    else:\n        im2 = im\n\n    # compute size to crop\n    szl = torch.max(sz.round(), torch.Tensor([2])).long()\n\n    # Extract top and bottom coordinates\n    tl = posl - (szl - 1)/2\n    br = posl + szl/2 + 1\n\n    # Shift the crop to inside\n    if mode == 'inside' or mode == 'inside_major':\n        im2_sz = torch.LongTensor([im2.shape[2], im2.shape[3]])\n        shift = (-tl).clamp(0) - (br - im2_sz).clamp(0)\n        tl += shift\n        br += shift\n\n        outside = ((-tl).clamp(0) + (br - im2_sz).clamp(0)) // 2\n        shift = (-tl - outside) * (outside > 0).long()\n        tl += shift\n        br += shift\n\n        # Get image patch\n        # im_patch = im2[...,tl[0].item():br[0].item(),tl[1].item():br[1].item()]\n\n    # Get image patch\n    if not is_mask:\n        im_patch = F.pad(im2, (-tl[1].item(), br[1].item() - im2.shape[3], -tl[0].item(), br[0].item() - im2.shape[2]), pad_mode)\n    else:\n        im_patch = F.pad(im2, (-tl[1].item(), br[1].item() - im2.shape[3], -tl[0].item(), br[0].item() - im2.shape[2]))\n\n    # Get image coordinates\n    patch_coord = df * torch.cat((tl, br)).view(1,4)\n\n    if output_sz is None or (im_patch.shape[-2] == output_sz[0] and im_patch.shape[-1] == output_sz[1]):\n        return im_patch.clone(), patch_coord\n\n    # Resample\n    if not is_mask:\n        im_patch = F.interpolate(im_patch, output_sz.long().tolist(), mode='bilinear')\n    else:\n        im_patch = F.interpolate(im_patch, output_sz.long().tolist(), mode='nearest')\n\n    return im_patch, patch_coord\n"
  },
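A usage sketch for `sample_patch` above, assuming the import path shown in this repo: crop a 128x128 window centred at `(row=60, col=80)` from a dummy 240x320 image and resize it to 64x64. Where the window overflows the border, the patch is replicate-padded; values here are illustrative.

```python
# Sketch of sample_patch above: crop-and-resize with replicate border handling.
import numpy as np
import torch
from pytracking.features.preprocessing import numpy_to_torch, sample_patch

im_np = (np.random.rand(240, 320, 3) * 255).astype(np.float32)
im = numpy_to_torch(im_np)                     # (1, 3, 240, 320), NCHW

pos = torch.Tensor([60, 80])                   # crop centre as (row, col)
sample_sz = torch.Tensor([128, 128])           # window size to crop
output_sz = torch.Tensor([64, 64])             # size to resize the crop to

patch, coords = sample_patch(im, pos, sample_sz, output_sz)
print(patch.shape)   # torch.Size([1, 3, 64, 64])
print(coords)        # (1, 4) tensor with the crop's top-left and bottom-right
                     # (row, col) coordinates in the original image
```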
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/features/util.py",
    "content": "import torch\nfrom pytracking.features.featurebase import FeatureBase\n\n\nclass Concatenate(FeatureBase):\n    \"\"\"A feature that concatenates other features.\n    args:\n        features: List of features to concatenate.\n    \"\"\"\n    def __init__(self, features, pool_stride = None, normalize_power = None, use_for_color = True, use_for_gray = True):\n        super(Concatenate, self).__init__(pool_stride, normalize_power, use_for_color, use_for_gray)\n        self.features = features\n\n        self.input_stride = self.features[0].stride()\n\n        for feat in self.features:\n            if self.input_stride != feat.stride():\n                raise ValueError('Strides for the features must be the same for a bultiresolution feature.')\n\n    def dim(self):\n        return sum([f.dim() for f in self.features])\n\n    def stride(self):\n        return self.pool_stride * self.input_stride\n\n    def extract(self, im: torch.Tensor):\n        return torch.cat([f.get_feature(im) for f in self.features], 1)"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/__init__.py",
    "content": "from .tensorlist import TensorList\nfrom .tensordict import TensorDict"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/complex.py",
    "content": "import torch\nfrom pytracking.libs.tensorlist import tensor_operation\n\n\ndef is_complex(a: torch.Tensor) -> bool:\n    return a.dim() >= 4 and a.shape[-1] == 2\n\n\ndef is_real(a: torch.Tensor) -> bool:\n    return not is_complex(a)\n\n\n@tensor_operation\ndef mult(a: torch.Tensor, b: torch.Tensor):\n    \"\"\"Pointwise complex multiplication of complex tensors.\"\"\"\n\n    if is_real(a):\n        if a.dim() >= b.dim():\n            raise ValueError('Incorrect dimensions.')\n        # a is real\n        return mult_real_cplx(a, b)\n    if is_real(b):\n        if b.dim() >= a.dim():\n            raise ValueError('Incorrect dimensions.')\n        # b is real\n        return mult_real_cplx(b, a)\n\n    # Both complex\n    c = mult_real_cplx(a[..., 0], b)\n    c[..., 0] -= a[..., 1] * b[..., 1]\n    c[..., 1] += a[..., 1] * b[..., 0]\n    return c\n\n\n@tensor_operation\ndef mult_conj(a: torch.Tensor, b: torch.Tensor):\n    \"\"\"Pointwise complex multiplication of complex tensors, with conjugate on b: a*conj(b).\"\"\"\n\n    if is_real(a):\n        if a.dim() >= b.dim():\n            raise ValueError('Incorrect dimensions.')\n        # a is real\n        return mult_real_cplx(a, conj(b))\n    if is_real(b):\n        if b.dim() >= a.dim():\n            raise ValueError('Incorrect dimensions.')\n        # b is real\n        return mult_real_cplx(b, a)\n\n    # Both complex\n    c = mult_real_cplx(b[...,0], a)\n    c[..., 0] += a[..., 1] * b[..., 1]\n    c[..., 1] -= a[..., 0] * b[..., 1]\n    return c\n\n\n@tensor_operation\ndef mult_real_cplx(a: torch.Tensor, b: torch.Tensor):\n    \"\"\"Pointwise complex multiplication of real tensor a with complex tensor b.\"\"\"\n\n    if is_real(b):\n        raise ValueError('Last dimension must have length 2.')\n\n    return a.unsqueeze(-1) * b\n\n\n@tensor_operation\ndef div(a: torch.Tensor, b: torch.Tensor):\n    \"\"\"Pointwise complex division of complex tensors.\"\"\"\n\n    if is_real(b):\n        if b.dim() >= a.dim():\n            raise ValueError('Incorrect dimensions.')\n        # b is real\n        return div_cplx_real(a, b)\n\n    return div_cplx_real(mult_conj(a, b), abs_sqr(b))\n\n\n@tensor_operation\ndef div_cplx_real(a: torch.Tensor, b: torch.Tensor):\n    \"\"\"Pointwise complex division of complex tensor a with real tensor b.\"\"\"\n\n    if is_real(a):\n        raise ValueError('Last dimension must have length 2.')\n\n    return a / b.unsqueeze(-1)\n\n\n@tensor_operation\ndef abs_sqr(a: torch.Tensor):\n    \"\"\"Squared absolute value.\"\"\"\n\n    if is_real(a):\n        raise ValueError('Last dimension must have length 2.')\n\n    return torch.sum(a*a, -1)\n\n\n@tensor_operation\ndef abs(a: torch.Tensor):\n    \"\"\"Absolute value.\"\"\"\n\n    if is_real(a):\n        raise ValueError('Last dimension must have length 2.')\n\n    return torch.sqrt(abs_sqr(a))\n\n\n@tensor_operation\ndef conj(a: torch.Tensor):\n    \"\"\"Complex conjugate.\"\"\"\n\n    if is_real(a):\n        raise ValueError('Last dimension must have length 2.')\n\n    # return a * torch.Tensor([1, -1], device=a.device)\n    return complex(a[...,0], -a[...,1])\n\n\n@tensor_operation\ndef real(a: torch.Tensor):\n    \"\"\"Real part.\"\"\"\n\n    if is_real(a):\n        raise ValueError('Last dimension must have length 2.')\n\n    return a[..., 0]\n\n\n@tensor_operation\ndef imag(a: torch.Tensor):\n    \"\"\"Imaginary part.\"\"\"\n\n    if is_real(a):\n        raise ValueError('Last dimension must have length 2.')\n\n    return a[..., 
1]\n\n\n@tensor_operation\ndef complex(a: torch.Tensor, b: torch.Tensor = None):\n    \"\"\"Create complex tensor from real and imaginary part.\"\"\"\n\n    if b is None:\n        b = a.new_zeros(a.shape)\n    elif a is None:\n        a = b.new_zeros(b.shape)\n\n    return torch.cat((a.unsqueeze(-1), b.unsqueeze(-1)), -1)\n\n\n@tensor_operation\ndef mtimes(a: torch.Tensor, b: torch.Tensor, conj_a=False, conj_b=False):\n    \"\"\"Complex matrix multiplication of complex tensors.\n    The dimensions (-3, -2) are matrix multiplied. -1 is the complex dimension.\"\"\"\n\n    if is_real(a):\n        if a.dim() >= b.dim():\n            raise ValueError('Incorrect dimensions.')\n        return mtimes_real_complex(a, b, conj_b=conj_b)\n    if is_real(b):\n        if b.dim() >= a.dim():\n            raise ValueError('Incorrect dimensions.')\n        return mtimes_complex_real(a, b, conj_a=conj_a)\n\n    if not conj_a and not conj_b:\n        return complex(torch.matmul(a[..., 0], b[..., 0]) - torch.matmul(a[..., 1], b[..., 1]),\n                       torch.matmul(a[..., 0], b[..., 1]) + torch.matmul(a[..., 1], b[..., 0]))\n    if conj_a and not conj_b:\n        return complex(torch.matmul(a[..., 0], b[..., 0]) + torch.matmul(a[..., 1], b[..., 1]),\n                       torch.matmul(a[..., 0], b[..., 1]) - torch.matmul(a[..., 1], b[..., 0]))\n    if not conj_a and conj_b:\n        return complex(torch.matmul(a[..., 0], b[..., 0]) + torch.matmul(a[..., 1], b[..., 1]),\n                       torch.matmul(a[..., 1], b[..., 0]) - torch.matmul(a[..., 0], b[..., 1]))\n    if conj_a and conj_b:\n        return complex(torch.matmul(a[..., 0], b[..., 0]) - torch.matmul(a[..., 1], b[..., 1]),\n                       -torch.matmul(a[..., 0], b[..., 1]) - torch.matmul(a[..., 1], b[..., 0]))\n\n\n@tensor_operation\ndef mtimes_real_complex(a: torch.Tensor, b: torch.Tensor, conj_b=False):\n    if is_real(b):\n        raise ValueError('Incorrect dimensions.')\n\n    if not conj_b:\n        return complex(torch.matmul(a, b[..., 0]), torch.matmul(a, b[..., 1]))\n    if conj_b:\n        return complex(torch.matmul(a, b[..., 0]), -torch.matmul(a, b[..., 1]))\n\n\n@tensor_operation\ndef mtimes_complex_real(a: torch.Tensor, b: torch.Tensor, conj_a=False):\n    if is_real(a):\n        raise ValueError('Incorrect dimensions.')\n\n    if not conj_a:\n        return complex(torch.matmul(a[..., 0], b), torch.matmul(a[..., 1], b))\n    if conj_a:\n        return complex(torch.matmul(a[..., 0], b), -torch.matmul(a[..., 1], b))\n\n\n@tensor_operation\ndef exp_imag(a: torch.Tensor):\n    \"\"\"Complex exponential with imaginary input: e^(i*a)\"\"\"\n\n    a = a.unsqueeze(-1)\n    return torch.cat((torch.cos(a), torch.sin(a)), -1)\n\n\n\n"
  },
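  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/complex_example.py",
    "content": "# Illustrative sketch of the real-valued complex representation used by complex.py.\n# Hypothetical file, not part of the upstream AR/pytracking code; it assumes the\n# external/AR directory is on PYTHONPATH and a PyTorch version with view_as_complex.\nimport torch\n\nfrom pytracking.libs import complex as cplx\n\n# Complex tensors are stored as real tensors with a trailing dimension of size 2\n# (real part at index 0, imaginary part at index 1), and at least 4 other dims.\na = cplx.complex(torch.randn(1, 1, 4, 4), torch.randn(1, 1, 4, 4))\nb = cplx.complex(torch.randn(1, 1, 4, 4), torch.randn(1, 1, 4, 4))\n\nc = cplx.mult(a, b)            # pointwise complex product in the (..., 2) layout\n\n# Cross-check against PyTorch native complex arithmetic.\nref = torch.view_as_complex(a) * torch.view_as_complex(b)\nassert torch.allclose(c, torch.view_as_real(ref), atol=1e-5)\nassert torch.allclose(cplx.abs(a) ** 2, cplx.abs_sqr(a), atol=1e-5)\n"
  },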
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/dcf.py",
    "content": "import torch\nimport math\nfrom pytracking import fourier\nfrom pytracking import complex\nimport torch.nn.functional as F\n\n\ndef hann1d(sz: int, centered = True) -> torch.Tensor:\n    \"\"\"1D cosine window.\"\"\"\n    if centered:\n        return 0.5 * (1 - torch.cos((2 * math.pi / (sz + 1)) * torch.arange(1, sz + 1).float()))\n    w = 0.5 * (1 + torch.cos((2 * math.pi / (sz + 2)) * torch.arange(0, sz//2 + 1).float()))\n    return torch.cat([w, w[1:sz-sz//2].flip((0,))])\n\n\ndef hann2d(sz: torch.Tensor, centered = True) -> torch.Tensor:\n    \"\"\"2D cosine window.\"\"\"\n    return hann1d(sz[0].item(), centered).reshape(1, 1, -1, 1) * hann1d(sz[1].item(), centered).reshape(1, 1, 1, -1)\n\n\ndef hann2d_clipped(sz: torch.Tensor, effective_sz: torch.Tensor, centered = True) -> torch.Tensor:\n    \"\"\"1D clipped cosine window.\"\"\"\n\n    # Ensure that the difference is even\n    effective_sz += (effective_sz - sz) % 2\n    effective_window = hann1d(effective_sz[0].item(), True).reshape(1, 1, -1, 1) * hann1d(effective_sz[1].item(), True).reshape(1, 1, 1, -1)\n\n    pad = (sz - effective_sz) / 2\n\n    window = F.pad(effective_window, (pad[1].item(), pad[1].item(), pad[0].item(), pad[0].item()), 'replicate')\n\n    if centered:\n        return window\n    else:\n        mid = (sz / 2).int()\n        window_shift_lr = torch.cat((window[:, :, :, mid[1]:], window[:, :, :, :mid[1]]), 3)\n        return torch.cat((window_shift_lr[:, :, mid[0]:, :], window_shift_lr[:, :, :mid[0], :]), 2)\n\n\ndef gauss_fourier(sz: int, sigma: float, half: bool = False) -> torch.Tensor:\n    if half:\n        k = torch.arange(0, int(sz/2+1))\n    else:\n        k = torch.arange(-int((sz-1)/2), int(sz/2+1))\n    return (math.sqrt(2*math.pi) * sigma / sz) * torch.exp(-2 * (math.pi * sigma * k.float() / sz)**2)\n\n\ndef gauss_spatial(sz, sigma, center=0, end_pad=0):\n    k = torch.arange(-(sz-1)/2, (sz+1)/2+end_pad)\n    return torch.exp(-1.0/(2*sigma**2) * (k - center)**2)\n\n\ndef label_function(sz: torch.Tensor, sigma: torch.Tensor):\n    return gauss_fourier(sz[0].item(), sigma[0].item()).reshape(1, 1, -1, 1) * gauss_fourier(sz[1].item(), sigma[1].item(), True).reshape(1, 1, 1, -1)\n\ndef label_function_spatial(sz: torch.Tensor, sigma: torch.Tensor, center: torch.Tensor = torch.zeros(2), end_pad: torch.Tensor = torch.zeros(2)):\n    \"\"\"The origin is in the middle of the image.\"\"\"\n    return gauss_spatial(sz[0].item(), sigma[0].item(), center[0], end_pad[0].item()).reshape(1, 1, -1, 1) * \\\n           gauss_spatial(sz[1].item(), sigma[1].item(), center[1], end_pad[1].item()).reshape(1, 1, 1, -1)\n\n\ndef cubic_spline_fourier(f, a):\n    \"\"\"The continuous Fourier transform of a cubic spline kernel.\"\"\"\n\n    bf = (6*(1 - torch.cos(2 * math.pi * f)) + 3*a*(1 - torch.cos(4 * math.pi * f))\n           - (6 + 8*a)*math.pi*f*torch.sin(2 * math.pi * f) - 2*a*math.pi*f*torch.sin(4 * math.pi * f)) \\\n         / (4 * math.pi**4 * f**4)\n\n    bf[f == 0] = 1\n\n    return bf\n\n\ndef get_interp_fourier(sz: torch.Tensor, method='ideal', bicubic_param=0.5, centering=True, windowing=False, device='cpu'):\n\n    ky, kx = fourier.get_frequency_coord(sz)\n\n    if method=='ideal':\n        interp_y = torch.ones(ky.shape) / sz[0]\n        interp_x = torch.ones(kx.shape) / sz[1]\n    elif method=='bicubic':\n        interp_y = cubic_spline_fourier(ky / sz[0], bicubic_param) / sz[0]\n        interp_x = cubic_spline_fourier(kx / sz[1], bicubic_param) / sz[1]\n    else:\n        raise 
ValueError('Unknown method.')\n\n    if centering:\n        interp_y = complex.mult(interp_y, complex.exp_imag((-math.pi/sz[0]) * ky))\n        interp_x = complex.mult(interp_x, complex.exp_imag((-math.pi/sz[1]) * kx))\n\n    if windowing:\n        raise NotImplementedError\n\n    return interp_y.to(device), interp_x.to(device)\n\n\ndef interpolate_dft(a: torch.Tensor, interp_fs) -> torch.Tensor:\n\n    if isinstance(interp_fs, torch.Tensor):\n        return complex.mult(a, interp_fs)\n    if isinstance(interp_fs, (tuple, list)):\n        return complex.mult(complex.mult(a, interp_fs[0]), interp_fs[1])\n    raise ValueError('\"interp_fs\" must be tensor or tuple of tensors.')\n\n\ndef get_reg_filter(sz: torch.Tensor, target_sz: torch.Tensor, params):\n    \"\"\"Computes regularization filter in CCOT and ECO.\"\"\"\n\n    if not params.use_reg_window:\n        return params.reg_window_min * torch.ones(1,1,1,1)\n\n    if getattr(params, 'reg_window_square', False):\n        target_sz = target_sz.prod().sqrt() * torch.ones(2)\n\n    # Normalization factor\n    reg_scale = 0.5 * target_sz\n\n    # Construct grid\n    if getattr(params, 'reg_window_centered', True):\n        wrg = torch.arange(-int((sz[0]-1)/2), int(sz[0]/2+1), dtype=torch.float32).view(1,1,-1,1)\n        wcg = torch.arange(-int((sz[1]-1)/2), int(sz[1]/2+1), dtype=torch.float32).view(1,1,1,-1)\n    else:\n        wrg = torch.cat([torch.arange(0, int(sz[0]/2+1), dtype=torch.float32),\n                         torch.arange(-int((sz[0] - 1) / 2), 0, dtype=torch.float32)]).view(1,1,-1,1)\n        wcg = torch.cat([torch.arange(0, int(sz[1]/2+1), dtype=torch.float32),\n                         torch.arange(-int((sz[1] - 1) / 2), 0, dtype=torch.float32)]).view(1,1,1,-1)\n\n    # Construct regularization window\n    reg_window = (params.reg_window_edge - params.reg_window_min) * \\\n                 (torch.abs(wrg/reg_scale[0])**params.reg_window_power +\n                  torch.abs(wcg/reg_scale[1])**params.reg_window_power) + params.reg_window_min\n\n    # Compute DFT and enforce sparsity\n    reg_window_dft = torch.rfft(reg_window, 2) / sz.prod()\n    reg_window_dft_abs = complex.abs(reg_window_dft)\n    reg_window_dft[reg_window_dft_abs < params.reg_sparsity_threshold * reg_window_dft_abs.max(), :] = 0\n\n    # Do the inverse transform to correct for the window minimum\n    reg_window_sparse = torch.irfft(reg_window_dft, 2, signal_sizes=sz.long().tolist())\n    reg_window_dft[0,0,0,0,0] += params.reg_window_min - sz.prod() * reg_window_sparse.min()\n    reg_window_dft = complex.real(fourier.rfftshift2(reg_window_dft))\n\n    # Remove zeros\n    max_inds,_ = reg_window_dft.nonzero().max(dim=0)\n    mid_ind = int((reg_window_dft.shape[2]-1)/2)\n    top = max_inds[-2].item() + 1\n    bottom = 2*mid_ind - max_inds[-2].item()\n    right = max_inds[-1].item() + 1\n    reg_window_dft = reg_window_dft[..., bottom:top, :right]\n    if reg_window_dft.shape[-1] > 1:\n        reg_window_dft = torch.cat([reg_window_dft[..., 1:].flip((2, 3)), reg_window_dft], -1)\n\n    return reg_window_dft\n\n\ndef max2d(a: torch.Tensor) -> (torch.Tensor, torch.Tensor):\n    \"\"\"Computes maximum and argmax in the last two dimensions.\"\"\"\n\n    max_val_row, argmax_row = torch.max(a, dim=-2)\n    max_val, argmax_col = torch.max(max_val_row, dim=-1)\n    argmax_row = argmax_row.view(argmax_col.numel(),-1)[torch.arange(argmax_col.numel()), argmax_col.view(-1)]\n    argmax_row = argmax_row.reshape(argmax_col.shape)\n    argmax = 
torch.cat((argmax_row.unsqueeze(-1), argmax_col.unsqueeze(-1)), -1)\n    return max_val, argmax\n"
  },
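  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/dcf_example.py",
    "content": "# Illustrative sketch of the windowing and label helpers in dcf.py. Hypothetical\n# file, not part of the upstream AR/pytracking code; assumes external/AR is on\n# PYTHONPATH. Sizes and sigmas below are arbitrary example values.\nimport torch\n\nfrom pytracking.libs import dcf\n\nscore_sz = torch.tensor([18, 18])\n\n# 2D Hann window matched to an 18x18 score map, e.g. for output windowing.\nwindow = dcf.hann2d(score_sz, centered=True)\nprint(window.shape)    # torch.Size([1, 1, 18, 18])\n\n# Spatial Gaussian training label centered on the target position.\nlabel = dcf.label_function_spatial(score_sz, torch.tensor([2.0, 2.0]))\nprint(label.shape)     # torch.Size([1, 1, 18, 18])\nprint(label.argmax())  # peak lies at the center of the map\n"
  },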
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/fourier.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom pytracking import complex, TensorList\nfrom pytracking.libs.tensorlist import tensor_operation\n\n\n@tensor_operation\ndef rfftshift2(a: torch.Tensor):\n    h = a.shape[2] + 2\n    return torch.cat((a[:,:,(h-1)//2:,...], a[:,:,:h//2,...]), 2)\n\n\n@tensor_operation\ndef irfftshift2(a: torch.Tensor):\n    mid = int((a.shape[2]-1)/2)\n    return torch.cat((a[:,:,mid:,...], a[:,:,:mid,...]), 2)\n\n\n@tensor_operation\ndef cfft2(a):\n    \"\"\"Do FFT and center the low frequency component.\n    Always produces odd (full) output sizes.\"\"\"\n\n    return rfftshift2(torch.rfft(a, 2))\n\n\n@tensor_operation\ndef cifft2(a, signal_sizes=None):\n    \"\"\"Do inverse FFT corresponding to cfft2.\"\"\"\n\n    return torch.irfft(irfftshift2(a), 2, signal_sizes=signal_sizes)\n\n\n@tensor_operation\ndef sample_fs(a: torch.Tensor, grid_sz: torch.Tensor = None, rescale = True):\n    \"\"\"Samples the Fourier series.\"\"\"\n\n    # Size of the fourier series\n    sz = torch.Tensor([a.shape[2], 2*a.shape[3]-1]).float()\n\n    # Default grid\n    if grid_sz is None or sz[0] == grid_sz[0] and sz[1] == grid_sz[1]:\n        if rescale:\n            return sz.prod().item() * cifft2(a)\n        return cifft2(a)\n\n    if sz[0] > grid_sz[0] or sz[1] > grid_sz[1]:\n        raise ValueError(\"Only grid sizes that are smaller than the Fourier series size are supported.\")\n\n    tot_pad = (grid_sz - sz).tolist()\n    is_even = [s.item() % 2 == 0 for s in sz]\n\n    # Compute paddings\n    pad_top = int((tot_pad[0]+1)/2) if is_even[0] else int(tot_pad[0]/2)\n    pad_bottom = int(tot_pad[0] - pad_top)\n    pad_right = int((tot_pad[1]+1)/2)\n\n    if rescale:\n        return grid_sz.prod().item() * cifft2(F.pad(a, (0, 0, 0, pad_right, pad_top, pad_bottom)), signal_sizes=grid_sz.long().tolist())\n    else:\n        return cifft2(F.pad(a, (0, 0, 0, pad_right, pad_top, pad_bottom)), signal_sizes=grid_sz.long().tolist())\n\n\ndef get_frequency_coord(sz, add_complex_dim = False, device='cpu'):\n    \"\"\"Frequency coordinates.\"\"\"\n\n    ky = torch.arange(-int((sz[0]-1)/2), int(sz[0]/2+1), dtype=torch.float32, device=device).view(1,1,-1,1)\n    kx = torch.arange(0, int(sz[1]/2+1), dtype=torch.float32, device=device).view(1,1,1,-1)\n\n    if add_complex_dim:\n        ky = ky.unsqueeze(-1)\n        kx = kx.unsqueeze(-1)\n\n    return ky, kx\n\n\n@tensor_operation\ndef shift_fs(a: torch.Tensor, shift: torch.Tensor):\n    \"\"\"Shift a sample a in the Fourier domain.\n    Params:\n        a : The fourier coefficiens of the sample.\n        shift : The shift to be performed normalized to the range [-pi, pi].\"\"\"\n\n    if a.dim() != 5:\n        raise ValueError('a must be the Fourier coefficients, a 5-dimensional tensor.')\n\n    if shift[0] == 0 and shift[1] == 0:\n        return a\n\n    ky, kx = get_frequency_coord((a.shape[2], 2*a.shape[3]-1), device=a.device)\n\n    return complex.mult(complex.mult(a, complex.exp_imag(shift[0].item()*ky)), complex.exp_imag(shift[1].item()*kx))\n\n\ndef sum_fs(a: TensorList) -> torch.Tensor:\n    \"\"\"Sum a list of Fourier series expansions.\"\"\"\n\n    s = None\n    mid = None\n\n    for e in sorted(a, key=lambda elem: elem.shape[-3], reverse=True):\n        if s is None:\n            s = e.clone()\n            mid = int((s.shape[-3] - 1) / 2)\n        else:\n            # Compute coordinates\n            top = mid - int((e.shape[-3] - 1) / 2)\n            bottom = mid + int(e.shape[-3] / 2) + 1\n            right = 
e.shape[-2]\n\n            # Add the data\n            s[..., top:bottom, :right, :] += e\n\n    return s\n\n\ndef sum_fs12(a: TensorList) -> torch.Tensor:\n    \"\"\"Sum a list of Fourier series expansions.\"\"\"\n\n    s = None\n    mid = None\n\n    for e in sorted(a, key=lambda elem: elem.shape[0], reverse=True):\n        if s is None:\n            s = e.clone()\n            mid = int((s.shape[0] - 1) / 2)\n        else:\n            # Compute coordinates\n            top = mid - int((e.shape[0] - 1) / 2)\n            bottom = mid + int(e.shape[0] / 2) + 1\n            right = e.shape[1]\n\n            # Add the data\n            s[top:bottom, :right, ...] += e\n\n    return s\n\n\n@tensor_operation\ndef inner_prod_fs(a: torch.Tensor, b: torch.Tensor):\n    if complex.is_complex(a) and complex.is_complex(b):\n        return 2 * (a.reshape(-1) @ b.reshape(-1)) - a[:, :, :, 0, :].reshape(-1) @ b[:, :, :, 0, :].reshape(-1)\n    elif complex.is_real(a) and complex.is_real(b):\n        return 2 * (a.reshape(-1) @ b.reshape(-1)) - a[:, :, :, 0].reshape(-1) @ b[:, :, :, 0].reshape(-1)\n    else:\n        raise NotImplementedError('Not implemented for mixed real and complex.')"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/operation.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom pytracking.libs.tensorlist import tensor_operation, TensorList\n\n\n@tensor_operation\ndef conv2d(input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor = None, stride=1, padding=0, dilation=1, groups=1, mode=None):\n    \"\"\"Standard conv2d. Returns the input if weight=None.\"\"\"\n\n    if weight is None:\n        return input\n\n    ind = None\n    if mode is not None:\n        if padding != 0:\n            raise ValueError('Cannot input both padding and mode.')\n        if mode == 'same':\n            padding = (weight.shape[2]//2, weight.shape[3]//2)\n            if weight.shape[2] % 2 == 0 or weight.shape[3] % 2 == 0:\n                ind = (slice(-1) if weight.shape[2] % 2 == 0 else slice(None),\n                       slice(-1) if weight.shape[3] % 2 == 0 else slice(None))\n        elif mode == 'valid':\n            padding = (0, 0)\n        elif mode == 'full':\n            padding = (weight.shape[2]-1, weight.shape[3]-1)\n        else:\n            raise ValueError('Unknown mode for padding.')\n\n    out = F.conv2d(input, weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)\n    if ind is None:\n        return out\n    return out[:,:,ind[0],ind[1]]\n\n\n@tensor_operation\ndef conv1x1(input: torch.Tensor, weight: torch.Tensor):\n    \"\"\"Do a convolution with a 1x1 kernel weights. Implemented with matmul, which can be faster than using conv.\"\"\"\n\n    if weight is None:\n        return input\n\n    return torch.conv2d(input, weight)\n"
  },
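  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/operation_example.py",
    "content": "# Illustrative sketch of the padding modes accepted by operation.conv2d.\n# Hypothetical file, not part of the upstream AR/pytracking code; assumes\n# external/AR is on PYTHONPATH.\nimport torch\n\nfrom pytracking.libs import operation\n\nx = torch.randn(1, 8, 32, 32)\n\n# mode='same' pads so the output spatial size matches the input.\nw_odd = torch.randn(4, 8, 5, 5)\nprint(operation.conv2d(x, w_odd, mode='same').shape)    # torch.Size([1, 4, 32, 32])\n\n# For even kernels, 'same' pads by half the kernel and trims the extra row/column.\nw_even = torch.randn(4, 8, 4, 4)\nprint(operation.conv2d(x, w_even, mode='same').shape)   # torch.Size([1, 4, 32, 32])\n\n# mode='full' returns every position with any overlap, as in correlation filters.\nprint(operation.conv2d(x, w_odd, mode='full').shape)    # torch.Size([1, 4, 36, 36])\n"
  },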
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/optimization.py",
    "content": "import torch\nimport torch.autograd\nimport math\nfrom pytracking.libs import TensorList\nfrom pytracking.utils.plotting import plot_graph\nfrom ltr.models.layers.activation import softmax_reg\n\n\nclass L2Problem:\n    \"\"\"Base class for representing an L2 optimization problem.\"\"\"\n\n    def __call__(self, x: TensorList) -> TensorList:\n        \"\"\"Shall compute the residuals of the problem.\"\"\"\n        raise NotImplementedError\n\n    def ip_input(self, a, b):\n        \"\"\"Inner product of the input space.\"\"\"\n        return sum(a.view(-1) @ b.view(-1))\n\n    def ip_output(self, a, b):\n        \"\"\"Inner product of the output space.\"\"\"\n        return sum(a.view(-1) @ b.view(-1))\n\n    def M1(self, x):\n        \"\"\"M1 preconditioner.\"\"\"\n        return x\n\n    def M2(self, x):\n        \"\"\"M2 preconditioner.\"\"\"\n        return x\n\nclass MinimizationProblem:\n    \"\"\"General minimization problem.\"\"\"\n    def __call__(self, x: TensorList) -> TensorList:\n        \"\"\"Shall compute the loss.\"\"\"\n        raise NotImplementedError\n\n    def ip_input(self, a, b):\n        \"\"\"Inner product of the input space.\"\"\"\n        return sum(a.view(-1) @ b.view(-1))\n\n    def M1(self, x):\n        return x\n\n    def M2(self, x):\n        return x\n\n\nclass ConjugateGradientBase:\n    \"\"\"Conjugate Gradient optimizer base class. Implements the CG loop.\"\"\"\n\n    def __init__(self, fletcher_reeves = True, standard_alpha = True, direction_forget_factor = 0, debug = False):\n        self.fletcher_reeves = fletcher_reeves\n        self.standard_alpha = standard_alpha\n        self.direction_forget_factor = direction_forget_factor\n        self.debug = debug\n\n        # State\n        self.p = None\n        self.rho = torch.ones(1)\n        self.r_prev = None\n\n        # Right hand side\n        self.b = None\n\n    def reset_state(self):\n        self.p = None\n        self.rho = torch.ones(1)\n        self.r_prev = None\n\n\n    def run_CG(self, num_iter, x=None, eps=0.0):\n        \"\"\"Main conjugate gradient method.\n\n        args:\n            num_iter: Number of iterations.\n            x: Initial guess. 
Assumed zero if None.\n            eps: Stop if the residual norm gets smaller than this.\n        \"\"\"\n\n        # Apply forgetting factor\n        if self.direction_forget_factor == 0:\n            self.reset_state()\n        elif self.p is not None:\n            self.rho /= self.direction_forget_factor\n\n        if x is None:\n            r = self.b.clone()\n        else:\n            r = self.b - self.A(x)\n\n        # Norms of residuals etc for debugging\n        resvec = None\n        if self.debug:\n            normr = self.residual_norm(r)\n            resvec = torch.zeros(num_iter+1)\n            resvec[0] = normr\n\n        # Loop over iterations\n        for ii in range(num_iter):\n            # Preconditioners\n            y = self.M1(r)\n            z = self.M2(y)\n\n            rho1 = self.rho\n            self.rho = self.ip(r, z)\n\n            if self.check_zero(self.rho):\n                if self.debug:\n                    print('Stopped CG since rho = 0')\n                    if resvec is not None:\n                        resvec = resvec[:ii+1]\n                return x, resvec\n\n            if self.p is None:\n                self.p = z.clone()\n            else:\n                if self.fletcher_reeves:\n                    beta = self.rho / rho1\n                else:\n                    rho2 = self.ip(self.r_prev, z)\n                    beta = (self.rho - rho2) / rho1\n\n                beta = beta.clamp(0)\n                self.p = z + self.p * beta\n\n            q = self.A(self.p)\n            pq = self.ip(self.p, q)\n\n            if self.standard_alpha:\n                alpha = self.rho / pq\n            else:\n                alpha = self.ip(self.p, r) / pq\n\n            # Save old r for PR formula\n            if not self.fletcher_reeves:\n                self.r_prev = r.clone()\n\n            # Form new iterate\n            if x is None:\n                x = self.p * alpha\n            else:\n                x += self.p * alpha\n\n            if ii < num_iter - 1 or self.debug:\n                r -= q * alpha\n\n            if eps > 0.0 or self.debug:\n                normr = self.residual_norm(r)\n\n            if self.debug:\n                self.evaluate_CG_iteration(x)\n                resvec[ii+1] = normr\n\n            if eps > 0 and normr <= eps:\n                if self.debug:\n                    print('Stopped CG since norm smaller than eps')\n                break\n\n        if resvec is not None:\n            resvec = resvec[:ii+2]\n\n        return x, resvec\n\n\n    def A(self, x):\n        # Implements the left hand operation\n        raise NotImplementedError\n\n    def ip(self, a, b):\n        # Implements the inner product\n        return a.view(-1) @ b.view(-1)\n\n    def residual_norm(self, r):\n        res = self.ip(r, r).sum()\n        if isinstance(res, (TensorList, list, tuple)):\n            res = sum(res)\n        return res.sqrt()\n\n    def check_zero(self, s, eps = 0.0):\n        ss = s.abs() <= eps\n        if isinstance(ss, (TensorList, list, tuple)):\n            ss = sum(ss)\n        return ss.item() > 0\n\n    def M1(self, x):\n        # M1 preconditioner\n        return x\n\n    def M2(self, x):\n        # M2 preconditioner\n        return x\n\n    def evaluate_CG_iteration(self, x):\n        pass\n\n\n\nclass ConjugateGradient(ConjugateGradientBase):\n    \"\"\"Conjugate Gradient optimizer, performing single linearization of the residuals in the start.\"\"\"\n\n    def __init__(self, problem: L2Problem, variable: 
TensorList, cg_eps = 0.0, fletcher_reeves = True,\n                 standard_alpha = True, direction_forget_factor = 0, debug = False, plotting = False, visdom=None):\n        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, debug or plotting)\n\n        self.problem = problem\n        self.x = variable\n\n        self.plotting = plotting\n        self.fig_num = (10,11)\n        self.visdom = visdom\n\n        self.cg_eps = cg_eps\n        self.f0 = None\n        self.g = None\n        self.dfdxt_g = None\n\n        self.residuals = torch.zeros(0)\n        self.losses = torch.zeros(0)\n\n    def clear_temp(self):\n        self.f0 = None\n        self.g = None\n        self.dfdxt_g = None\n\n\n    def run(self, num_cg_iter):\n        \"\"\"Run the optimizer with the provided number of iterations.\"\"\"\n\n        if num_cg_iter == 0:\n            return\n\n        lossvec = None\n        if self.debug:\n            lossvec = torch.zeros(2)\n\n        self.x.requires_grad_(True)\n\n        # Evaluate function at current estimate\n        self.f0 = self.problem(self.x)\n\n        # Create copy with graph detached\n        self.g = self.f0.detach()\n\n        if self.debug:\n            lossvec[0] = self.problem.ip_output(self.g, self.g)\n\n        self.g.requires_grad_(True)\n\n        # Get df/dx^t @ f0\n        self.dfdxt_g = TensorList(torch.autograd.grad(self.f0, self.x, self.g, create_graph=True))\n\n        # Get the right hand side\n        self.b = - self.dfdxt_g.detach()\n\n        # Run CG\n        delta_x, res = self.run_CG(num_cg_iter, eps=self.cg_eps)\n\n        self.x.detach_()\n        self.x += delta_x\n\n        if self.debug:\n            self.f0 = self.problem(self.x)\n            lossvec[-1] = self.problem.ip_output(self.f0, self.f0)\n            self.residuals = torch.cat((self.residuals, res))\n            self.losses = torch.cat((self.losses, lossvec))\n            if self.visdom is not None:\n                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')\n                self.visdom.register(self.residuals, 'lineplot', 3, 'CG residuals')\n            elif self.plotting:\n                plot_graph(self.losses, self.fig_num[0], title='Loss')\n                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')\n\n        self.x.detach_()\n        self.clear_temp()\n\n\n    def A(self, x):\n        dfdx_x = torch.autograd.grad(self.dfdxt_g, self.g, x, retain_graph=True)\n        return TensorList(torch.autograd.grad(self.f0, self.x, dfdx_x, retain_graph=True))\n\n    def ip(self, a, b):\n        return self.problem.ip_input(a, b)\n\n    def M1(self, x):\n        return self.problem.M1(x)\n\n    def M2(self, x):\n        return self.problem.M2(x)\n\n\n\nclass GaussNewtonCG(ConjugateGradientBase):\n    \"\"\"Gauss-Newton with Conjugate Gradient optimizer.\"\"\"\n\n    def __init__(self, problem: L2Problem, variable: TensorList, cg_eps = 0.0, fletcher_reeves = True,\n                 standard_alpha = True, direction_forget_factor = 0, debug = False, analyze = False, plotting = False,\n                 visdom=None):\n        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, debug or analyze or plotting)\n\n        self.problem = problem\n        self.x = variable\n\n        self.analyze_convergence = analyze\n        self.plotting = plotting\n        self.fig_num = (10,11,12)\n        self.visdom = visdom\n\n        self.cg_eps = cg_eps\n        self.f0 = None\n        self.g = None\n        self.dfdxt_g = None\n\n        self.residuals = torch.zeros(0)\n        self.losses = torch.zeros(0)\n        self.gradient_mags = torch.zeros(0)\n\n    def clear_temp(self):\n        self.f0 = None\n        self.g = None\n        self.dfdxt_g = None\n\n\n    def run_GN(self, *args, **kwargs):\n        return self.run(*args, **kwargs)\n\n\n    def run(self, num_cg_iter, num_gn_iter=None):\n        \"\"\"Run the optimizer.\n        args:\n            num_cg_iter: Number of CG iterations per GN iter. If list, then each entry specifies number of CG iterations\n                         and number of GN iterations is given by the length of the list.\n            num_gn_iter: Number of GN iterations. Shall only be given if num_cg_iter is an integer.\n        \"\"\"\n\n        if isinstance(num_cg_iter, int):\n            if num_gn_iter is None:\n                raise ValueError('Must specify number of GN iter if CG iter is constant')\n            num_cg_iter = [num_cg_iter]*num_gn_iter\n\n        num_gn_iter = len(num_cg_iter)\n        if num_gn_iter == 0:\n            return\n\n        if self.analyze_convergence:\n            self.evaluate_CG_iteration(0)\n\n        # Outer loop for running the GN iterations.\n        for cg_iter in num_cg_iter:\n            self.run_GN_iter(cg_iter)\n\n        if self.debug:\n            if not self.analyze_convergence:\n                self.f0 = self.problem(self.x)\n                loss = self.problem.ip_output(self.f0, self.f0)\n                self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))\n\n            if self.visdom is not None:\n                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')\n                self.visdom.register(self.residuals, 'lineplot', 3, 'CG residuals')\n\n                if self.analyze_convergence:\n                    self.visdom.register(self.gradient_mags, 'lineplot', 4, 'Gradient magnitude')\n            elif self.plotting:\n                plot_graph(self.losses, self.fig_num[0], title='Loss')\n                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')\n                if self.analyze_convergence:\n                    plot_graph(self.gradient_mags, self.fig_num[2], 'Gradient magnitude')\n\n\n        self.x.detach_()\n        self.clear_temp()\n\n        return self.losses, self.residuals\n\n\n    def run_GN_iter(self, num_cg_iter):\n        \"\"\"Runs a single GN iteration.\"\"\"\n\n        self.x.requires_grad_(True)\n\n        # Evaluate function at current estimate\n        self.f0 = self.problem(self.x)\n\n        # Create copy with graph detached\n        self.g = self.f0.detach()\n\n        if self.debug and not self.analyze_convergence:\n            loss = self.problem.ip_output(self.g, self.g)\n            self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))\n\n        self.g.requires_grad_(True)\n\n        # Get df/dx^t @ f0\n        self.dfdxt_g = TensorList(torch.autograd.grad(self.f0, self.x, self.g, create_graph=True))\n\n        # Get the right hand side\n        self.b = - self.dfdxt_g.detach()\n\n        # Run CG\n        delta_x, res = self.run_CG(num_cg_iter, eps=self.cg_eps)\n\n        self.x.detach_()\n        self.x += delta_x\n\n        if self.debug:\n            self.residuals = torch.cat((self.residuals, res))\n\n\n    def A(self, x):\n        dfdx_x = torch.autograd.grad(self.dfdxt_g, self.g, x, retain_graph=True)\n        return TensorList(torch.autograd.grad(self.f0, self.x, dfdx_x, retain_graph=True))\n\n    def ip(self, a, b):\n        return self.problem.ip_input(a, b)\n\n    def M1(self, x):\n        return self.problem.M1(x)\n\n    def M2(self, x):\n        return self.problem.M2(x)\n\n    def evaluate_CG_iteration(self, delta_x):\n        if self.analyze_convergence:\n            x = (self.x + delta_x).detach()\n            x.requires_grad_(True)\n\n            # compute loss and gradient\n            f = self.problem(x)\n            loss = self.problem.ip_output(f, f)\n            grad = TensorList(torch.autograd.grad(loss, x))\n\n            # store in the vectors\n            self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))\n            self.gradient_mags = torch.cat((self.gradient_mags, sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().detach().view(-1)))\n\n\nclass GradientDescentL2:\n    \"\"\"Gradient descent with momentum for L2 problems.\"\"\"\n\n    def __init__(self, problem: L2Problem, variable: TensorList, step_length: float, momentum: float = 0.0, debug = False, plotting = False, visdom=None):\n\n        self.problem = problem\n        self.x = variable\n\n        self.step_length = step_length\n        self.momentum = momentum\n\n        self.debug = debug or plotting\n        self.plotting = plotting\n        self.fig_num = (10,11)\n        self.visdom = visdom\n        self.losses = torch.zeros(0)\n        self.gradient_mags = torch.zeros(0)\n        self.residuals = None\n\n        self.clear_temp()\n\n\n    def clear_temp(self):\n        self.f0 = None\n        self.dir = None\n\n\n    def run(self, num_iter, dummy = None):\n\n        if num_iter == 0:\n            return\n\n        lossvec = None\n        if self.debug:\n            lossvec = torch.zeros(num_iter+1)\n            grad_mags = torch.zeros(num_iter+1)\n\n        for i in range(num_iter):\n            self.x.requires_grad_(True)\n\n            # Evaluate function at current estimate\n            self.f0 = self.problem(self.x)\n\n            # Compute loss\n            loss = self.problem.ip_output(self.f0, self.f0)\n\n            # Compute grad\n            grad = TensorList(torch.autograd.grad(loss, self.x))\n\n            # Update direction\n            if self.dir is None:\n                self.dir = grad\n            else:\n                self.dir = grad + self.momentum * self.dir\n\n            self.x.detach_()\n            self.x -= self.step_length * self.dir\n\n            if self.debug:\n                lossvec[i] = loss.item()\n                grad_mags[i] = sum(grad.view(-1) @ grad.view(-1)).sqrt().item()\n\n        if self.debug:\n            self.x.requires_grad_(True)\n            self.f0 = self.problem(self.x)\n            loss = self.problem.ip_output(self.f0, self.f0)\n            grad = TensorList(torch.autograd.grad(loss, self.x))\n            lossvec[-1] = self.problem.ip_output(self.f0, self.f0).item()\n            grad_mags[-1] = sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().item()\n            self.losses = torch.cat((self.losses, lossvec))\n            self.gradient_mags = torch.cat((self.gradient_mags, grad_mags))\n\n            if self.visdom is not None:\n                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')\n                self.visdom.register(self.gradient_mags, 'lineplot', 4, 'Gradient magnitude')\n            elif self.plotting:\n                plot_graph(self.losses, self.fig_num[0], title='Loss')\n                plot_graph(self.gradient_mags, self.fig_num[1], title='Gradient magnitude')\n\n        self.x.detach_()\n        self.clear_temp()\n\n\n\nclass NewtonCG(ConjugateGradientBase):\n    \"\"\"Newton with Conjugate Gradient. Handles general minimization problems.\"\"\"\n\n    def __init__(self, problem: MinimizationProblem, variable: TensorList, init_hessian_reg = 0.0, hessian_reg_factor = 1.0,\n                 cg_eps = 0.0, fletcher_reeves = True, standard_alpha = True, direction_forget_factor = 0,\n                 debug = False, analyze = False, plotting = False, fig_num=(10, 11, 12)):\n        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, debug or analyze or plotting)\n\n        self.problem = problem\n        self.x = variable\n\n        self.analyze_convergence = analyze\n        self.plotting = plotting\n        self.fig_num = fig_num\n\n        self.hessian_reg = init_hessian_reg\n        self.hessian_reg_factor = hessian_reg_factor\n        self.cg_eps = cg_eps\n        self.f0 = None\n        self.g = None\n\n        self.residuals = torch.zeros(0)\n        self.losses = torch.zeros(0)\n        self.gradient_mags = torch.zeros(0)\n\n    def clear_temp(self):\n        self.f0 = None\n        self.g = None\n\n\n    def run(self, num_cg_iter, num_newton_iter=None):\n\n        if isinstance(num_cg_iter, int):\n            if num_cg_iter == 0:\n                return\n            if num_newton_iter is None:\n                num_newton_iter = 1\n            num_cg_iter = [num_cg_iter] * num_newton_iter\n\n        num_newton_iter = len(num_cg_iter)\n        if num_newton_iter == 0:\n            return\n\n        if self.analyze_convergence:\n            self.evaluate_CG_iteration(0)\n\n        for cg_iter in num_cg_iter:\n            self.run_newton_iter(cg_iter)\n            self.hessian_reg *= self.hessian_reg_factor\n\n        if self.debug:\n            if not self.analyze_convergence:\n                loss = self.problem(self.x)\n                self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))\n\n            if self.plotting:\n                plot_graph(self.losses, self.fig_num[0], title='Loss')\n                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')\n                if self.analyze_convergence:\n                    plot_graph(self.gradient_mags, self.fig_num[2], 'Gradient magnitude')\n\n        self.x.detach_()\n        self.clear_temp()\n\n        return self.losses, self.residuals\n\n\n    def run_newton_iter(self, num_cg_iter):\n\n        self.x.requires_grad_(True)\n\n        # Evaluate function at current estimate\n        self.f0 = self.problem(self.x)\n\n        if self.debug and not self.analyze_convergence:\n            self.losses = torch.cat((self.losses, self.f0.detach().cpu().view(-1)))\n\n        # Gradient of loss\n        self.g = TensorList(torch.autograd.grad(self.f0, self.x, create_graph=True))\n\n        # Get the right hand side\n        self.b = - self.g.detach()\n\n        # Run CG\n        delta_x, res = self.run_CG(num_cg_iter, eps=self.cg_eps)\n\n        self.x.detach_()\n        self.x += delta_x\n\n        if self.debug:\n            self.residuals = torch.cat((self.residuals, res))\n\n\n    def A(self, x):\n        return TensorList(torch.autograd.grad(self.g, self.x, x, retain_graph=True)) + self.hessian_reg * x\n\n    def ip(self, a, b):\n        # Implements the inner product\n        return self.problem.ip_input(a, b)\n\n    def M1(self, x):\n        return self.problem.M1(x)\n\n    def M2(self, x):\n        return self.problem.M2(x)\n\n    def evaluate_CG_iteration(self, delta_x):\n        if self.analyze_convergence:\n            x = (self.x + delta_x).detach()\n            x.requires_grad_(True)\n\n            # compute loss and gradient\n            loss = self.problem(x)\n            grad = TensorList(torch.autograd.grad(loss, x))\n\n            # store in the vectors\n            self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))\n            self.gradient_mags = torch.cat((self.gradient_mags, sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().detach().view(-1)))\n\n\nclass GradientDescent:\n    \"\"\"Gradient descent for general minimization problems.\"\"\"\n\n    def __init__(self, problem: MinimizationProblem, variable: TensorList, step_length: float, momentum: float = 0.0,\n                 debug = False, plotting = False, fig_num=(10,11)):\n\n        self.problem = problem\n        self.x = variable\n\n        self.step_length = step_length\n        self.momentum = momentum\n\n        self.debug = debug or plotting\n        self.plotting = plotting\n        self.fig_num = fig_num\n\n        self.losses = torch.zeros(0)\n        self.gradient_mags = torch.zeros(0)\n        self.residuals = None\n\n        self.clear_temp()\n\n\n    def clear_temp(self):\n        self.dir = None\n\n\n    def run(self, num_iter, dummy = None):\n\n        if num_iter == 0:\n            return\n\n        lossvec = None\n        if self.debug:\n            lossvec = torch.zeros(num_iter+1)\n            grad_mags = torch.zeros(num_iter+1)\n\n        for i in range(num_iter):\n            self.x.requires_grad_(True)\n\n            # Evaluate function at current estimate\n            loss = self.problem(self.x)\n\n            # Compute grad\n            grad = TensorList(torch.autograd.grad(loss, self.x))\n\n            # Update direction\n            if self.dir is None:\n                self.dir = grad\n            else:\n                self.dir = grad + self.momentum * self.dir\n\n            self.x.detach_()\n            self.x -= self.step_length * self.dir\n\n            if self.debug:\n                lossvec[i] = loss.item()\n                grad_mags[i] = sum(grad.view(-1) @ grad.view(-1)).sqrt().item()\n\n        if self.debug:\n            self.x.requires_grad_(True)\n            loss = self.problem(self.x)\n            grad = TensorList(torch.autograd.grad(loss, self.x))\n            lossvec[-1] = loss.item()\n            grad_mags[-1] = sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().item()\n            self.losses = torch.cat((self.losses, lossvec))\n            self.gradient_mags = torch.cat((self.gradient_mags, grad_mags))\n            if self.plotting:\n                plot_graph(self.losses, self.fig_num[0], title='Loss')\n                plot_graph(self.gradient_mags, self.fig_num[1], title='Gradient magnitude')\n\n        self.x.detach_()\n        self.clear_temp()"
  },
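  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/optimization_example.py",
    "content": "# Illustrative sketch: solving a toy least-squares problem with GaussNewtonCG.\n# Hypothetical file, not part of the upstream AR/pytracking code; assumes the\n# external/AR packages (pytracking and ltr, which optimization.py imports) are\n# on PYTHONPATH. The problem class and sizes are made up for the example.\nimport torch\n\nfrom pytracking.libs import TensorList\nfrom pytracking.libs.optimization import L2Problem, GaussNewtonCG\n\n\nclass ToyLeastSquares(L2Problem):\n    # Residuals r(x) = A @ x - b; GaussNewtonCG minimizes ||r(x)||^2.\n    def __init__(self, A, b):\n        self.A = A\n        self.b = b\n\n    def __call__(self, x: TensorList) -> TensorList:\n        return TensorList([self.A @ x[0] - self.b])\n\n\nA = torch.randn(20, 5)\nb = torch.randn(20)\nx = TensorList([torch.zeros(5)])\n\noptimizer = GaussNewtonCG(ToyLeastSquares(A, b), x)\noptimizer.run(num_cg_iter=5, num_gn_iter=2)\n\n# The variable is updated in place; for this linear problem it should match the\n# closed-form least-squares solution.\nref = torch.linalg.lstsq(A, b.unsqueeze(1)).solution.squeeze(1)\nprint(torch.allclose(x[0], ref, atol=1e-3))\n"
  },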
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/tensordict.py",
    "content": "from collections import OrderedDict\nimport torch\nimport copy\n\n\nclass TensorDict(OrderedDict):\n    \"\"\"Container mainly used for dicts of torch tensors. Extends OrderedDict with pytorch functionality.\"\"\"\n\n    def concat(self, other):\n        \"\"\"Concatenates two dicts without copying internal data.\"\"\"\n        return TensorDict(self, **other)\n\n    def copy(self):\n        return TensorDict(super(TensorDict, self).copy())\n\n    def __deepcopy__(self, memodict={}):\n        return TensorDict(copy.deepcopy(list(self), memodict))\n\n    def __getattr__(self, name):\n        if not hasattr(torch.Tensor, name):\n            raise AttributeError('\\'TensorDict\\' object has not attribute \\'{}\\''.format(name))\n\n        def apply_attr(*args, **kwargs):\n            return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()})\n        return apply_attr\n\n    def attribute(self, attr: str, *args):\n        return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()})\n\n    def apply(self, fn, *args, **kwargs):\n        return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()})\n\n    @staticmethod\n    def _iterable(a):\n        return isinstance(a, (TensorDict, list))\n\n"
  },
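  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/tensordict_example.py",
    "content": "# Illustrative sketch of TensorDict attribute broadcasting. Hypothetical file,\n# not part of the upstream AR/pytracking code; assumes external/AR is on PYTHONPATH.\nimport torch\n\nfrom pytracking.libs import TensorDict\n\nd = TensorDict({'search': torch.zeros(3, 64, 64), 'template': torch.ones(3, 32, 32)})\n\n# Any torch.Tensor method is forwarded to every value in the dict.\nbatched = d.unsqueeze(0)\nprint(batched['search'].shape)     # torch.Size([1, 3, 64, 64])\n\n# apply() maps an arbitrary function over the values instead.\nshapes = d.apply(lambda t: tuple(t.shape))\nprint(shapes['template'])          # (3, 32, 32)\n"
  },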
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/tensorlist.py",
    "content": "import functools\nimport torch\nimport copy\n\n\nclass TensorList(list):\n    \"\"\"Container mainly used for lists of torch tensors. Extends lists with pytorch functionality.\"\"\"\n\n    def __init__(self, list_of_tensors = None):\n        if list_of_tensors is None:\n            list_of_tensors = list()\n        super(TensorList, self).__init__(list_of_tensors)\n\n    def __deepcopy__(self, memodict={}):\n        return TensorList(copy.deepcopy(list(self), memodict))\n\n    def __getitem__(self, item):\n        if isinstance(item, int):\n            return super(TensorList, self).__getitem__(item)\n        elif isinstance(item, (tuple, list)):\n            return TensorList([super(TensorList, self).__getitem__(i) for i in item])\n        else:\n            return TensorList(super(TensorList, self).__getitem__(item))\n\n    def __add__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 + e2 for e1, e2 in zip(self, other)])\n        return TensorList([e + other for e in self])\n\n    def __radd__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 + e1 for e1, e2 in zip(self, other)])\n        return TensorList([other + e for e in self])\n\n    def __iadd__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] += e2\n        else:\n            for i in range(len(self)):\n                self[i] += other\n        return self\n\n    def __sub__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 - e2 for e1, e2 in zip(self, other)])\n        return TensorList([e - other for e in self])\n\n    def __rsub__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 - e1 for e1, e2 in zip(self, other)])\n        return TensorList([other - e for e in self])\n\n    def __isub__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] -= e2\n        else:\n            for i in range(len(self)):\n                self[i] -= other\n        return self\n\n    def __mul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 * e2 for e1, e2 in zip(self, other)])\n        return TensorList([e * other for e in self])\n\n    def __rmul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 * e1 for e1, e2 in zip(self, other)])\n        return TensorList([other * e for e in self])\n\n    def __imul__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] *= e2\n        else:\n            for i in range(len(self)):\n                self[i] *= other\n        return self\n\n    def __truediv__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 / e2 for e1, e2 in zip(self, other)])\n        return TensorList([e / other for e in self])\n\n    def __rtruediv__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 / e1 for e1, e2 in zip(self, other)])\n        return TensorList([other / e for e in self])\n\n    def __itruediv__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] /= e2\n        else:\n            for i in range(len(self)):\n                self[i] /= other\n        return self\n\n    def __matmul__(self, other):\n       
 if TensorList._iterable(other):\n            return TensorList([e1 @ e2 for e1, e2 in zip(self, other)])\n        return TensorList([e @ other for e in self])\n\n    def __rmatmul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 @ e1 for e1, e2 in zip(self, other)])\n        return TensorList([other @ e for e in self])\n\n    def __imatmul__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] @= e2\n        else:\n            for i in range(len(self)):\n                self[i] @= other\n        return self\n\n    def __mod__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 % e2 for e1, e2 in zip(self, other)])\n        return TensorList([e % other for e in self])\n\n    def __rmod__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 % e1 for e1, e2 in zip(self, other)])\n        return TensorList([other % e for e in self])\n\n    def __pos__(self):\n        return TensorList([+e for e in self])\n\n    def __neg__(self):\n        return TensorList([-e for e in self])\n\n    def __le__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 <= e2 for e1, e2 in zip(self, other)])\n        return TensorList([e <= other for e in self])\n\n    def __ge__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 >= e2 for e1, e2 in zip(self, other)])\n        return TensorList([e >= other for e in self])\n\n    def concat(self, other):\n        return TensorList(super(TensorList, self).__add__(other))\n\n    def copy(self):\n        return TensorList(super(TensorList, self).copy())\n\n    def unroll(self):\n        if not any(isinstance(t, TensorList) for t in self):\n            return self\n\n        new_list = TensorList()\n        for t in self:\n            if isinstance(t, TensorList):\n                new_list.extend(t.unroll())\n            else:\n                new_list.append(t)\n        return new_list\n\n    def list(self):\n        return list(self)\n\n    def attribute(self, attr: str, *args):\n        return TensorList([getattr(e, attr, *args) for e in self])\n\n    def apply(self, fn):\n        return TensorList([fn(e) for e in self])\n\n    def __getattr__(self, name):\n        if not hasattr(torch.Tensor, name):\n            raise AttributeError('\\'TensorList\\' object has not attribute \\'{}\\''.format(name))\n\n        def apply_attr(*args, **kwargs):\n            return TensorList([getattr(e, name)(*args, **kwargs) for e in self])\n\n        return apply_attr\n\n    @staticmethod\n    def _iterable(a):\n        return isinstance(a, (TensorList, list))\n\n\n\ndef tensor_operation(op):\n    def islist(a):\n        return isinstance(a, TensorList)\n\n    @functools.wraps(op)\n    def oplist(*args, **kwargs):\n        if len(args) == 0:\n            raise ValueError('Must be at least one argument without keyword (i.e. 
operand).')\n\n        if len(args) == 1:\n            if islist(args[0]):\n                return TensorList([op(a, **kwargs) for a in args[0]])\n        else:\n            # Multiple operands, assume max two\n            if islist(args[0]) and islist(args[1]):\n                return TensorList([op(a, b, *args[2:], **kwargs) for a, b in zip(*args[:2])])\n            if islist(args[0]):\n                return TensorList([op(a, *args[1:], **kwargs) for a in args[0]])\n            if islist(args[1]):\n                return TensorList([op(args[0], b, *args[2:], **kwargs) for b in args[1]])\n\n        # None of the operands are lists\n        return op(*args, **kwargs)\n\n    return oplist\n"
  },
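  {
    "path": "artrackv2_mindspore/external/AR/pytracking/libs/tensorlist_example.py",
    "content": "# Illustrative sketch of TensorList arithmetic and the tensor_operation decorator.\n# Hypothetical file, not part of the upstream AR/pytracking code; assumes\n# external/AR is on PYTHONPATH.\nimport torch\n\nfrom pytracking.libs.tensorlist import TensorList, tensor_operation\n\na = TensorList([torch.ones(2), torch.ones(3)])\n\n# Operators broadcast over every element, so filters of different shapes can be\n# treated as one variable (as the optimizers in optimization.py do).\nb = a * 2 + 1\nprint(b[0])                        # tensor([3., 3.])\nprint(sum(a.view(-1) @ a.view(-1)))   # inner product over the whole list: tensor(5.)\n\n\n@tensor_operation\ndef scale(x: torch.Tensor, s: float):\n    return s * x\n\n# The decorator lifts a plain tensor function to TensorList inputs.\nprint(scale(a, 10.0)[1])           # tensor([10., 10., 10.])\n"
  },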
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/atom/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/atom/atom_gmm_sampl.py",
    "content": "from pytracking.utils import TrackerParams, FeatureParams, Choice\nfrom pytracking.features.extractor import MultiResolutionExtractor\nfrom pytracking.features import deep\nimport torch\n\ndef parameters():\n    params = TrackerParams()\n\n    # These are usually set from outside\n    params.debug = 0                        # Debug level\n    params.visualization = False            # Do visualization\n\n    # Use GPU or not (IoUNet requires this to be True)\n    params.use_gpu = True\n\n    # Feature specific parameters\n    deep_params = TrackerParams()\n\n    # Patch sampling parameters\n    params.max_image_sample_size = (18*16)**2   # Maximum image sample size\n    params.min_image_sample_size = (18*16)**2   # Minimum image sample size\n    params.search_area_scale = 5                # Scale relative to target size\n    params.feature_size_odd = False             # Good to use False for even-sized kernels and vice versa\n\n    # Optimization parameters\n    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame\n    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame\n    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)\n    params.post_init_CG_iter = 0        # CG iterations to run after GN\n    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient\n    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient\n    params.CG_forgetting_rate = None\t# Forgetting rate of the last conjugate direction\n\n    # Learning parameters for each feature type\n    deep_params.learning_rate = 0.01                # Learning rate\n    deep_params.init_samples_minimum_weight = 0.25  # Minimum weight of initial samples in memory\n    deep_params.output_sigma_factor = 1/4           # Standard deviation of Gaussian label relative to target size\n\n    # Training parameters\n    params.sample_memory_size = 250     # Memory size\n    params.train_skipping = 10          # How often to run training (every n-th frame)\n\n    # Online model parameters\n    deep_params.kernel_size = (4,4)     # Kernel size of filter\n    deep_params.compressed_dim = 64     # Dimension output of projection matrix\n    deep_params.filter_reg = 1e-1       # Filter regularization factor\n    deep_params.projection_reg = 1e-4   # Projection regularization factor\n\n    # Windowing\n    params.feature_window = False       # Perform windowing of features\n    params.window_output = False        # Perform windowing of output scores\n\n    # Detection parameters\n    params.scale_factors = torch.ones(1) # What scales to use for localization (only one scale if IoUNet is used)\n    params.score_upsample_factor = 1     # How much Fourier upsampling to use\n\n    # Init data augmentation parameters\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation\n    
params.random_shift_factor = 1/3            # How much random shift to do on each augmented sample\n    deep_params.use_augmentation = True         # Whether to use augmentation for this feature\n\n    # Factorized convolution parameters\n    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation\n    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not\n    params.proj_init_method = 'randn'           # Method for initializing the projection matrix\n    params.filter_init_method = 'randn'         # Method for initializing the spatial filter\n    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')\n    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')\n\n    # Advanced localization parameters\n    params.advanced_localization = True         # Use this or not\n    params.target_not_found_threshold = 0.25    # Absolute score threshold to detect target missing\n    params.distractor_threshold = 0.8           # Relative threshold to find distractors\n    params.hard_negative_threshold = 0.5        # Relative threshold to find hard negative samples\n    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove\n    params.dispalcement_scale = 0.8             # Displacement to consider for distractors\n    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected\n    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected\n    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close\n\n    # IoUNet parameters\n    params.use_iou_net = True               # Use IoU net or not\n    params.box_refinement_space = 'relative'\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 10         # Number of iterations for refining the boxes\n    params.box_refinement_step_length = (1e-2, 5e-2)  # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    # Setup the feature extractor (which includes the IoUNet)\n    deep_fparams = FeatureParams(feature_params=[deep_params])\n    deep_feat = deep.ATOMResNet18(net_path='atom_gmm_sampl', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)\n    params.features = MultiResolutionExtractor([deep_feat])\n\n    return params\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/atom/atom_prob_ml.py",
    "content": "from pytracking.utils import TrackerParams, FeatureParams, Choice\nfrom pytracking.features.extractor import MultiResolutionExtractor\nfrom pytracking.features import deep\nimport torch\n\ndef parameters():\n    params = TrackerParams()\n\n    # These are usually set from outside\n    params.debug = 0                        # Debug level\n    params.visualization = False            # Do visualization\n\n    # Use GPU or not (IoUNet requires this to be True)\n    params.use_gpu = True\n\n    # Feature specific parameters\n    deep_params = TrackerParams()\n\n    # Patch sampling parameters\n    params.max_image_sample_size = (18*16)**2   # Maximum image sample size\n    params.min_image_sample_size = (18*16)**2   # Minimum image sample size\n    params.search_area_scale = 5                # Scale relative to target size\n    params.feature_size_odd = False             # Good to use False for even-sized kernels and vice versa\n\n    # Optimization parameters\n    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame\n    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame\n    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)\n    params.post_init_CG_iter = 0        # CG iterations to run after GN\n    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient\n    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient\n    params.CG_forgetting_rate = None\t# Forgetting rate of the last conjugate direction\n\n    # Learning parameters for each feature type\n    deep_params.learning_rate = 0.01                # Learning rate\n    deep_params.init_samples_minimum_weight = 0.25  # Minimum weight of initial samples in memory\n    deep_params.output_sigma_factor = 1/4           # Standard deviation of Gaussian label relative to target size\n\n    # Training parameters\n    params.sample_memory_size = 250     # Memory size\n    params.train_skipping = 10          # How often to run training (every n-th frame)\n\n    # Online model parameters\n    deep_params.kernel_size = (4,4)     # Kernel size of filter\n    deep_params.compressed_dim = 64     # Dimension output of projection matrix\n    deep_params.filter_reg = 1e-1       # Filter regularization factor\n    deep_params.projection_reg = 1e-4   # Projection regularization factor\n\n    # Windowing\n    params.feature_window = False       # Perform windowing of features\n    params.window_output = False        # Perform windowing of output scores\n\n    # Detection parameters\n    params.scale_factors = torch.ones(1) # What scales to use for localization (only one scale if IoUNet is used)\n    params.score_upsample_factor = 1     # How much Fourier upsampling to use\n\n    # Init data augmentation parameters\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation\n    
params.random_shift_factor = 1/3            # How much random shift to do on each augmented sample\n    deep_params.use_augmentation = True         # Whether to use augmentation for this feature\n\n    # Factorized convolution parameters\n    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation\n    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not\n    params.proj_init_method = 'randn'           # Method for initializing the projection matrix\n    params.filter_init_method = 'randn'         # Method for initializing the spatial filter\n    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')\n    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')\n\n    # Advanced localization parameters\n    params.advanced_localization = True         # Use this or not\n    params.target_not_found_threshold = 0.25    # Absolute score threshold to detect target missing\n    params.distractor_threshold = 0.8           # Relative threshold to find distractors\n    params.hard_negative_threshold = 0.5        # Relative threshold to find hard negative samples\n    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove\n    params.dispalcement_scale = 0.8             # Displacement to consider for distractors\n    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected\n    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected\n    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close\n\n    # IoUNet parameters\n    params.use_iou_net = True               # Use IoU net or not\n    params.box_refinement_space = 'relative'\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 10         # Number of iterations for refining the boxes\n    params.box_refinement_step_length = (2e-4, 10e-4)  # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    # Setup the feature extractor (which includes the IoUNet)\n    deep_fparams = FeatureParams(feature_params=[deep_params])\n    deep_feat = deep.ATOMResNet18(net_path='atom_prob_ml', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)\n    params.features = MultiResolutionExtractor([deep_feat])\n\n    return params\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/atom/default.py",
    "content": "from pytracking.utils import TrackerParams, FeatureParams, Choice\nfrom pytracking.features.extractor import MultiResolutionExtractor\nfrom pytracking.features import deep\nimport torch\n\ndef parameters():\n    params = TrackerParams()\n\n    # These are usually set from outside\n    params.debug = 0                        # Debug level\n    params.visualization = False            # Do visualization\n\n    # Use GPU or not (IoUNet requires this to be True)\n    params.use_gpu = True\n\n    # Feature specific parameters\n    deep_params = TrackerParams()\n\n    # Patch sampling parameters\n    params.max_image_sample_size = (18*16)**2   # Maximum image sample size\n    params.min_image_sample_size = (18*16)**2   # Minimum image sample size\n    params.search_area_scale = 5                # Scale relative to target size\n    params.feature_size_odd = False             # Good to use False for even-sized kernels and vice versa\n\n    # Optimization parameters\n    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame\n    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame\n    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)\n    params.post_init_CG_iter = 0        # CG iterations to run after GN\n    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient\n    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient\n    params.CG_forgetting_rate = None\t# Forgetting rate of the last conjugate direction\n\n    # Learning parameters for each feature type\n    deep_params.learning_rate = 0.01                # Learning rate\n    deep_params.init_samples_minimum_weight = 0.25  # Minimum weight of initial samples in memory\n    deep_params.output_sigma_factor = 1/4           # Standard deviation of Gaussian label relative to target size\n\n    # Training parameters\n    params.sample_memory_size = 250     # Memory size\n    params.train_skipping = 10          # How often to run training (every n-th frame)\n\n    # Online model parameters\n    deep_params.kernel_size = (4,4)     # Kernel size of filter\n    deep_params.compressed_dim = 64     # Dimension output of projection matrix\n    deep_params.filter_reg = 1e-1       # Filter regularization factor\n    deep_params.projection_reg = 1e-4   # Projection regularization factor\n\n    # Windowing\n    params.feature_window = False       # Perform windowing of features\n    params.window_output = False        # Perform windowing of output scores\n\n    # Detection parameters\n    params.scale_factors = torch.ones(1) # What scales to use for localization (only one scale if IoUNet is used)\n    params.score_upsample_factor = 1     # How much Fourier upsampling to use\n\n    # Init data augmentation parameters\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation\n    
params.random_shift_factor = 1/3            # How much random shift to do on each augmented sample\n    deep_params.use_augmentation = True         # Whether to use augmentation for this feature\n\n    # Factorized convolution parameters\n    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation\n    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not\n    params.proj_init_method = 'randn'           # Method for initializing the projection matrix\n    params.filter_init_method = 'randn'         # Method for initializing the spatial filter\n    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')\n    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')\n\n    # Advanced localization parameters\n    params.advanced_localization = True         # Use this or not\n    params.target_not_found_threshold = 0.25    # Absolute score threshold to detect target missing\n    params.distractor_threshold = 0.8           # Relative threshold to find distractors\n    params.hard_negative_threshold = 0.5        # Relative threshold to find hard negative samples\n    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove\n    params.dispalcement_scale = 0.8             # Displacement to consider for distractors\n    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected\n    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected\n    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close\n\n    # IoUNet parameters\n    params.use_iou_net = True               # Use IoU net or not\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 5          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = 1   # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    # Setup the feature extractor (which includes the IoUNet)\n    deep_fparams = FeatureParams(feature_params=[deep_params])\n    deep_feat = deep.ATOMResNet18(net_path='atom_default.pth', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)\n    params.features = MultiResolutionExtractor([deep_feat])\n\n    return params\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/atom/default_vot.py",
    "content": "from pytracking.utils import TrackerParams, FeatureParams, Choice\nfrom pytracking.features.extractor import MultiResolutionExtractor\nfrom pytracking.features import deep\nimport torch\n\ndef parameters():\n    params = TrackerParams()\n\n    # These are usually set from outside\n    params.debug = 0                        # Debug level\n    params.visualization = False            # Do visualization\n\n    # Use GPU or not (IoUNet requires this to be True)\n    params.use_gpu = True\n\n    # Feature specific parameters\n    deep_params = TrackerParams()\n\n    # Patch sampling parameters\n    params.max_image_sample_size = (14 * 16) ** 2   # Maximum image sample size\n    params.min_image_sample_size = (14 * 16) ** 2   # Minimum image sample size\n    params.search_area_scale = 4                    # Scale relative to target size\n    params.feature_size_odd = False                 # Good to use False for even-sized kernels and vice versa\n\n    # Optimization parameters\n    params.CG_iter = 5                    # The number of Conjugate Gradient iterations in each update after the first frame\n    params.init_CG_iter = 60              # The total number of Conjugate Gradient iterations used in the first frame\n    params.init_GN_iter = 6               # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)\n    params.post_init_CG_iter = 0          # CG iterations to run after GN\n    params.fletcher_reeves = False        # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient\n    params.standard_alpha = True          # Use the standard formula for computing the step length in Conjugate Gradient\n    params.CG_forgetting_rate = None\t  # Forgetting rate of the last conjugate direction\n\n    # Learning parameters for each feature type\n    deep_params.learning_rate = 0.0075           # Learning rate\n    deep_params.output_sigma_factor = 1/4        # Standard deviation of Gaussian label relative to target size\n\n    # Training parameters\n    params.sample_memory_size = 250              # Memory size\n    params.train_skipping = 10                   # How often to run training (every n-th frame)\n\n    # Online model parameters\n    deep_params.kernel_size = (4, 4)             # Kernel size of filter\n    deep_params.compressed_dim = 64              # Dimension output of projection matrix\n    deep_params.filter_reg = 1e-1                # Filter regularization factor\n    deep_params.projection_reg = 1e-4            # Projection regularization factor\n\n    # Windowing\n    params.feature_window = False                # Perform windowing of features\n    params.window_output = True                  # Perform windowing of output scores\n\n    # Detection parameters\n    params.scale_factors = torch.ones(1)        # What scales to use for localization (only one scale if IoUNet is used)\n    params.score_upsample_factor = 1            # How much Fourier upsampling to use\n\n    # Init data augmentation parameters\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation\n    
params.random_shift_factor = 1 / 3          # How much random shift to do on each augmented sample\n    deep_params.use_augmentation = True         # Whether to use augmentation for this feature\n\n    # Factorized convolution parameters\n    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation\n    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not\n    params.proj_init_method = 'randn'           # Method for initializing the projection matrix\n    params.filter_init_method = 'randn'         # Method for initializing the spatial filter\n    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')\n    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')\n\n    # Advanced localization parameters\n    params.advanced_localization = True         # Use this or not\n    params.target_not_found_threshold = -1      # Absolute score threshold to detect target missing\n    params.distractor_threshold = 100           # Relative threshold to find distractors\n    params.hard_negative_threshold = 0.3        # Relative threshold to find hard negative samples\n    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove\n    params.dispalcement_scale = 0.7             # Displacement to consider for distractors\n    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected\n    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected\n    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close\n\n    # IoUNet parameters\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 5          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = 1   # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    # Setup the feature extractor (which includes the IoUNet)\n    deep_fparams = FeatureParams(feature_params=[deep_params])\n    deep_feat = deep.ATOMResNet18(net_path='atom_default.pth', output_layers=['layer3'], fparams=deep_fparams,\n                                  normalize_power=2)\n    params.features = MultiResolutionExtractor([deep_feat])\n\n    params.vot_anno_conversion_type = 'preserve_area'\n    return params"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/atom/multiscale_no_iounet.py",
    "content": "from pytracking.utils import TrackerParams, FeatureParams, Choice\nfrom pytracking.features.extractor import MultiResolutionExtractor\nfrom pytracking.features import deep\nimport torch\n\ndef parameters():\n    params = TrackerParams()\n\n    # These are usually set from outside\n    params.debug = 0                        # Debug level\n    params.visualization = False            # Do visualization\n\n    # Use GPU or not (IoUNet requires this to be True)\n    params.use_gpu = True\n\n    # Feature specific parameters\n    deep_params = TrackerParams()\n\n    # Patch sampling parameters\n    params.max_image_sample_size = (18*16)**2   # Maximum image sample size\n    params.min_image_sample_size = (18*16)**2   # Minimum image sample size\n    params.search_area_scale = 5                # Scale relative to target size\n    params.feature_size_odd = False             # Good to use False for even-sized kernels and vice versa\n\n    # Optimization parameters\n    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame\n    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame\n    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)\n    params.post_init_CG_iter = 0        # CG iterations to run after GN\n    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient\n    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient\n    params.CG_forgetting_rate = None\t# Forgetting rate of the last conjugate direction\n\n    # Learning parameters for each feature type\n    deep_params.learning_rate = 0.01                # Learning rate\n    deep_params.init_samples_minimum_weight = 0.25  # Minimum weight of initial samples in memory\n    deep_params.output_sigma_factor = 1/4           # Standard deviation of Gaussian label relative to target size\n\n    # Training parameters\n    params.sample_memory_size = 250     # Memory size\n    params.train_skipping = 10          # How often to run training (every n-th frame)\n\n    # Online model parameters\n    deep_params.kernel_size = (4,4)     # Kernel size of filter\n    deep_params.compressed_dim = 64     # Dimension output of projection matrix\n    deep_params.filter_reg = 1e-1       # Filter regularization factor\n    deep_params.projection_reg = 1e-4   # Projection regularization factor\n\n    # Windowing\n    params.feature_window = False       # Perform windowing of features\n    params.window_output = False        # Perform windowing of output scores\n\n    # Detection parameters\n    params.scale_factors = 1.02**torch.arange(-2, 3).float() # What scales to use for localization (only one scale if IoUNet is used)\n    params.score_upsample_factor = 1     # How much Fourier upsampling to use\n\n    # Init data augmentation parameters\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation\n    
params.random_shift_factor = 1/3            # How much random shift to do on each augmented sample\n    deep_params.use_augmentation = True         # Whether to use augmentation for this feature\n\n    # Factorized convolution parameters\n    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation\n    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not\n    params.proj_init_method = 'randn'           # Method for initializing the projection matrix\n    params.filter_init_method = 'randn'         # Method for initializing the spatial filter\n    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')\n    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')\n\n    # Advanced localization parameters\n    params.advanced_localization = True         # Use this or not\n    params.target_not_found_threshold = 0.25    # Absolute score threshold to detect target missing\n    params.distractor_threshold = 0.8           # Relative threshold to find distractors\n    params.hard_negative_threshold = 0.5        # Relative threshold to find hard negative samples\n    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove\n    params.dispalcement_scale = 0.8             # Displacement to consider for distractors\n    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected\n    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected\n    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close\n\n    # IoUNet parameters\n    params.use_iou_net = False              # Use IoU net or not\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 5          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = 1   # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    # Setup the feature extractor (which includes the IoUNet)\n    deep_fparams = FeatureParams(feature_params=[deep_params])\n    deep_feat = deep.ATOMResNet18(net_path='atom_default.pth', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)\n    params.features = MultiResolutionExtractor([deep_feat])\n\n    return params\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/dimp/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/dimp/dimp18.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 18*16\n    params.search_area_scale = 5\n\n    # Learning parameters\n    params.sample_memory_size = 50\n    params.learning_rate = 0.01\n    params.init_samples_minimum_weight = 0.25\n    params.train_skipping = 20\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 10\n    params.net_opt_update_iter = 2\n    params.net_opt_hn_iter = 1\n\n    # Detection parameters\n    params.window_output = False\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [10, -10, 45, -45],\n                           'blur': [(3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (2, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.target_not_found_threshold = 0.25\n    params.distractor_threshold = 0.8\n    params.hard_negative_threshold = 0.5\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.8\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.iounet_augmentation = False\n    params.iounet_use_log_scale = True\n    params.iounet_k = 3\n    params.num_init_random_boxes = 9\n    params.box_jitter_pos = 0.1\n    params.box_jitter_sz = 0.5\n    params.maximal_aspect_ratio = 6\n    params.box_refinement_iter = 5\n    params.box_refinement_step_length = 1\n    params.box_refinement_step_decay = 1\n\n    params.net = NetWithBackbone(net_path='dimp18.pth',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/dimp/dimp18_vot.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 14 * 16\n    params.search_area_scale = 4\n    params.feature_size_odd = False\n\n    # Learning parameters\n    params.sample_memory_size = 250\n    params.learning_rate = 0.0075\n    params.init_samples_minimum_weight = 0.0\n    params.train_skipping = 10\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 25\n    params.net_opt_update_iter = 3\n    params.net_opt_hn_iter = 3\n\n    # Detection parameters\n    params.window_output = True\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.target_not_found_threshold = 0.0\n    params.distractor_threshold = 100\n    params.hard_negative_threshold = 0.45\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.7\n\n    params.perform_hn_without_windowing = True\n\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.iounet_augmentation = False\n    params.iounet_use_log_scale = True\n    params.iounet_k = 3\n    params.num_init_random_boxes = 9\n    params.box_jitter_pos = 0.1\n    params.box_jitter_sz = 0.5\n    params.maximal_aspect_ratio = 6\n    params.box_refinement_iter = 5\n    params.box_refinement_step_length = 1\n    params.box_refinement_step_decay = 1\n\n    params.net = NetWithBackbone(net_path='dimp18.pth',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/dimp/dimp50.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 18*16\n    params.search_area_scale = 5\n\n    # Learning parameters\n    params.sample_memory_size = 50\n    params.learning_rate = 0.01\n    params.init_samples_minimum_weight = 0.25\n    params.train_skipping = 20\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 10\n    params.net_opt_update_iter = 2\n    params.net_opt_hn_iter = 1\n\n    # Detection parameters\n    params.window_output = False\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [10, -10, 45, -45],\n                           'blur': [(3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (2, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.target_not_found_threshold = 0.25\n    params.distractor_threshold = 0.8\n    params.hard_negative_threshold = 0.5\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.8\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.iounet_augmentation = False\n    params.iounet_use_log_scale = True\n    params.iounet_k = 3\n    params.num_init_random_boxes = 9\n    params.box_jitter_pos = 0.1\n    params.box_jitter_sz = 0.5\n    params.maximal_aspect_ratio = 6\n    params.box_refinement_iter = 5\n    params.box_refinement_step_length = 1\n    params.box_refinement_step_decay = 1\n\n    params.net = NetWithBackbone(net_path='dimp50.pth',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/dimp/dimp50_vot.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 14 * 16\n    params.search_area_scale = 4\n\n    # Learning parameters\n    params.sample_memory_size = 250\n    params.learning_rate = 0.0075\n    params.init_samples_minimum_weight = 0.0\n    params.train_skipping = 10\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 25\n    params.net_opt_update_iter = 3\n    params.net_opt_hn_iter = 3\n\n    # Detection parameters\n    params.window_output = True\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.target_not_found_threshold = 0.0\n    params.distractor_threshold = 100\n    params.hard_negative_threshold = 0.45\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.7\n\n    params.perform_hn_without_windowing = True\n\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.iounet_augmentation = False\n    params.iounet_use_log_scale = True\n    params.iounet_k = 3\n    params.num_init_random_boxes = 9\n    params.box_jitter_pos = 0.1\n    params.box_jitter_sz = 0.5\n    params.maximal_aspect_ratio = 6\n    params.box_refinement_iter = 5\n    params.box_refinement_step_length = 1\n    params.box_refinement_step_decay = 1\n\n    params.net = NetWithBackbone(net_path='dimp50.pth',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/dimp/dimp50_vot19.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 16 * 16\n    params.search_area_scale = 4.5\n\n    # Learning parameters\n    params.sample_memory_size = 100\n    params.learning_rate = 0.0075\n    params.init_samples_minimum_weight = 0.0\n    params.train_skipping = 10\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 15\n    params.net_opt_update_iter = 2\n    params.net_opt_hn_iter = 2\n\n    # Detection parameters\n    params.window_output = True\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [-5, 10, -30, 60],\n                           'blur': [(2, 0.2), (1, 3)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, -0.6)],\n                           'dropout': (3, 0.2)}\n\n    params.augmentation_expansion_factor = 1.4\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.target_not_found_threshold = 0.0\n    params.distractor_threshold = 100\n    params.hard_negative_threshold = 0.45\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.7\n\n    params.perform_hn_without_windowing = True\n\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.iounet_augmentation = False\n    params.iounet_use_log_scale = True\n    params.iounet_k = 3\n    params.num_init_random_boxes = 9\n    params.box_jitter_pos = 0.1\n    params.box_jitter_sz = 0.5\n    params.maximal_aspect_ratio = 6\n    params.box_refinement_iter = 3\n    params.box_refinement_step_length = 1\n    params.box_refinement_step_decay = 1\n\n    params.net = NetWithBackbone(net_path='dimp50.pth',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/dimp/prdimp18.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 18*16\n    params.search_area_scale = 5\n\n    # Learning parameters\n    params.sample_memory_size = 50\n    params.learning_rate = 0.01\n    params.init_samples_minimum_weight = 0.25\n    params.train_skipping = 20\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 10\n    params.net_opt_update_iter = 2\n    params.net_opt_hn_iter = 1\n\n    # Detection parameters\n    params.window_output = False\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [10, -10, 45, -45],\n                           'blur': [(3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (2, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.score_preprocess = 'softmax'\n    params.target_not_found_threshold = 0.04\n    params.distractor_threshold = 0.8\n    params.hard_negative_threshold = 0.5\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.8\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.box_refinement_space = 'relative'\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 10          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = 2.5e-3 # 1   # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    params.net = NetWithBackbone(net_path='prdimp18.pth.tar',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/dimp/prdimp50.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 22*16\n    params.search_area_scale = 6\n    params.border_mode = 'inside_major'\n    params.patch_max_scale_change = 1.5\n\n    # Learning parameters\n    params.sample_memory_size = 50\n    params.learning_rate = 0.01\n    params.init_samples_minimum_weight = 0.25\n    params.train_skipping = 20\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 10\n    params.net_opt_update_iter = 2\n    params.net_opt_hn_iter = 1\n\n    # Detection parameters\n    params.window_output = False\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [10, -10, 45, -45],\n                           'blur': [(3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (2, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.score_preprocess = 'softmax'\n    params.target_not_found_threshold = 0.04\n    params.distractor_threshold = 0.8\n    params.hard_negative_threshold = 0.5\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.8\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.box_refinement_space = 'relative'\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 10          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = 2.5e-3 # 1   # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    params.net = NetWithBackbone(net_path='prdimp50.pth.tar',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/dimp/super_dimp.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 22*16\n    params.search_area_scale = 6\n    params.border_mode = 'inside_major'\n    params.patch_max_scale_change = 1.5\n\n    # Learning parameters\n    params.sample_memory_size = 50\n    params.learning_rate = 0.01\n    params.init_samples_minimum_weight = 0.25\n    params.train_skipping = 20\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 10\n    params.net_opt_update_iter = 2\n    params.net_opt_hn_iter = 1\n\n    # Detection parameters\n    params.window_output = False\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [10, -10, 45, -45],\n                           'blur': [(3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (2, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.target_not_found_threshold = 0.25\n    params.distractor_threshold = 0.8\n    params.hard_negative_threshold = 0.5\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.8\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.box_refinement_space = 'relative'\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 10          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = 2.5e-3 # 1   # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    params.net = NetWithBackbone(net_path='super_dimp.pth.tar',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/eco/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/parameter/eco/default.py",
    "content": "from pytracking.utils import TrackerParams, FeatureParams\nfrom pytracking.features.extractor import MultiResolutionExtractor\nfrom pytracking.features import deep\nimport torch\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    # Feature specific parameters\n    shallow_params = TrackerParams()\n    deep_params = TrackerParams()\n\n    # Patch sampling parameters\n    params.max_image_sample_size = 250**2   # Maximum image sample size\n    params.min_image_sample_size = 200**2   # Minimum image sample size\n    params.search_area_scale = 4.5          # Scale relative to target size\n\n    # Conjugate Gradient parameters\n    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame\n    params.init_CG_iter = 100           # The total number of Conjugate Gradient iterations used in the first frame\n    params.init_GN_iter = 10            # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)\n    params.post_init_CG_iter = 0        # CG iterations to run after GN\n    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient\n    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient\n    params.CG_forgetting_rate = 75\t \t# Forgetting rate of the last conjugate direction\n    params.precond_data_param = 0.3\t \t# Weight of the data term in the preconditioner\n    params.precond_reg_param = 0.15\t \t# Weight of the regularization term in the preconditioner\n    params.precond_proj_param = 35\t \t# Weight of the projection matrix part in the preconditioner\n\n    # Learning parameters\n    shallow_params.learning_rate = 0.025\n    deep_params.learning_rate = 0.0075\n    shallow_params.output_sigma_factor = 1/16\n    deep_params.output_sigma_factor = 1/4\n\n    # Training parameters\n    params.sample_memory_size = 200     # Memory size\n    params.train_skipping = 10          # How often to run training (every n-th frame)\n\n    # Detection parameters\n    params.scale_factors = 1.02**torch.arange(-2, 3).float()     # What scales to use for localization\n    params.score_upsample_factor = 1                             # How much Fourier upsampling to use\n    params.score_fusion_strategy = 'weightedsum'                 # Fusion strategy\n    shallow_params.translation_weight = 0.4                      # Weight of this feature\n    deep_params.translation_weight = 1 - shallow_params.translation_weight\n\n    # Init augmentation parameters\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],\n                           'shift': [(6, 6), (-6, 6), (6, -6), (-6,-6)],\n                           'dropout': (7, 0.2)}\n\n    # Whether to use augmentation for this feature\n    deep_params.use_augmentation = True\n    shallow_params.use_augmentation = True\n\n    # Factorized convolution parameters\n    # params.use_projection_matrix = True    # Use projection matrix, i.e. 
use the factorized convolution formulation\n    params.update_projection_matrix = True   # Whether the projection matrix should be optimized or not\n    # params.proj_init_method = 'pca'        # Method for initializing the projection matrix\n    params.projection_reg = 5e-8\t \t \t # Regularization parameter of the projection matrix\n    shallow_params.compressed_dim = 16       # Dimension output of projection matrix for shallow features\n    deep_params.compressed_dim = 64          # Dimension output of projection matrix for deep features\n\n    # Interpolation parameters\n    params.interpolation_method = 'bicubic'    # The kind of interpolation kernel\n    params.interpolation_bicubic_a = -0.75     # The parameter for the bicubic interpolation kernel\n    params.interpolation_centering = True      # Center the kernel at the feature sample\n    params.interpolation_windowing = False     # Do additional windowing on the Fourier coefficients of the kernel\n\n    # Regularization parameters\n    shallow_params.use_reg_window = True           # Use spatial regularization or not\n    shallow_params.reg_window_min = 1e-4\t\t   # The minimum value of the regularization window\n    shallow_params.reg_window_edge = 10e-3         # The impact of the spatial regularization\n    shallow_params.reg_window_power = 2            # The degree of the polynomial to use (e.g. 2 is a quadratic window)\n    shallow_params.reg_sparsity_threshold = 0.05   # A relative threshold determining which DFT coefficients should be set to zero\n\n    deep_params.use_reg_window = True           # Use spatial regularization or not\n    deep_params.reg_window_min = 10e-4\t\t\t# The minimum value of the regularization window\n    deep_params.reg_window_edge = 50e-3         # The impact of the spatial regularization\n    deep_params.reg_window_power = 2            # The degree of the polynomial to use (e.g. 2 is a quadratic window)\n    deep_params.reg_sparsity_threshold = 0.1    # A relative threshold determining which DFT coefficients should be set to zero\n\n\n    fparams = FeatureParams(feature_params=[shallow_params, deep_params])\n    features = deep.ResNet18m1(output_layers=['vggconv1', 'layer3'], use_gpu=params.use_gpu, fparams=fparams,\n                               pool_stride=[2, 1], normalize_power=2)\n\n    params.features = MultiResolutionExtractor([features])\n\n    return params"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/tracker/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/tracker/atom/__init__.py",
    "content": "from .atom import ATOM\n\ndef get_tracker_class():\n    return ATOM"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/tracker/atom/atom.py",
    "content": "from pytracking.tracker.base import BaseTracker\nimport torch\nimport torch.nn.functional as F\nimport torch.nn\nimport math\nimport time\nfrom pytracking import dcf, fourier, TensorList, operation\nfrom pytracking.features.preprocessing import numpy_to_torch\nfrom pytracking.utils.plotting import show_tensor\nfrom pytracking.libs.optimization import GaussNewtonCG, ConjugateGradient, GradientDescentL2\nfrom .optim import ConvProblem, FactorizedConvProblem\nfrom pytracking.features import augmentation\nimport ltr.data.bounding_box_utils as bbutils\n\n\nclass ATOM(BaseTracker):\n\n    multiobj_mode = 'parallel'\n\n    def initialize_features(self):\n        if not getattr(self, 'features_initialized', False):\n            self.params.features.initialize()\n        self.features_initialized = True\n\n\n    def initialize(self, image, info: dict) -> dict:\n        state = info['init_bbox']\n\n        # Initialize some stuff\n        self.frame_num = 1\n        if not self.params.has('device'):\n            self.params.device = 'cuda' if self.params.use_gpu else 'cpu'\n\n        # Initialize features\n        self.initialize_features()\n\n        # Check if image is color\n        self.params.features.set_is_color(image.shape[2] == 3)\n\n        # Get feature specific params\n        self.fparams = self.params.features.get_fparams('feature_params')\n\n        tic = time.time()\n\n        # Get position and size\n        self.pos = torch.Tensor([state[1] + (state[3] - 1)/2, state[0] + (state[2] - 1)/2])\n        self.target_sz = torch.Tensor([state[3], state[2]])\n\n        # Set search area\n        self.target_scale = 1.0\n        search_area = torch.prod(self.target_sz * self.params.search_area_scale).item()\n        if search_area > self.params.max_image_sample_size:\n            self.target_scale =  math.sqrt(search_area / self.params.max_image_sample_size)\n        elif search_area < self.params.min_image_sample_size:\n            self.target_scale =  math.sqrt(search_area / self.params.min_image_sample_size)\n\n        # Check if IoUNet is used\n        self.use_iou_net = self.params.get('use_iou_net', True)\n\n        # Target size in base scale\n        self.base_target_sz = self.target_sz / self.target_scale\n\n        # Use odd square search area and set sizes\n        feat_max_stride = max(self.params.features.stride())\n        if self.params.get('search_area_shape', 'square') == 'square':\n            self.img_sample_sz = torch.round(torch.sqrt(torch.prod(self.base_target_sz * self.params.search_area_scale))) * torch.ones(2)\n        elif self.params.search_area_shape == 'initrect':\n            self.img_sample_sz = torch.round(self.base_target_sz * self.params.search_area_scale)\n        else:\n            raise ValueError('Unknown search area shape')\n        if self.params.feature_size_odd:\n            self.img_sample_sz += feat_max_stride - self.img_sample_sz % (2 * feat_max_stride)\n        else:\n            self.img_sample_sz += feat_max_stride - (self.img_sample_sz + feat_max_stride) % (2 * feat_max_stride)\n\n        # Set sizes\n        self.img_support_sz = self.img_sample_sz\n        self.feature_sz = self.params.features.size(self.img_sample_sz)\n        self.output_sz = self.params.score_upsample_factor * self.img_support_sz  # Interpolated size of the output\n        self.kernel_size = self.fparams.attribute('kernel_size')\n\n        self.iou_img_sample_sz = self.img_sample_sz\n\n        # Optimization options\n        
self.params.precond_learning_rate = self.fparams.attribute('learning_rate')\n        if self.params.CG_forgetting_rate is None or max(self.params.precond_learning_rate) >= 1:\n            self.params.direction_forget_factor = 0\n        else:\n            self.params.direction_forget_factor = (1 - max(self.params.precond_learning_rate))**self.params.CG_forgetting_rate\n\n        self.output_window = None\n        if self.params.get('window_output', False):\n            if self.params.get('use_clipped_window', False):\n                self.output_window = dcf.hann2d_clipped(self.output_sz.long(), self.output_sz.long()*self.params.effective_search_area / self.params.search_area_scale, centered=False).to(self.params.device)\n            else:\n                self.output_window = dcf.hann2d(self.output_sz.long(), centered=False).to(self.params.device)\n\n        # Initialize some learning things\n        self.init_learning()\n\n        # Convert image\n        im = numpy_to_torch(image)\n        self.im = im    # For debugging only\n\n        # Setup scale bounds\n        self.image_sz = torch.Tensor([im.shape[2], im.shape[3]])\n        self.min_scale_factor = torch.max(10 / self.base_target_sz)\n        self.max_scale_factor = torch.min(self.image_sz / self.base_target_sz)\n\n        # Extract and transform sample\n        x = self.generate_init_samples(im)\n\n        # Initialize iounet\n        if self.use_iou_net:\n            self.init_iou_net()\n\n        # Initialize projection matrix\n        self.init_projection_matrix(x)\n\n        # Transform to get the training sample\n        train_x = self.preprocess_sample(x)\n\n        # Generate label function\n        init_y = self.init_label_function(train_x)\n\n        # Init memory\n        self.init_memory(train_x)\n\n        # Init optimizer and do initial optimization\n        self.init_optimization(train_x, init_y)\n\n        self.pos_iounet = self.pos.clone()\n\n        out = {'time': time.time() - tic}\n        return out\n\n\n    def init_optimization(self, train_x, init_y):\n        # Initialize filter\n        filter_init_method = self.params.get('filter_init_method', 'zeros')\n        self.filter = TensorList(\n            [x.new_zeros(1, cdim, sz[0], sz[1]) for x, cdim, sz in zip(train_x, self.compressed_dim, self.kernel_size)])\n        if filter_init_method == 'zeros':\n            pass\n        elif filter_init_method == 'randn':\n            for f in self.filter:\n                f.normal_(0, 1/f.numel())\n        else:\n            raise ValueError('Unknown \"filter_init_method\"')\n\n        # Get parameters\n        self.params.update_projection_matrix = self.params.get('update_projection_matrix', True) and self.params.use_projection_matrix\n        optimizer = self.params.get('optimizer', 'GaussNewtonCG')\n\n        # Setup factorized joint optimization\n        if self.params.update_projection_matrix:\n            self.joint_problem = FactorizedConvProblem(self.init_training_samples, init_y, self.filter_reg,\n                                                       self.fparams.attribute('projection_reg'), self.params, self.init_sample_weights,\n                                                       self.projection_activation, self.response_activation)\n\n            # Variable containing both filter and projection matrix\n            joint_var = self.filter.concat(self.projection_matrix)\n\n            # Initialize optimizer\n            analyze_convergence = self.params.get('analyze_convergence', False)\n            if 
optimizer == 'GaussNewtonCG':\n                self.joint_optimizer = GaussNewtonCG(self.joint_problem, joint_var, debug=(self.params.debug >= 1),\n                                                     plotting=(self.params.debug >= 3), analyze=analyze_convergence,\n                                                     visdom=self.visdom)\n            elif optimizer == 'GradientDescentL2':\n                self.joint_optimizer = GradientDescentL2(self.joint_problem, joint_var, self.params.optimizer_step_length, self.params.optimizer_momentum, plotting=(self.params.debug >= 3), debug=(self.params.debug >= 1),\n                                                         visdom=self.visdom)\n\n            # Do joint optimization\n            if isinstance(self.params.init_CG_iter, (list, tuple)):\n                self.joint_optimizer.run(self.params.init_CG_iter)\n            else:\n                self.joint_optimizer.run(self.params.init_CG_iter // self.params.init_GN_iter, self.params.init_GN_iter)\n\n            if analyze_convergence:\n                opt_name = 'CG' if self.params.get('CG_optimizer', True) else 'GD'\n                for val_name, values in zip(['loss', 'gradient'], [self.joint_optimizer.losses, self.joint_optimizer.gradient_mags]):\n                    val_str = ' '.join(['{:.8e}'.format(v.item()) for v in values])\n                    file_name = '{}_{}.txt'.format(opt_name, val_name)\n                    with open(file_name, 'a') as f:\n                        f.write(val_str + '\\n')\n                raise RuntimeError('Exiting')\n\n        # Re-project samples with the new projection matrix\n        compressed_samples = self.project_sample(self.init_training_samples, self.projection_matrix)\n        for train_samp, init_samp in zip(self.training_samples, compressed_samples):\n            train_samp[:init_samp.shape[0],...] 
= init_samp\n\n        self.hinge_mask = None\n\n        # Initialize optimizer\n        self.conv_problem = ConvProblem(self.training_samples, self.y, self.filter_reg, self.sample_weights, self.response_activation)\n\n        if optimizer == 'GaussNewtonCG':\n            self.filter_optimizer = ConjugateGradient(self.conv_problem, self.filter, fletcher_reeves=self.params.fletcher_reeves,\n                                                      direction_forget_factor=self.params.direction_forget_factor, debug=(self.params.debug>=1),\n                                                      plotting=(self.params.debug>=3), visdom=self.visdom)\n        elif optimizer == 'GradientDescentL2':\n            self.filter_optimizer = GradientDescentL2(self.conv_problem, self.filter, self.params.optimizer_step_length,\n                                                      self.params.optimizer_momentum, debug=(self.params.debug >= 1),\n                                                      plotting=(self.params.debug>=3), visdom=self.visdom)\n\n        # Transfer losses from previous optimization\n        if self.params.update_projection_matrix:\n            self.filter_optimizer.residuals = self.joint_optimizer.residuals\n            self.filter_optimizer.losses = self.joint_optimizer.losses\n\n        if not self.params.update_projection_matrix:\n            self.filter_optimizer.run(self.params.init_CG_iter)\n\n        # Post optimization\n        self.filter_optimizer.run(self.params.post_init_CG_iter)\n\n        # Free memory\n        del self.init_training_samples\n        if self.params.use_projection_matrix:\n            del self.joint_problem, self.joint_optimizer\n\n\n    def track(self, image, info: dict = None) -> dict:\n        self.debug_info = {}\n\n        self.frame_num += 1\n        self.debug_info['frame_num'] = self.frame_num\n\n        # Convert image\n        im = numpy_to_torch(image)\n        self.im = im    # For debugging only\n\n        # ------- LOCALIZATION ------- #\n\n        # Get sample\n        sample_pos = self.pos.round()\n        sample_scales = self.target_scale * self.params.scale_factors\n        test_x = self.extract_processed_sample(im, self.pos, sample_scales, self.img_sample_sz)\n\n        # Compute scores\n        scores_raw = self.apply_filter(test_x)\n        translation_vec, scale_ind, s, flag = self.localize_target(scores_raw)\n\n        # Update position and scale\n        if flag != 'not_found':\n            if self.use_iou_net:\n                update_scale_flag = self.params.get('update_scale_when_uncertain', True) or flag != 'uncertain'\n                if self.params.get('use_classifier', True):\n                    self.update_state(sample_pos + translation_vec)\n                self.refine_target_box(sample_pos, sample_scales[scale_ind], scale_ind, update_scale_flag)\n            elif self.params.get('use_classifier', True):\n                self.update_state(sample_pos + translation_vec, sample_scales[scale_ind])\n\n        score_map = s[scale_ind, ...]\n        max_score = torch.max(score_map).item()\n        self.debug_info['max_score'] = max_score\n        self.debug_info['flag'] = flag\n\n        if self.visdom is not None:\n            self.visdom.register(score_map, 'heatmap', 2, 'Score Map')\n            self.visdom.register(self.debug_info, 'info_dict', 1, 'Status')\n        elif self.params.debug >= 2:\n            show_tensor(score_map, 5, title='Max score = {:.2f}'.format(max_score))\n\n        # ------- UPDATE ------- #\n\n        # 
Check flags and set learning rate if hard negative\n        update_flag = flag not in ['not_found', 'uncertain']\n        hard_negative = (flag == 'hard_negative')\n        learning_rate = self.params.hard_negative_learning_rate if hard_negative else None\n\n        if update_flag:\n            # Get train sample\n            train_x = TensorList([x[scale_ind:scale_ind+1, ...] for x in test_x])\n\n            # Create label for sample\n            train_y = self.get_label_function(sample_pos, sample_scales[scale_ind])\n\n            # Update memory\n            self.update_memory(train_x, train_y, learning_rate)\n\n        # Train filter\n        if hard_negative:\n            self.filter_optimizer.run(self.params.hard_negative_CG_iter)\n        elif (self.frame_num-1) % self.params.train_skipping == 0:\n            self.filter_optimizer.run(self.params.CG_iter)\n\n        # Set the pos of the tracker to iounet pos\n        if self.use_iou_net and flag != 'not_found':\n            self.pos = self.pos_iounet.clone()\n\n        # Return new state\n        new_state = torch.cat((self.pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]]))\n\n        out = {'target_bbox': new_state.tolist()}\n        return out\n\n\n    def apply_filter(self, sample_x: TensorList):\n        return operation.conv2d(sample_x, self.filter, mode='same')\n\n    def localize_target(self, scores_raw):\n        # Weighted sum (if multiple features) with interpolation in fourier domain\n        weight = self.fparams.attribute('translation_weight', 1.0)\n        scores_raw = weight * scores_raw\n        sf_weighted = fourier.cfft2(scores_raw) / (scores_raw.size(2) * scores_raw.size(3))\n        for i, (sz, ksz) in enumerate(zip(self.feature_sz, self.kernel_size)):\n            sf_weighted[i] = fourier.shift_fs(sf_weighted[i], math.pi * (1 - torch.Tensor([ksz[0]%2, ksz[1]%2]) / sz))\n\n        scores_fs = fourier.sum_fs(sf_weighted)\n        scores = fourier.sample_fs(scores_fs, self.output_sz)\n\n        if self.output_window is not None and not self.params.get('perform_hn_without_windowing', False):\n            scores *= self.output_window\n\n        if self.params.get('advanced_localization', False):\n            return self.localize_advanced(scores)\n\n        # Get maximum\n        max_score, max_disp = dcf.max2d(scores)\n        _, scale_ind = torch.max(max_score, dim=0)\n        max_disp = max_disp.float().cpu()\n\n        # Convert to displacements in the base scale\n        disp = (max_disp + self.output_sz / 2) % self.output_sz - self.output_sz / 2\n\n        # Compute translation vector and scale change factor\n        translation_vec = disp[scale_ind, ...].view(-1) * (self.img_support_sz / self.output_sz) * self.target_scale\n        translation_vec *= self.params.scale_factors[scale_ind]\n\n        # Shift the score output for visualization purposes\n        if self.params.debug >= 2:\n            sz = scores.shape[-2:]\n            scores = torch.cat([scores[...,sz[0]//2:,:], scores[...,:sz[0]//2,:]], -2)\n            scores = torch.cat([scores[...,:,sz[1]//2:], scores[...,:,:sz[1]//2]], -1)\n\n        return translation_vec, scale_ind, scores, None\n\n    def localize_advanced(self, scores):\n        \"\"\"Run the advanced localization, with hard negative detection and target-not-found handling.\"\"\"\n\n        sz = scores.shape[-2:]\n\n        if self.output_window is not None and self.params.get('perform_hn_without_windowing', False):\n            scores_orig = scores.clone()\n\n            
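# Shift the unwindowed scores so zero displacement is centered, matching the shift applied to the\n            # windowed scores below.\n            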
scores_orig = torch.cat([scores_orig[..., (sz[0] + 1) // 2:, :], scores_orig[..., :(sz[0] + 1) // 2, :]], -2)\n            scores_orig = torch.cat([scores_orig[..., :, (sz[1] + 1) // 2:], scores_orig[..., :, :(sz[1] + 1) // 2]], -1)\n\n            scores *= self.output_window\n\n        # Shift scores back\n        scores = torch.cat([scores[...,(sz[0]+1)//2:,:], scores[...,:(sz[0]+1)//2,:]], -2)\n        scores = torch.cat([scores[...,:,(sz[1]+1)//2:], scores[...,:,:(sz[1]+1)//2]], -1)\n\n        # Find maximum\n        max_score1, max_disp1 = dcf.max2d(scores)\n        _, scale_ind = torch.max(max_score1, dim=0)\n        max_score1 = max_score1[scale_ind]\n        max_disp1 = max_disp1[scale_ind,...].float().cpu().view(-1)\n        target_disp1 = max_disp1 - self.output_sz // 2\n        translation_vec1 = target_disp1 * (self.img_support_sz / self.output_sz) * self.target_scale\n\n        if max_score1.item() < self.params.target_not_found_threshold:\n            return translation_vec1, scale_ind, scores, 'not_found'\n\n        if self.output_window is not None and self.params.get('perform_hn_without_windowing', False):\n            scores = scores_orig\n\n        # Mask out target neighborhood\n        target_neigh_sz = self.params.target_neighborhood_scale * self.target_sz / self.target_scale\n        tneigh_top = max(round(max_disp1[0].item() - target_neigh_sz[0].item() / 2), 0)\n        tneigh_bottom = min(round(max_disp1[0].item() + target_neigh_sz[0].item() / 2 + 1), sz[0])\n        tneigh_left = max(round(max_disp1[1].item() - target_neigh_sz[1].item() / 2), 0)\n        tneigh_right = min(round(max_disp1[1].item() + target_neigh_sz[1].item() / 2 + 1), sz[1])\n        scores_masked = scores[scale_ind:scale_ind+1,...].clone()\n        scores_masked[...,tneigh_top:tneigh_bottom,tneigh_left:tneigh_right] = 0\n\n        # Find new maximum\n        max_score2, max_disp2 = dcf.max2d(scores_masked)\n        max_disp2 = max_disp2.float().cpu().view(-1)\n        target_disp2 = max_disp2 - self.output_sz // 2\n        translation_vec2 = target_disp2 * (self.img_support_sz / self.output_sz) * self.target_scale\n\n        # Handle the different cases\n        if max_score2 > self.params.distractor_threshold * max_score1:\n            disp_norm1 = torch.sqrt(torch.sum(target_disp1**2))\n            disp_norm2 = torch.sqrt(torch.sum(target_disp2**2))\n            disp_threshold = self.params.dispalcement_scale * math.sqrt(sz[0] * sz[1]) / 2\n\n            if disp_norm2 > disp_threshold and disp_norm1 < disp_threshold:\n                return translation_vec1, scale_ind, scores, 'hard_negative'\n            if disp_norm2 < disp_threshold and disp_norm1 > disp_threshold:\n                return translation_vec2, scale_ind, scores, 'hard_negative'\n            if disp_norm2 > disp_threshold and disp_norm1 > disp_threshold:\n                return translation_vec1, scale_ind, scores, 'uncertain'\n\n            # If also the distractor is close, return with highest score\n            return translation_vec1, scale_ind, scores, 'uncertain'\n\n        if max_score2 > self.params.hard_negative_threshold * max_score1 and max_score2 > self.params.target_not_found_threshold:\n            return translation_vec1, scale_ind, scores, 'hard_negative'\n\n        return translation_vec1, scale_ind, scores, None\n\n\n    def extract_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor):\n        return self.params.features.extract(im, pos, scales, sz)[0]\n\n    def get_iou_features(self):\n 
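       # The IoU-Net input features are exposed as a unique attribute of the feature extractor.\n 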
       return self.params.features.get_unique_attribute('iounet_features')\n\n    def get_iou_backbone_features(self):\n        return self.params.features.get_unique_attribute('iounet_backbone_features')\n\n    def extract_processed_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor) -> (TensorList, TensorList):\n        x = self.extract_sample(im, pos, scales, sz)\n        return self.preprocess_sample(self.project_sample(x))\n\n    def preprocess_sample(self, x: TensorList) -> (TensorList, TensorList):\n        if self.params.get('_feature_window', False):\n            x = x * self.feature_window\n        return x\n\n    def project_sample(self, x: TensorList, proj_matrix = None):\n        # Apply projection matrix\n        if proj_matrix is None:\n            proj_matrix = self.projection_matrix\n        return operation.conv2d(x, proj_matrix).apply(self.projection_activation)\n\n    def init_learning(self):\n        # Get window function\n        self.feature_window = TensorList([dcf.hann2d(sz).to(self.params.device) for sz in self.feature_sz])\n\n        # Filter regularization\n        self.filter_reg = self.fparams.attribute('filter_reg')\n\n        # Activation function after the projection matrix (phi_1 in the paper)\n        projection_activation = self.params.get('projection_activation', 'none')\n        if isinstance(projection_activation, tuple):\n            projection_activation, act_param = projection_activation\n\n        if projection_activation == 'none':\n            self.projection_activation = lambda x: x\n        elif projection_activation == 'relu':\n            self.projection_activation = torch.nn.ReLU(inplace=True)\n        elif projection_activation == 'elu':\n            self.projection_activation = torch.nn.ELU(inplace=True)\n        elif projection_activation == 'mlu':\n            self.projection_activation = lambda x: F.elu(F.leaky_relu(x, 1 / act_param), act_param)\n        else:\n            raise ValueError('Unknown activation')\n\n        # Activation function after the output scores (phi_2 in the paper)\n        response_activation = self.params.get('response_activation', 'none')\n        if isinstance(response_activation, tuple):\n            response_activation, act_param = response_activation\n\n        if response_activation == 'none':\n            self.response_activation = lambda x: x\n        elif response_activation == 'relu':\n            self.response_activation = torch.nn.ReLU(inplace=True)\n        elif response_activation == 'elu':\n            self.response_activation = torch.nn.ELU(inplace=True)\n        elif response_activation == 'mlu':\n            self.response_activation = lambda x: F.elu(F.leaky_relu(x, 1 / act_param), act_param)\n        else:\n            raise ValueError('Unknown activation')\n\n\n    def generate_init_samples(self, im: torch.Tensor) -> TensorList:\n        \"\"\"Generate augmented initial samples.\"\"\"\n\n        # Compute augmentation size\n        aug_expansion_factor = self.params.get('augmentation_expansion_factor', None)\n        aug_expansion_sz = self.img_sample_sz.clone()\n        aug_output_sz = None\n        if aug_expansion_factor is not None and aug_expansion_factor != 1:\n            aug_expansion_sz = (self.img_sample_sz * aug_expansion_factor).long()\n            aug_expansion_sz += (aug_expansion_sz - self.img_sample_sz.long()) % 2\n            aug_expansion_sz = aug_expansion_sz.float()\n            aug_output_sz = self.img_sample_sz.long().tolist()\n\n        # Random 
shift operator\n        get_rand_shift = lambda: None\n        random_shift_factor = self.params.get('random_shift_factor', 0)\n        if random_shift_factor > 0:\n            get_rand_shift = lambda: ((torch.rand(2) - 0.5) * self.img_sample_sz * random_shift_factor).long().tolist()\n\n        # Create transformations\n        self.transforms = [augmentation.Identity(aug_output_sz)]\n        if 'shift' in self.params.augmentation:\n            self.transforms.extend([augmentation.Translation(shift, aug_output_sz) for shift in self.params.augmentation['shift']])\n        if 'relativeshift' in self.params.augmentation:\n            get_absolute = lambda shift: (torch.Tensor(shift) * self.img_sample_sz/2).long().tolist()\n            self.transforms.extend([augmentation.Translation(get_absolute(shift), aug_output_sz) for shift in self.params.augmentation['relativeshift']])\n        if 'fliplr' in self.params.augmentation and self.params.augmentation['fliplr']:\n            self.transforms.append(augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))\n        if 'blur' in self.params.augmentation:\n            self.transforms.extend([augmentation.Blur(sigma, aug_output_sz, get_rand_shift()) for sigma in self.params.augmentation['blur']])\n        if 'scale' in self.params.augmentation:\n            self.transforms.extend([augmentation.Scale(scale_factor, aug_output_sz, get_rand_shift()) for scale_factor in self.params.augmentation['scale']])\n        if 'rotate' in self.params.augmentation:\n            self.transforms.extend([augmentation.Rotate(angle, aug_output_sz, get_rand_shift()) for angle in self.params.augmentation['rotate']])\n\n        # Generate initial samples\n        init_samples = self.params.features.extract_transformed(im, self.pos, self.target_scale, aug_expansion_sz, self.transforms)\n\n        # Remove augmented samples for features that do not use augmentation\n        for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):\n            if not use_aug:\n                init_samples[i] = init_samples[i][0:1, ...]\n\n        # Add dropout samples\n        if 'dropout' in self.params.augmentation:\n            num, prob = self.params.augmentation['dropout']\n            self.transforms.extend(self.transforms[:1]*num)\n            for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):\n                if use_aug:\n                    init_samples[i] = torch.cat([init_samples[i], F.dropout2d(init_samples[i][0:1,...].expand(num,-1,-1,-1), p=prob, training=True)])\n\n        return init_samples\n\n\n    def init_projection_matrix(self, x):\n        # Set if using projection matrix\n        self.params.use_projection_matrix = self.params.get('use_projection_matrix', True)\n\n        if self.params.use_projection_matrix:\n            self.compressed_dim = self.fparams.attribute('compressed_dim', None)\n\n            proj_init_method = self.params.get('proj_init_method', 'pca')\n            if proj_init_method == 'pca':\n                x_mat = TensorList([e.permute(1, 0, 2, 3).reshape(e.shape[1], -1).clone() for e in x])\n                x_mat -= x_mat.mean(dim=1, keepdim=True)\n                cov_x = x_mat @ x_mat.t()\n                self.projection_matrix = TensorList(\n                    [None if cdim is None else torch.svd(C)[0][:, :cdim].t().unsqueeze(-1).unsqueeze(-1).clone() for C, cdim in\n                     zip(cov_x, self.compressed_dim)])\n            elif proj_init_method == 'randn':\n                self.projection_matrix = 
TensorList(\n                    [None if cdim is None else ex.new_zeros(cdim,ex.shape[1],1,1).normal_(0,1/math.sqrt(ex.shape[1])) for ex, cdim in\n                     zip(x, self.compressed_dim)])\n        else:\n            self.compressed_dim = x.size(1)\n            self.projection_matrix = TensorList([None]*len(x))\n\n    def init_label_function(self, train_x):\n        # Allocate label function\n        self.y = TensorList([x.new_zeros(self.params.sample_memory_size, 1, x.shape[2], x.shape[3]) for x in train_x])\n\n        # Output sigma factor\n        output_sigma_factor = self.fparams.attribute('output_sigma_factor')\n        self.sigma = (self.feature_sz / self.img_support_sz * self.base_target_sz).prod().sqrt() * output_sigma_factor * torch.ones(2)\n\n        # Center pos in normalized coords\n        target_center_norm = (self.pos - self.pos.round()) / (self.target_scale * self.img_support_sz)\n\n        # Generate label functions\n        for y, sig, sz, ksz, x in zip(self.y, self.sigma, self.feature_sz, self.kernel_size, train_x):\n            center_pos = sz * target_center_norm + 0.5 * torch.Tensor([(ksz[0] + 1) % 2, (ksz[1] + 1) % 2])\n            for i, T in enumerate(self.transforms[:x.shape[0]]):\n                sample_center = center_pos + torch.Tensor(T.shift) / self.img_support_sz * sz\n                y[i, 0, ...] = dcf.label_function_spatial(sz, sig, sample_center)\n\n        # Return only the ones to use for initial training\n        return TensorList([y[:x.shape[0], ...] for y, x in zip(self.y, train_x)])\n\n\n    def init_memory(self, train_x):\n        # Initialize first-frame training samples\n        self.num_init_samples = train_x.size(0)\n        self.init_sample_weights = TensorList([x.new_ones(1) / x.shape[0] for x in train_x])\n        self.init_training_samples = train_x\n\n        # Sample counters and weights\n        self.num_stored_samples = self.num_init_samples.copy()\n        self.previous_replace_ind = [None] * len(self.num_stored_samples)\n        self.sample_weights = TensorList([x.new_zeros(self.params.sample_memory_size) for x in train_x])\n        for sw, init_sw, num in zip(self.sample_weights, self.init_sample_weights, self.num_init_samples):\n            sw[:num] = init_sw\n\n        # Initialize memory\n        self.training_samples = TensorList(\n            [x.new_zeros(self.params.sample_memory_size, cdim, x.shape[2], x.shape[3]) for x, cdim in\n             zip(train_x, self.compressed_dim)])\n\n    def update_memory(self, sample_x: TensorList, sample_y: TensorList, learning_rate = None):\n        replace_ind = self.update_sample_weights(self.sample_weights, self.previous_replace_ind, self.num_stored_samples, self.num_init_samples, self.fparams, learning_rate)\n        self.previous_replace_ind = replace_ind\n        for train_samp, x, ind in zip(self.training_samples, sample_x, replace_ind):\n            train_samp[ind:ind+1,...] = x\n        for y_memory, y, ind in zip(self.y, sample_y, replace_ind):\n            y_memory[ind:ind+1,...] = y\n        if self.hinge_mask is not None:\n            for m, y, ind in zip(self.hinge_mask, sample_y, replace_ind):\n                m[ind:ind+1,...] 
= (y >= self.params.hinge_threshold).float()\n        self.num_stored_samples += 1\n\n\n    def update_sample_weights(self, sample_weights, previous_replace_ind, num_stored_samples, num_init_samples, fparams, learning_rate = None):\n        # Update weights and get index to replace in memory\n        replace_ind = []\n        for sw, prev_ind, num_samp, num_init, fpar in zip(sample_weights, previous_replace_ind, num_stored_samples, num_init_samples, fparams):\n            lr = learning_rate\n            if lr is None:\n                lr = fpar.learning_rate\n\n            init_samp_weight = getattr(fpar, 'init_samples_minimum_weight', None)\n            if init_samp_weight == 0:\n                init_samp_weight = None\n            s_ind = 0 if init_samp_weight is None else num_init\n\n            if num_samp == 0 or lr == 1:\n                sw[:] = 0\n                sw[0] = 1\n                r_ind = 0\n            else:\n                # Get index to replace\n                _, r_ind = torch.min(sw[s_ind:], 0)\n                r_ind = r_ind.item() + s_ind\n\n                # Update weights\n                if prev_ind is None:\n                    sw /= 1 - lr\n                    sw[r_ind] = lr\n                else:\n                    sw[r_ind] = sw[prev_ind] / (1 - lr)\n\n            sw /= sw.sum()\n            if init_samp_weight is not None and sw[:num_init].sum() < init_samp_weight:\n                sw /= init_samp_weight + sw[num_init:].sum()\n                sw[:num_init] = init_samp_weight / num_init\n\n            replace_ind.append(r_ind)\n\n        return replace_ind\n\n    def get_label_function(self, sample_pos, sample_scale):\n        # Generate label function\n        train_y = TensorList()\n        target_center_norm = (self.pos - sample_pos) / (sample_scale * self.img_support_sz)\n        for sig, sz, ksz in zip(self.sigma, self.feature_sz, self.kernel_size):\n            center = sz * target_center_norm + 0.5 * torch.Tensor([(ksz[0] + 1) % 2, (ksz[1] + 1) % 2])\n            train_y.append(dcf.label_function_spatial(sz, sig, center))\n        return train_y\n\n    def update_state(self, new_pos, new_scale = None):\n        # Update scale\n        if new_scale is not None:\n            self.target_scale = new_scale.clamp(self.min_scale_factor, self.max_scale_factor)\n            self.target_sz = self.base_target_sz * self.target_scale\n\n        # Update pos\n        inside_ratio = 0.2\n        inside_offset = (inside_ratio - 0.5) * self.target_sz\n        self.pos = torch.max(torch.min(new_pos, self.image_sz - inside_offset), inside_offset)\n\n    def get_iounet_box(self, pos, sz, sample_pos, sample_scale):\n        \"\"\"All inputs in original image coordinates\"\"\"\n        box_center = (pos - sample_pos) / sample_scale + (self.iou_img_sample_sz - 1) / 2\n        box_sz = sz / sample_scale\n        target_ul = box_center - (box_sz - 1) / 2\n        return torch.cat([target_ul.flip((0,)), box_sz.flip((0,))])\n\n    def init_iou_net(self):\n        # Setup IoU net\n        self.iou_predictor = self.params.features.get_unique_attribute('iou_predictor')\n        for p in self.iou_predictor.parameters():\n            p.requires_grad = False\n\n        # Get target boxes for the different augmentations\n        self.iou_target_box = self.get_iounet_box(self.pos, self.target_sz, self.pos.round(), self.target_scale)\n        target_boxes = TensorList()\n        if self.params.iounet_augmentation:\n            for T in self.transforms:\n                if not 
isinstance(T, (augmentation.Identity, augmentation.Translation, augmentation.FlipHorizontal, augmentation.FlipVertical, augmentation.Blur)):\n                    break\n                target_boxes.append(self.iou_target_box + torch.Tensor([T.shift[1], T.shift[0], 0, 0]))\n        else:\n            target_boxes.append(self.iou_target_box.clone())\n        target_boxes = torch.cat(target_boxes.view(1,4), 0).to(self.params.device)\n\n        # Get iou features\n        iou_backbone_features = self.get_iou_backbone_features()\n\n        # Remove other augmentations such as rotation\n        iou_backbone_features = TensorList([x[:target_boxes.shape[0],...] for x in iou_backbone_features])\n\n        # Extract target feat\n        with torch.no_grad():\n            target_feat = self.iou_predictor.get_modulation(iou_backbone_features, target_boxes)\n        self.target_feat = TensorList([x.detach().mean(0) for x in target_feat])\n\n        if self.params.get('iounet_not_use_reference', False):\n            self.target_feat = TensorList([torch.full_like(tf, tf.norm() / tf.numel()) for tf in self.target_feat])\n\n\n    def refine_target_box(self, sample_pos, sample_scale, scale_ind, update_scale = True):\n        # Initial box for refinement\n        init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos, sample_scale)\n\n        # Extract features from the relevant scale\n        iou_features = self.get_iou_features()\n        iou_features = TensorList([x[scale_ind:scale_ind+1,...] for x in iou_features])\n\n        init_boxes = init_box.view(1,4).clone()\n        if self.params.num_init_random_boxes > 0:\n            # Get random initial boxes\n            square_box_sz = init_box[2:].prod().sqrt()\n            rand_factor = square_box_sz * torch.cat([self.params.box_jitter_pos * torch.ones(2), self.params.box_jitter_sz * torch.ones(2)])\n            minimal_edge_size = init_box[2:].min()/3\n            rand_bb = (torch.rand(self.params.num_init_random_boxes, 4) - 0.5) * rand_factor\n            new_sz = (init_box[2:] + rand_bb[:,2:]).clamp(minimal_edge_size)\n            new_center = (init_box[:2] + init_box[2:]/2) + rand_bb[:,:2]\n            init_boxes = torch.cat([new_center - new_sz/2, new_sz], 1)\n            init_boxes = torch.cat([init_box.view(1,4), init_boxes])\n\n        # Refine boxes by maximizing iou\n        output_boxes, output_iou = self.optimize_boxes(iou_features, init_boxes)\n\n        # Remove weird boxes with extreme aspect ratios\n        output_boxes[:, 2:].clamp_(1)\n        aspect_ratio = output_boxes[:,2] / output_boxes[:,3]\n        keep_ind = (aspect_ratio < self.params.maximal_aspect_ratio) * (aspect_ratio > 1/self.params.maximal_aspect_ratio)\n        output_boxes = output_boxes[keep_ind,:]\n        output_iou = output_iou[keep_ind]\n\n        # If no box found\n        if output_boxes.shape[0] == 0:\n            return\n\n        # Take average of top k boxes\n        k = self.params.get('iounet_k', 5)\n        topk = min(k, output_boxes.shape[0])\n        _, inds = torch.topk(output_iou, topk)\n        predicted_box = output_boxes[inds, :].mean(0)\n        predicted_iou = output_iou.view(-1, 1)[inds, :].mean(0)\n\n        # Update position\n        new_pos = predicted_box[:2] + predicted_box[2:]/2 - (self.iou_img_sample_sz - 1) / 2\n        new_pos = new_pos.flip((0,)) * sample_scale + sample_pos\n        new_target_sz = predicted_box[2:].flip((0,)) * sample_scale\n        new_scale = torch.sqrt(new_target_sz.prod() / 
self.base_target_sz.prod())\n\n        self.pos_iounet = new_pos.clone()\n\n        if self.params.get('use_iounet_pos_for_learning', True):\n            self.pos = new_pos.clone()\n\n        self.target_sz = new_target_sz\n\n        if update_scale:\n            self.target_scale = new_scale\n\n    def optimize_boxes(self, iou_features, init_boxes):\n        # Optimize iounet boxes\n        output_boxes = init_boxes.view(1, -1, 4).to(self.params.device)\n        step_length = self.params.box_refinement_step_length\n        init_step_length = self.params.box_refinement_step_length\n        if isinstance(step_length, (tuple, list)):\n            init_step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]]).to(\n                self.params.device).view(1, 1, 4)\n        box_refinement_space = self.params.get('box_refinement_space', 'default')\n\n        step_length = init_step_length * output_boxes.new_ones(1, output_boxes.shape[1], 1)\n        outputs_prev = -99999999 * output_boxes.new_ones(1, output_boxes.shape[1])\n        step = torch.zeros_like(output_boxes)\n\n        if box_refinement_space == 'default':\n            # Optimization using bounding box space used in original IoUNet\n            for i_ in range(self.params.box_refinement_iter):\n                # forward pass\n                bb_init = output_boxes.clone().detach()\n                bb_init.requires_grad = True\n\n                outputs = self.iou_predictor.predict_iou(self.target_feat, iou_features, bb_init)\n\n                if isinstance(outputs, (list, tuple)):\n                    outputs = outputs[0]\n\n                outputs.backward(gradient=torch.ones_like(outputs))\n\n                # Update mask and step length\n                update_mask = (outputs.detach() > outputs_prev) | (self.params.box_refinement_step_decay >= 1)\n                update_mask_float = update_mask.view(1, -1, 1).float()\n                step_length[~update_mask, :] *= self.params.box_refinement_step_decay\n                outputs_prev = outputs.detach().clone()\n\n                # Update proposal\n                step = update_mask_float * step_length * bb_init.grad * bb_init[:, :, 2:].repeat(1, 1, 2) - (\n                            1.0 - update_mask_float) * step\n                output_boxes = bb_init + step\n                output_boxes.detach_()\n\n        elif box_refinement_space == 'relative':\n            # Optimization using relative bounding box space\n            sz_norm = output_boxes[:, :1, 2:].clone()\n            output_boxes_rel = bbutils.rect_to_rel(output_boxes, sz_norm)\n            for i_ in range(self.params.box_refinement_iter):\n                # forward pass\n                bb_init_rel = output_boxes_rel.clone().detach()\n                bb_init_rel.requires_grad = True\n\n                bb_init = bbutils.rel_to_rect(bb_init_rel, sz_norm)\n                outputs = self.iou_predictor.predict_iou(self.target_feat, iou_features, bb_init)\n\n                if isinstance(outputs, (list, tuple)):\n                    outputs = outputs[0]\n\n                outputs.backward(gradient=torch.ones_like(outputs))\n\n                # Update mask and step length\n                update_mask = (outputs.detach() > outputs_prev) | (self.params.box_refinement_step_decay >= 1)\n                update_mask_float = update_mask.view(1, -1, 1).float()\n                step_length[~update_mask, :] *= self.params.box_refinement_step_decay\n                outputs_prev = 
outputs.detach().clone()\n\n                # Update proposal\n                step = update_mask_float * step_length * bb_init_rel.grad - (1.0 - update_mask_float) * step\n                output_boxes_rel = bb_init_rel + step\n                output_boxes_rel.detach_()\n\n                # for s in outputs.view(-1):\n                #     print('{:.2f}  '.format(s.item()), end='')\n                # print('')\n            # print('')\n\n            output_boxes = bbutils.rel_to_rect(output_boxes_rel, sz_norm)\n\n        else:\n            raise ValueError('Unknown box_refinement_space {}'.format(box_refinement_space))\n\n        return output_boxes.view(-1, 4).cpu(), outputs.detach().view(-1).cpu()\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/tracker/atom/optim.py",
    "content": "import torch\nfrom pytracking import optimization, TensorList, operation\nimport math\n\n\nclass FactorizedConvProblem(optimization.L2Problem):\n    def __init__(self, training_samples: TensorList, y:TensorList, filter_reg: torch.Tensor, projection_reg, params, sample_weights: TensorList,\n                 projection_activation, response_activation):\n        self.training_samples = training_samples\n        self.y = y\n        self.filter_reg = filter_reg\n        self.sample_weights = sample_weights\n        self.params = params\n        self.projection_reg = projection_reg\n        self.projection_activation = projection_activation\n        self.response_activation = response_activation\n\n        self.diag_M = self.filter_reg.concat(projection_reg)\n\n    def __call__(self, x: TensorList):\n        \"\"\"\n        Compute residuals\n        :param x: [filters, projection_matrices]\n        :return: [data_terms, filter_regularizations, proj_mat_regularizations]\n        \"\"\"\n        filter = x[:len(x)//2]  # w2 in paper\n        P = x[len(x)//2:]       # w1 in paper\n\n        # Do first convolution\n        compressed_samples = operation.conv1x1(self.training_samples, P).apply(self.projection_activation)\n\n        # Do second convolution\n        residuals = operation.conv2d(compressed_samples, filter, mode='same').apply(self.response_activation)\n\n        # Compute data residuals\n        residuals = residuals - self.y\n\n        residuals = self.sample_weights.sqrt().view(-1, 1, 1, 1) * residuals\n\n        # Add regularization for the filter\n        residuals.extend(self.filter_reg.apply(math.sqrt) * filter)\n\n        # Add regularization for projection matrix\n        residuals.extend(self.projection_reg.apply(math.sqrt) * P)\n\n        return residuals\n\n\n    def ip_input(self, a: TensorList, b: TensorList):\n        num = len(a) // 2       # Number of filters\n        a_filter = a[:num]\n        b_filter = b[:num]\n        a_P = a[num:]\n        b_P = b[num:]\n\n        # Filter inner product\n        # ip_out = a_filter.reshape(-1) @ b_filter.reshape(-1)\n        ip_out = operation.conv2d(a_filter, b_filter).view(-1)\n\n        # Add projection matrix part\n        # ip_out += a_P.reshape(-1) @ b_P.reshape(-1)\n        ip_out += operation.conv2d(a_P.view(1,-1,1,1), b_P.view(1,-1,1,1)).view(-1)\n\n        # Have independent inner products for each filter\n        return ip_out.concat(ip_out.clone())\n\n    def M1(self, x: TensorList):\n        return x / self.diag_M\n\n\nclass ConvProblem(optimization.L2Problem):\n    def __init__(self, training_samples: TensorList, y:TensorList, filter_reg: torch.Tensor, sample_weights: TensorList, response_activation):\n        self.training_samples = training_samples\n        self.y = y\n        self.filter_reg = filter_reg\n        self.sample_weights = sample_weights\n        self.response_activation = response_activation\n\n    def __call__(self, x: TensorList):\n        \"\"\"\n        Compute residuals\n        :param x: [filters]\n        :return: [data_terms, filter_regularizations]\n        \"\"\"\n        # Do convolution and compute residuals\n        residuals = operation.conv2d(self.training_samples, x, mode='same').apply(self.response_activation)\n        residuals = residuals - self.y\n\n        residuals = self.sample_weights.sqrt().view(-1, 1, 1, 1) * residuals\n\n        # Add regularization for the filter\n        residuals.extend(self.filter_reg.apply(math.sqrt) * x)\n\n        return 
residuals\n\n    def ip_input(self, a: TensorList, b: TensorList):\n        # return a.reshape(-1) @ b.reshape(-1)\n        # return (a * b).sum()\n        return operation.conv2d(a, b).view(-1)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/tracker/base/__init__.py",
    "content": "from .basetracker import BaseTracker"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/tracker/base/basetracker.py",
    "content": "from collections import OrderedDict\n\nclass BaseTracker:\n    \"\"\"Base class for all trackers.\"\"\"\n\n    def __init__(self, params):\n        self.params = params\n        self.visdom = None\n\n\n    def initialize(self, image, info: dict) -> dict:\n        \"\"\"Overload this function in your tracker. This should initialize the model.\"\"\"\n        raise NotImplementedError\n\n\n    def track(self, image, info: dict = None) -> dict:\n        \"\"\"Overload this function in your tracker. This should track in the frame and update the model.\"\"\"\n        raise NotImplementedError\n\n\n    def visdom_draw_tracking(self, image, box, segmentation=None):\n        if isinstance(box, OrderedDict):\n            box = [v for k, v in box.items()]\n        else:\n            box = (box,)\n        if segmentation is None:\n            self.visdom.register((image, *box), 'Tracking', 1, 'Tracking')\n        else:\n            self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking')"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/tracker/dimp/__init__.py",
    "content": "from .dimp import DiMP\n\ndef get_tracker_class():\n    return DiMP"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/tracker/dimp/dimp.py",
    "content": "from pytracking.tracker.base import BaseTracker\nimport torch\nimport torch.nn.functional as F\nimport math\nimport time\nfrom pytracking import dcf, TensorList\nfrom pytracking.features.preprocessing import numpy_to_torch\nfrom pytracking.utils.plotting import show_tensor, plot_graph\nfrom pytracking.features.preprocessing import sample_patch_multiscale, sample_patch_transformed\nfrom pytracking.features import augmentation\nimport ltr.data.bounding_box_utils as bbutils\nfrom ltr.models.target_classifier.initializer import FilterInitializerZero\nfrom ltr.models.layers import activation\n\n\nclass DiMP(BaseTracker):\n\n    multiobj_mode = 'parallel'\n\n    def initialize_features(self):\n        if not getattr(self, 'features_initialized', False):\n            self.params.net.initialize()\n        self.features_initialized = True\n\n    def initialize(self, image, info: dict) -> dict:\n        # Initialize some stuff\n        self.frame_num = 1\n        if not self.params.has('device'):\n            self.params.device = 'cuda' if self.params.use_gpu else 'cpu'\n\n        # Initialize network\n        self.initialize_features()\n\n        # The DiMP network\n        self.net = self.params.net\n\n        # Time initialization\n        tic = time.time()\n\n        # Convert image\n        im = numpy_to_torch(image)\n\n        # Get target position and size\n        state = info['init_bbox']\n        self.pos = torch.Tensor([state[1] + (state[3] - 1)/2, state[0] + (state[2] - 1)/2])\n        self.target_sz = torch.Tensor([state[3], state[2]])\n\n        # Get object id\n        self.object_id = info.get('object_ids', [None])[0]\n        self.id_str = '' if self.object_id is None else ' {}'.format(self.object_id)\n\n        # Set sizes\n        self.image_sz = torch.Tensor([im.shape[2], im.shape[3]])\n        sz = self.params.image_sample_size\n        sz = torch.Tensor([sz, sz] if isinstance(sz, int) else sz)\n        if self.params.get('use_image_aspect_ratio', False):\n            sz = self.image_sz * sz.prod().sqrt() / self.image_sz.prod().sqrt()\n            stride = self.params.get('feature_stride', 32)\n            sz = torch.round(sz / stride) * stride\n        self.img_sample_sz = sz\n        self.img_support_sz = self.img_sample_sz\n\n        # Set search area\n        search_area = torch.prod(self.target_sz * self.params.search_area_scale).item()\n        self.target_scale =  math.sqrt(search_area) / self.img_sample_sz.prod().sqrt()\n\n        # Target size in base scale\n        self.base_target_sz = self.target_sz / self.target_scale\n\n        # Setup scale factors\n        if not self.params.has('scale_factors'):\n            self.params.scale_factors = torch.ones(1)\n        elif isinstance(self.params.scale_factors, (list, tuple)):\n            self.params.scale_factors = torch.Tensor(self.params.scale_factors)\n\n        # Setup scale bounds\n        self.min_scale_factor = torch.max(10 / self.base_target_sz)\n        self.max_scale_factor = torch.min(self.image_sz / self.base_target_sz)\n\n        # Extract and transform sample\n        init_backbone_feat = self.generate_init_samples(im)\n\n        # Initialize classifier\n        self.init_classifier(init_backbone_feat)\n\n        # Initialize IoUNet\n        if self.params.get('use_iou_net', True):\n            self.init_iou_net(init_backbone_feat)\n\n        out = {'time': time.time() - tic}\n        return out\n\n\n    def track(self, image, info: dict = None) -> dict:\n        self.debug_info = {}\n\n     
   self.frame_num += 1\n        self.debug_info['frame_num'] = self.frame_num\n\n        # Convert image\n        im = numpy_to_torch(image)\n\n        # ------- LOCALIZATION ------- #\n\n        # Extract backbone features\n        backbone_feat, sample_coords, im_patches = self.extract_backbone_features(im, self.get_centered_sample_pos(),\n                                                                      self.target_scale * self.params.scale_factors,\n                                                                      self.img_sample_sz)\n        # Extract classification features\n        test_x = self.get_classification_features(backbone_feat)\n\n        # Location of sample\n        sample_pos, sample_scales = self.get_sample_location(sample_coords)\n\n        # Compute classification scores\n        scores_raw = self.classify_target(test_x)\n\n        # Localize the target\n        translation_vec, scale_ind, s, flag = self.localize_target(scores_raw, sample_pos, sample_scales)\n        new_pos = sample_pos[scale_ind,:] + translation_vec\n\n        # Update position and scale\n        if flag != 'not_found':\n            if self.params.get('use_iou_net', True):\n                update_scale_flag = self.params.get('update_scale_when_uncertain', True) or flag != 'uncertain'\n                if self.params.get('use_classifier', True):\n                    self.update_state(new_pos)\n                self.refine_target_box(backbone_feat, sample_pos[scale_ind,:], sample_scales[scale_ind], scale_ind, update_scale_flag)\n            elif self.params.get('use_classifier', True):\n                self.update_state(new_pos, sample_scales[scale_ind])\n\n\n        # ------- UPDATE ------- #\n\n        update_flag = flag not in ['not_found', 'uncertain']\n        hard_negative = (flag == 'hard_negative')\n        learning_rate = self.params.get('hard_negative_learning_rate', None) if hard_negative else None\n\n        if update_flag and self.params.get('update_classifier', False):\n            # Get train sample\n            train_x = test_x[scale_ind:scale_ind+1, ...]\n\n            # Create target_box and label for spatial sample\n            target_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos[scale_ind,:], sample_scales[scale_ind])\n\n            # Update the classifier model\n            self.update_classifier(train_x, target_box, learning_rate, s[scale_ind,...])\n\n        # Set the pos of the tracker to iounet pos\n        if self.params.get('use_iou_net', True) and flag != 'not_found' and hasattr(self, 'pos_iounet'):\n            self.pos = self.pos_iounet.clone()\n\n        score_map = s[scale_ind, ...]\n        max_score = torch.max(score_map).item()\n\n        # Visualize and set debug info\n        self.search_area_box = torch.cat((sample_coords[scale_ind,[1,0]], sample_coords[scale_ind,[3,2]] - sample_coords[scale_ind,[1,0]] - 1))\n        self.debug_info['flag' + self.id_str] = flag\n        self.debug_info['max_score' + self.id_str] = max_score\n        if self.visdom is not None:\n            self.visdom.register(score_map, 'heatmap', 2, 'Score Map' + self.id_str)\n            self.visdom.register(self.debug_info, 'info_dict', 1, 'Status')\n        elif self.params.debug >= 2:\n            show_tensor(score_map, 5, title='Max score = {:.2f}'.format(max_score))\n\n        # Compute output bounding box\n        new_state = torch.cat((self.pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]]))\n\n        if self.params.get('output_not_found_box', 
False) and flag == 'not_found':\n            output_state = [-1, -1, -1, -1]\n        else:\n            output_state = new_state.tolist()\n        # 2020.4.26: also return the DCF center prediction\n        out = {'target_bbox': output_state,\n               'dcf_center': new_pos[[1,0]]}\n        return out\n\n\n    def get_sample_location(self, sample_coord):\n        \"\"\"Get the location of the extracted sample.\"\"\"\n        sample_coord = sample_coord.float()\n        sample_pos = 0.5*(sample_coord[:,:2] + sample_coord[:,2:] - 1)\n        sample_scales = ((sample_coord[:,2:] - sample_coord[:,:2]) / self.img_sample_sz).prod(dim=1).sqrt()\n        return sample_pos, sample_scales\n\n    def get_centered_sample_pos(self):\n        \"\"\"Get the center position for the new sample. Make sure the target is correctly centered.\"\"\"\n        return self.pos + ((self.feature_sz + self.kernel_size) % 2) * self.target_scale * \\\n               self.img_support_sz / (2*self.feature_sz)\n\n    def classify_target(self, sample_x: TensorList):\n        \"\"\"Classify target by applying the DiMP filter.\"\"\"\n        with torch.no_grad():\n            scores = self.net.classifier.classify(self.target_filter, sample_x)\n        return scores\n\n    def localize_target(self, scores, sample_pos, sample_scales):\n        \"\"\"Run the target localization.\"\"\"\n\n        scores = scores.squeeze(1)\n\n        preprocess_method = self.params.get('score_preprocess', 'none')\n        if preprocess_method == 'none':\n            pass\n        elif preprocess_method == 'exp':\n            scores = scores.exp()\n        elif preprocess_method == 'softmax':\n            reg_val = getattr(self.net.classifier.filter_optimizer, 'softmax_reg', None)\n            scores_view = scores.view(scores.shape[0], -1)\n            scores_softmax = activation.softmax_reg(scores_view, dim=-1, reg=reg_val)\n            scores = scores_softmax.view(scores.shape)\n        else:\n            raise Exception('Unknown score_preprocess in params.')\n\n        score_filter_ksz = self.params.get('score_filter_ksz', 1)\n        if score_filter_ksz > 1:\n            assert score_filter_ksz % 2 == 1\n            kernel = scores.new_ones(1,1,score_filter_ksz,score_filter_ksz)\n            scores = F.conv2d(scores.view(-1,1,*scores.shape[-2:]), kernel, padding=score_filter_ksz//2).view(scores.shape)\n\n        if self.params.get('advanced_localization', False):\n            return self.localize_advanced(scores, sample_pos, sample_scales)\n\n        # Get maximum\n        score_sz = torch.Tensor(list(scores.shape[-2:]))\n        score_center = (score_sz - 1)/2\n        max_score, max_disp = dcf.max2d(scores)\n        _, scale_ind = torch.max(max_score, dim=0)\n        max_disp = max_disp[scale_ind,...].float().cpu().view(-1)\n        target_disp = max_disp - score_center\n\n        # Compute translation vector and scale change factor\n        output_sz = score_sz - (self.kernel_size + 1) % 2\n        translation_vec = target_disp * (self.img_support_sz / output_sz) * sample_scales[scale_ind]\n\n        return translation_vec, scale_ind, scores, None\n\n\n    def localize_advanced(self, scores, sample_pos, sample_scales):\n        \"\"\"Run the target advanced localization (as in ATOM).\"\"\"\n\n        sz = scores.shape[-2:]\n        score_sz = torch.Tensor(list(sz))\n        output_sz = score_sz - (self.kernel_size + 1) % 2\n        score_center = (score_sz - 1)/2\n\n        scores_hn = scores\n        if self.output_window is not None and 
self.params.get('perform_hn_without_windowing', False):\n            scores_hn = scores.clone()\n            scores *= self.output_window\n\n        max_score1, max_disp1 = dcf.max2d(scores)\n        _, scale_ind = torch.max(max_score1, dim=0)\n        sample_scale = sample_scales[scale_ind]\n        max_score1 = max_score1[scale_ind]\n        max_disp1 = max_disp1[scale_ind,...].float().cpu().view(-1)\n        target_disp1 = max_disp1 - score_center\n        translation_vec1 = target_disp1 * (self.img_support_sz / output_sz) * sample_scale\n\n        if max_score1.item() < self.params.target_not_found_threshold:\n            return translation_vec1, scale_ind, scores_hn, 'not_found'\n        if max_score1.item() < self.params.get('uncertain_threshold', -float('inf')):\n            return translation_vec1, scale_ind, scores_hn, 'uncertain'\n        if max_score1.item() < self.params.get('hard_sample_threshold', -float('inf')):\n            return translation_vec1, scale_ind, scores_hn, 'hard_negative'\n\n        # Mask out target neighborhood\n        target_neigh_sz = self.params.target_neighborhood_scale * (self.target_sz / sample_scale) * (output_sz / self.img_support_sz)\n\n        tneigh_top = max(round(max_disp1[0].item() - target_neigh_sz[0].item() / 2), 0)\n        tneigh_bottom = min(round(max_disp1[0].item() + target_neigh_sz[0].item() / 2 + 1), sz[0])\n        tneigh_left = max(round(max_disp1[1].item() - target_neigh_sz[1].item() / 2), 0)\n        tneigh_right = min(round(max_disp1[1].item() + target_neigh_sz[1].item() / 2 + 1), sz[1])\n        scores_masked = scores_hn[scale_ind:scale_ind + 1, ...].clone()\n        scores_masked[...,tneigh_top:tneigh_bottom,tneigh_left:tneigh_right] = 0\n\n        # Find new maximum\n        max_score2, max_disp2 = dcf.max2d(scores_masked)\n        max_disp2 = max_disp2.float().cpu().view(-1)\n        target_disp2 = max_disp2 - score_center\n        translation_vec2 = target_disp2 * (self.img_support_sz / output_sz) * sample_scale\n\n        prev_target_vec = (self.pos - sample_pos[scale_ind,:]) / ((self.img_support_sz / output_sz) * sample_scale)\n\n        # Handle the different cases\n        if max_score2 > self.params.distractor_threshold * max_score1:\n            disp_norm1 = torch.sqrt(torch.sum((target_disp1-prev_target_vec)**2))\n            disp_norm2 = torch.sqrt(torch.sum((target_disp2-prev_target_vec)**2))\n            disp_threshold = self.params.dispalcement_scale * math.sqrt(sz[0] * sz[1]) / 2\n\n            if disp_norm2 > disp_threshold and disp_norm1 < disp_threshold:\n                return translation_vec1, scale_ind, scores_hn, 'hard_negative'\n            if disp_norm2 < disp_threshold and disp_norm1 > disp_threshold:\n                return translation_vec2, scale_ind, scores_hn, 'hard_negative'\n            if disp_norm2 > disp_threshold and disp_norm1 > disp_threshold:\n                return translation_vec1, scale_ind, scores_hn, 'uncertain'\n\n            # If also the distractor is close, return with highest score\n            return translation_vec1, scale_ind, scores_hn, 'uncertain'\n\n        if max_score2 > self.params.hard_negative_threshold * max_score1 and max_score2 > self.params.target_not_found_threshold:\n            return translation_vec1, scale_ind, scores_hn, 'hard_negative'\n\n        return translation_vec1, scale_ind, scores_hn, 'normal'\n\n    def extract_backbone_features(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor):\n        im_patches, patch_coords = 
sample_patch_multiscale(im, pos, scales, sz,\n                                                           mode=self.params.get('border_mode', 'replicate'),\n                                                           max_scale_change=self.params.get('patch_max_scale_change', None))\n        with torch.no_grad():\n            backbone_feat = self.net.extract_backbone(im_patches)\n        return backbone_feat, patch_coords, im_patches\n\n    def get_classification_features(self, backbone_feat):\n        with torch.no_grad():\n            return self.net.extract_classification_feat(backbone_feat)\n\n    def get_iou_backbone_features(self, backbone_feat):\n        return self.net.get_backbone_bbreg_feat(backbone_feat)\n\n    def get_iou_features(self, backbone_feat):\n        with torch.no_grad():\n            return self.net.bb_regressor.get_iou_feat(self.get_iou_backbone_features(backbone_feat))\n\n    def get_iou_modulation(self, iou_backbone_feat, target_boxes):\n        with torch.no_grad():\n            return self.net.bb_regressor.get_modulation(iou_backbone_feat, target_boxes)\n\n\n    def generate_init_samples(self, im: torch.Tensor) -> TensorList:\n        \"\"\"Perform data augmentation to generate initial training samples.\"\"\"\n\n        mode = self.params.get('border_mode', 'replicate')\n        if mode == 'inside' or mode == 'inside_major':\n            # Get new sample size if forced inside the image\n            im_sz = torch.Tensor([im.shape[2], im.shape[3]])\n            sample_sz = self.target_scale * self.img_sample_sz\n            shrink_factor = (sample_sz.float() / im_sz)\n            if mode == 'inside':\n                shrink_factor = shrink_factor.max()\n            elif mode == 'inside_major':\n                shrink_factor = shrink_factor.min()\n            shrink_factor.clamp_(min=1, max=self.params.get('patch_max_scale_change', None))\n            sample_sz = (sample_sz.float() / shrink_factor)\n            self.init_sample_scale = (sample_sz / self.img_sample_sz).prod().sqrt()\n            tl = self.pos - (sample_sz - 1) / 2\n            br = self.pos + sample_sz / 2 + 1\n            global_shift = - ((-tl).clamp(0) - (br - im_sz).clamp(0)) / self.init_sample_scale\n        else:\n            self.init_sample_scale = self.target_scale\n            global_shift = torch.zeros(2)\n\n        self.init_sample_pos = self.pos.round()\n\n        # Compute augmentation size\n        aug_expansion_factor = self.params.get('augmentation_expansion_factor', None)\n        aug_expansion_sz = self.img_sample_sz.clone()\n        aug_output_sz = None\n        if aug_expansion_factor is not None and aug_expansion_factor != 1:\n            aug_expansion_sz = (self.img_sample_sz * aug_expansion_factor).long()\n            aug_expansion_sz += (aug_expansion_sz - self.img_sample_sz.long()) % 2\n            aug_expansion_sz = aug_expansion_sz.float()\n            aug_output_sz = self.img_sample_sz.long().tolist()\n\n        # Random shift for each sample\n        get_rand_shift = lambda: None\n        random_shift_factor = self.params.get('random_shift_factor', 0)\n        if random_shift_factor > 0:\n            get_rand_shift = lambda: ((torch.rand(2) - 0.5) * self.img_sample_sz * random_shift_factor + global_shift).long().tolist()\n\n        # Always put identity transformation first, since it is the unaugmented sample that is always used\n        self.transforms = [augmentation.Identity(aug_output_sz, global_shift.long().tolist())]\n\n        augs = self.params.augmentation if 
self.params.get('use_augmentation', True) else {}\n\n        # Add all augmentations\n        if 'shift' in augs:\n            self.transforms.extend([augmentation.Translation(shift, aug_output_sz, global_shift.long().tolist()) for shift in augs['shift']])\n        if 'relativeshift' in augs:\n            get_absolute = lambda shift: (torch.Tensor(shift) * self.img_sample_sz/2).long().tolist()\n            self.transforms.extend([augmentation.Translation(get_absolute(shift), aug_output_sz, global_shift.long().tolist()) for shift in augs['relativeshift']])\n        if 'fliplr' in augs and augs['fliplr']:\n            self.transforms.append(augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))\n        if 'blur' in augs:\n            self.transforms.extend([augmentation.Blur(sigma, aug_output_sz, get_rand_shift()) for sigma in augs['blur']])\n        if 'scale' in augs:\n            self.transforms.extend([augmentation.Scale(scale_factor, aug_output_sz, get_rand_shift()) for scale_factor in augs['scale']])\n        if 'rotate' in augs:\n            self.transforms.extend([augmentation.Rotate(angle, aug_output_sz, get_rand_shift()) for angle in augs['rotate']])\n\n        # Extract augmented image patches\n        im_patches = sample_patch_transformed(im, self.init_sample_pos, self.init_sample_scale, aug_expansion_sz, self.transforms)\n\n        # Extract initial backbone features\n        with torch.no_grad():\n            init_backbone_feat = self.net.extract_backbone(im_patches)\n\n        return init_backbone_feat\n\n    def init_target_boxes(self):\n        \"\"\"Get the target bounding boxes for the initial augmented samples.\"\"\"\n        self.classifier_target_box = self.get_iounet_box(self.pos, self.target_sz, self.init_sample_pos, self.init_sample_scale)\n        init_target_boxes = TensorList()\n        for T in self.transforms:\n            init_target_boxes.append(self.classifier_target_box + torch.Tensor([T.shift[1], T.shift[0], 0, 0]))\n        init_target_boxes = torch.cat(init_target_boxes.view(1, 4), 0).to(self.params.device)\n        self.target_boxes = init_target_boxes.new_zeros(self.params.sample_memory_size, 4)\n        self.target_boxes[:init_target_boxes.shape[0],:] = init_target_boxes\n        return init_target_boxes\n\n    def init_memory(self, train_x: TensorList):\n        # Initialize first-frame spatial training samples\n        self.num_init_samples = train_x.size(0)\n        init_sample_weights = TensorList([x.new_ones(1) / x.shape[0] for x in train_x])\n\n        # Sample counters and weights for spatial\n        self.num_stored_samples = self.num_init_samples.copy()\n        self.previous_replace_ind = [None] * len(self.num_stored_samples)\n        self.sample_weights = TensorList([x.new_zeros(self.params.sample_memory_size) for x in train_x])\n        for sw, init_sw, num in zip(self.sample_weights, init_sample_weights, self.num_init_samples):\n            sw[:num] = init_sw\n\n        # Initialize memory\n        self.training_samples = TensorList(\n            [x.new_zeros(self.params.sample_memory_size, x.shape[1], x.shape[2], x.shape[3]) for x in train_x])\n\n        for ts, x in zip(self.training_samples, train_x):\n            ts[:x.shape[0],...] 
= x\n\n\n    def update_memory(self, sample_x: TensorList, target_box, learning_rate = None):\n        # Update weights and get replace ind\n        replace_ind = self.update_sample_weights(self.sample_weights, self.previous_replace_ind, self.num_stored_samples, self.num_init_samples, learning_rate)\n        self.previous_replace_ind = replace_ind\n\n        # Update sample and label memory\n        for train_samp, x, ind in zip(self.training_samples, sample_x, replace_ind):\n            train_samp[ind:ind+1,...] = x\n\n        # Update bb memory\n        self.target_boxes[replace_ind[0],:] = target_box\n\n        self.num_stored_samples += 1\n\n\n    def update_sample_weights(self, sample_weights, previous_replace_ind, num_stored_samples, num_init_samples, learning_rate = None):\n        # Update weights and get index to replace\n        replace_ind = []\n        for sw, prev_ind, num_samp, num_init in zip(sample_weights, previous_replace_ind, num_stored_samples, num_init_samples):\n            lr = learning_rate\n            if lr is None:\n                lr = self.params.learning_rate\n\n            init_samp_weight = self.params.get('init_samples_minimum_weight', None)\n            if init_samp_weight == 0:\n                init_samp_weight = None\n            s_ind = 0 if init_samp_weight is None else num_init\n\n            if num_samp == 0 or lr == 1:\n                sw[:] = 0\n                sw[0] = 1\n                r_ind = 0\n            else:\n                # Get index to replace\n                if num_samp < sw.shape[0]:\n                    r_ind = num_samp\n                else:\n                    _, r_ind = torch.min(sw[s_ind:], 0)\n                    r_ind = r_ind.item() + s_ind\n\n                # Update weights\n                if prev_ind is None:\n                    sw /= 1 - lr\n                    sw[r_ind] = lr\n                else:\n                    sw[r_ind] = sw[prev_ind] / (1 - lr)\n\n            sw /= sw.sum()\n            if init_samp_weight is not None and sw[:num_init].sum() < init_samp_weight:\n                sw /= init_samp_weight + sw[num_init:].sum()\n                sw[:num_init] = init_samp_weight / num_init\n\n            replace_ind.append(r_ind)\n\n        return replace_ind\n\n    def update_state(self, new_pos, new_scale = None):\n        # Update scale\n        if new_scale is not None:\n            self.target_scale = new_scale.clamp(self.min_scale_factor, self.max_scale_factor)\n            self.target_sz = self.base_target_sz * self.target_scale\n\n        # Update pos\n        inside_ratio = self.params.get('target_inside_ratio', 0.2)\n        inside_offset = (inside_ratio - 0.5) * self.target_sz\n        self.pos = torch.max(torch.min(new_pos, self.image_sz - inside_offset), inside_offset)\n\n\n    def get_iounet_box(self, pos, sz, sample_pos, sample_scale):\n        \"\"\"All inputs in original image coordinates.\n        Generates a box in the cropped image sample reference frame, in the format used by the IoUNet.\"\"\"\n        box_center = (pos - sample_pos) / sample_scale + (self.img_sample_sz - 1) / 2\n        box_sz = sz / sample_scale\n        target_ul = box_center - (box_sz - 1) / 2\n        return torch.cat([target_ul.flip((0,)), box_sz.flip((0,))])\n\n\n    def init_iou_net(self, backbone_feat):\n        # Setup IoU net and objective\n        for p in self.net.bb_regressor.parameters():\n            p.requires_grad = False\n\n        # Get target boxes for the different augmentations\n        
self.classifier_target_box = self.get_iounet_box(self.pos, self.target_sz, self.init_sample_pos, self.init_sample_scale)\n        target_boxes = TensorList()\n        if self.params.iounet_augmentation:\n            for T in self.transforms:\n                if not isinstance(T, (augmentation.Identity, augmentation.Translation, augmentation.FlipHorizontal, augmentation.FlipVertical, augmentation.Blur)):\n                    break\n                target_boxes.append(self.classifier_target_box + torch.Tensor([T.shift[1], T.shift[0], 0, 0]))\n        else:\n            target_boxes.append(self.classifier_target_box + torch.Tensor([self.transforms[0].shift[1], self.transforms[0].shift[0], 0, 0]))\n        target_boxes = torch.cat(target_boxes.view(1,4), 0).to(self.params.device)\n\n        # Get iou features\n        iou_backbone_feat = self.get_iou_backbone_features(backbone_feat)\n\n        # Remove other augmentations such as rotation\n        iou_backbone_feat = TensorList([x[:target_boxes.shape[0],...] for x in iou_backbone_feat])\n\n        # Get modulation vector\n        self.iou_modulation = self.get_iou_modulation(iou_backbone_feat, target_boxes)\n        if torch.is_tensor(self.iou_modulation[0]):\n            self.iou_modulation = TensorList([x.detach().mean(0) for x in self.iou_modulation])\n\n\n    def init_classifier(self, init_backbone_feat):\n        # Get classification features\n        x = self.get_classification_features(init_backbone_feat)\n\n        # Overwrite some parameters in the classifier. (These are not generally changed)\n        self._overwrite_classifier_params(feature_dim=x.shape[-3])\n\n        # Add the dropout augmentation here, since it requires extraction of the classification features\n        if 'dropout' in self.params.augmentation and self.params.get('use_augmentation', True):\n            num, prob = self.params.augmentation['dropout']\n            self.transforms.extend(self.transforms[:1]*num)\n            x = torch.cat([x, F.dropout2d(x[0:1,...].expand(num,-1,-1,-1), p=prob, training=True)])\n\n        # Set feature size and other related sizes\n        self.feature_sz = torch.Tensor(list(x.shape[-2:]))\n        ksz = self.net.classifier.filter_size\n        self.kernel_size = torch.Tensor([ksz, ksz] if isinstance(ksz, (int, float)) else ksz)\n        self.output_sz = self.feature_sz + (self.kernel_size + 1)%2\n\n        # Construct output window\n        self.output_window = None\n        if self.params.get('window_output', False):\n            if self.params.get('use_clipped_window', False):\n                self.output_window = dcf.hann2d_clipped(self.output_sz.long(), (self.output_sz*self.params.effective_search_area / self.params.search_area_scale).long(), centered=True).to(self.params.device)\n            else:\n                self.output_window = dcf.hann2d(self.output_sz.long(), centered=True).to(self.params.device)\n            self.output_window = self.output_window.squeeze(0)\n\n        # Get target boxes for the different augmentations\n        target_boxes = self.init_target_boxes()\n\n        # Set number of iterations\n        plot_loss = self.params.debug > 0\n        num_iter = self.params.get('net_opt_iter', None)\n\n        # Get target filter by running the discriminative model prediction module\n        with torch.no_grad():\n            self.target_filter, _, losses = self.net.classifier.get_filter(x, target_boxes, num_iter=num_iter,\n                                                                           
compute_losses=plot_loss)\n\n        # Init memory\n        if self.params.get('update_classifier', True):\n            self.init_memory(TensorList([x]))\n\n        if plot_loss:\n            if isinstance(losses, dict):\n                losses = losses['train']\n            self.losses = torch.cat(losses)\n            if self.visdom is not None:\n                self.visdom.register((self.losses, torch.arange(self.losses.numel())), 'lineplot', 3, 'Training Loss' + self.id_str)\n            elif self.params.debug >= 3:\n                plot_graph(self.losses, 10, title='Training Loss' + self.id_str)\n\n    def _overwrite_classifier_params(self, feature_dim):\n        # Overwrite some parameters in the classifier. (These are not generally changed)\n        pred_module = getattr(self.net.classifier.filter_optimizer, 'score_predictor', self.net.classifier.filter_optimizer)\n        if self.params.get('label_threshold', None) is not None:\n            self.net.classifier.filter_optimizer.label_threshold = self.params.label_threshold\n        if self.params.get('label_shrink', None) is not None:\n            self.net.classifier.filter_optimizer.label_shrink = self.params.label_shrink\n        if self.params.get('softmax_reg', None) is not None:\n            self.net.classifier.filter_optimizer.softmax_reg = self.params.softmax_reg\n        if self.params.get('filter_reg', None) is not None:\n            pred_module.filter_reg[0] = self.params.filter_reg\n            pred_module.min_filter_reg = self.params.filter_reg\n        if self.params.get('filter_init_zero', False):\n            self.net.classifier.filter_initializer = FilterInitializerZero(self.net.classifier.filter_size, feature_dim)\n\n\n    def update_classifier(self, train_x, target_box, learning_rate=None, scores=None):\n        # Set flags and learning rate\n        hard_negative_flag = learning_rate is not None\n        if learning_rate is None:\n            learning_rate = self.params.learning_rate\n\n        # Update the tracker memory\n        if hard_negative_flag or self.frame_num % self.params.get('train_sample_interval', 1) == 0:\n            self.update_memory(TensorList([train_x]), target_box, learning_rate)\n\n        # Decide the number of iterations to run\n        num_iter = 0\n        low_score_th = self.params.get('low_score_opt_threshold', None)\n        if hard_negative_flag:\n            num_iter = self.params.get('net_opt_hn_iter', None)\n        elif low_score_th is not None and low_score_th > scores.max().item():\n            num_iter = self.params.get('net_opt_low_iter', None)\n        elif (self.frame_num - 1) % self.params.train_skipping == 0:\n            num_iter = self.params.get('net_opt_update_iter', None)\n\n        plot_loss = self.params.debug > 0\n\n        if num_iter > 0:\n            # Get inputs for the DiMP filter optimizer module\n            samples = self.training_samples[0][:self.num_stored_samples[0],...]\n            target_boxes = self.target_boxes[:self.num_stored_samples[0],:].clone()\n            sample_weights = self.sample_weights[0][:self.num_stored_samples[0]]\n\n            # Run the filter optimizer module\n            with torch.no_grad():\n                self.target_filter, _, losses = self.net.classifier.filter_optimizer(self.target_filter,\n                                                                                     num_iter=num_iter, feat=samples,\n                                                                                     bb=target_boxes,\n               
                                                                      sample_weight=sample_weights,\n                                                                                     compute_losses=plot_loss)\n\n            if plot_loss:\n                if isinstance(losses, dict):\n                    losses = losses['train']\n                self.losses = torch.cat((self.losses, torch.cat(losses)))\n                if self.visdom is not None:\n                    self.visdom.register((self.losses, torch.arange(self.losses.numel())), 'lineplot', 3, 'Training Loss' + self.id_str)\n                elif self.params.debug >= 3:\n                    plot_graph(self.losses, 10, title='Training Loss' + self.id_str)\n\n    def refine_target_box(self, backbone_feat, sample_pos, sample_scale, scale_ind, update_scale = True):\n        \"\"\"Run the ATOM IoUNet to refine the target bounding box.\"\"\"\n\n        if hasattr(self.net.bb_regressor, 'predict_bb'):\n            return self.direct_box_regression(backbone_feat, sample_pos, sample_scale, scale_ind, update_scale)\n\n        # Initial box for refinement\n        init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos, sample_scale)\n\n        # Extract features from the relevant scale\n        iou_features = self.get_iou_features(backbone_feat)\n        iou_features = TensorList([x[scale_ind:scale_ind+1,...] for x in iou_features])\n\n        # Generate random initial boxes\n        init_boxes = init_box.view(1,4).clone()\n        if self.params.num_init_random_boxes > 0:\n            square_box_sz = init_box[2:].prod().sqrt()\n            rand_factor = square_box_sz * torch.cat([self.params.box_jitter_pos * torch.ones(2), self.params.box_jitter_sz * torch.ones(2)])\n\n            minimal_edge_size = init_box[2:].min()/3\n            rand_bb = (torch.rand(self.params.num_init_random_boxes, 4) - 0.5) * rand_factor\n            new_sz = (init_box[2:] + rand_bb[:,2:]).clamp(minimal_edge_size)\n            new_center = (init_box[:2] + init_box[2:]/2) + rand_bb[:,:2]\n            init_boxes = torch.cat([new_center - new_sz/2, new_sz], 1)\n            init_boxes = torch.cat([init_box.view(1,4), init_boxes])\n\n        # Optimize the boxes\n        output_boxes, output_iou = self.optimize_boxes(iou_features, init_boxes)\n\n        # Remove weird boxes\n        output_boxes[:, 2:].clamp_(1)\n        aspect_ratio = output_boxes[:,2] / output_boxes[:,3]\n        keep_ind = (aspect_ratio < self.params.maximal_aspect_ratio) * (aspect_ratio > 1/self.params.maximal_aspect_ratio)\n        output_boxes = output_boxes[keep_ind,:]\n        output_iou = output_iou[keep_ind]\n\n        # If no box found\n        if output_boxes.shape[0] == 0:\n            return\n\n        # Predict box\n        k = self.params.get('iounet_k', 5)\n        topk = min(k, output_boxes.shape[0])\n        _, inds = torch.topk(output_iou, topk)\n        predicted_box = output_boxes[inds, :].mean(0)\n        predicted_iou = output_iou.view(-1, 1)[inds, :].mean(0)\n\n        # Get new position and size\n        new_pos = predicted_box[:2] + predicted_box[2:] / 2\n        new_pos = (new_pos.flip((0,)) - (self.img_sample_sz - 1) / 2) * sample_scale + sample_pos\n        new_target_sz = predicted_box[2:].flip((0,)) * sample_scale\n        new_scale = torch.sqrt(new_target_sz.prod() / self.base_target_sz.prod())\n\n        self.pos_iounet = new_pos.clone()\n\n        if self.params.get('use_iounet_pos_for_learning', True):\n            self.pos = new_pos.clone()\n\n        
self.target_sz = new_target_sz\n\n        if update_scale:\n            self.target_scale = new_scale\n\n        # self.visualize_iou_pred(iou_features, predicted_box)\n\n\n    def optimize_boxes(self, iou_features, init_boxes):\n        box_refinement_space = self.params.get('box_refinement_space', 'default')\n        if box_refinement_space == 'default':\n            return self.optimize_boxes_default(iou_features, init_boxes)\n        if box_refinement_space == 'relative':\n            return self.optimize_boxes_relative(iou_features, init_boxes)\n        raise ValueError('Unknown box_refinement_space {}'.format(box_refinement_space))\n\n\n    def optimize_boxes_default(self, iou_features, init_boxes):\n        \"\"\"Optimize iounet boxes with the default parametrization\"\"\"\n        output_boxes = init_boxes.view(1, -1, 4).to(self.params.device)\n        step_length = self.params.box_refinement_step_length\n        if isinstance(step_length, (tuple, list)):\n            step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]]).to(self.params.device).view(1,1,4)\n\n        for i_ in range(self.params.box_refinement_iter):\n            # forward pass\n            bb_init = output_boxes.clone().detach()\n            bb_init.requires_grad = True\n\n            outputs = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, bb_init)\n\n            if isinstance(outputs, (list, tuple)):\n                outputs = outputs[0]\n\n            outputs.backward(gradient = torch.ones_like(outputs))\n\n            # Update proposal\n            output_boxes = bb_init + step_length * bb_init.grad * bb_init[:, :, 2:].repeat(1, 1, 2)\n            output_boxes.detach_()\n\n            step_length *= self.params.box_refinement_step_decay\n\n        return output_boxes.view(-1,4).cpu(), outputs.detach().view(-1).cpu()\n\n\n    def optimize_boxes_relative(self, iou_features, init_boxes):\n        \"\"\"Optimize iounet boxes with the relative parametrization used in PrDiMP\"\"\"\n        output_boxes = init_boxes.view(1, -1, 4).to(self.params.device)\n        step_length = self.params.box_refinement_step_length\n        if isinstance(step_length, (tuple, list)):\n            step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]]).to(self.params.device).view(1,1,4)\n\n        sz_norm = output_boxes[:,:1,2:].clone()\n        output_boxes_rel = bbutils.rect_to_rel(output_boxes, sz_norm)\n        for i_ in range(self.params.box_refinement_iter):\n            # forward pass\n            bb_init_rel = output_boxes_rel.clone().detach()\n            bb_init_rel.requires_grad = True\n\n            bb_init = bbutils.rel_to_rect(bb_init_rel, sz_norm)\n            outputs = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, bb_init)\n\n            if isinstance(outputs, (list, tuple)):\n                outputs = outputs[0]\n\n            outputs.backward(gradient = torch.ones_like(outputs))\n\n            # Update proposal\n            output_boxes_rel = bb_init_rel + step_length * bb_init_rel.grad\n            output_boxes_rel.detach_()\n\n            step_length *= self.params.box_refinement_step_decay\n\n        output_boxes = bbutils.rel_to_rect(output_boxes_rel, sz_norm)\n\n        return output_boxes.view(-1,4).cpu(), outputs.detach().view(-1).cpu()\n\n   
 def direct_box_regression(self, backbone_feat, sample_pos, sample_scale, scale_ind, update_scale = True):\n        \"\"\"Implementation of direct bounding box regression.\"\"\"\n\n        # Initial box for refinement\n        init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos, sample_scale)\n\n        # Extract features from the relevant scale\n        iou_features = self.get_iou_features(backbone_feat)\n        iou_features = TensorList([x[scale_ind:scale_ind+1,...] for x in iou_features])\n\n        # Generate random initial boxes\n        init_boxes = init_box.view(1, 1, 4).clone().to(self.params.device)\n\n        # Optimize the boxes\n        output_boxes = self.net.bb_regressor.predict_bb(self.iou_modulation, iou_features, init_boxes).view(-1,4).cpu()\n\n        # Remove weird boxes\n        output_boxes[:, 2:].clamp_(1)\n\n        predicted_box = output_boxes[0, :]\n\n        # Get new position and size\n        new_pos = predicted_box[:2] + predicted_box[2:] / 2\n        new_pos = (new_pos.flip((0,)) - (self.img_sample_sz - 1) / 2) * sample_scale + sample_pos\n        new_target_sz = predicted_box[2:].flip((0,)) * sample_scale\n        new_scale_bbr = torch.sqrt(new_target_sz.prod() / self.base_target_sz.prod())\n        new_scale = new_scale_bbr\n\n        self.pos_iounet = new_pos.clone()\n\n        if self.params.get('use_iounet_pos_for_learning', True):\n            self.pos = new_pos.clone()\n\n        self.target_sz = new_target_sz\n\n        if update_scale:\n            self.target_scale = new_scale\n\n\n    def visualize_iou_pred(self, iou_features, center_box):\n        center_box = center_box.view(1,1,4)\n        sz_norm = center_box[...,2:].clone()\n        center_box_rel = bbutils.rect_to_rel(center_box, sz_norm)\n\n        pos_dist = 1.0\n        sz_dist = math.log(3.0)\n        pos_step = 0.01\n        sz_step = 0.01\n\n        pos_scale = torch.arange(-pos_dist, pos_dist+pos_step, step=pos_step)\n        sz_scale = torch.arange(-sz_dist, sz_dist+sz_step, step=sz_step)\n\n        bbx = torch.zeros(1, pos_scale.numel(), 4)\n        bbx[0,:,0] = pos_scale.clone()\n        bby = torch.zeros(pos_scale.numel(), 1, 4)\n        bby[:,0,1] = pos_scale.clone()\n        bbw = torch.zeros(1, sz_scale.numel(), 4)\n        bbw[0,:,2] = sz_scale.clone()\n        bbh = torch.zeros(sz_scale.numel(), 1, 4)\n        bbh[:,0,3] = sz_scale.clone()\n\n        pos_boxes = bbutils.rel_to_rect((center_box_rel + bbx) + bby, sz_norm).view(1,-1,4).to(self.params.device)\n        sz_boxes = bbutils.rel_to_rect((center_box_rel + bbw) + bbh, sz_norm).view(1,-1,4).to(self.params.device)\n\n        pos_scores = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, pos_boxes).exp()\n        sz_scores = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, sz_boxes).exp()\n\n        show_tensor(pos_scores.view(pos_scale.numel(),-1), title='Position scores', fig_num=21)\n        show_tensor(sz_scores.view(sz_scale.numel(),-1), title='Size scores', fig_num=22)\n\n\n    def visdom_draw_tracking(self, image, box, segmentation=None):\n        if hasattr(self, 'search_area_box'):\n            self.visdom.register((image, box, self.search_area_box), 'Tracking', 1, 'Tracking')\n        else:\n            self.visdom.register((image, box), 'Tracking', 1, 'Tracking')"
  },
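The box-refinement loop in optimize_boxes_default above treats the predicted IoU as a differentiable function of the box and ascends its gradient, scaling each step by the current box size. Below is a minimal, self-contained sketch of that pattern; toy_score is a hypothetical stand-in for net.bb_regressor.predict_iou, and the step length, decay, and iteration count are illustrative values, not the tracker's tuned parameters.

import torch

def toy_score(boxes: torch.Tensor) -> torch.Tensor:
    # Peaks when a box matches a hypothetical target at (x, y, w, h) = (50, 40, 20, 30).
    target = torch.tensor([50., 40., 20., 30.])
    return -((boxes - target) ** 2).sum(dim=-1)

def refine_boxes(init_boxes: torch.Tensor, step_length=0.002, num_iter=10, decay=0.9):
    boxes = init_boxes.clone()
    for _ in range(num_iter):
        bb = boxes.clone().detach().requires_grad_(True)
        score = toy_score(bb)
        # One gradient-ascent step per iteration; the position/size step is
        # scaled by the current box size, mirroring optimize_boxes_default.
        score.backward(gradient=torch.ones_like(score))
        boxes = (bb + step_length * bb.grad * bb[:, 2:].repeat(1, 2)).detach()
        step_length *= decay
    return boxes, toy_score(boxes)

refined, scores = refine_boxes(torch.tensor([[45., 35., 25., 25.]]))
print(refined, scores)  # boxes move toward the target as the score rises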
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/tracker/eco/__init__.py",
    "content": "from .eco import ECO\n\ndef get_tracker_class():\n    return ECO"
  },
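Each tracker package in this tree exposes a get_tracker_class() factory like the one above, so a tracker can be resolved from its module name alone. A hypothetical loader, sketched under the assumption that the package layout is pytracking.tracker.<name> (the helper name load_tracker_class is not part of the library):

import importlib

def load_tracker_class(name: str):
    # Resolve e.g. 'eco' -> pytracking.tracker.eco.get_tracker_class() -> ECO.
    module = importlib.import_module('pytracking.tracker.{}'.format(name))
    return module.get_tracker_class()

# TrackerClass = load_tracker_class('eco')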
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/tracker/eco/eco.py",
    "content": "from pytracking.tracker.base import BaseTracker\nimport torch\nimport torch.nn.functional as F\nimport math\nfrom pytracking import complex, dcf, fourier, TensorList\nfrom pytracking.libs.tensorlist import tensor_operation\nfrom pytracking.features.preprocessing import numpy_to_torch\nfrom pytracking.utils.plotting import show_tensor\nfrom pytracking.libs.optimization import GaussNewtonCG\nfrom .optim import FilterOptim, FactorizedConvProblem\nfrom pytracking.features import augmentation\n\n\n\nclass ECO(BaseTracker):\n\n    multiobj_mode = 'parallel'\n\n    def initialize_features(self):\n        if not getattr(self, 'features_initialized', False):\n            self.params.features.initialize()\n        self.features_initialized = True\n\n\n    def initialize(self, image, info: dict) -> dict:\n        state = info['init_bbox']\n\n        # Initialize some stuff\n        self.frame_num = 1\n        if not self.params.has('device'):\n            self.params.device = 'cuda' if self.params.use_gpu else 'cpu'\n\n        # Initialize features\n        self.initialize_features()\n\n        # Chack if image is color\n        self.params.features.set_is_color(image.shape[2] == 3)\n\n        # Get feature specific params\n        self.fparams = self.params.features.get_fparams('feature_params')\n\n        # Get position and size\n        self.pos = torch.Tensor([state[1] + (state[3] - 1)/2, state[0] + (state[2] - 1)/2])\n        self.target_sz = torch.Tensor([state[3], state[2]])\n\n        # Set search area\n        self.target_scale = 1.0\n        search_area = torch.prod(self.target_sz * self.params.search_area_scale).item()\n        if search_area > self.params.max_image_sample_size:\n            self.target_scale =  math.sqrt(search_area / self.params.max_image_sample_size)\n        elif search_area < self.params.min_image_sample_size:\n            self.target_scale =  math.sqrt(search_area / self.params.min_image_sample_size)\n\n        # Target size in base scale\n        self.base_target_sz = self.target_sz / self.target_scale\n\n        # Use odd square search area and set sizes\n        feat_max_stride = max(self.params.features.stride())\n        self.img_sample_sz = torch.round(torch.sqrt(torch.prod(self.base_target_sz * self.params.search_area_scale))) * torch.ones(2)\n        self.img_sample_sz += feat_max_stride - self.img_sample_sz % (2 * feat_max_stride)\n\n        # Set other sizes (corresponds to ECO code)\n        self.img_support_sz = self.img_sample_sz\n        self.feature_sz = self.params.features.size(self.img_sample_sz)\n        self.filter_sz = self.feature_sz + (self.feature_sz + 1) % 2\n        self.output_sz = self.params.score_upsample_factor * self.img_support_sz    # Interpolated size of the output\n        self.compressed_dim = self.fparams.attribute('compressed_dim')\n\n        # Number of filters\n        self.num_filters = len(self.filter_sz)\n\n        # Get window function\n        self.window = TensorList([dcf.hann2d(sz).to(self.params.device) for sz in self.feature_sz])\n\n        # Get interpolation function\n        self.interp_fs = TensorList([dcf.get_interp_fourier(sz, self.params.interpolation_method,\n                                                self.params.interpolation_bicubic_a, self.params.interpolation_centering,\n                                                self.params.interpolation_windowing, self.params.device) for sz in self.filter_sz])\n\n        # Get regularization filter\n        self.reg_filter = 
TensorList([dcf.get_reg_filter(self.img_support_sz, self.base_target_sz, fparams).to(self.params.device)\n                                      for fparams in self.fparams])\n        self.reg_energy = self.reg_filter.view(-1) @ self.reg_filter.view(-1)\n\n        # Get label function\n        output_sigma_factor = self.fparams.attribute('output_sigma_factor')\n        sigma = (self.filter_sz / self.img_support_sz) * torch.sqrt(self.base_target_sz.prod()) * output_sigma_factor\n        self.yf = TensorList([dcf.label_function(sz, sig).to(self.params.device) for sz, sig in zip(self.filter_sz, sigma)])\n\n        # Optimization options\n        self.params.precond_learning_rate = self.fparams.attribute('learning_rate')\n        if self.params.CG_forgetting_rate is None or max(self.params.precond_learning_rate) >= 1:\n            self.params.direction_forget_factor = 0\n        else:\n            self.params.direction_forget_factor = (1 - max(self.params.precond_learning_rate))**self.params.CG_forgetting_rate\n\n\n        # Convert image\n        im = numpy_to_torch(image)\n\n        # Setup bounds\n        self.image_sz = torch.Tensor([im.shape[2], im.shape[3]])\n        self.min_scale_factor = torch.max(10 / self.base_target_sz)\n        self.max_scale_factor = torch.min(self.image_sz / self.base_target_sz)\n\n        # Extract and transform sample\n        x = self.generate_init_samples(im)\n\n        # Initialize projection matrix\n        x_mat = TensorList([e.permute(1,0,2,3).reshape(e.shape[1], -1).clone() for e in x])\n        x_mat -= x_mat.mean(dim=1, keepdim=True)\n        cov_x = x_mat @ x_mat.t()\n        self.projection_matrix = TensorList([torch.svd(C)[0][:,:cdim].clone() for C, cdim in zip(cov_x, self.compressed_dim)])\n\n        # Transform to get the training sample\n        train_xf = self.preprocess_sample(x)\n\n        # Shift the samples back\n        if 'shift' in self.params.augmentation:\n            for xf in train_xf:\n                if xf.shape[0] == 1:\n                    continue\n                for i, shift in enumerate(self.params.augmentation['shift']):\n                    shift_samp = 2 * math.pi * torch.Tensor(shift) / self.img_support_sz\n                    xf[1+i:2+i,...] 
= fourier.shift_fs(xf[1+i:2+i,...], shift=shift_samp)\n\n        # Shift sample\n        shift_samp = 2*math.pi * (self.pos - self.pos.round()) / (self.target_scale * self.img_support_sz)\n        train_xf = fourier.shift_fs(train_xf, shift=shift_samp)\n\n        # Initialize first-frame training samples\n        num_init_samples = train_xf.size(0)\n        self.init_sample_weights = TensorList([xf.new_ones(1) / xf.shape[0] for xf in train_xf])\n        self.init_training_samples = train_xf.permute(2, 3, 0, 1, 4)\n\n\n        # Sample counters and weights\n        self.num_stored_samples = num_init_samples\n        self.previous_replace_ind = [None]*len(self.num_stored_samples)\n        self.sample_weights = TensorList([xf.new_zeros(self.params.sample_memory_size) for xf in train_xf])\n        for sw, init_sw, num in zip(self.sample_weights, self.init_sample_weights, num_init_samples):\n            sw[:num] = init_sw\n\n        # Initialize memory\n        self.training_samples = TensorList(\n            [xf.new_zeros(xf.shape[2], xf.shape[3], self.params.sample_memory_size, cdim, 2) for xf, cdim in zip(train_xf, self.compressed_dim)])\n\n        # Initialize filter\n        self.filter = TensorList(\n            [xf.new_zeros(1, cdim, xf.shape[2], xf.shape[3], 2) for xf, cdim in zip(train_xf, self.compressed_dim)])\n\n        # Do joint optimization\n        self.joint_problem = FactorizedConvProblem(self.init_training_samples, self.yf, self.reg_filter, self.projection_matrix, self.params, self.init_sample_weights)\n        joint_var = self.filter.concat(self.projection_matrix)\n        self.joint_optimizer = GaussNewtonCG(self.joint_problem, joint_var, debug=(self.params.debug>=1), visdom=self.visdom)\n\n        if self.params.update_projection_matrix:\n            self.joint_optimizer.run(self.params.init_CG_iter // self.params.init_GN_iter, self.params.init_GN_iter)\n\n        # Re-project samples with the new projection matrix\n        compressed_samples = complex.mtimes(self.init_training_samples, self.projection_matrix)\n        for train_samp, init_samp in zip(self.training_samples, compressed_samples):\n            train_samp[:,:,:init_samp.shape[2],:,:] = init_samp\n\n        # Initialize optimizer\n        self.filter_optimizer = FilterOptim(self.params, self.reg_energy)\n        self.filter_optimizer.register(self.filter, self.training_samples, self.yf, self.sample_weights, self.reg_filter)\n        self.filter_optimizer.sample_energy = self.joint_problem.sample_energy\n        self.filter_optimizer.residuals = self.joint_optimizer.residuals.clone()\n\n        if not self.params.update_projection_matrix:\n            self.filter_optimizer.run(self.params.init_CG_iter)\n\n        # Post optimization\n        self.filter_optimizer.run(self.params.post_init_CG_iter)\n\n        self.symmetrize_filter()\n\n\n\n    def track(self, image, info: dict = None) -> dict:\n        self.debug_info = {}\n\n        self.frame_num += 1\n        self.debug_info['frame_num'] = self.frame_num\n\n        # Convert image\n        im = numpy_to_torch(image)\n\n        # ------- LOCALIZATION ------- #\n\n        # Get sample\n        sample_pos = self.pos.round()\n        sample_scales = self.target_scale * self.params.scale_factors\n        test_xf = self.extract_fourier_sample(im, self.pos, sample_scales, self.img_sample_sz)\n\n        # Compute scores\n        sf = self.apply_filter(test_xf)\n        translation_vec, scale_ind, s = self.localize_target(sf)\n        scale_change_factor = 
self.params.scale_factors[scale_ind]\n\n        # Update position and scale\n        self.update_state(sample_pos + translation_vec, self.target_scale * scale_change_factor)\n\n        score_map = s[scale_ind, ...]\n        max_score = torch.max(score_map).item()\n        self.debug_info['max_score'] = max_score\n\n        if self.visdom is not None:\n            self.visdom.register(score_map, 'heatmap', 2, 'Score Map')\n            self.visdom.register(self.debug_info, 'info_dict', 1, 'Status')\n        elif self.params.debug >= 2:\n            show_tensor(score_map, 5, title='Max score = {:.2f}'.format(max_score))\n\n        # if self.params.debug >= 3:\n        #     for i, hf in enumerate(self.filter):\n        #         show_tensor(fourier.sample_fs(hf).abs().mean(1), 6+i)\n\n\n        # ------- UPDATE ------- #\n\n        # Get train sample\n        train_xf = TensorList([xf[scale_ind:scale_ind+1, ...] for xf in test_xf])\n\n        # Shift the sample\n        shift_samp = 2*math.pi * (self.pos - sample_pos) / (sample_scales[scale_ind] * self.img_support_sz)\n        train_xf = fourier.shift_fs(train_xf, shift=shift_samp)\n\n        # Update memory\n        self.update_memory(train_xf)\n\n        # Train filter\n        if self.frame_num % self.params.train_skipping == 1:\n            self.filter_optimizer.run(self.params.CG_iter, train_xf)\n            self.symmetrize_filter()\n\n        # Return new state\n        new_state = torch.cat((self.pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]]))\n\n        out = {'target_bbox': new_state.tolist()}\n        return out\n\n\n    def apply_filter(self, sample_xf: TensorList) -> torch.Tensor:\n        return complex.mult(self.filter, sample_xf).sum(1, keepdim=True)\n\n    def localize_target(self, sf: TensorList):\n        if self.params.score_fusion_strategy == 'sum':\n            scores = fourier.sample_fs(fourier.sum_fs(sf), self.output_sz)\n        elif self.params.score_fusion_strategy == 'weightedsum':\n            weight = self.fparams.attribute('translation_weight')\n            scores = fourier.sample_fs(fourier.sum_fs(weight * sf), self.output_sz)\n        elif self.params.score_fusion_strategy == 'transcale':\n            alpha = self.fparams.attribute('scale_weight')\n            beta = self.fparams.attribute('translation_weight')\n            sample_sz = torch.round(self.output_sz.view(1,-1) * self.params.scale_factors.view(-1,1))\n            scores = 0\n            for sfe, a, b in zip(sf, alpha, beta):\n                sfe = fourier.shift_fs(sfe, math.pi*torch.ones(2))\n                scores_scales = []\n                for sind, sz in enumerate(sample_sz):\n                    pd = (self.output_sz-sz)/2\n                    scores_scales.append(F.pad(fourier.sample_fs(sfe[sind:sind+1,...], sz),\n                                        (math.floor(pd[1].item()), math.ceil(pd[1].item()),\n                                         math.floor(pd[0].item()), math.ceil(pd[0].item()))))\n                scores_cat = torch.cat(scores_scales)\n                scores = scores + (b - a) * scores_cat.mean(dim=0, keepdim=True) + a * scores_cat\n        else:\n            raise ValueError('Unknown score fusion strategy.')\n\n        # Get maximum\n        max_score, max_disp = dcf.max2d(scores)\n        _, scale_ind = torch.max(max_score, dim=0)\n        max_disp = max_disp.float().cpu()\n\n        # Convert to displacements in the base scale\n        if self.params.score_fusion_strategy in ['sum', 'weightedsum']:\n       
     disp = (max_disp + self.output_sz / 2) % self.output_sz - self.output_sz / 2\n        elif self.params.score_fusion_strategy == 'transcale':\n            disp = max_disp - self.output_sz / 2\n\n        # Compute translation vector and scale change factor\n        translation_vec = disp[scale_ind, ...].view(-1) * (self.img_support_sz / self.output_sz) * self.target_scale\n        if self.params.score_fusion_strategy in ['sum', 'weightedsum']:\n            translation_vec *= self.params.scale_factors[scale_ind]\n\n        return translation_vec, scale_ind, scores\n\n\n    def extract_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor):\n        return self.params.features.extract(im, pos, scales, sz)[0]\n\n    def extract_fourier_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor) -> TensorList:\n        x = self.extract_sample(im, pos, scales, sz)\n        return self.preprocess_sample(self.project_sample(x))\n\n    def preprocess_sample(self, x: TensorList) -> TensorList:\n        x *= self.window\n        sample_xf = fourier.cfft2(x)\n        return TensorList([dcf.interpolate_dft(xf, bf) for xf, bf in zip(sample_xf, self.interp_fs)])\n\n    def project_sample(self, x: TensorList):\n        @tensor_operation\n        def _project_sample(x: torch.Tensor, P: torch.Tensor):\n            if P is None:\n                return x\n            return torch.matmul(x.permute(2, 3, 0, 1), P).permute(2, 3, 0, 1)\n\n        return _project_sample(x, self.projection_matrix)\n\n    def generate_init_samples(self, im: torch.Tensor) -> TensorList:\n        # Do data augmentation\n        transforms = [augmentation.Identity()]\n        if 'shift' in self.params.augmentation:\n            transforms.extend([augmentation.Translation(shift) for shift in self.params.augmentation['shift']])\n        if 'fliplr' in self.params.augmentation and self.params.augmentation['fliplr']:\n            transforms.append(augmentation.FlipHorizontal())\n        if 'rotate' in self.params.augmentation:\n            transforms.extend([augmentation.Rotate(angle) for angle in self.params.augmentation['rotate']])\n        if 'blur' in self.params.augmentation:\n            transforms.extend([augmentation.Blur(sigma) for sigma in self.params.augmentation['blur']])\n\n        init_samples = self.params.features.extract_transformed(im, self.pos, self.target_scale, self.img_sample_sz, transforms)\n\n        # Remove augmented samples for features that should not use augmentation\n        for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):\n            if not use_aug:\n                init_samples[i] = init_samples[i][0:1, ...]\n\n        if 'dropout' in self.params.augmentation:\n            num, prob = self.params.augmentation['dropout']\n            for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):\n                if use_aug:\n                    init_samples[i] = torch.cat([init_samples[i], F.dropout2d(init_samples[i][0:1,...].expand(num,-1,-1,-1), p=prob, training=True)])\n\n        return init_samples\n\n\n    def update_memory(self, sample_xf: TensorList):\n        # Update weights and get index to replace\n        replace_ind = self.update_sample_weights()\n        for train_samp, xf, ind in zip(self.training_samples, sample_xf, replace_ind):\n            train_samp[:,:,ind:ind+1,:,:] = xf.permute(2, 3, 0, 1, 4)\n\n\n    def update_sample_weights(self):\n        replace_ind = []\n        for sw, prev_ind, num_samp, fparams in 
zip(self.sample_weights, self.previous_replace_ind, self.num_stored_samples, self.fparams):\n            if num_samp == 0 or fparams.learning_rate == 1:\n                sw[:] = 0\n                sw[0] = 1\n                r_ind = 0\n            else:\n                # Get index to replace\n                _, r_ind = torch.min(sw, 0)\n                r_ind = r_ind.item()\n\n                # Update weights\n                if prev_ind is None:\n                    sw /= 1 - fparams.learning_rate\n                    sw[r_ind] = fparams.learning_rate\n                else:\n                    sw[r_ind] = sw[prev_ind] / (1 - fparams.learning_rate)\n\n            sw /= sw.sum()\n            replace_ind.append(r_ind)\n\n        self.previous_replace_ind = replace_ind.copy()\n        self.num_stored_samples += 1\n        return replace_ind\n\n    def update_state(self, new_pos, new_scale):\n        # Update scale\n        self.target_scale = new_scale.clamp(self.min_scale_factor, self.max_scale_factor)\n        self.target_sz = self.base_target_sz * self.target_scale\n\n        # Update pos\n        inside_ratio = 0.2\n        inside_offset = (inside_ratio - 0.5) * self.target_sz\n        self.pos = torch.max(torch.min(new_pos, self.image_sz - inside_offset), inside_offset)\n\n    def symmetrize_filter(self):\n        for hf in self.filter:\n            hf[:,:,:,0,:] /= 2\n            hf[:,:,:,0,:] += complex.conj(hf[:,:,:,0,:].flip((2,)))"
  },
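eco.py repeatedly re-centres samples with fourier.shift_fs, which relies on the DFT shift theorem: multiplying the spectrum by a phase ramp exp(-i*w*d) translates the signal by d samples, so sub-pixel target motion can be compensated without resampling the image. A standalone illustration using torch.fft directly (a minimal sketch of the identity only; the ECO code applies it to its centered, complex-valued DFT samples):

import math
import torch

x = torch.zeros(16)
x[4] = 1.0                       # impulse at index 4

d = 3.0                          # desired shift in samples
freqs = torch.fft.fftfreq(16)    # frequencies in cycles per sample
phase = torch.exp(-2j * math.pi * freqs * d)
x_shifted = torch.fft.ifft(torch.fft.fft(x) * phase).real

print(x_shifted.argmax().item())  # -> 7, the impulse moved by d samples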
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/tracker/eco/optim.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom pytracking import complex, optimization, fourier, TensorList\nfrom pytracking.utils.plotting import plot_graph\nimport math\n\n\nclass FactorizedConvProblem(optimization.L2Problem):\n    def __init__(self, training_samples: TensorList, yf:TensorList, reg_filter: torch.Tensor, init_proj_mat: TensorList, params, sample_weights: torch.Tensor = None):\n        self.training_samples = training_samples\n        self.yf = complex.complex(yf).permute(2, 3, 0, 1, 4)\n        self.reg_filter = reg_filter\n        self.sample_weights_sqrt = None if sample_weights is None else sample_weights.sqrt()\n        self.params = params\n\n        # Sample energy for preconditioner\n        compressed_samples = complex.mtimes(self.training_samples, init_proj_mat)\n        self.sample_energy = complex.abs_sqr(compressed_samples).mean(dim=2, keepdim=True).permute(2, 3, 0, 1)\n        self.reg_energy = self.reg_filter.view(-1) @ self.reg_filter.view(-1)\n\n        # Projection energy for preconditioner\n        self.proj_energy = 2 * fourier.inner_prod_fs(yf, yf) / self.training_samples.size(3)\n\n        # Filter part of preconditioner\n        self.diag_M = (1 - self.params.precond_reg_param) * (self.params.precond_data_param * self.sample_energy +\n                            (1 - self.params.precond_data_param) * self.sample_energy.mean(1, keepdim=True)) + \\\n                      self.params.precond_reg_param * self.reg_energy\n        self.diag_M.unsqueeze_(-1)\n\n        # Projection matrix part of preconditioner\n        self.diag_M.extend(self.params.precond_proj_param * (self.proj_energy + self.params.projection_reg))\n\n\n    def __call__(self, x: TensorList):\n        \"\"\"\n        Compute residuals\n        :param x: [filters, projection_matrices]\n        :return: [data_terms, filter_regularizations, proj_mat_regularizations]\n        \"\"\"\n        hf = x[:len(x)//2]\n        P = x[len(x)//2:]\n\n        compressed_samples = complex.mtimes(self.training_samples, P)\n        residuals = complex.mtimes(compressed_samples, hf.permute(2, 3, 1, 0, 4))  # (h, w, num_samp, num_filt, 2)\n        residuals = residuals - self.yf\n\n        if self.sample_weights_sqrt is not None:\n            residuals = complex.mult(self.sample_weights_sqrt.view(1, 1, -1, 1), residuals)\n\n\n        # Add spatial regularization\n        for hfe, reg_filter in zip(hf, self.reg_filter):\n            reg_pad1 = min(reg_filter.shape[-2] - 1, hfe.shape[-3] - 1)\n            reg_pad2 = min(reg_filter.shape[-1] - 1, hfe.shape[-2] - 1)\n\n            # Add part needed for convolution\n            if reg_pad2 > 0:\n                hfe_left_padd = complex.conj(hfe[...,1:reg_pad2+1,:].clone().detach().flip((2,3)))\n                hfe_conv = torch.cat([hfe_left_padd, hfe], -2)\n            else:\n                hfe_conv = hfe.clone()\n\n            # Shift data to batch dimension\n            hfe_conv = hfe_conv.permute(0,1,4,2,3).reshape(-1, 1, hfe_conv.shape[-3], hfe_conv.shape[-2])\n\n            # Do first convolution\n            hfe_conv = F.conv2d(hfe_conv, reg_filter, padding=(reg_pad1, reg_pad2))\n\n            residuals.append(hfe_conv)\n\n        # Add regularization for projection matrix\n        residuals.extend(math.sqrt(self.params.projection_reg) * P)\n\n        return residuals\n\n\n    def ip_input(self, a: TensorList, b: TensorList):\n        num = len(a) // 2       # Number of filters\n        a_filter = a[:num]\n        b_filter = b[:num]\n   
     a_P = a[num:]\n        b_P = b[num:]\n\n        # Filter inner product\n        ip_out = fourier.inner_prod_fs(a_filter, b_filter)\n\n        # Add projection matrix part\n        ip_out += a_P.reshape(-1) @ b_P.reshape(-1)\n\n        # Have independent inner products for each filter\n        return ip_out.concat(ip_out.clone())\n\n\n    def ip_output(self, a: TensorList, b: TensorList):\n        num = len(a) // 3       # Number of filters\n        a_data = a[:num].permute(2,3,0,1,4)\n        b_data = b[:num].permute(2,3,0,1,4)\n        a_filt_reg = a[num:2*num]\n        b_filt_reg = b[num:2*num]\n        a_P_reg = a[2*num:]\n        b_P_reg = b[2*num:]\n\n        ip_data = sum(fourier.inner_prod_fs(a_data, b_data))\n        ip_filt_reg = ip_data.new_zeros(1)\n\n        for ar, br, res_data, reg_filter in zip(a_filt_reg, b_filt_reg, a_data, self.reg_filter):\n            reg_pad2 = min(reg_filter.shape[-1] - 1, res_data.shape[-2] - 1)\n            arp = ar.reshape(1, -1, 2, ar.shape[2], ar.shape[3]).permute(0, 1, 3, 4, 2)\n            brp = br.reshape(1, -1, 2, br.shape[2], br.shape[3]).permute(0, 1, 3, 4, 2)\n            ip_filt_reg += fourier.inner_prod_fs(arp[:,:,:,2*reg_pad2:,:], brp[:,:,:,2*reg_pad2:,:])\n\n        ip_P_reg = sum(a_P_reg.view(-1) @ b_P_reg.view(-1))\n\n        return ip_data + ip_filt_reg + ip_P_reg\n\n\n    def M1(self, x: TensorList):\n        return x / self.diag_M\n\n\nclass FilterOptim(optimization.ConjugateGradientBase):\n    def __init__(self, params, reg_energy):\n        super(FilterOptim, self).__init__(params.fletcher_reeves, params.standard_alpha, params.direction_forget_factor, (params.debug >= 3))\n\n        # Parameters\n        self.params = params\n\n        self.reg_energy = reg_energy\n        self.sample_energy = None\n\n        self.residuals = torch.zeros(0)\n\n\n    def register(self, filter, training_samples, yf, sample_weights, reg_filter):\n        self.filter = filter\n        self.training_samples = training_samples    # (h, w, num_samples, num_channels, 2)\n        self.yf = yf\n        self.sample_weights = sample_weights\n        self.reg_filter = reg_filter\n\n\n    def run(self, num_iter, new_xf: TensorList = None):\n        if num_iter == 0:\n            return\n\n        if new_xf is not None:\n            new_sample_energy = complex.abs_sqr(new_xf)\n            if self.sample_energy is None:\n                self.sample_energy = new_sample_energy\n            else:\n                self.sample_energy = (1 - self.params.precond_learning_rate) * self.sample_energy + self.params.precond_learning_rate * new_sample_energy\n\n        # Compute right hand side\n        self.b = complex.mtimes(self.sample_weights.view(1,1,1,-1), self.training_samples).permute(2,3,0,1,4)\n        self.b = complex.mult_conj(self.yf, self.b)\n\n        self.diag_M = (1 - self.params.precond_reg_param) * (self.params.precond_data_param * self.sample_energy +\n                            (1 - self.params.precond_data_param) * self.sample_energy.mean(1, keepdim=True)) + self.params.precond_reg_param * self.reg_energy\n\n        _, res = self.run_CG(num_iter, self.filter)\n\n        if self.debug:\n            self.residuals = torch.cat((self.residuals, res))\n            plot_graph(self.residuals, 9)\n\n\n\n    def A(self, hf: TensorList):\n        # Classify\n        sh = complex.mtimes(self.training_samples, hf.permute(2,3,1,0,4)) # (h, w, num_samp, num_filt, 2)\n        sh = complex.mult(self.sample_weights.view(1,1,-1,1), sh)\n\n        # Multiply with 
transpose\n        hf_out = complex.mtimes(sh.permute(0,1,3,2,4), self.training_samples, conj_b=True).permute(2,3,0,1,4)\n\n        # Add regularization\n        for hfe, hfe_out, reg_filter in zip(hf, hf_out, self.reg_filter):\n            reg_pad1 = min(reg_filter.shape[-2] - 1, hfe.shape[-3] - 1)\n            reg_pad2 = min(reg_filter.shape[-1] - 1, 2*hfe.shape[-2]- 2)\n\n            # Add part needed for convolution\n            if reg_pad2 > 0:\n                hfe_conv = torch.cat([complex.conj(hfe[...,1:reg_pad2+1,:].flip((2,3))), hfe], -2)\n            else:\n                hfe_conv = hfe.clone()\n\n            # Shift data to batch dimension\n            hfe_conv = hfe_conv.permute(0,1,4,2,3).reshape(-1, 1, hfe_conv.shape[-3], hfe_conv.shape[-2])\n\n            # Do first convolution\n            hfe_conv = F.conv2d(hfe_conv, reg_filter, padding=(reg_pad1, reg_pad2))\n\n            # Do second convolution\n            remove_size = min(reg_pad2, hfe.shape[-2]-1)\n            hfe_conv = F.conv2d(hfe_conv[...,remove_size:], reg_filter)\n\n            # Reshape back and add\n            hfe_out += hfe_conv.reshape(hfe.shape[0], hfe.shape[1], 2, hfe.shape[2], hfe.shape[3]).permute(0,1,3,4,2)\n\n        return hf_out\n\n\n    def ip(self, a: torch.Tensor, b: torch.Tensor):\n        return fourier.inner_prod_fs(a, b)\n        \n\n    def M1(self, hf):\n        return complex.div(hf, self.diag_M)\n"
  },
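FilterOptim inherits its solver from optimization.ConjugateGradientBase and supplies the operator A, the Fourier inner product ip, and the diagonal preconditioner M1 (dividing by diag_M). For reference, here is a textbook diagonally preconditioned conjugate gradient in plain torch; it is a minimal sketch of the scheme, not the library's implementation, and the beta update corresponds to the fletcher_reeves choice in the base class.

import torch

def pcg(A, b, diag_M, num_iter=25, tol=1e-8):
    x = torch.zeros_like(b)
    r = b - A @ x
    z = r / diag_M                 # preconditioner solve, as in M1()
    p = z.clone()
    rz = r @ z
    for _ in range(num_iter):
        Ap = A @ p
        alpha = rz / (p @ Ap)
        x = x + alpha * p
        r = r - alpha * Ap
        if r.norm() < tol:
            break
        z = r / diag_M
        rz_new = r @ z
        p = z + (rz_new / rz) * p  # standard PCG beta
        rz = rz_new
    return x

A = torch.tensor([[4., 1.], [1., 3.]])
b = torch.tensor([1., 2.])
x = pcg(A, b, A.diagonal())        # solves A x = b with a Jacobi preconditioner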
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/util_scripts/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/util_scripts/download_results.py",
    "content": "import os\nimport sys\nimport gdown\nimport re\nimport shutil\nimport argparse\nimport tempfile\n\nenv_path = os.path.join(os.path.dirname(__file__), '../..')\nif env_path not in sys.path:\n    sys.path.append(env_path)\n\nfrom pytracking.evaluation.environment import env_settings\n\nresults_link_dict = {\n    \"dimp\": {\n        \"prdimp50_003.zip\": \"1p13j3iwcOCubBi3ms0hLwqnP6-x0J8Mc\",\n        \"prdimp50_002.zip\": \"1PPKgrAepbuyM2kjfzYAozQKTL6AjcQOz\",\n        \"prdimp50_001.zip\": \"17NFBObEDeK6mW4Mk2vN5Ekk1SGbFvxRS\",\n        \"prdimp50_000.zip\": \"1r3Efq7AumML2yGQ_KV4zmf4ATKVE1bo6\",\n        \"prdimp18_004.zip\": \"1DF4ZJQAa4CwvN_OiT4te33AV0kpsO7JM\",\n        \"prdimp18_003.zip\": \"1RgwJAN4TxnzgVgsfvrHIg1OUXD1EBZkO\",\n        \"prdimp18_002.zip\": \"17lMllYhygCqgE81DoHX4BZar3xc3auzM\",\n        \"prdimp18_001.zip\": \"1Yg7DmGYOnn2k0MYtSjjKlGyzO1Uimj4G\",\n        \"prdimp18_000.zip\": \"1DuZJSBJ-23WJBQTOWSAaoPYSbGAJJN2Z\",\n        \"prdimp50_004.zip\": \"1f9bx9-dtx3B5_IvIJhjjJyp-cnXciqLO\",\n        \"dimp50_004.zip\": \"1Lj3p8mYCoIqxzdQXZkWFTw-MA8c6eeLa\",\n        \"dimp50_000.zip\": \"1LCgf5sg453Z4bY37A_W5mbXeG68U1fET\",\n        \"dimp18_000.zip\": \"17M7dJZ1oKrIY4-O5lL_mlQPEubUn034g\",\n        \"dimp18_001.zip\": \"1AsiliVgISyDTouYOQYVOXA0srj3YskhJ\",\n        \"dimp50_got_001.zip\": \"1EE5FcPXqMBkv_0ghfzytCMmbKxWxy04p\",\n        \"dimp18_002.zip\": \"1I0GrBaPnySOyPWSvItHhXH8182tFCi_Y\",\n        \"dimp50_got_002.zip\": \"1ALXzVkn58GZ1E0I22vrbXkEXwy5u0xOc\",\n        \"dimp18_got_000.zip\": \"1BxowlgGEonnuaVXwiDwiYr7VV7BRWLvr\",\n        \"dimp50_001.zip\": \"1XfPvwAcymW88J1rq7RlhyKmqsawJDK-K\",\n        \"dimp18_got_002.zip\": \"1awqXQnFRr5NwjLfI-Ngtt3zT7XmQIwzs\",\n        \"dimp18_got_001.zip\": \"1rr2J6NuuYJ5E4wDUw-PrxaNKjIsfgAyk\",\n        \"dimp50_got_000.zip\": \"1ruP8XJOu0woq-bvKdHJ9_Y9RceHDrDjm\",\n        \"dimp18_004.zip\": \"1EztF6bpROFwZ1PSJWgMB7bQ4G_Z08YIg\",\n        \"dimp18_003.zip\": \"1iuiFLv04WE7GfBjm8UkZXFq4gheG2Ru8\",\n        \"dimp50_003.zip\": \"1rLsgeQXyKpD6ryl9BjlIVdO3vd27ekwy\",\n        \"dimp50_002.zip\": \"1wj2jUwlpHgsP1hAcuxXAVriUPuEspsu4\",\n    },\n    \"atom\": {\n        \"default_004.zip\": \"1BapnQh_8iRM44DXj862eOZV4q8zQLdmT\",\n        \"default_003.zip\": \"1YpfOBLBEUQQiX0fWMPA5pnW3dm0NG3E5\",\n        \"default_got_000.zip\": \"1uJnC0PPQhavwRbAL7VQ2Zow8YdLVzeCb\",\n        \"default_got_001.zip\": \"1YzJm0H31veDW-lMxwy8MYNpMULgsYHKf\",\n        \"default_000.zip\": \"1x6fKGZk3V839mX99Gl_pw7JUaiMaTxc5\",\n        \"default_002.zip\": \"1QIlQFv3p6MBTwsYdIMYmzUDBDQGxGsUC\",\n        \"default_001.zip\": \"1-K2--GNCURDKEgUuiEF18K4DcCLvDEVt\",\n        \"default_got_002.zip\": \"1qGtArxdAy0uWSd-HqFT5zmXpR6TCm4Vc\",\n    },\n}\n\n\ndef _download_file(file_id, path):\n    link = 'https://drive.google.com/uc?id=' + file_id\n    gdown.download(link, path, quiet=True)\n\n\ndef download_results(download_path, trackers='all'):\n    \"\"\"\n    Script to automatically download tracker results for PyTracking.\n\n    args:\n        download_path - Directory where the zipped results are downloaded\n        trackers - Tracker results which are to be downloaded. If set to 'all', all available results are downloaded.\n                   If set to a name of a tracker (e.g. atom), all results for that tracker are downloaded.\n                   Otherwise, it can be set to a dict, where the keys are the names of the trackers for which results are\n                   downloaded. 
The value can be set to either 'all', in which case all available results for the\n                    tracker are downloaded. Otherwise the value should be a list of parameter file names.\n    \"\"\"\n    print(\"Using download path '{}'\".format(download_path))\n\n    os.makedirs(download_path, exist_ok=True)\n\n    if isinstance(trackers, str):\n        if trackers == 'all':\n            trackers = {k: 'all' for k in results_link_dict.keys()}\n        elif trackers in results_link_dict:\n            trackers = {trackers: 'all'}\n        else:\n            raise Exception(\"trackers must be set to 'all', a tracker name, or be a dict\")\n    elif isinstance(trackers, dict):\n        pass\n    else:\n        raise Exception(\"trackers must be set to 'all', a tracker name, or be a dict\")\n\n    for trk, runfiles in trackers.items():\n        trk_path = os.path.join(download_path, trk)\n        if not os.path.exists(trk_path):\n            os.makedirs(trk_path)\n\n        if runfiles == 'all':\n            for params, fileid in results_link_dict[trk].items():\n                print('Downloading: {}/{}'.format(trk, params))\n                _download_file(fileid, os.path.join(trk_path, params))\n        elif isinstance(runfiles, (list, tuple)):\n            for p in runfiles:\n                for params, fileid in results_link_dict[trk].items():\n                    if re.match(r'{}(|_(\\d\\d\\d)).zip'.format(p), params) is not None:\n                        print('Downloading: {}/{}'.format(trk, params))\n                        _download_file(fileid, os.path.join(trk_path, params))\n        else:\n            raise Exception(\"trackers values must either be 'all' or a list of param names\")\n\n\ndef unpack_tracking_results(download_path, output_path=None):\n    \"\"\"\n    Unpacks zipped benchmark results. The directory 'download_path' should have the following structure:\n    - root\n        - tracker1\n            - param1.zip\n            - param2.zip\n            .\n            .\n        - tracker2\n            - param1.zip\n            - param2.zip\n        .\n        .\n\n    args:\n        download_path - Path to the directory where the zipped results are stored\n        output_path - Path to the directory where the results will be unpacked. Set to env_settings().results_path\n                      by default\n    \"\"\"\n\n    if output_path is None:\n        output_path = env_settings().results_path\n\n    if not os.path.exists(output_path):\n        os.makedirs(output_path)\n\n    trackers = os.listdir(download_path)\n\n    for t in trackers:\n        runfiles = os.listdir(os.path.join(download_path, t))\n\n        for r in runfiles:\n            save_path = os.path.join(output_path, t)\n            if not os.path.exists(save_path):\n                os.makedirs(save_path)\n            shutil.unpack_archive(os.path.join(download_path, t, r), os.path.join(save_path, r[:-4]), 'zip')\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Download and unpack zipped results')\n    parser.add_argument('--tracker', type=str, default='all',\n                        help=\"Name of tracker results to download, or 'all'.\")\n    parser.add_argument('--output_path', type=str, default=None,\n                        help='Path to the directory where the results will be unpacked.')\n    parser.add_argument('--temp_download_path', type=str, default=None,\n                        help='Temporary path used for downloading the zip files.')\n    # argparse's type=bool treats any non-empty string as True, so parse the flag explicitly.\n    parser.add_argument('--download', type=lambda v: v.lower() in ('1', 'true', 'yes'), default=True,\n                        help='Whether to download results before unpacking; set to false to only unpack.')\n    args = parser.parse_args()\n\n    download_path = args.temp_download_path\n    if download_path is None:\n        download_path = '{}/pytracking_results/'.format(tempfile.gettempdir())\n\n    if args.download:\n        download_results(download_path, args.tracker)\n\n    unpack_tracking_results(download_path, args.output_path)\n\n\nif __name__ == '__main__':\n    main()\n
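\n\n# Usage sketch (illustrative; assumes the Google Drive file IDs above are still live):\n#   download_results('/tmp/pytracking_results', {'atom': ['default']})  # fetch only the ATOM 'default' runs\n#   unpack_tracking_results('/tmp/pytracking_results')                  # unpack into env_settings().results_path\n"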
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/util_scripts/pack_got10k_results.py",
    "content": "import numpy as np\nimport os\nimport shutil\nfrom pytracking.evaluation.environment import env_settings\n\n\ndef pack_got10k_results(tracker_name, param_name, output_name):\n    \"\"\" Packs got10k results into a zip folder which can be directly uploaded to the evaluation server. The packed\n    file is saved in the folder env_settings().got_packed_results_path\n\n    args:\n        tracker_name - name of the tracker\n        param_name - name of the parameter file\n        output_name - name of the packed zip file\n    \"\"\"\n    output_path = os.path.join(env_settings().got_packed_results_path, output_name)\n\n    if not os.path.exists(output_path):\n        os.makedirs(output_path)\n\n    results_path = env_settings().results_path\n    for i in range(1,181):\n        seq_name = 'GOT-10k_Test_{:06d}'.format(i)\n\n        seq_output_path = '{}/{}'.format(output_path, seq_name)\n        if not os.path.exists(seq_output_path):\n            os.makedirs(seq_output_path)\n\n        for run_id in range(3):\n            res = np.loadtxt('{}/{}/{}_{:03d}/{}.txt'.format(results_path, tracker_name, param_name, run_id, seq_name), dtype=np.float64)\n            times = np.loadtxt(\n                '{}/{}/{}_{:03d}/{}_time.txt'.format(results_path, tracker_name, param_name, run_id, seq_name),\n                dtype=np.float64)\n\n            np.savetxt('{}/{}_{:03d}.txt'.format(seq_output_path, seq_name, run_id+1), res, delimiter=',', fmt='%f')\n            np.savetxt('{}/{}_time.txt'.format(seq_output_path, seq_name), times, fmt='%f')\n\n    # Generate ZIP file\n    shutil.make_archive(output_path, 'zip', output_path)\n\n    # Remove raw text files\n    shutil.rmtree(output_path)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/util_scripts/pack_trackingnet_results.py",
    "content": "import numpy as np\nimport os\nimport shutil\nfrom pytracking.evaluation.environment import env_settings\nfrom pytracking.evaluation.datasets import get_dataset\n\n\ndef pack_trackingnet_results(tracker_name, param_name, run_id=None, output_name=None):\n    \"\"\" Packs trackingnet results into a zip folder which can be directly uploaded to the evaluation server. The packed\n    file is saved in the folder env_settings().tn_packed_results_path\n\n    args:\n        tracker_name - name of the tracker\n        param_name - name of the parameter file\n        run_id - run id for the tracker\n        output_name - name of the packed zip file\n    \"\"\"\n\n    if output_name is None:\n        if run_id is None:\n            output_name = '{}_{}'.format(tracker_name, param_name)\n        else:\n            output_name = '{}_{}_{:03d}'.format(tracker_name, param_name, run_id)\n\n    output_path = os.path.join(env_settings().tn_packed_results_path, output_name)\n\n    if not os.path.exists(output_path):\n        os.makedirs(output_path)\n\n    results_path = env_settings().results_path\n\n    tn_dataset = get_dataset('trackingnet')\n\n    for seq in tn_dataset:\n        seq_name = seq.name\n\n        if run_id is None:\n            seq_results_path = '{}/{}/{}/{}.txt'.format(results_path, tracker_name, param_name, seq_name)\n        else:\n            seq_results_path = '{}/{}/{}_{:03d}/{}.txt'.format(results_path, tracker_name, param_name, run_id, seq_name)\n\n        results = np.loadtxt(seq_results_path, dtype=np.float64)\n\n        np.savetxt('{}/{}.txt'.format(output_path, seq_name), results, delimiter=',', fmt='%.2f')\n\n    # Generate ZIP file\n    shutil.make_archive(output_path, 'zip', output_path)\n\n    # Remove raw text files\n    shutil.rmtree(output_path)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/utils/__init__.py",
    "content": "from .params import TrackerParams, FeatureParams, Choice"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/utils/convert_vot_anno_to_rect.py",
    "content": "import numpy as np\n\n\ndef convert_vot_anno_to_rect(vot_anno, type):\n    if len(vot_anno) == 4:\n        return vot_anno\n\n    if type == 'union':\n        x1 = min(vot_anno[0::2])\n        x2 = max(vot_anno[0::2])\n        y1 = min(vot_anno[1::2])\n        y2 = max(vot_anno[1::2])\n        return [x1, y1, x2 - x1, y2 - y1]\n    elif type == 'preserve_area':\n        if len(vot_anno) != 8:\n            raise ValueError\n\n        vot_anno = np.array(vot_anno)\n        cx = np.mean(vot_anno[0::2])\n        cy = np.mean(vot_anno[1::2])\n\n        x1 = min(vot_anno[0::2])\n        x2 = max(vot_anno[0::2])\n        y1 = min(vot_anno[1::2])\n        y2 = max(vot_anno[1::2])\n\n        A1 = np.linalg.norm(vot_anno[0:2] - vot_anno[2: 4]) * np.linalg.norm(vot_anno[2: 4] - vot_anno[4:6])\n        A2 = (x2 - x1) * (y2 - y1)\n        s = np.sqrt(A1 / A2)\n        w = s * (x2 - x1) + 1\n        h = s * (y2 - y1) + 1\n\n        x = cx - 0.5*w\n        y = cy - 0.5*h\n        return [x, y, w, h]\n    else:\n        raise ValueError\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/utils/load_text.py",
    "content": "import numpy as np\nimport pandas as pd\n\n\ndef load_text_numpy(path, delimiter, dtype):\n    if isinstance(delimiter, (tuple, list)):\n        for d in delimiter:\n            try:\n                ground_truth_rect = np.loadtxt(path, delimiter=d, dtype=dtype)\n                return ground_truth_rect\n            except:\n                pass\n\n        raise Exception('Could not read file {}'.format(path))\n    else:\n        ground_truth_rect = np.loadtxt(path, delimiter=delimiter, dtype=dtype)\n        return ground_truth_rect\n\n\ndef load_text_pandas(path, delimiter, dtype):\n    if isinstance(delimiter, (tuple, list)):\n        for d in delimiter:\n            try:\n                ground_truth_rect = pd.read_csv(path, delimiter=d, header=None, dtype=dtype, na_filter=False,\n                                                low_memory=False).values\n                return ground_truth_rect\n            except Exception as e:\n                pass\n\n        raise Exception('Could not read file {}'.format(path))\n    else:\n        ground_truth_rect = pd.read_csv(path, delimiter=delimiter, header=None, dtype=dtype, na_filter=False,\n                                        low_memory=False).values\n        return ground_truth_rect\n\n\ndef load_text(path, delimiter=' ', dtype=np.float32, backend='numpy'):\n    if backend == 'numpy':\n        return load_text_numpy(path, delimiter, dtype)\n    elif backend == 'pandas':\n        return load_text_pandas(path, delimiter, dtype)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/utils/loading.py",
    "content": "import os\nimport ltr.admin.loading as ltr_loading\nfrom pytracking.evaluation.environment import env_settings\n\n\ndef load_network(net_path, **kwargs):\n    \"\"\"Load network for tracking.\n    args:\n        net_path - Path to network. If it is not an absolute path, it is relative to the network_path in the local.py.\n                   See ltr.admin.loading.load_network for further details.\n        **kwargs - Additional key-word arguments that are sent to ltr.admin.loading.load_network.\n    \"\"\"\n    kwargs['backbone_pretrained'] = False\n    if os.path.isabs(net_path):\n        path_full = net_path\n        net, _ = ltr_loading.load_network(path_full, **kwargs)\n    elif isinstance(env_settings().network_path, (list, tuple)):\n        net = None\n        for p in env_settings().network_path:\n            path_full = os.path.join(p, net_path)\n            try:\n                net, _ = ltr_loading.load_network(path_full, **kwargs)\n                break\n            except Exception as e:\n                # print(e)\n                pass\n\n        assert net is not None, 'Failed to load network'\n    else:\n        path_full = os.path.join(env_settings().network_path, net_path)\n        net, _ = ltr_loading.load_network(path_full, **kwargs)\n\n    return net\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/utils/params.py",
    "content": "from pytracking import TensorList\nimport random\n\n\nclass TrackerParams:\n    \"\"\"Class for tracker parameters.\"\"\"\n    def set_default_values(self, default_vals: dict):\n        for name, val in default_vals.items():\n            if not hasattr(self, name):\n                setattr(self, name, val)\n\n    def get(self, name: str, *default):\n        \"\"\"Get a parameter value with the given name. If it does not exists, it return the default value given as a\n        second argument or returns an error if no default value is given.\"\"\"\n        if len(default) > 1:\n            raise ValueError('Can only give one default value.')\n\n        if not default:\n            return getattr(self, name)\n\n        return getattr(self, name, default[0])\n\n    def has(self, name: str):\n        \"\"\"Check if there exist a parameter with the given name.\"\"\"\n        return hasattr(self, name)\n\n\nclass FeatureParams:\n    \"\"\"Class for feature specific parameters\"\"\"\n    def __init__(self, *args, **kwargs):\n        if len(args) > 0:\n            raise ValueError\n\n        for name, val in kwargs.items():\n            if isinstance(val, list):\n                setattr(self, name, TensorList(val))\n            else:\n                setattr(self, name, val)\n\n\ndef Choice(*args):\n    \"\"\"Can be used to sample random parameter values.\"\"\"\n    return random.choice(args)\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/utils/plotting.py",
    "content": "import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport cv2\n\n\ndef draw_figure(fig):\n    fig.canvas.draw()\n    fig.canvas.flush_events()\n    plt.pause(0.001)\n\n\ndef show_tensor(a: torch.Tensor, fig_num = None, title = None, range=(None, None), ax=None):\n    \"\"\"Display a 2D tensor.\n    args:\n        fig_num: Figure number.\n        title: Title of figure.\n    \"\"\"\n    a_np = a.squeeze().cpu().clone().detach().numpy()\n    if a_np.ndim == 3:\n        a_np = np.transpose(a_np, (1, 2, 0))\n\n    if ax is None:\n        fig = plt.figure(fig_num)\n        plt.tight_layout()\n        plt.cla()\n        plt.imshow(a_np, vmin=range[0], vmax=range[1])\n        plt.axis('off')\n        plt.axis('equal')\n        if title is not None:\n            plt.title(title)\n        draw_figure(fig)\n    else:\n        ax.cla()\n        ax.imshow(a_np, vmin=range[0], vmax=range[1])\n        ax.set_axis_off()\n        ax.axis('equal')\n        if title is not None:\n            ax.set_title(title)\n        draw_figure(plt.gcf())\n\n\ndef plot_graph(a: torch.Tensor, fig_num = None, title = None):\n    \"\"\"Plot graph. Data is a 1D tensor.\n    args:\n        fig_num: Figure number.\n        title: Title of figure.\n    \"\"\"\n    a_np = a.squeeze().cpu().clone().detach().numpy()\n    if a_np.ndim > 1:\n        raise ValueError\n    fig = plt.figure(fig_num)\n    # plt.tight_layout()\n    plt.cla()\n    plt.plot(a_np)\n    if title is not None:\n        plt.title(title)\n    draw_figure(fig)\n\n\ndef show_image_with_boxes(im, boxes, iou_pred=None, disp_ids=None):\n    im_np = im.clone().cpu().squeeze().numpy()\n    im_np = np.ascontiguousarray(im_np.transpose(1, 2, 0).astype(np.uint8))\n\n    boxes = boxes.view(-1, 4).cpu().numpy().round().astype(int)\n\n    # Draw proposals\n    for i_ in range(boxes.shape[0]):\n        if disp_ids is None or disp_ids[i_]:\n            bb = boxes[i_, :]\n            disp_color = (i_*38 % 256, (255 - i_*97) % 256, (123 + i_*66) % 256)\n            cv2.rectangle(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]),\n                          disp_color, 1)\n\n            if iou_pred is not None:\n                text_pos = (bb[0], bb[1] - 5)\n                cv2.putText(im_np, 'ID={} IOU = {:3.2f}'.format(i_, iou_pred[i_]), text_pos,\n                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, bottomLeftOrigin=False)\n\n    im_tensor = torch.from_numpy(im_np.transpose(2, 0, 1)).float()\n\n    return im_tensor\n\n\n\ndef _pascal_color_map(N=256, normalized=False):\n    \"\"\"\n    Python implementation of the color map function for the PASCAL VOC data set.\n    Official Matlab version can be found in the PASCAL VOC devkit\n    http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit\n    \"\"\"\n\n    def bitget(byteval, idx):\n        return (byteval & (1 << idx)) != 0\n\n    dtype = 'float32' if normalized else 'uint8'\n    cmap = np.zeros((N, 3), dtype=dtype)\n    for i in range(N):\n        r = g = b = 0\n        c = i\n        for j in range(8):\n            r = r | (bitget(c, 0) << 7 - j)\n            g = g | (bitget(c, 1) << 7 - j)\n            b = b | (bitget(c, 2) << 7 - j)\n            c = c >> 3\n\n        cmap[i] = np.array([r, g, b])\n\n    cmap = cmap / 255 if normalized else cmap\n    return cmap\n\n\ndef overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None):\n    \"\"\" Overlay mask over image.\n    Source: 
https://github.com/albertomontesg/davis-interactive/blob/master/davisinteractive/utils/visualization.py\n    This function allows you to overlay a mask over an image with some\n    transparency.\n    # Arguments\n        im: Numpy Array. Array with the image. The shape must be (H, W, 3) and\n            the pixels must be represented as `np.uint8` data type.\n        ann: Numpy Array. Array with the mask. The shape must be (H, W) and the\n            values must be integers.\n        alpha: Float. Blending weight; the image is weighted by `alpha` and the\n            mask by `1 - alpha`.\n        colors: Numpy Array. Optional custom colormap. It must have shape (N, 3),\n            where N is the maximum number of object ids to represent.\n        contour_thickness: Integer. Thickness of each object index contour drawn\n            over the overlay. Requires the `opencv-python` package.\n    # Returns\n        Numpy Array: Image of the overlay with shape (H, W, 3) and data type\n            `np.uint8`.\n    \"\"\"\n    # np.int was just an alias for the builtin int and has been removed in NumPy >= 1.24.\n    im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=int)\n    if im.shape[:-1] != ann.shape:\n        raise ValueError('First two dimensions of `im` and `ann` must match')\n    if im.shape[-1] != 3:\n        raise ValueError('im must have three channels in the last dimension')\n\n    # `colors or ...` is ambiguous for array inputs, so test for None explicitly.\n    colors = _pascal_color_map() if colors is None else colors\n    colors = np.asarray(colors, dtype=np.uint8)\n\n    mask = colors[ann]\n    fg = im * alpha + (1 - alpha) * mask\n\n    img = im.copy()\n    img[ann > 0] = fg[ann > 0]\n\n    if contour_thickness:  # pragma: no cover\n        import cv2\n        for obj_id in np.unique(ann[ann > 0]):\n            contours = cv2.findContours((ann == obj_id).astype(\n                np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n            cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(),\n                             contour_thickness)\n    return img\n
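\n\n# Overlay sketch (illustrative): blend an integer label mask into an RGB frame.\n#   frame = np.zeros((64, 64, 3), dtype=np.uint8)\n#   labels = np.zeros((64, 64), dtype=np.int64)\n#   labels[8:24, 8:24] = 1\n#   out = overlay_mask(frame, labels, alpha=0.5)  # uint8 image of shape (64, 64, 3)\n"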
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/utils/visdom.py",
    "content": "import visdom\nimport visdom.server\nfrom pytracking.features.preprocessing import numpy_to_torch\nfrom pytracking.utils.plotting import show_image_with_boxes, overlay_mask\nimport cv2\nimport torch\nimport copy\nimport numpy as np\nfrom collections import OrderedDict\n\n\nclass VisBase:\n    def __init__(self, visdom, show_data, title):\n        self.visdom = visdom\n        self.show_data = show_data\n        self.title = title\n        self.raw_data = None\n\n    def update(self, data, **kwargs):\n        self.save_data(data, **kwargs)\n\n        if self.show_data:\n            self.draw_data()\n\n    def save_data(self, data, **kwargs):\n        raise NotImplementedError\n\n    def draw_data(self):\n        raise NotImplementedError\n\n    def toggle_display(self, new_mode=None):\n        if new_mode is not None:\n            self.show_data = new_mode\n        else:\n            self.show_data = not self.show_data\n\n        if self.show_data:\n            self.draw_data()\n        else:\n            self.visdom.close(self.title)\n\n\nclass VisImage(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        data = data.float()\n        self.raw_data = data\n\n    def draw_data(self):\n        self.visdom.image(self.raw_data.clone(), opts={'title': self.title}, win=self.title)\n\n\nclass VisHeatmap(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        data = data.squeeze().flip(0)\n        self.raw_data = data\n\n    def draw_data(self):\n        self.visdom.heatmap(self.raw_data.clone(),  opts={'title': self.title}, win=self.title)\n\n\nclass VisFeaturemap(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n        self.block_list = None\n\n    def block_list_callback_handler(self, data):\n        self.block_list[data['propertyId']]['value'] = data['value']\n        self.visdom.properties(self.block_list, opts={'title': 'Featuremap UI'}, win='featuremap_ui')\n        self.draw_data()\n\n    def save_data(self, data):\n        data = data.view(-1, *data.shape[-2:])\n        data = data.flip(1)\n        if self.block_list is None:\n            self.block_list = []\n            self.draw_feat = []\n            for i in range(data.shape[0]):\n                self.block_list.append({'type': 'checkbox', 'name': 'Channel {:04d}'.format(i), 'value': False})\n\n            self.visdom.properties(self.block_list, opts={'title': 'Featuremap UI'}, win='featuremap_ui')\n            self.visdom.register_event_handler(self.block_list_callback_handler, 'featuremap_ui')\n\n        self.raw_data = data\n\n    def draw_data(self):\n        if self.block_list is not None and self.show_data:\n            for i, d in enumerate(self.block_list):\n                if d['value']:\n                    fig_title = '{} ch: {:04d}'.format(self.title, i)\n                    self.visdom.heatmap(self.raw_data[i, :, :].clone(),\n                                        opts={'title': fig_title}, win=fig_title)\n\n\nclass VisCostVolume(VisBase):\n    def __init__(self, visdom, show_data, title, flip=False):\n        super().__init__(visdom, show_data, title)\n        self.show_slice = False\n        self.slice_pos = None\n        self.flip = flip\n\n    def show_cost_volume(self):\n        data = self.raw_data.clone()\n\n        # data_perm = 
data.permute(2, 0, 3, 1).contiguous()\n        data_perm = data.permute(0, 2, 1, 3).contiguous()\n        if self.flip:\n            data_perm = data_perm.permute(2, 3, 0, 1).contiguous()\n\n        data_perm = data_perm.view(data_perm.shape[0] * data_perm.shape[1], -1)\n        self.visdom.heatmap(data_perm.flip(0), opts={'title': self.title}, win=self.title)\n\n    def set_zoom_pos(self, slice_pos):\n        self.slice_pos = slice_pos\n\n    def toggle_show_slice(self, new_mode=None):\n        if new_mode is not None:\n            self.show_slice = new_mode\n        else:\n            self.show_slice = not self.show_slice\n\n    def show_cost_volume_slice(self):\n        slice_pos = self.slice_pos\n\n        # slice_pos: [row, col]\n        cost_volume_data = self.raw_data.clone()\n\n        if self.flip:\n            cost_volume_slice = cost_volume_data[:, :, slice_pos[0], slice_pos[1]]\n        else:\n            cost_volume_slice = cost_volume_data[slice_pos[0], slice_pos[1], :, :]\n        self.visdom.heatmap(cost_volume_slice.flip(0), opts={'title': self.title}, win=self.title)\n\n    def save_data(self, data):\n        data = data.view(data.shape[-2], data.shape[-1], data.shape[-2], data.shape[-1])\n        self.raw_data = data\n\n    def draw_data(self):\n        if self.show_slice:\n            self.show_cost_volume_slice()\n        else:\n            self.show_cost_volume()\n\n\nclass VisCostVolumeUI(VisBase):\n    def cv_ui_handler(self, data):\n        zoom_toggled = False\n        if data['event_type'] == 'KeyPress':\n            if data['key'] == 'ArrowRight':\n                self.zoom_pos[1] = min(self.zoom_pos[1] + 1, self.feat_shape[1]-1)\n            elif data['key'] == 'ArrowLeft':\n                self.zoom_pos[1] = max(self.zoom_pos[1] - 1, 0)\n            elif data['key'] == 'ArrowUp':\n                self.zoom_pos[0] = max(self.zoom_pos[0] - 1, 0)\n            elif data['key'] == 'ArrowDown':\n                self.zoom_pos[0] = min(self.zoom_pos[0] + 1, self.feat_shape[0]-1)\n            elif data['key'] == 'Enter':\n                self.zoom_mode = not self.zoom_mode\n                zoom_toggled = True\n\n        # Update image\n        self.show_image()\n\n        # Update cost volumes\n        for block_title, block in self.registered_blocks.items():\n            if isinstance(block, VisCostVolume):\n                block.set_zoom_pos(self.zoom_pos)\n                block.toggle_show_slice(self.zoom_mode)\n\n                if (self.zoom_mode or zoom_toggled) and block.show_data:\n                    block.draw_data()\n\n    def __init__(self, visdom, show_data, title, feat_shape, registered_blocks):\n        super().__init__(visdom, show_data, title)\n        self.feat_shape = feat_shape\n        self.zoom_mode = False\n        self.zoom_pos = [int((feat_shape[0] - 1) / 2), int((feat_shape[1] - 1) / 2)]\n        self.registered_blocks = registered_blocks\n\n        self.visdom.register_event_handler(self.cv_ui_handler, title)\n\n    def draw_grid(self, data):\n        stride_r = int(data.shape[1] / self.feat_shape[0])\n        stride_c = int(data.shape[2] / self.feat_shape[1])\n\n        # Draw grid\n        data[:, list(range(0, data.shape[1], stride_r)), :] = 0\n        data[:, :, list(range(0, data.shape[2], stride_c))] = 0\n\n        data[0, list(range(0, data.shape[1], stride_r)), :] = 255\n        data[0, :, list(range(0, data.shape[2], stride_c))] = 255\n\n        return data\n\n    def shade_cell(self, data):\n        stride_r = int(data.shape[1] / 
self.feat_shape[0])\n        stride_c = int(data.shape[2] / self.feat_shape[1])\n\n        r1 = self.zoom_pos[0]*stride_r\n        r2 = min((self.zoom_pos[0] + 1)*stride_r, data.shape[1])\n\n        c1 = self.zoom_pos[1] * stride_c\n        c2 = min((self.zoom_pos[1] + 1) * stride_c, data.shape[2])\n\n        factor = 0.8 if self.zoom_mode else 0.5\n        data[:, r1:r2, c1:c2] = data[:, r1:r2, c1:c2] * (1 - factor) + torch.tensor([255.0, 0.0, 0.0]).view(3, 1, 1).to(data.device) * factor\n        return data\n\n    def show_image(self, data=None):\n        if data is None:\n            data = self.raw_data.clone()\n\n        data = self.draw_grid(data)\n        data = self.shade_cell(data)\n        self.visdom.image(data, opts={'title': self.title}, win=self.title)\n\n    def save_data(self, data):\n        # Ignore feat shape\n        data = data[0]\n        data = data.float()\n        self.raw_data = data\n\n    def draw_data(self):\n        self.show_image(self.raw_data.clone())\n\n\nclass VisInfoDict(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n        self.raw_data = OrderedDict()\n\n    def generate_display_text(self, data):\n        display_text = ''\n        for key, value in data.items():\n            key = key.replace('_', ' ')\n            if value is None:\n                display_text += '<b>{}</b>: {}<br>'.format(key, 'None')\n            elif isinstance(value, (str, int)):\n                display_text += '<b>{}</b>: {}<br>'.format(key, value)\n            else:\n                display_text += '<b>{}</b>: {:.2f}<br>'.format(key, value)\n\n        return display_text\n\n    def save_data(self, data):\n        for key, val in data.items():\n            self.raw_data[key] = val\n\n    def draw_data(self):\n        data = copy.deepcopy(self.raw_data)\n        display_text = self.generate_display_text(data)\n        self.visdom.text(display_text, opts={'title': self.title}, win=self.title)\n\n\nclass VisText(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        self.raw_data = data\n\n    def draw_data(self):\n        data = copy.deepcopy(self.raw_data)\n        self.visdom.text(data, opts={'title': self.title}, win=self.title)\n\n\nclass VisLinePlot(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        self.raw_data = data\n\n    def draw_data(self):\n        if isinstance(self.raw_data, (list, tuple)):\n            data_y = self.raw_data[0].clone()\n            data_x = self.raw_data[1].clone()\n        else:\n            data_y = self.raw_data.clone()\n            data_x = torch.arange(data_y.shape[0])\n\n        self.visdom.line(data_y, data_x, opts={'title': self.title}, win=self.title)\n\n\nclass VisTracking(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        image = data[0]\n        boxes_masks = data[1:]\n\n        boxes, masks = [], []\n        for bm in boxes_masks:\n            if bm is None:\n                continue\n            if isinstance(bm, list):\n                boxes.append(torch.Tensor(bm)); continue\n            if len(bm.shape) > 1:\n                # Binarize segmentation if a float tensor is provided\n                if bm.dtype != np.uint8:\n                    bm = (bm > 
0.5).astype(np.uint8)\n                masks.append(bm); continue\n            boxes.append(bm.float())\n\n        self.raw_data = [image, boxes, masks]\n\n    def draw_data(self):\n        disp_image = self.raw_data[0].copy()\n\n        resize_factor = 1\n        if max(disp_image.shape) > 480:\n            resize_factor = 480.0 / float(max(disp_image.shape))\n            disp_image = cv2.resize(disp_image, None, fx=resize_factor, fy=resize_factor)\n            for i, mask in enumerate(self.raw_data[2]):\n                self.raw_data[2][i] = cv2.resize(mask, None, fx=resize_factor, fy=resize_factor)\n\n        boxes = [resize_factor * b.clone() for b in self.raw_data[1]]\n\n        for i, disp_rect in enumerate(boxes):\n            color = ((255*((i%3)>0)), 255*((i+1)%2), (255*(i%5))//4)\n            cv2.rectangle(disp_image,\n                          (int(disp_rect[0]), int(disp_rect[1])),\n                          (int(disp_rect[0] + disp_rect[2]), int(disp_rect[1] + disp_rect[3])), color, 2)\n        for i, mask in enumerate(self.raw_data[2], 1):\n            disp_image = overlay_mask(disp_image, mask * i)\n        disp_image = numpy_to_torch(disp_image).squeeze(0)\n        disp_image = disp_image.float()\n        self.visdom.image(disp_image, opts={'title': self.title}, win=self.title)\n\n\nclass VisBBReg(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n        self.block_list = []\n\n    def block_list_callback_handler(self, data):\n        self.block_list[data['propertyId']]['value'] = data['value']\n        self.visdom.properties(self.block_list, opts={'title': 'BBReg Vis'}, win='bbreg_vis')\n        self.draw_data()\n\n    def save_data(self, data):\n        self.image = data[0].float()\n        self.init_boxes = data[1]\n        self.final_boxes = data[2]\n        self.final_ious = data[3]\n\n    def draw_data(self):\n        if len(self.block_list) == 0:\n            self.block_list.append({'type': 'checkbox', 'name': 'ID 0', 'value': True})\n            self.block_list.append({'type': 'checkbox', 'name': 'ID 1', 'value': True})\n            self.visdom.properties(self.block_list, opts={'title': 'BBReg Vis'}, win='bbreg_vis')\n            self.visdom.register_event_handler(self.block_list_callback_handler, 'bbreg_vis')\n\n        disp_image = self.image\n\n        ids = [x['value'] for x in self.block_list]\n        init_box_image = show_image_with_boxes(disp_image.clone(), self.init_boxes.clone(), disp_ids=ids)\n        final_box_image = show_image_with_boxes(disp_image.clone(), self.final_boxes.clone(), self.final_ious.clone(), disp_ids=ids)\n\n        self.visdom.image(init_box_image, opts={'title': 'Init Boxes'}, win='Init Boxes')\n        self.visdom.image(final_box_image, opts={'title': 'Final Boxes'}, win='Final Boxes')\n\n\nclass Visdom:\n    def __init__(self, debug=0, ui_info=None, visdom_info=None):\n        self.debug = debug\n        visdom_info = {} if visdom_info is None else visdom_info  # the default None would crash the .get() calls below\n        self.visdom = visdom.Visdom(server=visdom_info.get('server', '127.0.0.1'), port=visdom_info.get('port', 8097))\n        self.registered_blocks = {}\n        self.blocks_list = []\n\n        self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list')\n        self.visdom.register_event_handler(self.block_list_callback_handler, 'block_list')\n\n        if ui_info is not None:\n            self.visdom.register_event_handler(ui_info['handler'], ui_info['win_id'])\n\n    def block_list_callback_handler(self, data):\n        field_name = 
self.blocks_list[data['propertyId']]['name']\n\n        self.registered_blocks[field_name].toggle_display(data['value'])\n\n        self.blocks_list[data['propertyId']]['value'] = data['value']\n\n        self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list')\n\n    def register(self, data, mode, debug_level=0, title='Data', **kwargs):\n        if title not in self.registered_blocks.keys():\n            show_data = self.debug >= debug_level\n\n            if title != 'Tracking':\n                self.blocks_list.append({'type': 'checkbox', 'name': title, 'value': show_data})\n\n            self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list')\n\n            if mode == 'image':\n                self.registered_blocks[title] = VisImage(self.visdom, show_data, title)\n            elif mode == 'heatmap':\n                self.registered_blocks[title] = VisHeatmap(self.visdom, show_data, title)\n            elif mode == 'cost_volume':\n                self.registered_blocks[title] = VisCostVolume(self.visdom, show_data, title)\n            elif mode == 'cost_volume_flip':\n                self.registered_blocks[title] = VisCostVolume(self.visdom, show_data, title, flip=True)\n            elif mode == 'cost_volume_ui':\n                self.registered_blocks[title] = VisCostVolumeUI(self.visdom, show_data, title, data[1],\n                                                                self.registered_blocks)\n            elif mode == 'info_dict':\n                self.registered_blocks[title] = VisInfoDict(self.visdom, show_data, title)\n            elif mode == 'text':\n                self.registered_blocks[title] = VisText(self.visdom, show_data, title)\n            elif mode == 'lineplot':\n                self.registered_blocks[title] = VisLinePlot(self.visdom, show_data, title)\n            elif mode == 'Tracking':\n                self.registered_blocks[title] = VisTracking(self.visdom, show_data, title)\n            elif mode == 'bbreg':\n                self.registered_blocks[title] = VisBBReg(self.visdom, show_data, title)\n            elif mode == 'featmap':\n                self.registered_blocks[title] = VisFeaturemap(self.visdom, show_data, title)\n            else:\n                raise ValueError('Visdom Error: Unknown data mode {}'.format(mode))\n        # Update\n        self.registered_blocks[title].update(data, **kwargs)\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/AR/pytracking/vot20_utils.py",
    "content": "import numpy as np\n\n\ndef make_full_size(x, output_sz):\n    '''\n    zero-pad input x (right and down) to match output_sz\n    x: numpy array e.g., binary mask\n    output_sz: size of the output [width, height]\n    '''\n    if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]:\n        return x\n    pad_x = output_sz[0] - x.shape[1]\n    if pad_x < 0:\n        x = x[:, :x.shape[1] + pad_x]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_x = 0\n    pad_y = output_sz[1] - x.shape[0]\n    if pad_y < 0:\n        x = x[:x.shape[0] + pad_y, :]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_y = 0\n    return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0)\n\n\ndef rect_from_mask(mask):\n    '''\n    create an axis-aligned rectangle from a given binary mask\n    mask in created as a minimal rectangle containing all non-zero pixels\n    '''\n    x_ = np.sum(mask, axis=0)\n    y_ = np.sum(mask, axis=1)\n    x0 = np.min(np.nonzero(x_))\n    x1 = np.max(np.nonzero(x_))\n    y0 = np.min(np.nonzero(y_))\n    y1 = np.max(np.nonzero(y_))\n    return [x0, y0, x1 - x0 + 1, y1 - y0 + 1]\n\n\ndef mask_from_rect(rect, output_sz):\n    '''\n    create a binary mask from a given rectangle\n    rect: axis-aligned rectangle [x0, y0, width, height]\n    output_sz: size of the output [width, height]\n    '''\n    mask = np.zeros((output_sz[1], output_sz[0]), dtype=np.uint8)\n    x0 = max(int(round(rect[0])), 0)\n    y0 = max(int(round(rect[1])), 0)\n    x1 = min(int(round(rect[0] + rect[2])), output_sz[0])\n    y1 = min(int(round(rect[1] + rect[3])), output_sz[1])\n    mask[y0:y1, x0:x1] = 1\n    return mask\n\n\ndef bbox_clip(x1, y1, x2, y2, boundary, min_sz=10):\n    '''boundary (H,W)'''\n    x1_new = max(0, min(x1, boundary[1] - min_sz))\n    y1_new = max(0, min(y1, boundary[0] - min_sz))\n    x2_new = max(min_sz, min(x2, boundary[1]))\n    y2_new = max(min_sz, min(y2, boundary[0]))\n    return x1_new, y1_new, x2_new, y2_new"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/.gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n.vim-template*\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2018 Jiayuan Mao\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/README.md",
    "content": "# PreciseRoIPooling\nThis repo implements the **Precise RoI Pooling** (PrRoI Pooling), proposed in the paper **Acquisition of Localization Confidence for Accurate Object Detection** published at ECCV 2018 (Oral Presentation).\n\n**Acquisition of Localization Confidence for Accurate Object Detection**\n\n_Borui Jiang*, Ruixuan Luo*, Jiayuan Mao*, Tete Xiao, Yuning Jiang_ (* indicates equal contribution.)\n\nhttps://arxiv.org/abs/1807.11590\n\n## Brief\n\nIn short, Precise RoI Pooling is an integration-based (bilinear interpolation) average pooling method for RoI Pooling. It avoids any quantization and has a continuous gradient on bounding box coordinates. It is:\n\n- different from the original RoI Pooling proposed in [Fast R-CNN](https://arxiv.org/abs/1504.08083). PrRoI Pooling uses average pooling instead of max pooling for each bin and has a continuous gradient on bounding box coordinates. That is, one can take the derivatives of some loss function w.r.t the coordinates of each RoI and optimize the RoI coordinates.\n- different from the RoI Align proposed in [Mask R-CNN](https://arxiv.org/abs/1703.06870). PrRoI Pooling uses a full integration-based average pooling instead of sampling a constant number of points. This makes the gradient w.r.t. the coordinates continuous.\n\nFor a better illustration, we illustrate RoI Pooling, RoI Align and PrRoI Pooing in the following figure. More details including the gradient computation can be found in our paper.\n\n<center><img src=\"./_assets/prroi_visualization.png\" width=\"80%\"></center>\n\n## Implementation\n\nPrRoI Pooling was originally implemented by [Tete Xiao](http://tetexiao.com/) based on MegBrain, an (internal) deep learning framework built by Megvii Inc. It was later adapted into open-source deep learning frameworks. Currently, we only support PyTorch. Unfortunately, we don't have any specific plan for the adaptation into other frameworks such as TensorFlow, but any contributions (pull requests) will be more than welcome.\n\n## Usage (PyTorch 1.0)\n\nIn the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 1.0+ and only supports CUDA (CPU mode is not implemented).\nSince we use PyTorch JIT for cxx/cuda code compilation, to use the module in your code, simply do:\n\n```\nfrom prroi_pool import PrRoIPool2D\n\navg_pool = PrRoIPool2D(window_height, window_width, spatial_scale)\nroi_features = avg_pool(features, rois)\n\n# for those who want to use the \"functional\"\n\nfrom prroi_pool.functional import prroi_pool2d\nroi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale)\n```\n\n\n## Usage (PyTorch 0.4)\n\n**!!! Please first checkout to the branch pytorch0.4.**\n\nIn the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 0.4 and only supports CUDA (CPU mode is not implemented).\nTo use the PrRoI Pooling module, first goto `pytorch/prroi_pool` and execute `./travis.sh` to compile the essential components (you may need `nvcc` for this step). 
To use the module in your code, simply do:\n\n```\nfrom prroi_pool import PrRoIPool2D\n\navg_pool = PrRoIPool2D(window_height, window_width, spatial_scale)\nroi_features = avg_pool(features, rois)\n\n# for those who want to use the \"functional\"\n\nfrom prroi_pool.functional import prroi_pool2d\nroi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale)\n```\n\nHere,\n\n- RoI is an `m * 5` float tensor of format `(batch_index, x0, y0, x1, y1)`, following the convention in the original Caffe implementation of RoI Pooling, although in some frameworks the batch indices are provided by an integer tensor.\n- `spatial_scale` is multiplied to the RoIs. For example, if your feature maps are down-sampled by a factor of 16 (w.r.t. the input image), you should use a spatial scale of `1/16`.\n- The coordinates for RoI follows the [L, R) convension. That is, `(0, 0, 4, 4)` denotes a box of size `4x4`.\n"
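\nA minimal end-to-end sketch (shapes and values are illustrative, and a CUDA device is assumed):\n\n```\nimport torch\nfrom prroi_pool import PrRoIPool2D\n\nfeatures = torch.randn(1, 256, 32, 32).cuda()\n# one RoI: (batch_index, x0, y0, x1, y1) in input-image coordinates\nrois = torch.tensor([[0., 0., 0., 64., 64.]]).cuda()\n\navg_pool = PrRoIPool2D(7, 7, spatial_scale=1.0 / 16)\nroi_features = avg_pool(features, rois)  # -> shape (1, 256, 7, 7)\n```\n"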
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/.gitignore",
    "content": "*.o\n/_prroi_pooling\n"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/__init__.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : __init__.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n# Date   : 07/13/2018\n# \n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nfrom .prroi_pool import *\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/functional.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : functional.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n# Date   : 07/13/2018\n#\n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nimport torch\nimport torch.autograd as ag\n\n__all__ = ['prroi_pool2d']\n\n\n_prroi_pooling = None\n\n\ndef _import_prroi_pooling():\n    global _prroi_pooling\n\n    if _prroi_pooling is None:\n        try:\n            from os.path import join as pjoin, dirname\n            from torch.utils.cpp_extension import load as load_extension\n            root_dir = pjoin(dirname(__file__), 'src')\n\n            _prroi_pooling = load_extension(\n                '_prroi_pooling',\n                [pjoin(root_dir, 'prroi_pooling_gpu.c'), pjoin(root_dir, 'prroi_pooling_gpu_impl.cu')],\n                verbose=True\n            )\n        except ImportError:\n            raise ImportError('Can not compile Precise RoI Pooling library.')\n\n    return _prroi_pooling\n\n\nclass PrRoIPool2DFunction(ag.Function):\n    @staticmethod\n    def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale):\n        _prroi_pooling = _import_prroi_pooling()\n\n        assert 'FloatTensor' in features.type() and 'FloatTensor' in rois.type(), \\\n                'Precise RoI Pooling only takes float input, got {} for features and {} for rois.'.format(features.type(), rois.type())\n\n        pooled_height = int(pooled_height)\n        pooled_width = int(pooled_width)\n        spatial_scale = float(spatial_scale)\n\n        features = features.contiguous()\n        rois = rois.contiguous()\n        params = (pooled_height, pooled_width, spatial_scale)\n\n        if features.is_cuda:\n            output = _prroi_pooling.prroi_pooling_forward_cuda(features, rois, *params)\n            ctx.params = params\n            # everything here is contiguous.\n            ctx.save_for_backward(features, rois, output)\n        else:\n            raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.')\n\n        return output\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        _prroi_pooling = _import_prroi_pooling()\n\n        features, rois, output = ctx.saved_tensors\n        grad_input = grad_coor = None\n\n        if features.requires_grad:\n            grad_output = grad_output.contiguous()\n            grad_input = _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, *ctx.params)\n        if rois.requires_grad:\n            grad_output = grad_output.contiguous()\n            grad_coor = _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, *ctx.params)\n\n        return grad_input, grad_coor, None, None, None\n\nprroi_pool2d = PrRoIPool2DFunction.apply"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/prroi_pool.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : prroi_pool.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n# Date   : 07/13/2018\n#\n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nimport torch.nn as nn\n\nfrom .functional import prroi_pool2d\n\n__all__ = ['PrRoIPool2D']\n\n\nclass PrRoIPool2D(nn.Module):\n    def __init__(self, pooled_height, pooled_width, spatial_scale):\n        super().__init__()\n\n        self.pooled_height = int(pooled_height)\n        self.pooled_width = int(pooled_width)\n        self.spatial_scale = float(spatial_scale)\n\n    def forward(self, features, rois):\n        return prroi_pool2d(features, rois, self.pooled_height, self.pooled_width, self.spatial_scale)\n\n    def extra_repr(self):\n        return 'kernel_size=({pooled_height}, {pooled_width}), spatial_scale={spatial_scale}'.format(**self.__dict__)\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.c",
    "content": "/*\n * File   : prroi_pooling_gpu.c\n * Author : Jiayuan Mao, Tete Xiao\n * Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n * Date   : 07/13/2018\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#include <math.h>\n#include <torch/extension.h>\n\n#include <ATen/ATen.h>\n#include <ATen/cuda/CUDAContext.h>\n\n#include \"prroi_pooling_gpu_impl.cuh\"\n\n\nat::Tensor prroi_pooling_forward_cuda(const at::Tensor &features, const at::Tensor &rois, int pooled_height, int pooled_width, float spatial_scale) {\n    int nr_rois = rois.size(0);\n    int nr_channels = features.size(1);\n    int height = features.size(2);\n    int width = features.size(3);\n    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;\n    auto output = at::zeros({nr_rois, nr_channels, pooled_height, pooled_width}, features.options());\n\n    if (output.numel() == 0) {\n        AT_CUDA_CHECK(cudaGetLastError());\n        return output;\n    }\n\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n    PrRoIPoolingForwardGpu(\n        stream, features.data<float>(), rois.data<float>(), output.data<float>(),\n        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,\n        top_count\n    );\n\n    AT_CUDA_CHECK(cudaGetLastError());\n    return output;\n}\n\nat::Tensor prroi_pooling_backward_cuda(\n    const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff,\n    int pooled_height, int pooled_width, float spatial_scale) {\n\n    auto features_diff = at::zeros_like(features);\n\n    int nr_rois = rois.size(0);\n    int batch_size = features.size(0);\n    int nr_channels = features.size(1);\n    int height = features.size(2);\n    int width = features.size(3);\n    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;\n    int bottom_count = batch_size * nr_channels * height * width;\n\n    if (output.numel() == 0) {\n        AT_CUDA_CHECK(cudaGetLastError());\n        return features_diff;\n    }\n\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n    PrRoIPoolingBackwardGpu(\n        stream,\n        features.data<float>(), rois.data<float>(), output.data<float>(), output_diff.data<float>(),\n        features_diff.data<float>(),\n        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,\n        top_count, bottom_count\n    );\n\n    AT_CUDA_CHECK(cudaGetLastError());\n    return features_diff;\n}\n\nat::Tensor prroi_pooling_coor_backward_cuda(\n    const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff,\n    int pooled_height, int pooled_width, float spatial_scale) {\n\n    auto coor_diff = at::zeros_like(rois);\n\n    int nr_rois = rois.size(0);\n    int nr_channels = features.size(1);\n    int height = features.size(2);\n    int width = features.size(3);\n    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;\n    int bottom_count = nr_rois * 5;\n\n    if (output.numel() == 0) {\n        AT_CUDA_CHECK(cudaGetLastError());\n        return coor_diff;\n    }\n\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n    PrRoIPoolingCoorBackwardGpu(\n        stream,\n        features.data<float>(), rois.data<float>(), output.data<float>(), output_diff.data<float>(),\n        coor_diff.data<float>(),\n        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,\n        top_count, bottom_count\n    );\n\n   
 AT_CUDA_CHECK(cudaGetLastError());\n    return coor_diff;\n}\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n    m.def(\"prroi_pooling_forward_cuda\", &prroi_pooling_forward_cuda, \"PRRoIPooling_forward\");\n    m.def(\"prroi_pooling_backward_cuda\", &prroi_pooling_backward_cuda, \"PRRoIPooling_backward\");\n    m.def(\"prroi_pooling_coor_backward_cuda\", &prroi_pooling_coor_backward_cuda, \"PRRoIPooling_backward_coor\");\n}\n"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.h",
    "content": "/*\n * File   : prroi_pooling_gpu.h\n * Author : Jiayuan Mao, Tete Xiao\n * Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com \n * Date   : 07/13/2018\n * \n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\nint prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale);\n\nint prroi_pooling_backward_cuda(\n    THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,\n    int pooled_height, int pooled_width, float spatial_scale\n);\n\nint prroi_pooling_coor_backward_cuda(\n    THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,\n    int pooled_height, int pooled_width, float spatial_scal\n);\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cu",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cu\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#include \"prroi_pooling_gpu_impl.cuh\"\n\n#include <cstdio>\n#include <cfloat>\n\n#define CUDA_KERNEL_LOOP(i, n) \\\n    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \\\n        i < (n); \\\n        i += blockDim.x * gridDim.x)\n\n#define CUDA_POST_KERNEL_CHECK \\\n    do { \\\n        cudaError_t err = cudaGetLastError(); \\\n        if (cudaSuccess != err) { \\\n            fprintf(stderr, \"cudaCheckError() failed : %s\\n\", cudaGetErrorString(err)); \\\n            exit(-1); \\\n        } \\\n    } while(0)\n\n#define CUDA_NUM_THREADS 512\n\nnamespace {\n\nstatic int CUDA_NUM_BLOCKS(const int N) {\n  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;\n}\n\n__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    float retVal = overflow ? 0.0f : data[h * width + w];\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingGetCoeff(float dh, float dw){\n    dw = dw > 0 ? dw : -dw;\n    dh = dh > 0 ? dh : -dh;\n    return (1.0f - dh) * (1.0f - dw);\n}\n\n__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {\n    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;\n}\n\n__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){\n    float retVal = 0.0f;\n    int h1 = floorf(h);\n    int w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h);\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n    float sum_out = 0;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * 
lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;\n\n    return sum_out;\n}\n\n__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    if (!overflow)\n        atomicAdd(diff + h * width + w, top_diff * coeff);\n}\n\n__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);\n}\n\n__global__ void PrRoIPoolingForward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_OUT top_data,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n\n    bottom_rois += n * 5;\n    int roi_batch_ind = bottom_rois[0];\n\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));\n    float 
roi_height = max(roi_end_h - roi_start_h, ((float)0.0));\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    float *this_out = top_data + index;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n    if (win_size == 0) {\n        *this_out = 0;\n        return;\n    }\n\n    float sum_out = 0;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n    *this_out = sum_out / win_size;\n  }\n}\n\n__global__ void PrRoIPoolingBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n\n  }\n}\n\n__global__ void PrRoIPoolingCoorBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_data,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    const float *this_top_data = top_data + index;\n    float *this_data_grad = bottom_diff + n * 5;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    // WARNING: to be discussed\n    if (sum_out == 0)\n        return;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;\n    for (int h_iter = s_h; h_iter < e_h; ++h_iter) {\n        g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));\n\n        g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));\n    }\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter) {\n        g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));\n\n        g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));\n    }\n\n    float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);\n    float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);\n\n    partial_x1 = partial_x1 / win_size * spatial_scale;\n    partial_x2 = partial_x2 / win_size * spatial_scale;\n    partial_y1 = partial_y1 / win_size * spatial_scale;\n    partial_y2 = partial_y2 / win_size * spatial_scale;\n\n    // (b, x1, y1, x2, y2)\n\n    this_data_grad[0] = 0;\n    atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width)\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height)\n            * (*this_out_grad));\n  }\n}\n\n} /* !anonymous namespace */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count) {\n\n    
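// One thread per pooled output element: CUDA_NUM_BLOCKS rounds the grid up, and the kernel's grid-stride CUDA_KERNEL_LOOP guards the rounded-up tail.\n    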
PrRoIPoolingForward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_rois, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingCoorBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\n} /* !extern \"C\" */\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cuh",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cuh\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#ifndef PRROI_POOLING_GPU_IMPL_CUH\n#define PRROI_POOLING_GPU_IMPL_CUH\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#define F_DEVPTR_IN const float *\n#define F_DEVPTR_OUT float *\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count);\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\n#ifdef __cplusplus\n} /* !extern \"C\" */\n#endif\n\n#endif /* !PRROI_POOLING_GPU_IMPL_CUH */\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/pytorch/tests/test_prroi_pooling2d.py",
    "content": "# -*- coding: utf-8 -*-\n# File   : test_prroi_pooling2d.py\n# Author : Jiayuan Mao\n# Email  : maojiayuan@gmail.com\n# Date   : 18/02/2018\n#\n# This file is part of Jacinle.\n\nimport unittest\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom jactorch.utils.unittest import TorchTestCase\n\nfrom prroi_pool import PrRoIPool2D\n\n\nclass TestPrRoIPool2D(TorchTestCase):\n    def test_forward(self):\n        pool = PrRoIPool2D(7, 7, spatial_scale=0.5)\n        features = torch.rand((4, 16, 24, 32)).cuda()\n        rois = torch.tensor([\n            [0, 0, 0, 14, 14],\n            [1, 14, 14, 28, 28],\n        ]).float().cuda()\n\n        out = pool(features, rois)\n        out_gold = F.avg_pool2d(features, kernel_size=2, stride=1)\n\n        self.assertTensorClose(out, torch.stack((\n            out_gold[0, :, :7, :7],\n            out_gold[1, :, 7:14, 7:14],\n        ), dim=0))\n\n    def test_backward_shapeonly(self):\n        pool = PrRoIPool2D(2, 2, spatial_scale=0.5)\n\n        features = torch.rand((4, 2, 24, 32)).cuda()\n        rois = torch.tensor([\n            [0, 0, 0, 4, 4],\n            [1, 14, 14, 18, 18],\n        ]).float().cuda()\n        features.requires_grad = rois.requires_grad = True\n        out = pool(features, rois)\n\n        loss = out.sum()\n        loss.backward()\n\n        self.assertTupleEqual(features.size(), features.grad.size())\n        self.assertTupleEqual(rois.size(), rois.grad.size())\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cu",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cu\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#include \"prroi_pooling_gpu_impl.cuh\"\n\n#include <cstdio>\n#include <cfloat>\n\n#define CUDA_KERNEL_LOOP(i, n) \\\n    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \\\n        i < (n); \\\n        i += blockDim.x * gridDim.x)\n\n#define CUDA_POST_KERNEL_CHECK \\\n    do { \\\n        cudaError_t err = cudaGetLastError(); \\\n        if (cudaSuccess != err) { \\\n            fprintf(stderr, \"cudaCheckError() failed : %s\\n\", cudaGetErrorString(err)); \\\n            exit(-1); \\\n        } \\\n    } while(0)\n\n#define CUDA_NUM_THREADS 512\n\nnamespace {\n\nstatic int CUDA_NUM_BLOCKS(const int N) {\n  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;\n}\n\n__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    float retVal = overflow ? 0.0f : data[h * width + w];\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingGetCoeff(float dh, float dw){\n    dw = dw > 0 ? dw : -dw;\n    dh = dh > 0 ? dh : -dh;\n    return (1.0f - dh) * (1.0f - dw);\n}\n\n__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {\n    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;\n}\n\n__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){\n    float retVal = 0.0f;\n    int h1 = floorf(h);\n    int w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h);\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n    float sum_out = 0;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * 
lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;\n\n    return sum_out;\n}\n\n__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    if (!overflow)\n        atomicAdd(diff + h * width + w, top_diff * coeff);\n}\n\n__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);\n}\n\n__global__ void PrRoIPoolingForward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_OUT top_data,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n\n    bottom_rois += n * 5;\n    int roi_batch_ind = bottom_rois[0];\n\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));\n    float 
roi_height = max(roi_end_h - roi_start_h, ((float)0.0));\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    float *this_out = top_data + index;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n    if (win_size == 0) {\n        *this_out = 0;\n        return;\n    }\n\n    float sum_out = 0;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n    *this_out = sum_out / win_size;\n  }\n}\n\n__global__ void PrRoIPoolingBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n\n  }\n}\n\n__global__ void PrRoIPoolingCoorBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_data,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    const float *this_top_data = top_data + index;\n    float *this_data_grad = bottom_diff + n * 5;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    // WARNING: to be discussed\n    if (sum_out == 0)\n        return;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;\n    for (int h_iter = s_h; h_iter < e_h; ++h_iter) {\n        g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));\n\n        g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));\n    }\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter) {\n        g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));\n\n        g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));\n    }\n\n    float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);\n    float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);\n\n    partial_x1 = partial_x1 / win_size * spatial_scale;\n    partial_x2 = partial_x2 / win_size * spatial_scale;\n    partial_y1 = partial_y1 / win_size * spatial_scale;\n    partial_y2 = partial_y2 / win_size * spatial_scale;\n\n    // (b, x1, y1, x2, y2)\n\n    this_data_grad[0] = 0;\n    atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width)\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height)\n            * (*this_out_grad));\n  }\n}\n\n} /* !anonymous namespace */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count) {\n\n    
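// One thread per pooled output element: CUDA_NUM_BLOCKS rounds the grid up, and the kernel's grid-stride CUDA_KERNEL_LOOP guards the rounded-up tail.\n    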
PrRoIPoolingForward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_rois, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingCoorBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\n} /* !extern \"C\" */\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cuh",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cuh\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#ifndef PRROI_POOLING_GPU_IMPL_CUH\n#define PRROI_POOLING_GPU_IMPL_CUH\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#define F_DEVPTR_IN const float *\n#define F_DEVPTR_OUT float *\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count);\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\n#ifdef __cplusplus\n} /* !extern \"C\" */\n#endif\n\n#endif /* !PRROI_POOLING_GPU_IMPL_CUH */\n\n"
  },
  {
    "path": "artrackv2_mindspore/external/vot20/cttrack/config.yaml",
    "content": "registry:\n- ./trackers.ini\nstack: vot2020\n"
  },
  {
    "path": "artrackv2_mindspore/external/vot20/cttrack/trackers.ini",
    "content": "[cttrack_large]  # <tracker-name>\nlabel = cttrack_large\nprotocol = traxpython\n\ncommand = from cttrack_start import main;main()\n\n# Specify a path to trax python wrapper if it is not visible (separate by ; if using multiple paths)\n\n# paths = /home/lr/workspace/CTTrack:\npaths = <PATH_OF_CTTRACK>\n\n# Additional environment paths\nenv_PATH = <PATH_OF_CUDA_LIB>:<PATH_OF_PYTHON>"
  },
  {
    "path": "artrackv2_mindspore/lib/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/lib/config/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/lib/config/ostrack/config.py",
    "content": "from easydict import EasyDict as edict\nimport yaml\n\n\"\"\"\nAdd default config for OSTrack.\n\"\"\"\ncfg = edict()\n\n# MODEL\ncfg.MODEL = edict()\ncfg.MODEL.PRETRAIN_FILE = \"mae_pretrain_vit_base.pth\"\ncfg.MODEL.EXTRA_MERGER = False\n\ncfg.MODEL.RETURN_INTER = False\ncfg.MODEL.RETURN_STAGES = [2, 5, 8, 11]\n\n# MODEL.BACKBONE\ncfg.MODEL.BACKBONE = edict()\ncfg.MODEL.BACKBONE.TYPE = \"vit_base_patch16_224\"\ncfg.MODEL.BACKBONE.PATCHSIZE = 16\ncfg.MODEL.BACKBONE.EMBEDDIM = 768\ncfg.MODEL.BACKBONE.STRIDE = 16\ncfg.MODEL.BACKBONE.MID_PE = False\ncfg.MODEL.BACKBONE.SEP_SEG = False\ncfg.MODEL.BACKBONE.CAT_MODE = 'direct'\ncfg.MODEL.BACKBONE.MERGE_LAYER = 0\ncfg.MODEL.BACKBONE.ADD_CLS_TOKEN = False\ncfg.MODEL.BACKBONE.CLS_TOKEN_USE_MODE = 'ignore'\n\ncfg.MODEL.BACKBONE.CE_LOC = []\ncfg.MODEL.BACKBONE.CE_KEEP_RATIO = []\ncfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE = 'ALL'  # choose between ALL, CTR_POINT, CTR_REC, GT_BOX\n\n# MODEL.DECODER\ncfg.MODEL.DECODER = edict()\ncfg.MODEL.DECODER.TYPE = \"mask\"\ncfg.MODEL.DECODER.MASK_RATIO = 0.75\ncfg.MODEL.DECODER.EMBEDDIM = 512\ncfg.MODEL.DECODER.DEPTH = 8\ncfg.MODEL.DECODER.NUMHEADS = 16\ncfg.MODEL.DECODER.MLPRATIO = 4\n\n# MODEL.HEAD\ncfg.MODEL.BINS = 400\ncfg.MODEL.ENCODER_LAYER = 3\ncfg.MODEL.NUM_HEADS = 16\ncfg.MODEL.MLP_RATIO = 4\ncfg.MODEL.QKV_BIAS = True\ncfg.MODEL.DROP_RATE = 0.1\ncfg.MODEL.ATTN_DROP = 0.0\ncfg.MODEL.DROP_PATH = 0.0\ncfg.MODEL.DECODER_LAYER = 6\ncfg.MODEL.HEAD = edict()\ncfg.MODEL.HEAD.TYPE = \"PIX\"\ncfg.MODEL.HEAD.NUM_CHANNELS = 1024\n\n# TRAIN\ncfg.TRAIN = edict()\ncfg.TRAIN.LR = 0.0001\ncfg.TRAIN.WEIGHT_DECAY = 0.0001\ncfg.TRAIN.EPOCH = 500\ncfg.TRAIN.LR_DROP_EPOCH = 400\ncfg.TRAIN.BATCH_SIZE = 16\ncfg.TRAIN.NUM_WORKER = 10\ncfg.TRAIN.OPTIMIZER = \"ADAMW\"\ncfg.TRAIN.BACKBONE_MULTIPLIER = 0.1\ncfg.TRAIN.GIOU_WEIGHT = 2.0\ncfg.TRAIN.L1_WEIGHT = 5.0\ncfg.TRAIN.FREEZE_LAYERS = [0, ]\ncfg.TRAIN.PRINT_INTERVAL = 50\ncfg.TRAIN.VAL_EPOCH_INTERVAL = 20\ncfg.TRAIN.GRAD_CLIP_NORM = 0.1\ncfg.TRAIN.AMP = False\n\ncfg.TRAIN.CE_START_EPOCH = 20  # candidate elimination start epoch\ncfg.TRAIN.CE_WARM_EPOCH = 80  # candidate elimination warm up epoch\ncfg.TRAIN.DROP_PATH_RATE = 0.1  # drop path rate for ViT backbone\n\n# TRAIN.SCHEDULER\ncfg.TRAIN.SCHEDULER = edict()\ncfg.TRAIN.SCHEDULER.TYPE = \"step\"\ncfg.TRAIN.SCHEDULER.DECAY_RATE = 0.1\n\n# DATA\ncfg.DATA = edict()\ncfg.DATA.SAMPLER_MODE = \"causal\"  # sampling methods\ncfg.DATA.MEAN = [0.485, 0.456, 0.406]\ncfg.DATA.STD = [0.229, 0.224, 0.225]\ncfg.DATA.MAX_SAMPLE_INTERVAL = 200\ncfg.DATA.MAX_GAP = 300\ncfg.DATA.MAX_INTERVAL = 5\ncfg.DATA.INTERVAL_PROB = 0.0\ncfg.DATA.TEMP = 2\n\n# DATA.TRAIN\ncfg.DATA.TRAIN = edict()\ncfg.DATA.TRAIN.DATASETS_NAME = [\"LASOT\", \"GOT10K_vottrain\"]\ncfg.DATA.TRAIN.DATASETS_RATIO = [1, 1]\ncfg.DATA.TRAIN.SAMPLE_PER_EPOCH = 60000\n# DATA.VAL\ncfg.DATA.VAL = edict()\ncfg.DATA.VAL.DATASETS_NAME = [\"GOT10K_votval\"]\ncfg.DATA.VAL.DATASETS_RATIO = [1]\ncfg.DATA.VAL.SAMPLE_PER_EPOCH = 10000\n# DATA.SEARCH\ncfg.DATA.SEARCH = edict()\ncfg.DATA.SEARCH.SIZE = 256\ncfg.DATA.SEARCH.FACTOR = 5.0\ncfg.DATA.SEARCH.CENTER_JITTER = 4.5\ncfg.DATA.SEARCH.SCALE_JITTER = 0.5\ncfg.DATA.SEARCH.NUMBER = 1\n# DATA.TEMPLATE\ncfg.DATA.TEMPLATE = edict()\ncfg.DATA.TEMPLATE.NUMBER = 1\ncfg.DATA.TEMPLATE.SIZE = 128\ncfg.DATA.TEMPLATE.FACTOR = 2.0\ncfg.DATA.TEMPLATE.CENTER_JITTER = 0\ncfg.DATA.TEMPLATE.SCALE_JITTER = 0\n\n# TEST\ncfg.TEST = edict()\ncfg.TEST.TEMPLATE_FACTOR = 2.0\ncfg.TEST.TEMPLATE_SIZE = 128\ncfg.TEST.SEARCH_FACTOR = 5.0\ncfg.TEST.SEARCH_SIZE = 
256\ncfg.TEST.EPOCH = 500\n\n\ndef _edict2dict(dest_dict, src_edict):\n    if isinstance(dest_dict, dict) and isinstance(src_edict, dict):\n        for k, v in src_edict.items():\n            if not isinstance(v, edict):\n                dest_dict[k] = v\n            else:\n                dest_dict[k] = {}\n                _edict2dict(dest_dict[k], v)\n    else:\n        return\n\n\ndef gen_config(config_file):\n    cfg_dict = {}\n    _edict2dict(cfg_dict, cfg)\n    with open(config_file, 'w') as f:\n        yaml.dump(cfg_dict, f, default_flow_style=False)\n\n\ndef _update_config(base_cfg, exp_cfg):\n    if isinstance(base_cfg, dict) and isinstance(exp_cfg, edict):\n        for k, v in exp_cfg.items():\n            if k in base_cfg:\n                if not isinstance(v, dict):\n                    base_cfg[k] = v\n                else:\n                    _update_config(base_cfg[k], v)\n            else:\n                raise ValueError(\"{} does not exist in config.py\".format(k))\n    else:\n        return\n\n\ndef update_config_from_file(filename, base_cfg=None):\n    exp_config = None\n    with open(filename) as f:\n        exp_config = edict(yaml.safe_load(f))\n        if base_cfg is not None:\n            _update_config(base_cfg, exp_config)\n        else:\n            _update_config(cfg, exp_config)\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/__init__.py",
    "content": "from .ostrack.ostrack import build_ostrack\r\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/component/__init__.py",
    "content": "# -*- coding:utf-8 -*-\n# author  : Skye Song\n# file    : __init__.py.py\n# Copyright (c) Skye-Song. All Rights Reserved\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/component/attention.py",
    "content": "# -*- coding:utf-8 -*-\n# author  : Skye Song\n# file    : attention.py\n# Copyright (c) Skye-Song. All Rights Reserved\nimport sys\nsys.path.append(\"/home/baiyifan/code/AR2_mindspore_cp/2stage\")\nimport mindspore as ms\nimport mindspore.nn as nn\nimport mindspore.ops as ops\nfrom einops import rearrange\n\nfrom lib.utils.image import *\n\n\nclass Attention(nn.Cell):\n\tdef __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):\n\t\tsuper().__init__()\n\t\tself.num_heads = num_heads\n\t\thead_dim = dim // num_heads\n\t\tself.scale = head_dim ** -0.5\n\n\t\tself.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(p=attn_drop)\n\t\tself.proj = nn.Dense(dim, dim)\n\t\tself.proj_drop = nn.Dropout(p=proj_drop)\n\n\tdef construct(self, x, padding_mask=None, **kwargs):\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C//head)\n\n\t\tattn = (q @ k.swapaxes(-2, -1)) * self.scale  # (B, head, N, N)\n\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\n\t\tattn = ops.softmax(attn,axis=-1)\n\t\tattn = self.attn_drop(attn)\n\n\t\tx = (attn @ v).swapaxes(1, 2).reshape(B, N, C)\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n\n\nclass ClsMixAttention(nn.Cell):\n\tdef __init__(self,\n\t             dim,\n\t             num_heads,\n\t             qkv_bias=False,\n\t             attn_drop=0.,\n\t             proj_drop=0.,\n\t             ):\n\t\tsuper().__init__()\n\t\tself.num_heads = num_heads\n\t\thead_dim = dim // num_heads\n\t\tself.scale = head_dim ** -0.5\n\n\t\tself.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(p=attn_drop)\n\t\tself.proj = nn.Dense(dim, dim)\n\t\tself.proj_drop = nn.Dropout(p=proj_drop)\n\n\tdef construct(self, x, t_h, t_w, s_h, s_w, online_size=1, padding_mask=None):\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)\n\n\t\tq_cls, q_t, q_s = ops.split(q, [1, t_h * t_w * (1 + online_size), s_h * s_w], axis=2)\n\t\tk_cls, k_t, k_s = ops.split(k, [1, t_h * t_w * (1 + online_size), s_h * s_w], axis=2)\n\t\tv_cls, v_t, v_s = ops.split(v, [1, t_h * t_w * (1 + online_size), s_h * s_w], axis=2)\n\t\t# cls token attention\n\t\tattn = (q_cls @ k.swapaxes(-2, -1)) * self.scale  # (B, head, N_q, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = ops.softmax(attn,axis=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_cls = rearrange(attn @ v, 'b h t d -> b t (h d)')\n\n\t\t# template attention\n\t\tattn = (q_t @ k_t.swapaxes(-2, -1)) * self.scale  # (B, head, N_q, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = ops.softmax(attn,axis=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_t = rearrange(attn @ v_t, 'b h t d -> b t (h d)')\n\n\t\t# search region attention\n\t\tattn = (q_s @ k.swapaxes(-2, -1)) * self.scale  # (B, head, N_s, N)\n\t\tif 
padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = ops.softmax(attn,axis=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_s = rearrange(attn @ v, 'b h t d -> b t (h d)')\n\n\t\tx = ops.cat([x_cls, x_t, x_s], axis=1)\n\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n\n\nclass MixAttention(nn.Cell):\n\tdef __init__(self,\n\t             dim,\n\t             num_heads,\n\t             qkv_bias=False,\n\t             attn_drop=0.,\n\t             proj_drop=0.,\n\t             ):\n\t\tsuper().__init__()\n\t\tself.num_heads = num_heads\n\t\thead_dim = dim // num_heads\n\t\tself.scale = head_dim ** -0.5\n\n\t\tself.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(p=attn_drop)\n\t\tself.proj = nn.Dense(dim, dim)\n\t\tself.proj_drop = nn.Dropout(p=proj_drop)\n\n\tdef construct(self, x, t_h, t_w, s_h, s_w, padding_mask=None):\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)\n\n\t\tq_t, q_s = ops.split(q, [t_h * t_w * 2, s_h * s_w], axis=2)\n\t\tk_t, k_s = ops.split(k, [t_h * t_w * 2, s_h * s_w], axis=2)\n\t\tv_t, v_s = ops.split(v, [t_h * t_w * 2, s_h * s_w], axis=2)\n\n\t\t# template attention\n\t\tattn = (q_t @ k_t.swapaxes(-2, -1)) * self.scale  # (B, head, N_q, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = ops.softmax(attn,axis=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_t = rearrange(attn @ v_t, 'b h t d -> b t (h d)')\n\n\t\t# search region attention\n\t\tattn = (q_s @ k.swapaxes(-2, -1)) * self.scale  # (B, head, N_s, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = ops.softmax(attn,axis=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_s = rearrange(attn @ v, 'b h t d -> b t (h d)')\n\n\t\tx = ops.cat([x_t, x_s], axis=1)\n\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n\n\nclass NottAttention(nn.Cell):\n\tdef __init__(self,\n\t             dim,\n\t             num_heads,\n\t             qkv_bias=False,\n\t             attn_drop=0.,\n\t             proj_drop=0.,\n\t             ):\n\t\tsuper().__init__()\n\t\tself.num_heads = num_heads\n\t\thead_dim = dim // num_heads\n\t\tself.scale = head_dim ** -0.5\n\n\t\tself.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(p=attn_drop)\n\t\tself.proj = nn.Dense(dim, dim)\n\t\tself.proj_drop = nn.Dropout(p=proj_drop)\n\n\tdef construct(self, x, t_h, t_w, s_h, s_w, padding_mask=None):\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)\n\n\t\tq_t, q_s = ops.split(q, [t_h * t_w * 2, s_h * s_w], axis=2)\n\t\tk_t, k_s = ops.split(k, [t_h * t_w * 2, s_h * s_w], axis=2)\n\t\tv_t, v_s = ops.split(v, [t_h * t_w * 2, s_h * s_w], axis=2)\n\n\t\t# template attention\n\t\tattn = (q_t @ k_s.swapaxes(-2, -1)) * self.scale  # (B, head, N_q, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert 
padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = ops.softmax(attn,axis=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_t = rearrange(attn @ v_s, 'b h t d -> b t (h d)')\n\n\t\t# search region attention\n\t\tattn = (q_s @ k.swapaxes(-2, -1)) * self.scale  # (B, head, N_s, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = ops.softmax(attn,axis=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_s = rearrange(attn @ v, 'b h t d -> b t (h d)')\n\n\t\tx = ops.cat([x_t, x_s], axis=1)\n\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n\n\nclass NossAttention(nn.Cell):\n\tdef __init__(self,\n\t             dim,\n\t             num_heads,\n\t             qkv_bias=False,\n\t             attn_drop=0.,\n\t             proj_drop=0.,\n\t             ):\n\t\tsuper().__init__()\n\t\tself.num_heads = num_heads\n\t\thead_dim = dim // num_heads\n\t\tself.scale = head_dim ** -0.5\n\n\t\tself.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(p=attn_drop)\n\t\tself.proj = nn.Dense(dim, dim)\n\t\tself.proj_drop = nn.Dropout(p=proj_drop)\n\n\tdef construct(self, x, t_h, t_w, s_h, s_w, padding_mask=None):\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)\n\n\t\tq_t, q_s = ops.split(q, [t_h * t_w * 2, s_h * s_w], axis=2)\n\t\tk_t, k_s = ops.split(k, [t_h * t_w * 2, s_h * s_w], axis=2)\n\t\tv_t, v_s = ops.split(v, [t_h * t_w * 2, s_h * s_w], axis=2)\n\n\t\t# template attention\n\t\tattn = (q_t @ k.swapaxes(-2, -1)) * self.scale  # (B, head, N_q, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = ops.softmax(attn,axis=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_t = rearrange(attn @ v, 'b h t d -> b t (h d)')\n\n\t\t# search region attention\n\t\tattn = (q_s @ k_t.swapaxes(-2, -1)) * self.scale  # (B, head, N_s, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = ops.softmax(attn,axis=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_s = rearrange(attn @ v_t, 'b h t d -> b t (h d)')\n\n\t\tx = ops.cat([x_t, x_s], axis=1)\n\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n\n\nclass CrossAttention(nn.Cell):\n\tdef __init__(self,\n\t             dim,\n\t             num_heads,\n\t             qkv_bias=False,\n\t             attn_drop=0.,\n\t             proj_drop=0.,\n\t             ):\n\t\tsuper().__init__()\n\t\tself.num_heads = num_heads\n\t\thead_dim = dim // num_heads\n\t\tself.scale = head_dim ** -0.5\n\n\t\tself.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(p=attn_drop)\n\t\tself.proj = nn.Dense(dim, dim)\n\t\tself.proj_drop = nn.Dropout(p=proj_drop)\n\n\tdef construct(self, x, t_h, t_w, s_h, s_w):\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)\n\n\t\tq_t, q_s = ops.split(q, [t_h * t_w * 2, s_h * s_w], 
axis=2)\n\t\tk_t, k_s = ops.split(k, [((t_h + 1) // 2) ** 2 * 2, s_h * s_w // 4], axis=4)\n\t\tv_t, v_s = ops.split(v, [((t_h + 1) // 2) ** 2 * 2, s_h * s_w // 4], axis=4)\n\n\t\t# template attention\n\t\tattn = (q_t @ k_s.swapaxes(-2, -1)) * self.scale\n\t\tattn = ops.softmax(attn,axis=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_t = rearrange(attn @ v_s, 'b h t d -> b t (h d)')\n\n\t\t# search region attention\n\t\tattn = (q_s @ k_t.swapaxes(-2, -1)) * self.scale\n\t\tattn = ops.softmax(attn,axis=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_s = rearrange(attn @ v_t, 'b h t d -> b t (h d)')\n\n\t\tx = ops.cat([x_t, x_s], axis=1)\n\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/component/block.py",
    "content": "# -*- coding:utf-8 -*-\n# author  : Skye Song\n# file    : block.py\n# Copyright (c) Skye-Song. All Rights Reserved\n\nimport sys\nsys.path.append(\"/home/baiyifan/weizhenhuan/2stage/lib/models/component\")\nfrom attention import *\nfrom lib.models.timm import *\n\nclass Block(nn.Cell):\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\n                 drop_path=0., attention = \"Attention\", act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n        super().__init__()\n        if norm_layer is None:\n            norm_layer = nn.LayerNorm\n        dim_tuple=dim\n        if isinstance(dim,int):\n            dim_tuple=tuple([dim])\n        self.norm1 = norm_layer(dim_tuple)\n\n        self.attn = globals()[attention](dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop,\n                                             proj_drop=drop)\n\n        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n        self.norm2 = norm_layer(dim_tuple)\n        mlp_hidden_dim = int(dim * mlp_ratio)\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n    def construct(self, x, **kwargs):\n        x = x + self.drop_path(self.attn(self.norm1(x), **kwargs))\n        x = x + self.drop_path(self.mlp(self.norm2(x)))\n        return x\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/component/drop.py",
    "content": "\"\"\" DropBlock, DropPath\n\nMindSpore ports of the DropBlock and DropPath (Stochastic Depth) regularization layers.\n\nPapers:\nDropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)\n\nDeep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)\n\nCode:\nDropBlock impl inspired by two Tensorflow impl that I liked:\n - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74\n - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nimport mindspore as ms\nfrom mindspore import nn\nfrom mindspore import ops\n\ndef drop_block_2d(\n        x, drop_prob: float = 0.1, block_size: int = 7,  gamma_scale: float = 1.0,\n        with_noise: bool = False, inplace: bool = False, batchwise: bool = False):\n    \"\"\" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf\n\n    DropBlock with an experimental gaussian noise option. This layer has been tested on a few training\n    runs with success, but needs further validation and possibly optimization for lower runtime impact.\n    NOTE: mindspore tensors have no in-place ops, so the `inplace` flag is accepted but ignored.\n    \"\"\"\n    B, C, H, W = x.shape\n    total_size = W * H\n    clipped_block_size = min(block_size, min(W, H))\n    # seed_drop_rate, the gamma parameter\n    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (\n        (W - block_size + 1) * (H - block_size + 1))\n\n    # Forces the block to be inside the feature map.\n    w_i, h_i = ops.meshgrid(ops.arange(W), ops.arange(H), indexing='ij')\n    valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \\\n                  ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))\n    valid_block = ops.reshape(valid_block, (1, 1, H, W)).astype(x.dtype)\n\n    if batchwise:\n        # one mask for whole batch, quite a bit faster\n        uniform_noise = ops.rand((1, C, H, W), dtype=x.dtype)\n    else:\n        uniform_noise = ops.rand_like(x)\n    block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).astype(x.dtype)\n    block_mask = -ops.max_pool2d(\n        -block_mask,\n        kernel_size=clipped_block_size,  # block_size,\n        stride=1,\n        padding=clipped_block_size // 2)\n\n    if with_noise:\n        normal_noise = ops.randn((1, C, H, W), dtype=x.dtype) if batchwise else ops.randn_like(x)\n        x = x * block_mask + normal_noise * (1 - block_mask)\n    else:\n        normalize_scale = (block_mask.size / (block_mask.astype(ms.float32).sum() + 1e-7)).astype(x.dtype)\n        x = x * block_mask * normalize_scale\n    return x\n\n\ndef drop_block_fast_2d(\n        x: ms.Tensor, drop_prob: float = 0.1, block_size: int = 7,\n        gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):\n    \"\"\" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf\n\n    DropBlock with an experimental gaussian noise option. Simplified from above without concern for valid\n    block mask at edges.\n    \"\"\"\n    B, C, H, W = x.shape\n    total_size = W * H\n    clipped_block_size = min(block_size, min(W, H))\n    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (\n            (W - block_size + 1) * (H - block_size + 1))\n\n    if batchwise:\n        # one mask for whole batch, quite a bit faster\n        block_mask = ops.rand((1, C, H, W), dtype=x.dtype) < gamma\n    else:\n        # mask per batch element\n        block_mask = ops.rand_like(x) < gamma\n    block_mask = ops.max_pool2d(\n        block_mask.astype(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)\n\n    if with_noise:\n        normal_noise = ops.randn((1, C, H, W), dtype=x.dtype) if batchwise else ops.randn_like(x)\n        x = x * (1. - block_mask) + normal_noise * block_mask\n    else:\n        block_mask = 1 - block_mask\n        normalize_scale = (block_mask.size / (block_mask.astype(ms.float32).sum() + 1e-7)).astype(x.dtype)\n        x = x * block_mask * normalize_scale\n    return x\n\n\nclass DropBlock2d(nn.Cell):\n    \"\"\" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf\n    \"\"\"\n    def __init__(self,\n                 drop_prob=0.1,\n                 block_size=7,\n                 gamma_scale=1.0,\n                 with_noise=False,\n                 inplace=False,\n                 batchwise=False,\n                 fast=True):\n        super(DropBlock2d, self).__init__()\n        self.drop_prob = drop_prob\n        self.gamma_scale = gamma_scale\n        self.block_size = block_size\n        self.with_noise = with_noise\n        self.inplace = inplace\n        self.batchwise = batchwise\n        self.fast = fast  # FIXME finish comparisons of fast vs not\n\n    def construct(self, x):\n        if not self.training or not self.drop_prob:\n            return x\n        if self.fast:\n            return drop_block_fast_2d(\n                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)\n        else:\n            return drop_block_2d(\n                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)\n\n\ndef drop_path(x, drop_prob: float = 0., training: bool = False):\n    \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n\n    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,\n    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...\n    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for\n    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use\n    'survival rate' as the argument.\n\n    \"\"\"\n    if drop_prob == 0. or not training:\n        return x\n    keep_prob = 1 - drop_prob\n    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets\n    random_tensor = keep_prob + ops.rand(shape, dtype=x.dtype)\n    random_tensor = random_tensor.floor()  # binarize\n    output = x / keep_prob * random_tensor\n    return output\n\nclass DropPath(nn.Cell):\n    \"\"\"Drop paths (Stochastic Depth) per sample  (when applied in main path of residual blocks).\n    \"\"\"\n    def __init__(self, drop_prob=None):\n        super(DropPath, self).__init__()\n        self.drop_prob = drop_prob\n\n    def construct(self, x):\n        return drop_path(x, self.drop_prob, self.training)\n"
  },
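  {
    "path": "artrackv2_mindspore/lib/models/component/drop_example.py",
    "content": "# Hypothetical usage sketch, not part of the original ARTrack release: DropPath\n# zeroes an entire residual branch per sample with probability drop_prob and\n# rescales survivors by 1 / keep_prob, so the branch keeps its expected value\n# during training; in eval mode it is the identity. Assumes it is run from this\n# directory.\nimport mindspore as ms\nfrom mindspore import ops\n\nfrom drop import DropPath\n\nif __name__ == \"__main__\":\n    dp = DropPath(drop_prob=0.2)\n    x = ops.ones((8, 4, 16), ms.float32)\n    dp.set_train(True)\n    y = dp(x)  # each sample is either all zeros or scaled by 1 / 0.8 = 1.25\n    dp.set_train(False)\n    assert (dp(x) == x).all()  # identity at inference\n"
  },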
  {
    "path": "artrackv2_mindspore/lib/models/component/mlp.py",
    "content": "\"\"\" MLP module w/ dropout and configurable activation layer\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nimport numpy as np\n\nimport mindspore as ms\nfrom mindspore import nn\nfrom mindspore import ops\n\nclass Mlp(nn.Cell):\n    \"\"\" MLP as used in Vision Transformer, MLP-Mixer and related networks\n    \"\"\"\n    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        self.fc1 = nn.Dense(in_features, hidden_features)\n        self.act = act_layer()\n        self.fc2 = nn.Dense(hidden_features, out_features)\n        self.drop = nn.Dropout(p=drop)\n\n    def construct(self, x):\n        x = self.fc1(x)\n        x = self.act(x)\n        x = self.drop(x)\n        x = self.fc2(x)\n        x = self.drop(x)\n        return x\n\nclass MultiLayerMlp(nn.Cell):\n    \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, BN=False):\n        super().__init__()\n        self.num_layers = num_layers\n        h = [hidden_dim] * (num_layers - 1)\n        if BN:\n            self.layers = nn.CellList([nn.SequentialCell(nn.Dense(n, k), nn.BatchNorm1d(k))\n                                       for n, k in zip([input_dim] + h, h + [output_dim])])\n        else:\n            self.layers = nn.CellList([nn.Dense(n, k)\n                                       for n, k in zip([input_dim] + h, h + [output_dim])])\n\n    def construct(self, x):\n        for i, layer in enumerate(self.layers):\n            x = ops.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n        return x\n\nclass GluMlp(nn.Cell):\n    \"\"\" MLP w/ GLU style gating\n    See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202\n    \"\"\"\n    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        assert hidden_features % 2 == 0\n        self.fc1 = nn.Dense(in_features, hidden_features)\n        self.act = act_layer()\n        self.fc2 = nn.Dense(hidden_features // 2, out_features)\n        self.drop = nn.Dropout(p=drop)\n\n    def init_weights(self):\n        # override init of fc1: the gate half starts with bias 1 and near-zero weights\n        fc1_mid = self.fc1.bias.shape[0] // 2\n        bias = self.fc1.bias.asnumpy()\n        bias[fc1_mid:] = 1.0\n        self.fc1.bias.set_data(ms.Tensor(bias))\n        weight = self.fc1.weight.asnumpy()\n        weight[fc1_mid:] = np.random.normal(0.0, 1e-6, weight[fc1_mid:].shape)\n        self.fc1.weight.set_data(ms.Tensor(weight))\n\n    def construct(self, x):\n        x = self.fc1(x)\n        x, gates = ops.chunk(x, 2, axis=-1)\n        x = x * self.act(gates)\n        x = self.drop(x)\n        x = self.fc2(x)\n        x = self.drop(x)\n        return x\n\n\nclass GatedMlp(nn.Cell):\n    \"\"\" MLP as used in gMLP\n    \"\"\"\n    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,\n                 gate_layer=None, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        self.fc1 = nn.Dense(in_features, hidden_features)\n        self.act = act_layer()\n        if gate_layer is not None:\n            assert hidden_features % 2 == 0\n            self.gate = gate_layer(hidden_features)\n            hidden_features = hidden_features // 2  # FIXME base reduction on gate property?\n        else:\n            self.gate = nn.Identity()\n        self.fc2 = nn.Dense(hidden_features, out_features)\n        self.drop = nn.Dropout(p=drop)\n\n    def construct(self, x):\n        x = self.fc1(x)\n        x = self.act(x)\n        x = self.drop(x)\n        x = self.gate(x)\n        x = self.fc2(x)\n        x = self.drop(x)\n        return x\n\n\nclass ConvMlp(nn.Cell):\n    \"\"\" MLP using 1x1 convs that keeps spatial dims\n    \"\"\"\n    def __init__(\n            self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, has_bias=True)\n        self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()\n        self.act = act_layer()\n        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, has_bias=True)\n        self.drop = nn.Dropout(p=drop)\n\n    def construct(self, x):\n        x = self.fc1(x)\n        x = self.norm(x)\n        x = self.act(x)\n        x = self.drop(x)\n        x = self.fc2(x)\n        return x\n"
  },
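  {
    "path": "artrackv2_mindspore/lib/models/component/mlp_example.py",
    "content": "# Hypothetical usage sketch, not part of the original ARTrack release: GluMlp\n# halves the hidden width because fc1 produces both the value and the gate\n# (x, gates = chunk(h, 2)), which is why hidden_features must be even. Assumes\n# it is run from this directory.\nfrom mindspore import ops\n\nfrom mlp import Mlp, GluMlp\n\nif __name__ == \"__main__\":\n    x = ops.randn(2, 10, 32)\n    mlp = Mlp(in_features=32, hidden_features=64)\n    glu = GluMlp(in_features=32, hidden_features=64)  # 64 -> a value/gate pair of 32 each\n    assert mlp(x).shape == glu(x).shape == (2, 10, 32)\n"
  },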
  {
    "path": "artrackv2_mindspore/lib/models/component/norm.py",
    "content": "# -*- coding:utf-8 -*-\n# author  : Skye Song\n# file    : norm.py\n# Copyright (c) Skye-Song. All Rights Reserved\n\nimport torch\n\nclass FrozenBatchNorm2d(torch.nn.Module):\n    \"\"\"\n    BatchNorm2d where the batch statistics and the affine parameters are fixed.\n\n    Copy-paste from torchvision.misc.ops with added eps before rsqrt,\n    without which any other models than torchvision.models.resnet[18,34,50,101]\n    produce nans.\n    \"\"\"\n\n    def __init__(self, n):\n        super(FrozenBatchNorm2d, self).__init__()\n        self.register_buffer(\"weight\", torch.ones(n))\n        self.register_buffer(\"bias\", torch.zeros(n))\n        self.register_buffer(\"running_mean\", torch.zeros(n))\n        self.register_buffer(\"running_var\", torch.ones(n))\n\n    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n                              missing_keys, unexpected_keys, error_msgs):\n        num_batches_tracked_key = prefix + 'num_batches_tracked'\n        if num_batches_tracked_key in state_dict:\n            del state_dict[num_batches_tracked_key]\n\n        super(FrozenBatchNorm2d, self)._load_from_state_dict(\n            state_dict, prefix, local_metadata, strict,\n            missing_keys, unexpected_keys, error_msgs)\n\n    def forward(self, x):\n        # move reshapes to the beginning\n        # to make it fuser-friendly\n        w = self.weight.reshape(1, -1, 1, 1)\n        b = self.bias.reshape(1, -1, 1, 1)\n        rv = self.running_var.reshape(1, -1, 1, 1)\n        rm = self.running_mean.reshape(1, -1, 1, 1)\n        eps = 1e-5\n        scale = w * (rv + eps).rsqrt()  # rsqrt(x): 1/sqrt(x), r: reciprocal\n        bias = b - rm * scale\n        return x * scale + bias"
  },
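  {
    "path": "artrackv2_mindspore/lib/models/component/norm_example.py",
    "content": "# Hypothetical usage sketch, not part of the original ARTrack release: with its\n# default buffers (weight=1, bias=0, running stats 0/1), FrozenBatchNorm2d\n# should match torch.nn.BatchNorm2d in eval mode, since both reduce to\n# x / sqrt(var + eps) with fixed statistics.\nimport torch\n\nfrom norm import FrozenBatchNorm2d\n\nif __name__ == \"__main__\":\n    fbn = FrozenBatchNorm2d(8)\n    bn = torch.nn.BatchNorm2d(8, eps=1e-5).eval()\n    x = torch.randn(2, 8, 4, 4)\n    assert torch.allclose(fbn(x), bn(x), atol=1e-5)\n"
  },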
  {
    "path": "artrackv2_mindspore/lib/models/component/patch_embed.py",
    "content": "\"\"\" Image to Patch Embedding using Conv2d\n\nA convolution based approach to patchifying a 2D image w/ embedding projection.\n\nBased on the impl in https://github.com/google-research/vision_transformer\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\n\nfrom torch import nn as nn\n\nfrom itertools import repeat\nimport collections.abc\n\n\n# From PyTorch internals\ndef _ntuple(n):\n    def parse(x):\n        if isinstance(x, collections.abc.Iterable):\n            return x\n        return tuple(repeat(x, n))\n    return parse\n\n\nto_1tuple = _ntuple(1)\nto_2tuple = _ntuple(2)\nto_3tuple = _ntuple(3)\nto_4tuple = _ntuple(4)\nto_ntuple = _ntuple\n\n\nclass PatchEmbed(nn.Module):\n    \"\"\" 2D Image to Patch Embedding\n    \"\"\"\n    def __init__(self, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):\n        super().__init__()\n        self.flatten = flatten\n        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\n        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()\n\n    def forward(self, x):\n        x = self.proj(x)\n        if self.flatten:\n            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC\n        x = self.norm(x)\n        return x\n"
  },
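  {
    "path": "artrackv2_mindspore/lib/models/component/patch_embed_example.py",
    "content": "# Hypothetical usage sketch, not part of the original ARTrack release:\n# PatchEmbed is a strided convolution, so a 224x224 image with patch_size=16\n# yields 14 * 14 = 196 tokens after the BCHW -> BNC flatten. Assumes it is run\n# from this directory.\nimport torch\n\nfrom patch_embed import PatchEmbed\n\nif __name__ == \"__main__\":\n    embed = PatchEmbed(patch_size=16, in_chans=3, embed_dim=768)\n    x = torch.randn(2, 3, 224, 224)\n    tokens = embed(x)\n    assert tokens.shape == (2, 14 * 14, 768)\n"
  },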
  {
    "path": "artrackv2_mindspore/lib/models/component/pos_embed.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n# --------------------------------------------------------\n# Position embedding utils\n# --------------------------------------------------------\nimport numpy as np\nimport torch\n\n# --------------------------------------------------------\n# 2D sine-cosine position embedding\n# References:\n# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py\n# MoCo v3: https://github.com/facebookresearch/moco-v3\n# --------------------------------------------------------\ndef get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):\n    \"\"\"\n    grid_size: int of the grid height and width\n    return:\n    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)\n    \"\"\"\n    grid_h = np.arange(grid_size, dtype=np.float32)\n    grid_w = np.arange(grid_size, dtype=np.float32)\n    grid = np.meshgrid(grid_w, grid_h)  # here w goes first\n    grid = np.stack(grid, axis=0)\n\n    grid = grid.reshape([2, 1, grid_size, grid_size])\n    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)\n    if cls_token:\n        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)\n    return pos_embed\n\n\ndef get_2d_sincos_pos_embed_from_grid(embed_dim, grid):\n    assert embed_dim % 2 == 0\n\n    # use half of dimensions to encode grid_h\n    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)\n    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)\n\n    emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)\n    return emb\n\n\ndef get_1d_sincos_pos_embed_from_grid(embed_dim, pos):\n    \"\"\"\n    embed_dim: output dimension for each position\n    pos: a list of positions to be encoded: size (M,)\n    out: (M, D)\n    \"\"\"\n    assert embed_dim % 2 == 0\n    omega = np.arange(embed_dim // 2, dtype=float)\n    omega /= embed_dim / 2.\n    omega = 1. 
/ 10000**omega  # (D/2,)\n\n    pos = pos.reshape(-1)  # (M,)\n    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product\n\n    emb_sin = np.sin(out) # (M, D/2)\n    emb_cos = np.cos(out) # (M, D/2)\n\n    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)\n    return emb\n\n\n\n# --------------------------------------------------------\n# Interpolate position embeddings for high-resolution\n# References:\n# DeiT: https://github.com/facebookresearch/deit\n# --------------------------------------------------------\ndef interpolate_pos_embed(model, checkpoint_model):\n    if 'pos_embed' in checkpoint_model:\n        pos_embed_checkpoint = checkpoint_model['pos_embed']\n        embedding_size = pos_embed_checkpoint.shape[-1]\n        num_patches = model.patch_embed.num_patches\n        num_extra_tokens = model.pos_embed.shape[-2] - num_patches\n        # height (== width) for the checkpoint position embedding\n        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n        # height (== width) for the new position embedding\n        new_size = int(num_patches ** 0.5)\n        # class_token and dist_token are kept unchanged\n        if orig_size != new_size:\n            print(\"Position interpolate from %dx%d to %dx%d\" % (orig_size, orig_size, new_size, new_size))\n            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n            # only the position tokens are interpolated\n            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n            pos_tokens = torch.nn.functional.interpolate(\n                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n            checkpoint_model['pos_embed'] = new_pos_embed\n"
  },
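  {
    "path": "artrackv2_mindspore/lib/models/component/pos_embed_example.py",
    "content": "# Hypothetical usage sketch, not part of the original ARTrack release: the 2D\n# sine-cosine table is built per axis -- half of embed_dim encodes the row\n# index and half the column index -- so embed_dim must be divisible by 4 for a\n# square grid. Assumes it is run from this directory.\nimport numpy as np\n\nfrom pos_embed import get_2d_sincos_pos_embed\n\nif __name__ == \"__main__\":\n    pos = get_2d_sincos_pos_embed(embed_dim=768, grid_size=14)\n    assert pos.shape == (14 * 14, 768)\n    with_cls = get_2d_sincos_pos_embed(embed_dim=768, grid_size=14, cls_token=True)\n    assert with_cls.shape == (1 + 14 * 14, 768)\n    assert np.allclose(with_cls[0], 0.0)  # the cls slot is all zeros\n"
  },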
  {
    "path": "artrackv2_mindspore/lib/models/component/weight_init.py",
    "content": "import torch\nimport math\nimport warnings\n\nfrom torch.nn.init import _calculate_fan_in_and_fan_out\n\ndef _no_grad_trunc_normal_(tensor, mean, std, a, b):\n    # Cut & paste from PyTorch official master until it's in a few official releases - RW\n    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n    def norm_cdf(x):\n        # Computes standard normal cumulative distribution function\n        return (1. + math.erf(x / math.sqrt(2.))) / 2.\n\n    if (mean < a - 2 * std) or (mean > b + 2 * std):\n        warnings.warn(\"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. \"\n                      \"The distribution of values may be incorrect.\",\n                      stacklevel=2)\n\n    with torch.no_grad():\n        # Values are generated by using a truncated uniform distribution and\n        # then using the inverse CDF for the normal distribution.\n        # Get upper and lower cdf values\n        l = norm_cdf((a - mean) / std)\n        u = norm_cdf((b - mean) / std)\n\n        # Uniformly fill tensor with values from [l, u], then translate to\n        # [2l-1, 2u-1].\n        tensor.uniform_(2 * l - 1, 2 * u - 1)\n\n        # Use inverse cdf transform for normal distribution to get truncated\n        # standard normal\n        tensor.erfinv_()\n\n        # Transform to proper mean, std\n        tensor.mul_(std * math.sqrt(2.))\n        tensor.add_(mean)\n\n        # Clamp to ensure it's in the proper range\n        tensor.clamp_(min=a, max=b)\n        return tensor\n\n\ndef trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):\n    # type: (Tensor, float, float, float, float) -> Tensor\n    r\"\"\"Fills the input Tensor with values drawn from a truncated\n    normal distribution. The values are effectively drawn from the\n    normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n    with values outside :math:`[a, b]` redrawn until they are within\n    the bounds. The method used for generating the random values works\n    best when :math:`a \\leq \\text{mean} \\leq b`.\n    Args:\n        tensor: an n-dimensional `torch.Tensor`\n        mean: the mean of the normal distribution\n        std: the standard deviation of the normal distribution\n        a: the minimum cutoff value\n        b: the maximum cutoff value\n    Examples:\n        >>> w = torch.empty(3, 5)\n        >>> nn.init.trunc_normal_(w)\n    \"\"\"\n    return _no_grad_trunc_normal_(tensor, mean, std, a, b)\n\n\ndef variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):\n    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n    if mode == 'fan_in':\n        denom = fan_in\n    elif mode == 'fan_out':\n        denom = fan_out\n    elif mode == 'fan_avg':\n        denom = (fan_in + fan_out) / 2\n\n    variance = scale / denom\n\n    if distribution == \"truncated_normal\":\n        # constant is stddev of standard normal truncated to (-2, 2)\n        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)\n    elif distribution == \"normal\":\n        tensor.normal_(std=math.sqrt(variance))\n    elif distribution == \"uniform\":\n        bound = math.sqrt(3 * variance)\n        tensor.uniform_(-bound, bound)\n    else:\n        raise ValueError(f\"invalid distribution {distribution}\")\n\n\ndef lecun_normal_(tensor):\n    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/layers/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/lib/models/layers/attn.py",
    "content": "import sys\r\nsys.path.append(\"/home/baiyifan/code/AR2_mindspore_cp/2stage\")\r\nimport mindspore as ms\r\nfrom mindspore import ops\r\nfrom mindspore import nn\r\nfrom mindspore.common.initializer import initializer, TruncatedNormal\r\n\r\nfrom lib.models.layers.rpe import generate_2d_concatenated_self_attention_relative_positional_encoding_index\r\n\r\n\r\nclass Attention(nn.Cell):\r\n    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.,\r\n                 rpe=False, z_size=7, x_size=14):\r\n        super().__init__()\r\n        self.num_heads = num_heads\r\n        head_dim = dim // num_heads\r\n        self.scale = head_dim ** -0.5\r\n\r\n        self.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)\r\n        self.attn_drop = nn.Dropout(p=attn_drop)\r\n        self.proj = nn.Dense(dim, dim)\r\n        self.proj_drop = nn.Dropout(p=proj_drop)\r\n\r\n        self.rpe = rpe\r\n        if self.rpe:\r\n            # constant index table; kept as a plain attribute since nn.Cell has no register_buffer\r\n            self.relative_position_index = \\\r\n                generate_2d_concatenated_self_attention_relative_positional_encoding_index([z_size, z_size],\r\n                                                                                           [x_size, x_size])\r\n            # define a parameter table of relative position bias\r\n            self.relative_position_bias_table = ms.Parameter(\r\n                initializer(TruncatedNormal(sigma=0.02),\r\n                            (num_heads, int(self.relative_position_index.max()) + 1)))\r\n\r\n    def construct(self, x, mask=None, return_attention=False):\r\n        # x: B, N, C\r\n        # mask: [B, N, ] bool\r\n        B, N, C = x.shape\r\n        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\r\n        q, k, v = qkv[0], qkv[1], qkv[2]\r\n\r\n        attn = (q @ k.swapaxes(-2, -1)) * self.scale\r\n\r\n        if self.rpe:\r\n            relative_position_bias = self.relative_position_bias_table[:, self.relative_position_index].unsqueeze(0)\r\n            attn += relative_position_bias\r\n\r\n        if mask is not None:\r\n            attn = attn.masked_fill(mask.unsqueeze(1).unsqueeze(2), float('-inf'),)\r\n\r\n        split_attn = False\r\n        len_t = 49\r\n        if split_attn:\r\n            attn_t = ops.softmax(attn[..., :len_t], axis=-1)\r\n            attn_s = ops.softmax(attn[..., len_t:], axis=-1)\r\n            attn = ops.cat([attn_t, attn_s], axis=-1)\r\n        else:\r\n            attn = ops.softmax(attn, axis=-1)\r\n        attn = self.attn_drop(attn)\r\n\r\n        x = (attn @ v).swapaxes(1, 2).reshape(B, N, C)\r\n        x = self.proj(x)\r\n        x = self.proj_drop(x)\r\n\r\n        if return_attention:\r\n            return x, attn\r\n        else:\r\n            return x\r\n\r\n\r\nclass Attention_talking_head(nn.Cell):\r\n    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py\r\n    # with slight modifications to add Talking Heads Attention (https://arxiv.org/pdf/2003.02436v1.pdf)\r\n    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,\r\n                 rpe=True, z_size=7, x_size=14):\r\n        super().__init__()\r\n\r\n        self.num_heads = num_heads\r\n\r\n        head_dim = dim // num_heads\r\n\r\n        self.scale = qk_scale or head_dim ** -0.5\r\n\r\n        self.qkv = nn.Dense(dim, dim * 3, has_bias=qkv_bias)\r\n        self.attn_drop = nn.Dropout(p=attn_drop)\r\n\r\n        self.proj = nn.Dense(dim, dim)\r\n\r\n        self.proj_l = nn.Dense(num_heads, num_heads)\r\n        self.proj_w = nn.Dense(num_heads, num_heads)\r\n\r\n        self.proj_drop = nn.Dropout(p=proj_drop)\r\n\r\n        self.rpe = rpe\r\n        if self.rpe:\r\n            # constant index table; kept as a plain attribute since nn.Cell has no register_buffer\r\n            self.relative_position_index = \\\r\n                generate_2d_concatenated_self_attention_relative_positional_encoding_index([z_size, z_size],\r\n                                                                                           [x_size, x_size])\r\n            # define a parameter table of relative position bias\r\n            self.relative_position_bias_table = ms.Parameter(\r\n                initializer(TruncatedNormal(sigma=0.02),\r\n                            (num_heads, int(self.relative_position_index.max()) + 1)))\r\n\r\n    def construct(self, x, mask=None):\r\n        B, N, C = x.shape\r\n        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\r\n        q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]\r\n\r\n        attn = (q @ k.swapaxes(-2, -1))\r\n\r\n        if self.rpe:\r\n            relative_position_bias = self.relative_position_bias_table[:, self.relative_position_index].unsqueeze(0)\r\n            attn += relative_position_bias\r\n\r\n        if mask is not None:\r\n            attn = attn.masked_fill(mask.unsqueeze(1).unsqueeze(2),\r\n                                    float('-inf'),)\r\n\r\n        attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\r\n\r\n        attn = ops.softmax(attn, axis=-1)\r\n\r\n        attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\r\n        attn = self.attn_drop(attn)\r\n\r\n        x = (attn @ v).swapaxes(1, 2).reshape(B, N, C)\r\n        x = self.proj(x)\r\n        x = self.proj_drop(x)\r\n        return x\r\n"
  },
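  {
    "path": "artrackv2_mindspore/lib/models/layers/attn_example.py",
    "content": "# Hypothetical usage sketch, not part of the original ARTrack release: with\n# rpe=True the Attention cell expects its input to be the concatenation of\n# z_size*z_size template tokens and x_size*x_size search tokens, because the\n# relative-position index table is generated for exactly that joint sequence.\n# Assumes the repo root is on sys.path, as attn.py itself arranges.\nfrom mindspore import ops\n\nfrom lib.models.layers.attn import Attention\n\nif __name__ == \"__main__\":\n    z_size, x_size, dim = 7, 14, 64\n    N = z_size * z_size + x_size * x_size  # 49 + 196 joint tokens\n    attn = Attention(dim, num_heads=8, rpe=True, z_size=z_size, x_size=x_size)\n    x = ops.randn(2, N, dim)\n    y = attn(x)\n    assert y.shape == (2, N, dim)\n"
  },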
  {
    "path": "artrackv2_mindspore/lib/models/layers/attn_blocks.py",
    "content": "import math\r\nimport torch\r\nimport torch.nn as nn\r\nfrom timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_\r\n\r\nfrom lib.models.layers.attn import Attention\r\n\r\n\r\ndef candidate_elimination(attn: torch.Tensor, tokens: torch.Tensor, lens_t: int, keep_ratio: float, global_index: torch.Tensor, box_mask_z: torch.Tensor):\r\n    \"\"\"\r\n    Eliminate potential background candidates for computation reduction and noise cancellation.\r\n    Args:\r\n        attn (torch.Tensor): [B, num_heads, L_t + L_s, L_t + L_s], attention weights\r\n        tokens (torch.Tensor):  [B, L_t + L_s, C], template and search region tokens\r\n        lens_t (int): length of template\r\n        keep_ratio (float): keep ratio of search region tokens (candidates)\r\n        global_index (torch.Tensor): global index of search region tokens\r\n        box_mask_z (torch.Tensor): template mask used to accumulate attention weights\r\n\r\n    Returns:\r\n        tokens_new (torch.Tensor): tokens after candidate elimination\r\n        keep_index (torch.Tensor): indices of kept search region tokens\r\n        removed_index (torch.Tensor): indices of removed search region tokens\r\n    \"\"\"\r\n    lens_s = attn.shape[-1] - lens_t\r\n    bs, hn, _, _ = attn.shape\r\n\r\n    lens_keep = math.ceil(keep_ratio * lens_s)\r\n    if lens_keep == lens_s:\r\n        return tokens, global_index, None\r\n\r\n    attn_t = attn[:, :, :lens_t, lens_t:]\r\n\r\n    if box_mask_z is not None:\r\n        box_mask_z = box_mask_z.unsqueeze(1).unsqueeze(-1).expand(-1, attn_t.shape[1], -1, attn_t.shape[-1])\r\n        # attn_t = attn_t[:, :, box_mask_z, :]\r\n        attn_t = attn_t[box_mask_z]\r\n        attn_t = attn_t.view(bs, hn, -1, lens_s)\r\n        attn_t = attn_t.mean(dim=2).mean(dim=1)  # B, H, L-T, L_s --> B, L_s\r\n\r\n        # attn_t = [attn_t[i, :, box_mask_z[i, :], :] for i in range(attn_t.size(0))]\r\n        # attn_t = [attn_t[i].mean(dim=1).mean(dim=0) for i in range(len(attn_t))]\r\n        # attn_t = torch.stack(attn_t, dim=0)\r\n    else:\r\n        attn_t = attn_t.mean(dim=2).mean(dim=1)  # B, H, L-T, L_s --> B, L_s\r\n\r\n    # use sort instead of topk, due to the speed issue\r\n    # https://github.com/pytorch/pytorch/issues/22812\r\n    sorted_attn, indices = torch.sort(attn_t, dim=1, descending=True)\r\n\r\n    topk_attn, topk_idx = sorted_attn[:, :lens_keep], indices[:, :lens_keep]\r\n    non_topk_attn, non_topk_idx = sorted_attn[:, lens_keep:], indices[:, lens_keep:]\r\n\r\n    keep_index = global_index.gather(dim=1, index=topk_idx)\r\n    removed_index = global_index.gather(dim=1, index=non_topk_idx)\r\n\r\n    # separate template and search tokens\r\n    tokens_t = tokens[:, :lens_t]\r\n    tokens_s = tokens[:, lens_t:]\r\n\r\n    # obtain the attentive and inattentive tokens\r\n    B, L, C = tokens_s.shape\r\n    # topk_idx_ = topk_idx.unsqueeze(-1).expand(B, lens_keep, C)\r\n    attentive_tokens = tokens_s.gather(dim=1, index=topk_idx.unsqueeze(-1).expand(B, -1, C))\r\n    # inattentive_tokens = tokens_s.gather(dim=1, index=non_topk_idx.unsqueeze(-1).expand(B, -1, C))\r\n\r\n    # compute the weighted combination of inattentive tokens\r\n    # fused_token = non_topk_attn @ inattentive_tokens\r\n\r\n    # concatenate these tokens\r\n    # tokens_new = torch.cat([tokens_t, attentive_tokens, fused_token], dim=0)\r\n    tokens_new = torch.cat([tokens_t, attentive_tokens], dim=1)\r\n\r\n    return tokens_new, keep_index, removed_index\r\n\r\n\r\nclass 
CEBlock(nn.Module):\r\n\r\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\r\n                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, keep_ratio_search=1.0,):\r\n        super().__init__()\r\n        self.norm1 = norm_layer(dim)\r\n        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\r\n        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\r\n        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\r\n        self.norm2 = norm_layer(dim)\r\n        mlp_hidden_dim = int(dim * mlp_ratio)\r\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\r\n\r\n        self.keep_ratio_search = keep_ratio_search\r\n\r\n    def forward(self, x, global_index_template, global_index_search, mask=None, ce_template_mask=None, keep_ratio_search=None):\r\n        x_attn, attn = self.attn(self.norm1(x), mask, True)\r\n        x = x + self.drop_path(x_attn)\r\n        lens_t = global_index_template.shape[1]\r\n\r\n        removed_index_search = None\r\n        if self.keep_ratio_search < 1 and (keep_ratio_search is None or keep_ratio_search < 1):\r\n            keep_ratio_search = self.keep_ratio_search if keep_ratio_search is None else keep_ratio_search\r\n            x, global_index_search, removed_index_search = candidate_elimination(attn, x, lens_t, keep_ratio_search, global_index_search, ce_template_mask)\r\n\r\n        x = x + self.drop_path(self.mlp(self.norm2(x)))\r\n        return x, global_index_template, global_index_search, removed_index_search, attn\r\n\r\n\r\nclass Block(nn.Module):\r\n\r\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\r\n                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\r\n        super().__init__()\r\n        self.norm1 = norm_layer(dim)\r\n        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\r\n        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\r\n        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\r\n        self.norm2 = norm_layer(dim)\r\n        mlp_hidden_dim = int(dim * mlp_ratio)\r\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\r\n\r\n    def forward(self, x, mask=None):\r\n        x = x + self.drop_path(self.attn(self.norm1(x), mask))\r\n        x = x + self.drop_path(self.mlp(self.norm2(x)))\r\n        return x\r\n"
  },
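  {
    "path": "artrackv2_mindspore/lib/models/layers/attn_blocks_example.py",
    "content": "# Hypothetical usage sketch, not part of the original ARTrack release:\n# candidate_elimination ranks search tokens by the attention they receive from\n# template tokens and keeps the top ceil(keep_ratio * lens_s), so the token\n# sequence shrinks while the kept global indices are returned for bookkeeping.\nimport math\nimport torch\n\nfrom lib.models.layers.attn_blocks import candidate_elimination\n\nif __name__ == \"__main__\":\n    B, H, lens_t, lens_s, C = 2, 4, 16, 64, 32\n    L = lens_t + lens_s\n    attn = torch.rand(B, H, L, L).softmax(dim=-1)\n    tokens = torch.randn(B, L, C)\n    global_index = torch.arange(lens_s).unsqueeze(0).expand(B, -1)\n    keep_ratio = 0.7\n    new_tokens, keep_idx, removed_idx = candidate_elimination(\n        attn, tokens, lens_t, keep_ratio, global_index, box_mask_z=None)\n    lens_keep = math.ceil(keep_ratio * lens_s)\n    assert new_tokens.shape == (B, lens_t + lens_keep, C)\n    assert keep_idx.shape == (B, lens_keep)\n"
  },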
  {
    "path": "artrackv2_mindspore/lib/models/layers/head.py",
    "content": "import sys\nimport mindspore as ms\nimport mindspore.nn as nn\nfrom mindspore import ops\nfrom mindspore import Tensor\nfrom mindspore.nn import Identity\nfrom mindspore.nn.probability.distribution import Categorical\n\nfrom lib.models.timm import *\n\nimport copy\nfrom typing import Optional\n\ndef top_k_top_p_filtering_batch(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):\n    \"\"\" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering\n        Args:\n            logits: logits distribution shape (vocabulary size)\n            top_k > 0: keep only top k tokens with highest probability (top-k filtering).\n            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).\n                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)\n        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317\n    \"\"\"\n    top_k = min(top_k, logits.size(-1))  # Safety check\n    if top_k > 0:\n        # Remove all tokens with a probability less than the last token of the top-k\n        # ops.topk()返回最后一维最大的top_k个元素，返回值为二维(values,indices)\n        # ...表示其他维度由计算机自行推断\n        for i in range(logits.shape[0]):\n            indices_to_remove = logits[i] < ops.topk(logits[i], top_k)[0][..., -1, None]\n            logits[i][indices_to_remove] = filter_value  # 对于topk之外的其他元素的logits值设为负无穷\n\n    if top_p > 0.0:\n        for i in range(logits.shape[0]):\n            sorted_logits, sorted_indices = ops.sort(logits[i], descending=True)  # 对logits进行递减排序\n            cumulative_probs = ops.cumsum(ops.softmax(sorted_logits, axis=-1), axis=-1)\n\n            # Remove tokens with cumulative probability above the threshold\n            sorted_indices_to_remove = cumulative_probs > top_p\n            # Shift the indices to the right to keep also the first token above the threshold\n            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n            sorted_indices_to_remove[..., 0] = 0\n\n            indices_to_remove = sorted_indices[sorted_indices_to_remove]\n            logits[i][indices_to_remove] = filter_value\n    return logits\n    \ndef conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1,\n         freeze_bn=False):\n\n    return nn.Sequential(\n        nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n                    padding=padding, dilation=dilation, bias=True),\n        nn.BatchNorm2d(out_planes),\n        nn.ReLU(inplace=True))\n\n\nclass Corner_Predictor(nn.Cell):\n    \"\"\" Corner Predictor module\"\"\"\n\n    def __init__(self, inplanes=64, channel=256, feat_sz=20, stride=16, freeze_bn=False):\n        super(Corner_Predictor, self).__init__()\n        self.feat_sz = feat_sz\n        self.stride = stride\n        self.img_sz = self.feat_sz * self.stride\n        '''top-left corner'''\n        self.conv1_tl = conv(inplanes, channel, freeze_bn=freeze_bn)\n        self.conv2_tl = conv(channel, channel // 2, freeze_bn=freeze_bn)\n        self.conv3_tl = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\n        self.conv4_tl = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\n        self.conv5_tl = nn.Conv2d(channel // 8, 1, kernel_size=1)\n\n        '''bottom-right corner'''\n        self.conv1_br = conv(inplanes, channel, freeze_bn=freeze_bn)\n        self.conv2_br = conv(channel, channel // 2, freeze_bn=freeze_bn)\n        self.conv3_br = conv(channel // 2, channel // 
4, freeze_bn=freeze_bn)\n        self.conv4_br = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\n        self.conv5_br = nn.Conv2d(channel // 8, 1, kernel_size=1)\n\n        '''about coordinates and indexs'''\n        self.indice = ops.arange(0, self.feat_sz).view(-1, 1) * self.stride\n        # generate mesh-grid\n        self.coord_x = self.indice.repeat((self.feat_sz, 1)).view((self.feat_sz * self.feat_sz,)).float()\n        self.coord_y = self.indice.repeat((1, self.feat_sz)).view((self.feat_sz * self.feat_sz,)).float()\n\n    def construct(self, x, return_dist=False, softmax=True):\n        \"\"\" Forward pass with input x. \"\"\"\n        score_map_tl, score_map_br = self.get_score_map(x)\n        if return_dist:\n            coorx_tl, coory_tl, prob_vec_tl = self.soft_argmax(score_map_tl, return_dist=True, softmax=softmax)\n            coorx_br, coory_br, prob_vec_br = self.soft_argmax(score_map_br, return_dist=True, softmax=softmax)\n            return ops.stack((coorx_tl, coory_tl, coorx_br, coory_br), axis=1) / self.img_sz, prob_vec_tl, prob_vec_br\n        else:\n            coorx_tl, coory_tl = self.soft_argmax(score_map_tl)\n            coorx_br, coory_br = self.soft_argmax(score_map_br)\n            return ops.stack((coorx_tl, coory_tl, coorx_br, coory_br), axis=1) / self.img_sz\n\n    def get_score_map(self, x):\n        # top-left branch\n        x_tl1 = self.conv1_tl(x)\n        x_tl2 = self.conv2_tl(x_tl1)\n        x_tl3 = self.conv3_tl(x_tl2)\n        x_tl4 = self.conv4_tl(x_tl3)\n        score_map_tl = self.conv5_tl(x_tl4)\n\n        # bottom-right branch\n        x_br1 = self.conv1_br(x)\n        x_br2 = self.conv2_br(x_br1)\n        x_br3 = self.conv3_br(x_br2)\n        x_br4 = self.conv4_br(x_br3)\n        score_map_br = self.conv5_br(x_br4)\n        return score_map_tl, score_map_br\n\n    def soft_argmax(self, score_map, return_dist=False, softmax=True):\n        \"\"\" get soft-argmax coordinate for a given heatmap \"\"\"\n        score_vec = score_map.view((-1, self.feat_sz * self.feat_sz))  # (batch, feat_sz * feat_sz)\n        prob_vec = ops.softmax(score_vec, axis=1)\n        exp_x = ops.sum((self.coord_x * prob_vec), dim=1)\n        exp_y = ops.sum((self.coord_y * prob_vec), dim=1)\n        if return_dist:\n            if softmax:\n                return exp_x, exp_y, prob_vec\n            else:\n                return exp_x, exp_y, score_vec\n        else:\n            return exp_x, exp_y\n\n\nclass CenterPredictor(nn.Cell, ):\n    def __init__(self, inplanes=64, channel=256, feat_sz=20, stride=16, freeze_bn=False):\n        super(CenterPredictor, self).__init__()\n        self.feat_sz = feat_sz\n        self.stride = stride\n        self.img_sz = self.feat_sz * self.stride\n\n        # corner predict\n        self.conv1_ctr = conv(inplanes, channel, freeze_bn=freeze_bn)\n        self.conv2_ctr = conv(channel, channel // 2, freeze_bn=freeze_bn)\n        self.conv3_ctr = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\n        self.conv4_ctr = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\n        self.conv5_ctr = nn.Conv2d(channel // 8, 1, kernel_size=1)\n\n        # size regress\n        self.conv1_offset = conv(inplanes, channel, freeze_bn=freeze_bn)\n        self.conv2_offset = conv(channel, channel // 2, freeze_bn=freeze_bn)\n        self.conv3_offset = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\n        self.conv4_offset = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\n        self.conv5_offset = nn.Conv2d(channel 
// 8, 2, kernel_size=1)\n\n        # size regress\n        self.conv1_size = conv(inplanes, channel, freeze_bn=freeze_bn)\n        self.conv2_size = conv(channel, channel // 2, freeze_bn=freeze_bn)\n        self.conv3_size = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\n        self.conv4_size = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\n        self.conv5_size = nn.Conv2d(channel // 8, 2, kernel_size=1)\n\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n\n    def construct(self, x, gt_score_map=None):\n        \"\"\" Forward pass with input x. \"\"\"\n        score_map_ctr, size_map, offset_map = self.get_score_map(x)\n\n        # assert gt_score_map is None\n        if gt_score_map is None:\n            bbox = self.cal_bbox(score_map_ctr, size_map, offset_map)\n        else:\n            bbox = self.cal_bbox(gt_score_map.unsqueeze(1), size_map, offset_map)\n\n        return score_map_ctr, bbox, size_map, offset_map\n\n    def cal_bbox(self, score_map_ctr, size_map, offset_map, return_score=False):\n        max_score, idx = ops.max(score_map_ctr.flatten(1), axis=1, keepdim=True)\n        idx_y = idx // self.feat_sz\n        idx_x = idx % self.feat_sz\n\n        idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)\n        size = size_map.flatten(2).gather(dim=2, index=idx)\n        offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)\n\n        # bbox = ops.cat([idx_x - size[:, 0] / 2, idx_y - size[:, 1] / 2,\n        #                   idx_x + size[:, 0] / 2, idx_y + size[:, 1] / 2], axis=1) / self.feat_sz\n        # cx, cy, w, h\n        bbox = ops.cat([(idx_x.to(ms.float) + offset[:, :1]) / self.feat_sz,\n                          (idx_y.to(ms.float) + offset[:, 1:]) / self.feat_sz,\n                          size.squeeze(-1)], axis=1)\n\n        if return_score:\n            return bbox, max_score\n        return bbox\n\n    def get_pred(self, score_map_ctr, size_map, offset_map):\n        max_score, idx = ops.max(score_map_ctr.flatten(1), axis=1, keepdim=True)\n        idx_y = idx // self.feat_sz\n        idx_x = idx % self.feat_sz\n\n        idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)\n        size = size_map.flatten(2).gather(dim=2, index=idx)\n        offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)\n\n        # bbox = ops.cat([idx_x - size[:, 0] / 2, idx_y - size[:, 1] / 2,\n        #                   idx_x + size[:, 0] / 2, idx_y + size[:, 1] / 2], dim=1) / self.feat_sz\n        return size * self.feat_sz, offset\n\n    def get_score_map(self, x):\n\n        def _sigmoid(x):\n            y = ops.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)\n            return y\n\n        # ctr branch\n        x_ctr1 = self.conv1_ctr(x)\n        x_ctr2 = self.conv2_ctr(x_ctr1)\n        x_ctr3 = self.conv3_ctr(x_ctr2)\n        x_ctr4 = self.conv4_ctr(x_ctr3)\n        score_map_ctr = self.conv5_ctr(x_ctr4)\n\n        # offset branch\n        x_offset1 = self.conv1_offset(x)\n        x_offset2 = self.conv2_offset(x_offset1)\n        x_offset3 = self.conv3_offset(x_offset2)\n        x_offset4 = self.conv4_offset(x_offset3)\n        score_map_offset = self.conv5_offset(x_offset4)\n\n        # size branch\n        x_size1 = self.conv1_size(x)\n        x_size2 = self.conv2_size(x_size1)\n        x_size3 = self.conv3_size(x_size2)\n        x_size4 = self.conv4_size(x_size3)\n        score_map_size = self.conv5_size(x_size4)\n        return _sigmoid(score_map_ctr), 
_sigmoid(score_map_size), score_map_offset\n\n\nclass MLP(nn.Cell):\n    \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, BN=False):\n        super().__init__()\n        self.num_layers = num_layers\n        h = [hidden_dim] * (num_layers - 1)\n        if BN:\n            self.layers = nn.CellList(nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k))\n                                        for n, k in zip([input_dim] + h, h + [output_dim]))\n        else:\n            self.layers = nn.CellList(nn.Linear(n, k)\n                                        for n, k in zip([input_dim] + h, h + [output_dim]))\n\n    def construct(self, x):\n        for i, layer in enumerate(self.layers):\n            x = ops.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n        return x\n\nclass SelfAttention(nn.Cell):\n    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,\n                 attn_pos_encoding_only=False):\n        super(SelfAttention, self).__init__()\n        assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\n\n        self.dim = dim\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = qk_scale or head_dim ** -0.5\n\n        if attn_pos_encoding_only:\n            self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)\n        else:\n            self.q = nn.Linear(dim, dim, bias=qkv_bias)\n            self.k = nn.Linear(dim, dim, bias=qkv_bias)\n            self.v = nn.Linear(dim, dim, bias=qkv_bias)\n        self.attn_drop = nn.Dropout(attn_drop)\n        self.proj = nn.Linear(dim, dim)\n        self.proj_drop = nn.Dropout(proj_drop)\n\n        self.attn_pos_encoding_only = attn_pos_encoding_only\n\n    def construct(self, x, q_ape, k_ape, attn_pos):\n        '''\n            Args:\n                x (ms.Tensor): (B, L, C)\n                q_ape (ms.Tensor | None): (1 or B, L, C), absolute positional encoding for q\n                k_ape (ms.Tensor | None): (1 or B, L, C), absolute positional encoding for k\n                attn_pos (ms.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding\n            Returns:\n                ms.Tensor: (B, L, C)\n        '''\n        B, N, C = x.shape\n\n        if self.attn_pos_encoding_only:\n            assert q_ape is None and k_ape is None\n            qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n            q, k, v = qkv[0], qkv[1], qkv[2]\n        else:\n            q = x + q_ape if q_ape is not None else x\n            q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n\n            k = x + k_ape if k_ape is not None else x\n            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n            v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n\n        attn = q @ k.swapaxes(-2, -1)\n        attn = attn * self.scale\n        if attn_pos is not None:\n            attn = attn + attn_pos\n        attn = ops.softmax(attn,axis=-1)\n        attn = self.attn_drop(attn)\n\n        x = attn @ v\n        x = x.swapaxes(1, 2).reshape(B, N, C)\n        x = self.proj(x)\n        x = self.proj_drop(x)\n\n        return x\n\nclass CrossAttention(nn.Cell):\n    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,\n                 
attn_pos_encoding_only=False):\n        super(CrossAttention, self).__init__()\n        assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\n\n        self.dim = dim\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = qk_scale or head_dim ** -0.5\n\n        if attn_pos_encoding_only:\n            self.q = nn.Linear(dim, dim, bias=qkv_bias)\n            self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)\n        else:\n            self.q = nn.Linear(dim, dim, bias=qkv_bias)\n            self.k = nn.Linear(dim, dim, bias=qkv_bias)\n            self.v = nn.Linear(dim, dim, bias=qkv_bias)\n        self.attn_drop = nn.Dropout(attn_drop)\n        self.proj = nn.Linear(dim, dim)\n        self.proj_drop = nn.Dropout(proj_drop)\n\n        self.attn_pos_encoding_only = attn_pos_encoding_only\n\n    def construct(self, q, kv, q_ape, k_ape, attn_pos):\n        '''\n            Args:\n                q (ms.Tensor): (B, L_q, C)\n                kv (ms.Tensor): (B, L_kv, C)\n                q_ape (ms.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q\n                k_ape (ms.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k\n                attn_pos (ms.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding\n            Returns:\n                ms.Tensor: (B, L_q, C)\n        '''\n        B, q_N, C = q.shape\n        kv_N = kv.shape[1]\n\n        if self.attn_pos_encoding_only:\n            assert q_ape is None and k_ape is None\n            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n            kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n            k, v = kv[0], kv[1]\n        else:\n            q = q + q_ape if q_ape is not None else q\n            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n            k = kv + k_ape if k_ape is not None else kv\n            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n            v = self.v(kv).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n\n        attn = q @ k.swapaxes(-2, -1)\n        attn = attn * self.scale\n        if attn_pos is not None:\n            attn = attn + attn_pos\n        attn = ops.softmax(attn,axis=-1)\n        attn = self.attn_drop(attn)\n        x = attn @ v\n        x = x.swapaxes(1, 2).reshape(B, q_N, C)\n        x = self.proj(x)\n        x = self.proj_drop(x)\n\n        return x\n\nclass Mlp(nn.Cell):\n    \"\"\" Multilayer perceptron.\"\"\"\n\n    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        self.fc1 = nn.Linear(in_features, hidden_features)\n        self.act = act_layer()\n        self.fc2 = nn.Linear(hidden_features, out_features)\n        self.drop = nn.Dropout(drop)\n\n    def construct(self, x):\n        '''\n            Args:\n                x (ms.Tensor): (B, L, C), input tensor\n            Returns:\n                ms.Tensor: (B, L, C), output tensor\n        '''\n        x = self.fc1(x)\n        x = self.act(x)\n        x = self.drop(x)\n        x = self.fc2(x)\n        x = self.drop(x)\n        return x\n\nclass FeatureFusion(nn.Cell):\n    def __init__(self,\n                 dim, 
num_heads, mlp_ratio=2., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n                 drop_path=nn.Identity(), act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=False):\n        super(FeatureFusion, self).__init__()\n        self.z_norm1 = norm_layer(dim)\n        self.x_norm1 = norm_layer(dim)\n        self.z_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)\n        self.x_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)\n\n        self.z_norm2_1 = norm_layer(dim)\n        self.z_norm2_2 = norm_layer(dim)\n        self.x_norm2_1 = norm_layer(dim)\n        self.x_norm2_2 = norm_layer(dim)\n\n        self.z_x_cross_attention = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)\n        self.x_z_cross_attention = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)\n\n        mlp_hidden_dim = int(dim * mlp_ratio)\n        self.z_norm3 = norm_layer(dim)\n        self.x_norm3 = norm_layer(dim)\n        print(mlp_ratio)\n        self.z_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n        self.x_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n        self.drop_path = drop_path\n\n    def construct(self, z, x, z_self_attn_pos, x_self_attn_pos, z_x_cross_attn_pos, x_z_cross_attn_pos):\n        z = z + self.drop_path(self.z_self_attn(self.z_norm1(z), None, None, z_self_attn_pos))\n        x = x + self.drop_path(self.x_self_attn(self.x_norm1(x), None, None, x_self_attn_pos))\n\n        z = z + self.drop_path(self.z_x_cross_attention(self.z_norm2_1(z), self.x_norm2_1(x), None, None, z_x_cross_attn_pos))\n        x = x + self.drop_path(self.x_z_cross_attention(self.x_norm2_2(x), self.z_norm2_2(z), None, None, x_z_cross_attn_pos))\n\n        z = z + self.drop_path(self.z_mlp(self.z_norm3(z)))\n        x = x + self.drop_path(self.x_mlp(self.x_norm3(x)))\n        return z, x\n\n\nclass FeatureFusionEncoder(nn.Cell):\n    def __init__(self, feature_fusion_layers, z_pos_enc, x_pos_enc,\n                 z_rel_pos_index, x_rel_pos_index, z_x_rel_pos_index, x_z_rel_pos_index,\n                 z_rel_pos_bias_table, x_rel_pos_bias_table, z_x_rel_pos_bias_table, x_z_rel_pos_bias_table):\n        super(FeatureFusionEncoder, self).__init__()\n        self.layers = nn.CellList(feature_fusion_layers)\n        self.z_pos_enc = z_pos_enc\n        self.x_pos_enc = x_pos_enc\n        self.register_buffer('z_rel_pos_index', z_rel_pos_index, False)\n        self.register_buffer('x_rel_pos_index', x_rel_pos_index, False)\n        self.register_buffer('z_x_rel_pos_index', z_x_rel_pos_index, False)\n        self.register_buffer('x_z_rel_pos_index', x_z_rel_pos_index, False)\n        self.z_rel_pos_bias_table = z_rel_pos_bias_table\n        self.x_rel_pos_bias_table = x_rel_pos_bias_table\n        self.z_x_rel_pos_bias_table = z_x_rel_pos_bias_table\n        self.x_z_rel_pos_bias_table = x_z_rel_pos_bias_table\n        #self.conv1 = ms.nn.Conv2d(384,768,1,1,0)\n        #self.conv2 = ms.nn.Conv2d(768,768,2,1,1)\n        #self.conv3 = ms.nn.Conv2d(768,384,1,1,0)\n        #self.norm1 = ms.nn.LayerNorm(384)\n        #self.norm2 = ms.nn.LayerNorm(768)\n        #self.norm3 = ms.nn.LayerNorm(384)\n    def construct(self, z, x, z_pos, x_pos):\n        '''\n            Args:\n                z (ms.Tensor): (B, L_z, C), template 
\n    def construct(self, z, x, z_pos, x_pos):\n        '''\n            Args:\n                z (ms.Tensor): (B, L_z, C), template image feature tokens\n                x (ms.Tensor): (B, L_x, C), search image feature tokens\n                z_pos (ms.Tensor | None): (1 or B, L_z, C), optional positional encoding for z\n                x_pos (ms.Tensor | None): (1 or B, L_x, C), optional positional encoding for x\n            Returns:\n                Tuple[ms.Tensor, ms.Tensor]:\n                    (B, L_z, C): template image feature tokens\n                    (B, L_x, C): search image feature tokens\n        '''\n        # Support untied positional encoding only for simplicity\n        assert z_pos is None and x_pos is None\n\n        # untied positional encoding\n        z_q_pos, z_k_pos = self.z_pos_enc()\n        x_q_pos, x_k_pos = self.x_pos_enc()\n        z_self_attn_pos = (z_q_pos @ z_k_pos.swapaxes(-2, -1)).unsqueeze(0)\n        x_self_attn_pos = (x_q_pos @ x_k_pos.swapaxes(-2, -1)).unsqueeze(0)\n\n        z_x_cross_attn_pos = (z_q_pos @ x_k_pos.swapaxes(-2, -1)).unsqueeze(0)\n        x_z_cross_attn_pos = (x_q_pos @ z_k_pos.swapaxes(-2, -1)).unsqueeze(0)\n\n        # relative positional encoding\n        z_self_attn_pos = z_self_attn_pos + self.z_rel_pos_bias_table(self.z_rel_pos_index)\n        x_self_attn_pos = x_self_attn_pos + self.x_rel_pos_bias_table(self.x_rel_pos_index)\n        z_x_cross_attn_pos = z_x_cross_attn_pos + self.z_x_rel_pos_bias_table(self.z_x_rel_pos_index)\n        x_z_cross_attn_pos = x_z_cross_attn_pos + self.x_z_rel_pos_bias_table(self.x_z_rel_pos_index)\n\n        for layer in self.layers:\n            z, x = layer(z, x, z_self_attn_pos, x_self_attn_pos, z_x_cross_attn_pos, x_z_cross_attn_pos)\n\n        return z, x\n\nclass Learned2DPositionalEncoder(nn.Cell):\n    def __init__(self, dim, w, h):\n        super(Learned2DPositionalEncoder, self).__init__()\n        # ms.Parameter, not nn.Parameter; ops.zeros gives a defined starting\n        # value before the truncated-normal init\n        self.w_pos = ms.Parameter(ops.zeros((w, dim)))\n        self.h_pos = ms.Parameter(ops.zeros((h, dim)))\n        trunc_normal_(self.w_pos, std=0.02)\n        trunc_normal_(self.h_pos, std=0.02)\n\n    def construct(self):\n        w = self.w_pos.shape[0]\n        h = self.h_pos.shape[0]\n        return (self.w_pos[None, :, :] + self.h_pos[:, None, :]).view(h * w, -1)\n\nclass Untied2DPositionalEncoder(nn.Cell):\n    def __init__(self, dim, num_heads, w, h, scale=None, with_q=True, with_k=True):\n        super(Untied2DPositionalEncoder, self).__init__()\n        assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\n        self.pos = Learned2DPositionalEncoder(dim, w, h)\n        self.norm = nn.LayerNorm((dim,))\n        self.pos_q_linear = None\n        self.pos_k_linear = None\n        if with_q:\n            self.pos_q_linear = nn.Dense(dim, dim)\n        if with_k:\n            self.pos_k_linear = nn.Dense(dim, dim)\n\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = scale or head_dim ** -0.5\n\n    def construct(self):\n        pos = self.norm(self.pos())\n        seq_len = pos.shape[0]\n        if self.pos_q_linear is not None and self.pos_k_linear is not None:\n            pos_q = self.pos_q_linear(pos).view(seq_len, self.num_heads, -1).swapaxes(0, 1) * self.scale\n            pos_k = self.pos_k_linear(pos).view(seq_len, self.num_heads, -1).swapaxes(0, 1)\n            return pos_q, pos_k\n        elif self.pos_q_linear is not None:\n            pos_q = self.pos_q_linear(pos).view(seq_len, self.num_heads, -1).swapaxes(0, 1) * self.scale\n            return pos_q\n        elif self.pos_k_linear is not None:\n            pos_k = self.pos_k_linear(pos).view(seq_len, self.num_heads, -1).swapaxes(0, 1)\n            return pos_k\n        else:\n            raise RuntimeError\n
\ndef generate_2d_relative_positional_encoding_index(z_shape, x_shape):\n    '''\n        z_shape: (z_h, z_w)\n        x_shape: (x_h, x_w)\n    '''\n    # indexing='ij' matches the torch.meshgrid semantics of the original code\n    z_2d_index_h, z_2d_index_w = ops.meshgrid(ops.arange(z_shape[0]), ops.arange(z_shape[1]), indexing='ij')\n    x_2d_index_h, x_2d_index_w = ops.meshgrid(ops.arange(x_shape[0]), ops.arange(x_shape[1]), indexing='ij')\n\n    z_2d_index_h = z_2d_index_h.reshape(-1)\n    z_2d_index_w = z_2d_index_w.reshape(-1)\n    x_2d_index_h = x_2d_index_h.reshape(-1)\n    x_2d_index_w = x_2d_index_w.reshape(-1)\n\n    diff_h = z_2d_index_h[:, None] - x_2d_index_h[None, :]\n    diff_w = z_2d_index_w[:, None] - x_2d_index_w[None, :]\n\n    # ops.unique has no dim/return_inverse arguments, so encode each\n    # (diff_h, diff_w) pair as one integer key; the second output of\n    # ops.unique is exactly the inverse index of each element\n    key_base = z_shape[1] + x_shape[1]\n    keys = diff_h * key_base + diff_w\n    _, indices = ops.unique(keys.reshape(-1))\n    return indices.view(z_shape[0] * z_shape[1], x_shape[0] * x_shape[1])\n
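\n# Illustrative example (hypothetical shapes, not from the original repo):\n# for z_shape=(2, 2) and x_shape=(2, 2), every pair of tokens whose 2D offset\n# (diff_h, diff_w) coincides is mapped to the same index, so the returned\n# (4, 4) matrix selects one shared bias per distinct offset from the\n# RelativePosition2DEncoder table below.\n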
\nclass RelativePosition2DEncoder(nn.Cell):\n    def __init__(self, num_heads, embed_size):\n        super(RelativePosition2DEncoder, self).__init__()\n        self.relative_position_bias_table = ms.Parameter(ops.zeros((num_heads, embed_size)))\n        trunc_normal_(self.relative_position_bias_table, std=0.02)\n\n    def construct(self, attn_rpe_index):\n        '''\n            Args:\n                attn_rpe_index (ms.Tensor): (*), any shape containing indices, max(attn_rpe_index) < embed_size\n            Returns:\n                ms.Tensor: (1, num_heads, *)\n        '''\n        return self.relative_position_bias_table[:, attn_rpe_index].unsqueeze(0)\n\nclass DropPathAllocator:\n    def __init__(self, max_drop_path_rate, stochastic_depth_decay=True):\n        self.max_drop_path_rate = max_drop_path_rate\n        self.stochastic_depth_decay = stochastic_depth_decay\n        self.allocated = []\n        self.allocating = []\n\n    def __enter__(self):\n        self.allocating = []\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        if len(self.allocating) != 0:\n            self.allocated.append(self.allocating)\n        self.allocating = None\n        if not self.stochastic_depth_decay:\n            # constant rate for every allocated DropPath\n            for depth_modules in self.allocated:\n                for module in depth_modules:\n                    if isinstance(module, DropPath):\n                        module.drop_prob = self.max_drop_path_rate\n        else:\n            # linearly increasing rate with depth (stochastic depth decay)\n            depth = self.get_depth()\n            dpr = [x.item() for x in ops.linspace(0, self.max_drop_path_rate, depth)]\n            assert len(dpr) == len(self.allocated)\n            for drop_path_rate, depth_modules in zip(dpr, self.allocated):\n                for module in depth_modules:\n                    if isinstance(module, DropPath):\n                        module.drop_prob = drop_path_rate\n\n    def __len__(self):\n        length = 0\n        for depth_modules in self.allocated:\n            length += len(depth_modules)\n        return length\n\n    def increase_depth(self):\n        self.allocated.append(self.allocating)\n        self.allocating = []\n\n    def get_depth(self):\n        return len(self.allocated)\n\n    def allocate(self):\n        if self.max_drop_path_rate == 0 or (self.stochastic_depth_decay and self.get_depth() == 0):\n            drop_path_module = Identity()\n        else:\n            drop_path_module = DropPath()\n        self.allocating.append(drop_path_module)\n        return drop_path_module\n\n    def get_all_allocated(self):\n        allocated = []\n        for depth_modules in self.allocated:\n            for module in depth_modules:\n                allocated.append(module)\n        return allocated\n\ndef build_encoder(encoder_layer, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop, dim, z_size, x_size, drop_path):\n    z_shape = [z_size, z_size]\n    x_shape = [x_size, x_size]\n    encoder_layers = []\n    for i in range(encoder_layer):\n        encoder_layers.append(\n            FeatureFusion(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate, attn_drop=attn_drop,\n                          drop_path=drop_path.allocate(),\n                          attn_pos_encoding_only=True)\n        )\n    z_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, z_shape[0], z_shape[1])\n    x_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, x_shape[0], x_shape[1])\n\n    z_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, z_shape)\n    x_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, x_shape)\n\n    z_x_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, x_shape)\n    x_z_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, z_shape)\n\n    z_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, int(z_self_attn_rel_pos_index.max()) + 1)\n    x_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, int(x_self_attn_rel_pos_index.max()) + 1)\n    z_x_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, int(z_x_cross_attn_rel_pos_index.max()) + 1)\n    x_z_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, int(x_z_cross_attn_rel_pos_index.max()) + 1)\n\n    return FeatureFusionEncoder(encoder_layers, z_abs_encoder, x_abs_encoder, z_self_attn_rel_pos_index,\n                                x_self_attn_rel_pos_index,\n                                z_x_cross_attn_rel_pos_index, x_z_cross_attn_rel_pos_index,\n                                z_self_attn_rel_pos_bias_table,\n                                x_self_attn_rel_pos_bias_table, z_x_cross_attn_rel_pos_bias_table,\n                                x_z_cross_attn_rel_pos_bias_table)\n
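\n# Usage sketch for DropPathAllocator (illustrative values, not from the original\n# repo): build_box_head below creates one allocator and shares it between the\n# encoder and decoder builders, e.g.\n#   allocator = DropPathAllocator(0.1)\n#   encoder = build_encoder(3, 12, 2, True, 0.0, 0.0, 768, 7, 14, allocator)\n#   decoder = build_decoder(3, allocator, 768, 12, 2, True, 0.0, 0.0)\n# Each allocate() hands out one DropPath (or Identity at depth 0 / rate 0), and\n# increase_depth() closes a depth level so per-depth rates can be assigned.\n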
\nclass TargetQueryDecoderLayer(nn.Cell):\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n                 drop_path=nn.Identity(), act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n        super(TargetQueryDecoderLayer, self).__init__()\n        self.norm_1 = norm_layer((dim,))\n        self.self_attn1 = nn.MultiheadAttention(dim, num_heads, dropout=drop)\n        self.norm_2_query = norm_layer((dim,))\n        self.norm_2_memory = norm_layer((dim,))\n        self.multihead_attn = nn.MultiheadAttention(dim, num_heads, dropout=drop)\n        self.norm_3 = norm_layer((dim,))\n        mlp_hidden_dim = int(dim * mlp_ratio)\n        self.mlpz = Mlp(dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n        self.drop_path = drop_path\n\n    def construct(self, query, memoryz, query_pos, tgt_mask: Optional[Tensor] = None,\n                memory_mask: Optional[Tensor] = None,\n                tgt_key_padding_mask: Optional[Tensor] = None,\n                memory_key_padding_mask: Optional[Tensor] = None,\n                ):\n        '''\n            Args:\n                query (ms.Tensor): (num_queries, B, C), sequence-first, as expected by nn.MultiheadAttention\n                memoryz (ms.Tensor): (B, L, C), encoder memory tokens\n                query_pos (ms.Tensor): (num_queries, 1 or B, C)\n            Returns:\n                ms.Tensor: (num_queries, B, C)\n        '''\n        tgt = query\n        q = k = self.norm_1(query) + query_pos\n        query = query + self.drop_path(self.self_attn1(q, k, value=tgt, attn_mask=tgt_mask,\n                                                       key_padding_mask=tgt_key_padding_mask)[0])\n        q2 = self.norm_2_query(query) + query_pos\n        memory = memoryz\n\n        k2 = self.norm_2_memory(memory).permute(1, 0, 2)\n        memory_in = memory.permute(1, 0, 2)\n        query = query + self.drop_path(\n            self.multihead_attn(query=q2, key=k2, value=memory_in, attn_mask=memory_mask,\n                            key_padding_mask=memory_key_padding_mask)[0])\n        query = query + self.drop_path(self.mlpz(self.norm_3(query)))\n\n        return query\n\ndef _get_clones(module, N):\n    return nn.CellList([copy.deepcopy(module) for i in range(N)])\n\nclass TargetQueryDecoderBlock(nn.Cell):\n    def __init__(self, dim, decoder_layers, num_layer):\n        super(TargetQueryDecoderBlock, self).__init__()\n        self.layers = nn.CellList(decoder_layers)\n        self.num_layers = num_layer\n        self.norm = nn.LayerNorm((dim,))\n\n    def construct(self, tgt, z, query_pos: Optional[Tensor] = None,\n                tgt_mask: Optional[Tensor] = None,\n                memory_mask: Optional[Tensor] = None,\n                tgt_key_padding_mask: Optional[Tensor] = None,\n                memory_key_padding_mask: Optional[Tensor] = None):\n        '''\n            Args:\n                tgt (ms.Tensor): (num_queries, B, C), target query tokens\n                z (ms.Tensor): (B, L, C), encoder memory tokens\n            Returns:\n                ms.Tensor: (num_queries, B, C)\n        '''\n        output = tgt\n        for layer in self.layers:\n            output = layer(output, z, query_pos,\n                           tgt_mask=tgt_mask,\n                           memory_mask=memory_mask,\n                           tgt_key_padding_mask=tgt_key_padding_mask,\n                           memory_key_padding_mask=memory_key_padding_mask)\n        output = self.norm(output)\n\n        return output\n\ndef build_decoder(decoder_layer, drop_path, dim, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate):\n    num_layers = decoder_layer\n    decoder_layers = []\n    for _ in range(num_layers):\n        decoder_layers.append(\n            TargetQueryDecoderLayer(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate,\n                                    drop_path=drop_path.allocate()))\n        drop_path.increase_depth()\n\n    decoder = TargetQueryDecoderBlock(dim, decoder_layers, num_layers)\n    return decoder\n
\ndef generate_square_subsequent_mask(sz):\n    r\"\"\"Generate a square mask for the sequence. The masked positions are filled with float('-inf').\n        Unmasked positions are filled with float(0.0).\n    \"\"\"\n    # ops.ones takes the shape as a tuple\n    mask = (ops.triu(ops.ones((sz, sz))) == 1).swapaxes(0, 1)\n    mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n    return mask\n
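\n# Example (a sketch for illustration, not part of the original code):\n# generate_square_subsequent_mask(3) produces the additive causal mask\n#   [[0., -inf, -inf],\n#    [0.,   0., -inf],\n#    [0.,   0.,   0.]]\n# so token i can only attend to tokens 0..i while the box sequence is decoded.\n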
\nclass Pix2Track(nn.Cell):\n    def __init__(self, in_channel=64, feat_sz=20, feat_tz=10, stride=16, encoder_layer=3, decoder_layer=3,\n                 bins=400, num_heads=12, mlp_ratio=2, qkv_bias=True, drop_rate=0.0, attn_drop=0.0, drop_path=nn.Identity):\n        super(Pix2Track, self).__init__()\n        self.bins = bins\n        # MindSpore's nn.Embedding has no max_norm/norm_type arguments\n        self.word_embeddings = nn.Embedding(self.bins * 3 + 2, in_channel, padding_idx=self.bins * 3)\n        self.position_embeddings = nn.Embedding(5, in_channel)\n        self.prev_position_embeddings = nn.Embedding(5, in_channel)\n        self.output_bias = ms.Parameter(ops.zeros(self.bins * 3 + 2))\n        self.identity_search = ms.Parameter(ops.zeros((1, 1, 768)))\n        self.identity_search = trunc_normal_(self.identity_search, std=.02)\n        self.encoder_layer = encoder_layer\n        self.drop_path = drop_path\n        self.tz = feat_tz * feat_tz\n        self.sz = feat_sz * feat_sz\n        trunc_normal_(self.word_embeddings.embedding_table, std=.02)\n        if self.encoder_layer > 0:\n            self.encoder = build_encoder(encoder_layer, num_heads, mlp_ratio, qkv_bias,\n                        drop_rate, attn_drop, in_channel, feat_tz, feat_sz, self.drop_path)\n        else:\n            self.encoder = None\n        self.decoder = build_decoder(decoder_layer, self.drop_path, in_channel, num_heads,\n                                     mlp_ratio, qkv_bias, drop_rate, attn_drop)\n\n    def construct(self, zx_feat, pos_z, pos_x, identity, seqs_input=None, head_type=None, stage=None, search_feature=None):\n        emb_weight = self.word_embeddings.embedding_table.copy()\n        share_weight = emb_weight.T\n\n        z_feat = zx_feat[:, :self.tz]\n        x_feat = zx_feat[:, self.tz:]\n        out_list = []\n        bs = zx_feat.shape[0]\n        if self.encoder is not None:\n            z_feat, x_feat = self.encoder(z_feat, x_feat, None, None)\n        output_x_feat = x_feat.copy()\n        # the simplified decoder consumes one memory sequence, so template and\n        # search tokens are concatenated before cross-attention\n        memory = ops.cat((z_feat, x_feat), axis=1)\n        if stage is None:\n            seqs_input = seqs_input.astype(ms.int64)\n            tgt = self.word_embeddings(seqs_input).permute(1, 0, 2)\n            query_embed_ = self.position_embeddings.embedding_table.unsqueeze(1)\n            prev_embed = self.prev_position_embeddings.embedding_table.unsqueeze(1)\n            query_embed = ops.cat([prev_embed, query_embed_], axis=0)\n            query_embed = query_embed.tile((1, bs, 1))\n            decoder_feat_cls = self.decoder(tgt, memory, query_embed[:len(tgt)],\n                                            tgt_mask=generate_square_subsequent_mask(len(tgt)))\n            at = ops.matmul(decoder_feat_cls, share_weight)\n            at = at + self.output_bias\n            output = {'feat': at, \"state\": \"train\"}\n        else:\n            b = seqs_input\n            a = ops.ones((bs, 1)) * self.bins * 3\n            a = a.astype(b.dtype)\n            c = ops.cat([b, a], axis=1)\n            seqs_input = c.astype(ms.int32)\n            for i in range(5):\n                tgt = self.word_embeddings(seqs_input).permute(1, 0, 2)\n                query_embed_ = self.position_embeddings.embedding_table.unsqueeze(1)\n                prev_embed = self.prev_position_embeddings.embedding_table.unsqueeze(0).tile((4, 1, 1)).permute(1, 0, 2).reshape(4 * 5, -1).unsqueeze(1)\n                query_embed = ops.cat([prev_embed, query_embed_], axis=0)\n                query_embed = query_embed.tile((1, bs, 1))\n                decoder_feat_cls = self.decoder(tgt, memory, query_embed[:len(tgt)],\n                                                tgt_mask=generate_square_subsequent_mask(len(tgt)))\n                out = ops.matmul(decoder_feat_cls.swapaxes(0, 1)[:, -1, :], share_weight) + self.output_bias\n                if i == 4:\n                    temp = ops.matmul(decoder_feat_cls, share_weight) + self.output_bias\n
                out_list.append(out.unsqueeze(0))\n                out_val = ops.softmax(out[:, :self.bins * 3], axis=-1)\n                out = ops.softmax(out, axis=-1)\n\n                if head_type == \"half\":\n                    # the four coordinate tokens (i <= 3) are scored over the\n                    # bin vocabulary only; the fifth token over the full vocabulary\n                    if i <= 3:\n                        prob_out = out_val\n                    else:\n                        prob_out = out\n                    selected_indices = ops.argmax(prob_out, -1)\n                    for j in range(bs):\n                        if j == 0:\n                            value = prob_out[j, selected_indices[j]].unsqueeze(0)\n                        else:\n                            value = ops.cat([value, prob_out[j, selected_indices[j]].unsqueeze(0)], axis=0)\n                    selected_indices = selected_indices.unsqueeze(1)\n                    value = value.unsqueeze(1)\n                    seqs_input = ops.cat([seqs_input, selected_indices], axis=-1)\n                    if i == 0:\n                        seqs_output = selected_indices\n                        values = value\n                    else:\n                        seqs_output = ops.cat([seqs_output, selected_indices], axis=-1)\n                        values = ops.cat([values, value], axis=-1)\n                    continue\n                value, extra_seq = out.topk(dim=-1, k=1)[0], out.topk(dim=-1, k=1)[1]\n                seqs_input = ops.cat([seqs_input, extra_seq], axis=-1)\n                if i == 0:\n                    seqs_output = extra_seq\n                    values = value\n                else:\n                    seqs_output = ops.cat([seqs_output, extra_seq], axis=-1)\n                    values = ops.cat([values, value], axis=-1)\n            feat = ops.cat(out_list)\n            output = {'seqs': seqs_output, 'class': values, 'feat': feat, \"state\": \"val/test\", \"x_feat\": ops.stop_gradient(output_x_feat)}\n        return output\n\n\ndef build_box_head(cfg, hidden_dim):\n    stride = cfg.MODEL.BACKBONE.STRIDE\n\n    if cfg.MODEL.HEAD.TYPE == \"MLP\":\n        mlp_head = MLP(hidden_dim, hidden_dim, 4, 3)  # dim_in, dim_hidden, dim_out, 3 layers\n        return mlp_head\n    elif \"CORNER\" in cfg.MODEL.HEAD.TYPE:\n        feat_sz = int(cfg.DATA.SEARCH.SIZE / stride)\n        channel = getattr(cfg.MODEL, \"NUM_CHANNELS\", 256)\n        print(\"head channel: %d\" % channel)\n        if cfg.MODEL.HEAD.TYPE == \"CORNER\":\n            corner_head = Corner_Predictor(inplanes=cfg.MODEL.HIDDEN_DIM, channel=channel,\n                                           feat_sz=feat_sz, stride=stride)\n        else:\n            raise ValueError()\n        return corner_head\n    elif cfg.MODEL.HEAD.TYPE == \"CENTER\":\n        in_channel = hidden_dim\n
        out_channel = cfg.MODEL.HEAD.NUM_CHANNELS\n        feat_sz = int(cfg.DATA.SEARCH.SIZE / stride)\n        center_head = CenterPredictor(inplanes=in_channel, channel=out_channel,\n                                      feat_sz=feat_sz, stride=stride)\n        return center_head\n    elif cfg.MODEL.HEAD.TYPE == \"PIX\":\n        in_channel = hidden_dim\n        feat_sz = int(cfg.DATA.SEARCH.SIZE / stride)\n        feat_tz = int(cfg.DATA.TEMPLATE.SIZE / stride)\n        decoder_layer = cfg.MODEL.DECODER_LAYER\n        encoder_layer = cfg.MODEL.ENCODER_LAYER\n        bins = cfg.MODEL.BINS\n        num_heads = cfg.MODEL.NUM_HEADS\n        mlp_ratio = cfg.MODEL.MLP_RATIO\n        qkv_bias = cfg.MODEL.QKV_BIAS\n        drop_rate = cfg.MODEL.DROP_RATE\n        attn_drop = cfg.MODEL.ATTN_DROP\n        drop_path = cfg.MODEL.DROP_PATH\n        drop_path_allocator = DropPathAllocator(drop_path)\n        pix_head = Pix2Track(in_channel=in_channel, feat_sz=feat_sz, feat_tz=feat_tz,\n                             stride=stride, encoder_layer=encoder_layer, decoder_layer=decoder_layer, bins=bins,\n                             num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_rate=drop_rate,\n                             attn_drop=attn_drop, drop_path=drop_path_allocator)\n        return pix_head\n    else:\n        raise ValueError(\"HEAD TYPE %s is not supported.\" % cfg.MODEL.HEAD.TYPE)\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/layers/mask_decoder.py",
    "content": "# -*- coding:utf-8 -*-\n# author  : Skye Song\n# file    : vit_decoder.py\n# Copyright (c) Skye-Song. All Rights Reserved\n\nimport mindspore as ms\nimport mindspore.nn as nn\nfrom mindspore import ops\nfrom mindspore import Tensor\nimport sys\n\nfrom lib.utils.box_ops import box_xywh_to_cxywh, box_cxcywh_to_xyxy\nfrom lib.models.component.block import Block\nfrom einops import rearrange\n\nfrom lib.utils.image import *\nfrom mindspore.common.initializer import initializer,Normal,XavierUniform,Constant\n\nclass MaskDecoder(nn.Cell):\n\tdef __init__(self, mask_ratio=0.75, patch_size=16, num_patches=8 ** 2, embed_dim=1024, decoder_embed_dim=512,\n\t             decoder_depth=8, decoder_num_heads=16, pool_size=8,\n\t             mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False):\n\t\tsuper().__init__()\n\t\tself.mask_ratio = mask_ratio\n\n\t\tself.num_patches = num_patches\n\t\tself.patch_size = patch_size\n\n\t\tself.decoder_embed = nn.Dense(embed_dim, decoder_embed_dim, has_bias=True)\n\n\t\tself.mask_token = ms.Parameter(ops.zeros((1, 1, decoder_embed_dim)))\n\n\t\tself.decoder_pos_embed = ms.Parameter(ops.zeros((1, num_patches, decoder_embed_dim)),\n\t\t                                      requires_grad=False)  # fixed sin-cos embedding\n\n\t\tself.decoder_blocks = nn.CellList([\n\t\t\tBlock(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)\n\t\t\tfor i in range(decoder_depth)])\n\t\tdecoder_embed_dim_tuple=decoder_embed_dim\n\t\tif isinstance(decoder_embed_dim,int):\n\t\t\tdecoder_embed_dim_tuple=tuple([decoder_embed_dim])\n\t\tself.decoder_norm = norm_layer(decoder_embed_dim_tuple)\n\t\tself.decoder_pred = nn.Dense(decoder_embed_dim, patch_size ** 2 * 3, has_bias=True)  # decoder to patch\n\n\t\tself.norm_pix_loss = norm_pix_loss\n\n\tdef random_masking(self, x):\n\t\t\"\"\"\n\t\tPerform per-sample random masking by per-sample shuffling.\n\t\tPer-sample shuffling is done by argsort random noise.\n\t\tx: [N, L, D], sequence\n\t\t\"\"\"\n\t\tN, L, D = x.shape  # batch, length, dim\n\t\tlen_keep = int(L * (1 - self.mask_ratio))\n\n\t\tnoise = ops.rand(N, L, device=x.device)  # noise in [0, 1]\n\n\t\t# sort noise for each sample\n\t\tids_shuffle = ops.argsort(noise, dim=1)  # ascend: small is keep, large is remove\n\t\tids_restore = ops.argsort(ids_shuffle, dim=1)\n\n\t\t# keep the first subset\n\t\tids_keep = ids_shuffle[:, :len_keep]\n\t\tx_keep = ops.gather_elements(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))\n\n\t\t# generate the binary mask: 0 is keep, 1 is remove\n\t\tmask = ops.ones([N, L], device=x.device)\n\t\tmask[:, :len_keep] = 0\n\t\t# unshuffle to get the binary mask\n\t\tmask = ops.gather_elements(mask, dim=1, index=ids_restore)\n\n\t\t# get the masked x\n\t\tmask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] - x_keep.shape[1], 1)\n\t\tx_ = ops.cat([x_keep, mask_tokens], axis=1)  # no cls token\n\t\tx_masked = ops.gather_elements(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2]))  # unshuffle\n\n\t\treturn x_masked, mask\n\n\tdef forward_decoder(self, x, eval=False):\n\t\t# embed tokens\n\n\t\tx = self.decoder_embed(x)\n\t\tmask = None\n\n\t\t# append mask tokens to sequence\n\t\tif not eval:\n\t\t\tx, mask = self.random_masking(x)\n\n\t\t# add pos embed\n\t\tx = x + self.decoder_pos_embed\n\n\t\t# apply Transformer blocks\n\t\tfor blk in self.decoder_blocks:\n\t\t\tx = blk(x)\n\t\tx = self.decoder_norm(x)\n\n\t\t# predictor projection\n\t\tx = 
self.decoder_pred(x)\n\t\treturn x, mask\n\n\tdef unpatchify(self, x):\n\t\t\"\"\"\n        x: (N, L, patch_size**2 *3)\n        imgs: (N, 3, H, W)\n        \"\"\"\n\t\tp = self.patch_size\n\t\th = w = int(x.shape[1] ** .5)\n\t\tassert h * w == x.shape[1]\n\n\t\tx = x.reshape((x.shape[0], h, w, p, p, 3))\n\t\tx = ops.permute(x, (0, 5, 1, 3, 2, 4))\n\t\timgs = x.reshape((x.shape[0], 3, h * p, w * p))\n\t\treturn imgs\n\n\tdef patchify(self, imgs):\n\t\t\"\"\"\n\t\timgs: (N, 3, H, W)\n\t\tx: (N, L, patch_size**2 *3)\n\t\t\"\"\"\n\t\tp = self.patch_size\n\t\tassert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0\n\n\t\th = w = imgs.shape[2] // p\n\t\tx = imgs.reshape((imgs.shape[0], 3, h, p, w, p))\n\t\tx = ops.permute(x, (0, 2, 4, 3, 5, 1))\n\t\tx = x.reshape((imgs.shape[0], h * w, p ** 2 * 3))\n\n\t\treturn x\n\n\tdef forward_loss(self, imgs, pred, mask=None):\n\t\t\"\"\"\n\t\timgs: [N, 3, H, W]\n\t\tpred: [N, L, p*p*3]\n\t\tmask: [N, L], 0 is keep, 1 is remove\n\t\t\"\"\"\n\t\ttarget = self.patchify(imgs)\n\t\tif self.norm_pix_loss:\n\t\t\tmean = target.mean(axis=-1, keep_dims=True)\n\t\t\tvar = target.var(axis=-1, keepdims=True)\n\t\t\ttarget = (target - mean) / (var + 1.e-6) ** .5\n\n\t\tloss = (pred - target) ** 2\n\t\tloss = loss.mean(axis=-1)  # [N, L], mean loss per patch\n\t\tif mask is None:\n\t\t\tloss = loss.sum() / (pred.shape[0] * pred.shape[1])  # mean loss over all patches\n\t\telse:\n\t\t\tloss = (loss * mask).sum() / mask.sum()  # mean loss on removed patches only\n\t\treturn loss\n\n\tdef construct(self, x, images=None, gt_bboxes=None, eval=False):\n\t\t# (B, C, H, W) -> (B, H*W, C) with graph-friendly ops instead of a\n\t\t# numpy round-trip (asnumpy would cut the gradient flow)\n\t\tB, C, H, W = x.shape\n\t\tx = x.reshape((B, C, H * W)).permute(0, 2, 1)\n\t\tpred, mask = self.forward_decoder(x, eval)  # [N, L, p*p*3]\n\t\tif eval:\n\t\t\treturn self.unpatchify(pred)\n\t\tloss = self.forward_loss(imgs=images, pred=pred, mask=mask)\n\t\tpred = self.unpatchify(pred)\n\t\treturn pred, loss\n\n\ndef mask_decoder():\n\tmodel = MaskDecoder(\n\t\tmask_ratio=0.75, patch_size=16, num_patches=8 ** 2, embed_dim=1024, decoder_embed_dim=512, decoder_depth=8,\n\t\tdecoder_num_heads=16, mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False)\n\treturn model\n\n\ndef build_maskdecoder(cfg):\n\tpool_size = int(cfg.DATA.TEMPLATE.SIZE / cfg.MODEL.BACKBONE.PATCHSIZE)\n\n\tnum_patches = (cfg.DATA.TEMPLATE.SIZE // cfg.MODEL.BACKBONE.PATCHSIZE) ** 2\n\n\tmodel = MaskDecoder(\n\t\tmask_ratio=cfg.MODEL.DECODER.MASK_RATIO,\n\t\tpatch_size=cfg.MODEL.BACKBONE.PATCHSIZE,\n\t\tnum_patches=num_patches,\n\t\tembed_dim=cfg.MODEL.BACKBONE.EMBEDDIM,\n\t\tdecoder_embed_dim=cfg.MODEL.DECODER.EMBEDDIM,\n\t\tdecoder_depth=cfg.MODEL.DECODER.DEPTH,\n\t\tdecoder_num_heads=cfg.MODEL.DECODER.NUMHEADS,\n\t\tpool_size=pool_size,\n\t\tmlp_ratio=cfg.MODEL.DECODER.MLPRATIO,\n\t\tnorm_layer=nn.LayerNorm,\n\t\tnorm_pix_loss=False)\n\treturn model\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/layers/patch_embed.py",
    "content": "import sys\r\nsys.path.append(\"/home/baiyifan/code/AR2_mindspore_cp/2stage\")\r\nimport mindspore.nn as nn\r\nfrom mindspore import ops\r\nfrom lib.models.timm import to_2tuple\r\n\r\n\r\nclass PatchEmbed(nn.Cell):\r\n    \"\"\" 2D Image to Patch Embedding\r\n    \"\"\"\r\n\r\n    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):\r\n        super().__init__()\r\n        img_size = to_2tuple(img_size)\r\n        patch_size = to_2tuple(patch_size)\r\n        self.img_size = img_size\r\n        self.patch_size = patch_size\r\n        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])\r\n        self.num_patches = self.grid_size[0] * self.grid_size[1]\r\n        self.flatten = flatten\r\n\r\n        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size,pad_mode='valid',has_bias=True)\r\n        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()\r\n\r\n    def construct(self, x):\r\n        # allow different input size\r\n        # B, C, H, W = x.shape\r\n        # _assert(H == self.img_size[0], f\"Input image height ({H}) doesn't match model ({self.img_size[0]}).\")\r\n        # _assert(W == self.img_size[1], f\"Input image width ({W}) doesn't match model ({self.img_size[1]}).\")\r\n        x = self.proj(x)\r\n        if self.flatten:\r\n            x = ops.flatten(x,start_dim=2)\r\n            x = x.swapaxes(1, 2)  # BCHW -> BNC\r\n        x = self.norm(x)\r\n        return x\r\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/layers/rpe.py",
    "content": "import torch\nimport torch.nn as nn\nfrom timm.models.layers import trunc_normal_\n\n\ndef generate_2d_relative_positional_encoding_index(z_shape, x_shape):\n    '''\n        z_shape: (z_h, z_w)\n        x_shape: (x_h, x_w)\n    '''\n    z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1]))\n    x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1]))\n\n    z_2d_index_h = z_2d_index_h.flatten(0)\n    z_2d_index_w = z_2d_index_w.flatten(0)\n    x_2d_index_h = x_2d_index_h.flatten(0)\n    x_2d_index_w = x_2d_index_w.flatten(0)\n\n    diff_h = z_2d_index_h[:, None] - x_2d_index_h[None, :]\n    diff_w = z_2d_index_w[:, None] - x_2d_index_w[None, :]\n\n    diff = torch.stack((diff_h, diff_w), dim=-1)\n    _, indices = torch.unique(diff.view(-1, 2), return_inverse=True, dim=0)\n    return indices.view(z_shape[0] * z_shape[1], x_shape[0] * x_shape[1])\n\n\ndef generate_2d_concatenated_self_attention_relative_positional_encoding_index(z_shape, x_shape):\n    '''\n        z_shape: (z_h, z_w)\n        x_shape: (x_h, x_w)\n    '''\n    z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1]))\n    x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1]))\n\n    z_2d_index_h = z_2d_index_h.flatten(0)\n    z_2d_index_w = z_2d_index_w.flatten(0)\n    x_2d_index_h = x_2d_index_h.flatten(0)\n    x_2d_index_w = x_2d_index_w.flatten(0)\n\n    concatenated_2d_index_h = torch.cat((z_2d_index_h, x_2d_index_h))\n    concatenated_2d_index_w = torch.cat((z_2d_index_w, x_2d_index_w))\n\n    diff_h = concatenated_2d_index_h[:, None] - concatenated_2d_index_h[None, :]\n    diff_w = concatenated_2d_index_w[:, None] - concatenated_2d_index_w[None, :]\n\n    z_len = z_shape[0] * z_shape[1]\n    x_len = x_shape[0] * x_shape[1]\n    a = torch.empty((z_len + x_len), dtype=torch.int64)\n    a[:z_len] = 0\n    a[z_len:] = 1\n    b=a[:, None].repeat(1, z_len + x_len)\n    c=a[None, :].repeat(z_len + x_len, 1)\n\n    diff = torch.stack((diff_h, diff_w, b, c), dim=-1)\n    _, indices = torch.unique(diff.view((z_len + x_len) * (z_len + x_len), 4), return_inverse=True, dim=0)\n    return indices.view((z_len + x_len), (z_len + x_len))\n\n\ndef generate_2d_concatenated_cross_attention_relative_positional_encoding_index(z_shape, x_shape):\n    '''\n        z_shape: (z_h, z_w)\n        x_shape: (x_h, x_w)\n    '''\n    z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1]))\n    x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1]))\n\n    z_2d_index_h = z_2d_index_h.flatten(0)\n    z_2d_index_w = z_2d_index_w.flatten(0)\n    x_2d_index_h = x_2d_index_h.flatten(0)\n    x_2d_index_w = x_2d_index_w.flatten(0)\n\n    concatenated_2d_index_h = torch.cat((z_2d_index_h, x_2d_index_h))\n    concatenated_2d_index_w = torch.cat((z_2d_index_w, x_2d_index_w))\n\n    diff_h = x_2d_index_h[:, None] - concatenated_2d_index_h[None, :]\n    diff_w = x_2d_index_w[:, None] - concatenated_2d_index_w[None, :]\n\n    z_len = z_shape[0] * z_shape[1]\n    x_len = x_shape[0] * x_shape[1]\n\n    a = torch.empty(z_len + x_len, dtype=torch.int64)\n    a[: z_len] = 0\n    a[z_len:] = 1\n    c = a[None, :].repeat(x_len, 1)\n\n    diff = torch.stack((diff_h, diff_w, c), dim=-1)\n    _, indices = torch.unique(diff.view(x_len * (z_len + x_len), 3), return_inverse=True, dim=0)\n    return indices.view(x_len, (z_len 
+ x_len))\n\n\nclass RelativePosition2DEncoder(nn.Module):\n    def __init__(self, num_heads, embed_size):\n        super(RelativePosition2DEncoder, self).__init__()\n        self.relative_position_bias_table = nn.Parameter(torch.empty((num_heads, embed_size)))\n        trunc_normal_(self.relative_position_bias_table, std=0.02)\n\n    def forward(self, attn_rpe_index):\n        '''\n            Args:\n                attn_rpe_index (torch.Tensor): (*), any shape containing indices, max(attn_rpe_index) < embed_size\n            Returns:\n                torch.Tensor: (1, num_heads, *)\n        '''\n        return self.relative_position_bias_table[:, attn_rpe_index].unsqueeze(0)\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/layers/self_practice.py",
    "content": "import torch\nimport mindspore as ms\nimport torch.nn as nn\nimport mindspore.nn as msnn\nfrom mindspore import ops\nimport numpy as np\nx = np.random.randn(3,5)\nx1 = torch.tensor(x)\nx2 = ms.tensor(x)\nlength1 = torch.tensor([[2,4,1],[3,1,0],[4,2,2]])\nlength2 = ms.tensor([[2,4,1],[3,1,0],[4,2,2]])\ny1 = torch.gather(x1,dim=1,index = length1)\ny2 = ops.gather_elements(x2,dim=1,index= length2)\nprint(y1)\nprint(y2)\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/ostrack/Vit_model_test.py",
    "content": "import sys\nsys.path.append(\"/home/djh/python-code/Artrackv2/2stage\")\nfrom lib.models.ostrack.vit import *\nfrom lib.test.evaluation.tracker import Tracker\nfrom lib.models.layers.mask_decoder import build_maskdecoder\nfrom lib.models.layers.head import DropPathAllocator\n\ntracker = Tracker('ostrack', '2stage_256_got', 'got10k_test', None)\nparam = tracker.get_parameters()\ncfg = param.cfg\npatch_start_index = 1\nkwargs = {'patch_size': 16, 'embed_dim': 768, 'depth': 12, 'num_heads': 12, 'drop_path_rate': 0.1}\nmodel = VisionTransformer(**kwargs)\nmodel.finetune_track(cfg=cfg, patch_start_index=patch_start_index)\ncross_2_decoder = build_maskdecoder(cfg)\ndrop_path = cfg.MODEL.DROP_PATH\ndrop_path_allocator = DropPathAllocator(drop_path)\nnum_heads = cfg.MODEL.NUM_HEADS\nmlp_ratio = cfg.MODEL.MLP_RATIO\nqkv_bias = cfg.MODEL.QKV_BIAS\ndrop_rate = cfg.MODEL.DROP_RATE\nattn_drop = cfg.MODEL.ATTN_DROP\n#def __init__(self, input_dim, hidden_dim, output_dim, num_layers, BN=False):\nscore_mlp = build_score_decoder(cfg)\ncover_mlp = build_score_decoder(cfg)\n\nmodel = OSTrack(\n    backbone,\n    #decoder,\n    cross_2_decoder,\n    score_mlp,\n    #cover_mlp,\n)\nfor name,param in model.parameters_and_names():\n    print (param.name)\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/ostrack/__init__.py",
    "content": "from .ostrack import build_ostrack\r\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/ostrack/base_backbone.py",
    "content": "from functools import partial\r\n\r\nimport mindspore as ms\r\nimport mindspore.nn as nn\r\nimport mindspore.ops as ops\r\nimport sys\r\nsys.path.append(\"/home/baiyifan/code/AR2_mindspore_cp/2stage\")\r\nfrom lib.models.timm import *\r\n\r\nfrom lib.models.layers.patch_embed import PatchEmbed\r\nfrom lib.models.ostrack.utils import combine_tokens, recover_tokens\r\n\r\n\r\nimport time\r\n\r\ndef generate_square_subsequent_mask(sz, sx, ss):\r\n    r\"\"\"Generate a square mask for the sequence. The masked positions are filled with float('-inf').\r\n        Unmasked positions are filled with float(0.0).\r\n    \"\"\"\r\n    # 0 means mask, 1 means visible\r\n    sum = sz + sx + ss\r\n    mask = (ops.triu(ops.ones((sum, sum))) == 1).swapaxes(0, 1)\r\n    mask[:, :] = 0\r\n    mask[:int(sz/2), :int(sz/2)] = 1 #template self\r\n    mask[int(sz/2):sz, int(sz/2):sz] = 1 # dt self\r\n    mask[int(sz/2):sz, sz:sz+sx] = 1 # dt search\r\n    mask[int(sz / 2):sz, -1] = 1  # dt search\r\n    mask[sz:sz+sx, :sz+sx] = 1 # sr dt-t-sr\r\n    mask[sz+sx:, :] = 1 # co dt-t-sr-co\r\n    # mask[sz+sx:, :sz] = 0\r\n    return ~mask\r\n\r\nclass BaseBackbone(nn.Cell):\r\n    def __init__(self):\r\n        super().__init__()\r\n\r\n        # for original ViT\r\n        self.pos_embed = None\r\n        self.img_size = [224, 224]\r\n        self.patch_size = 16\r\n        self.embed_dim = 384\r\n\r\n        self.cat_mode = 'direct'\r\n\r\n        self.pos_embed_z = None\r\n        self.pos_embed_x = None\r\n\r\n        self.bins = 400\r\n        in_channel = 768\r\n        self.range = 2\r\n        self.word_embeddings = nn.Embedding(self.bins * self.range + 6, in_channel, padding_idx=self.bins * self.range+4)\r\n        # mindspore的nn.Embedding中没有max_norm,norm_type，所以只能删去参数max_norm=1, norm_type=2.0\r\n        print(self.bins)\r\n        self.position_embeddings = nn.Embedding(\r\n            5, in_channel)\r\n        self.output_bias = ms.Parameter(ops.zeros(self.bins * self.range + 6))\r\n        self.prev_position_embeddings = nn.Embedding(7 * 4, in_channel)\r\n\r\n        self.template_segment_pos_embed = None\r\n        self.search_segment_pos_embed = None\r\n\r\n        self.return_inter = False\r\n        self.return_stage = [2, 5, 8, 11]\r\n\r\n        self.add_cls_token = False\r\n        self.add_sep_seg = False\r\n\r\n    def finetune_track(self, cfg, patch_start_index=1):\r\n\r\n        search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)\r\n        template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)\r\n        new_patch_size = cfg.MODEL.BACKBONE.STRIDE\r\n\r\n        self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE\r\n        self.return_inter = cfg.MODEL.RETURN_INTER\r\n        self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG\r\n\r\n        # resize patch embedding\r\n        if new_patch_size != self.patch_size:\r\n            print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')\r\n            old_patch_embed = {}\r\n            for name, param in self.patch_embed.named_parameters():\r\n                if 'weight' in name:\r\n                    param = ops.interpolate(param, size=(new_patch_size, new_patch_size),\r\n                                                      mode='bicubic', align_corners=False)\r\n                    param = ms.Parameter(param)\r\n                old_patch_embed[name] = param\r\n            print(\"Attention:old_patch_embed:\",old_patch_embed)\r\n            self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, 
\r\nclass BaseBackbone(nn.Cell):\r\n    def __init__(self):\r\n        super().__init__()\r\n\r\n        # for original ViT\r\n        self.pos_embed = None\r\n        self.img_size = [224, 224]\r\n        self.patch_size = 16\r\n        self.embed_dim = 384\r\n\r\n        self.cat_mode = 'direct'\r\n\r\n        self.pos_embed_z = None\r\n        self.pos_embed_x = None\r\n\r\n        self.bins = 400\r\n        in_channel = 768\r\n        self.range = 2\r\n        # MindSpore's nn.Embedding has no max_norm/norm_type, so the torch\r\n        # arguments max_norm=1, norm_type=2.0 are dropped in this port\r\n        self.word_embeddings = nn.Embedding(self.bins * self.range + 6, in_channel, padding_idx=self.bins * self.range + 4)\r\n        self.position_embeddings = nn.Embedding(5, in_channel)\r\n        self.output_bias = ms.Parameter(ops.zeros(self.bins * self.range + 6))\r\n        self.prev_position_embeddings = nn.Embedding(7 * 4, in_channel)\r\n\r\n        self.template_segment_pos_embed = None\r\n        self.search_segment_pos_embed = None\r\n\r\n        self.return_inter = False\r\n        self.return_stage = [2, 5, 8, 11]\r\n\r\n        self.add_cls_token = False\r\n        self.add_sep_seg = False\r\n\r\n    def finetune_track(self, cfg, patch_start_index=1):\r\n\r\n        search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)\r\n        template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)\r\n        new_patch_size = cfg.MODEL.BACKBONE.STRIDE\r\n\r\n        self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE\r\n        self.return_inter = cfg.MODEL.RETURN_INTER\r\n        self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG\r\n\r\n        # resize patch embedding\r\n        if new_patch_size != self.patch_size:\r\n            print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')\r\n            old_patch_embed = {}\r\n            # Cells expose parameters_and_names() instead of named_parameters()\r\n            for name, param in self.patch_embed.parameters_and_names():\r\n                if 'weight' in name:\r\n                    param = ops.interpolate(param, size=(new_patch_size, new_patch_size),\r\n                                                      mode='bicubic', align_corners=False)\r\n                    param = ms.Parameter(param)\r\n                old_patch_embed[name] = param\r\n            self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, in_chans=3,\r\n                                          embed_dim=self.embed_dim)\r\n            self.patch_embed.proj.bias = old_patch_embed['proj.bias']\r\n            self.patch_embed.proj.weight = old_patch_embed['proj.weight']\r\n\r\n        # for patch embedding\r\n        patch_pos_embed = self.pos_embed[:, patch_start_index:, :]\r\n        patch_pos_embed = patch_pos_embed.swapaxes(1, 2)\r\n        B, E, Q = patch_pos_embed.shape\r\n        P_H, P_W = self.img_size[0] // self.patch_size, self.img_size[1] // self.patch_size\r\n        patch_pos_embed = patch_pos_embed.view(B, E, P_H, P_W)\r\n\r\n        # for search region\r\n        H, W = search_size\r\n        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size\r\n        search_patch_pos_embed = ops.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',\r\n                                                           align_corners=False)\r\n        search_patch_pos_embed = ops.flatten(search_patch_pos_embed, start_dim=2)\r\n        search_patch_pos_embed = search_patch_pos_embed.swapaxes(1, 2)\r\n\r\n        # for template region\r\n        H, W = template_size\r\n        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size\r\n        template_patch_pos_embed = ops.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',\r\n                                                             align_corners=False)\r\n        template_patch_pos_embed = ops.flatten(template_patch_pos_embed, start_dim=2).swapaxes(1, 2)\r\n\r\n        self.pos_embed_z = ms.Parameter(template_patch_pos_embed)\r\n        self.pos_embed_z0 = ms.Parameter(template_patch_pos_embed)\r\n        self.pos_embed_z1 = ms.Parameter(template_patch_pos_embed)\r\n        self.pos_embed_x = ms.Parameter(search_patch_pos_embed)\r\n\r\n        # for cls token (keep it but not used)\r\n        if self.add_cls_token and patch_start_index > 0:\r\n            cls_pos_embed = self.pos_embed[:, 0:1, :]\r\n            self.cls_pos_embed = ms.Parameter(cls_pos_embed)\r\n\r\n        # separate token and segment token\r\n        if self.add_sep_seg:\r\n            self.template_segment_pos_embed = ms.Parameter(ops.zeros((1, 1, self.embed_dim)))\r\n            self.template_segment_pos_embed = trunc_normal_(self.template_segment_pos_embed, std=.02)\r\n            self.search_segment_pos_embed = ms.Parameter(ops.zeros((1, 1, self.embed_dim)))\r\n            self.search_segment_pos_embed = trunc_normal_(self.search_segment_pos_embed, std=.02)\r\n\r\n        if self.return_inter:\r\n            for i_layer in self.fpn_stage:\r\n                if i_layer != 11:\r\n                    # MindSpore LayerNorm uses epsilon and a tuple shape\r\n                    norm_layer = partial(nn.LayerNorm, epsilon=1e-6)\r\n                    layer = norm_layer((self.embed_dim,))\r\n                    layer_name = f'norm{i_layer}'\r\n                    # Cell.insert_child_to_cell replaces torch's add_module\r\n                    self.insert_child_to_cell(layer_name, layer)\r\n\r\n    def forward_features(self, z_0, z_1_feat, x, identity, seqs_input):\r\n        share_weight = self.word_embeddings.embedding_table.T\r\n        out_list = []\r\n        begin = self.bins * self.range\r\n        begin_2 = self.bins * self.range + 1\r\n        begin_3 = self.bins * self.range + 2\r\n        begin_4 = self.bins * self.range + 3\r\n        score = self.bins * self.range + 5\r\n        B, H, W = x.shape[0], x.shape[2], x.shape[3]\r\n
        a = ops.cat((ops.ones((B, 1), dtype=x.dtype) * begin, ops.ones((B, 1), dtype=x.dtype) * begin_2,\r\n                     ops.ones((B, 1), dtype=x.dtype) * begin_3,\r\n                     ops.ones((B, 1), dtype=x.dtype) * begin_4,\r\n                     ops.ones((B, 1), dtype=x.dtype) * score), axis=1)\r\n        b = seqs_input\r\n        c = ops.cat([b, a], axis=1)\r\n        seqs_input_ = c.astype(ms.int64)\r\n        # MindSpore tensors have no clone(); copy() is the equivalent\r\n        output_x_feat = x.copy()\r\n\r\n        tgt = self.word_embeddings(seqs_input_).permute(1, 0, 2)\r\n        x = self.patch_embed(x)\r\n        z_0 = self.patch_embed(z_0)\r\n        z_1 = z_1_feat\r\n\r\n        s_x = x.shape[1]\r\n        s_z = z_0.shape[1] + z_1.shape[1]\r\n\r\n        z_0 += identity[:, 0, :].tile((B, self.pos_embed_z.shape[1], 1))\r\n        z_1 += identity[:, 1, :].tile((B, self.pos_embed_z.shape[1], 1))\r\n\r\n        x += identity[:, 2, :].tile((B, self.pos_embed_x.shape[1], 1))\r\n        query_embed_ = self.position_embeddings.embedding_table.unsqueeze(1)\r\n        prev_embed_ = self.prev_position_embeddings.embedding_table.unsqueeze(1)\r\n        query_embed = ops.cat([prev_embed_, query_embed_], axis=0)\r\n        query_embed = query_embed.tile((1, B, 1))\r\n\r\n        tgt = tgt.swapaxes(0, 1)\r\n        query_embed = query_embed.swapaxes(0, 1)\r\n        z_0 += self.pos_embed_z0\r\n        z_1 += self.pos_embed_z1\r\n        x += self.pos_embed_x\r\n        s_s = seqs_input_.shape[1]\r\n\r\n        mask = generate_square_subsequent_mask(s_z, s_x, s_s)\r\n\r\n        tgt += query_embed[:, :tgt.shape[1]]\r\n\r\n        z = ops.cat((z_0, z_1), axis=1)\r\n        zx = combine_tokens(z, x, mode=self.cat_mode)\r\n        zxs = ops.cat((zx, tgt), axis=1)\r\n        zxs = self.pos_drop(zxs)\r\n        for j, blk in enumerate(self.blocks):\r\n            zxs = blk(zxs, padding_mask=mask)\r\n        for j, blk in enumerate(self.extension):\r\n            zxs = blk(zxs, padding_mask=mask)\r\n\r\n        lens_z = self.pos_embed_z.shape[1]\r\n        lens_x = self.pos_embed_x.shape[1]\r\n        z_0_feat = zxs[:, :lens_z]\r\n        z_1_feat = zxs[:, lens_z:lens_z*2]\r\n        x_feat = zxs[:, lens_z*2:lens_z*2+lens_x]\r\n        x_out = self.norm(zxs[:, -5:-1])\r\n        score_feat = zxs[:, -1]\r\n        seq_feat = x_out\r\n        at = ops.matmul(x_out, share_weight)\r\n        out = at + self.output_bias\r\n        temp = out.swapaxes(0, 1)\r\n\r\n        out_list.append(out.unsqueeze(0))\r\n        out = ops.softmax(out, axis=-1)\r\n        for i in range(4):\r\n            value, extra_seq = out[:, i, :].topk(dim=-1, k=1)[0], out[:, i, :].topk(dim=-1, k=1)[1]\r\n            if i == 0:\r\n                seqs_output = extra_seq\r\n                values = value\r\n            else:\r\n                seqs_output = ops.cat([seqs_output, extra_seq], axis=-1)\r\n                values = ops.cat([values, value], axis=-1)\r\n\r\n        output = {'seqs': seqs_output, 'class': values, 'feat': temp, \"state\": \"val/test\", \"x_feat\": ops.stop_gradient(output_x_feat), \"seq_feat\": seq_feat}\r\n        return output, z_0_feat, z_1_feat, x_feat, score_feat\r\n\r\n
    def construct(self, z_0, z_1_feat, x, identity, seqs_input, **kwargs):\r\n        \"\"\"\r\n        Joint feature extraction and relation modeling for the basic ViT backbone.\r\n        Args:\r\n            z_0 (ms.Tensor): template image, [B, C, H_z, W_z]\r\n            z_1_feat (ms.Tensor): dynamic template feature tokens, [B, L_z, C]\r\n            x (ms.Tensor): search region image, [B, C, H_x, W_x]\r\n            seqs_input (ms.Tensor): previous coordinate token sequence, [B, L_s]\r\n\r\n        Returns:\r\n            output dict plus template/search feature tokens and the score token feature\r\n        \"\"\"\r\n        output = self.forward_features(z_0, z_1_feat, x, identity, seqs_input)\r\n\r\n        return output\r\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/ostrack/load_parameter_test.py",
    "content": "import torch\nimport sys\nsys.path.append(\"/home/djh/python-code/Artrackv2/2stage\")\nfrom mindspore import Tensor\nfrom mindspore import save_checkpoint\ndef get_keymap_txt(pth_file):\n    # 如果是tar压缩文件。需要执行下面这段代码\n    checkpoint = torch.load(pth_file,map_location=\"cpu\")\n    state_dict = checkpoint['net']\n    # end\n\n    # # 否则就是执行下面这段代码\n    # map_path = pth_file.split('.')[0] + '_key_map.txt'\n    # map_file = open(map_path, 'w')\n    # state_dict = torch.load(pth_file, map_location=torch.device('cpu'))\n    # if 'model_state' in state_dict:\n    #     state_dict = state_dict['model_state']\n    # elif 'module' in state_dict:\n    #     state_dict = state_dict['module']\n    # elif 'model' in state_dict:\n    #     state_dict = state_dict['model']\n    # # end\n    list = []\n    dict = {}\n    for name,value in state_dict.items():\n        print(name)\n        if name == \"cross_2_decoder.decoder_blocks.0.norm1.weight\":\n            print(state_dict[name])\n        new_name = name.replace(\"norm1.weight\",\"norm1.gamma\")\n        new_name = new_name.replace(\"norm1.bias\",\"norm1.beta\")\n        new_name = new_name.replace(\"norm2.weight\",\"norm2.gamma\")\n        new_name = new_name.replace(\"norm2.bias\",\"norm2.beta\")\n        new_name = new_name.replace(\"decoder_norm.weight\",\"decoder_norm.gamma\")\n        new_name = new_name.replace(\"decoder_norm.bias\",\"decoder_norm.beta\")\n        new_name = new_name.replace(\"word_embeddings.weight\", \"word_embeddings.embedding_table\")\n        new_name = new_name.replace(\"position_embeddings.weight\", \"position_embeddings.embedding_table\")\n        new_name = new_name.replace(\"prev_position_embeddings.weight\", \"prev_position_embeddings.embedding_table\")\n        new_name = new_name.replace(\"norm.weight\",\"norm.gamma\")\n        new_name = new_name.replace(\"norm.bias\",\"norm.beta\")\n        list.append(new_name)\n    ms_params_list=[]\n    for name,value in dict.items():\n        param_dict={}\n        param_dict['name'] = name\n        param_dict['data'] = Tensor(value.numpy())\n        if name==\"backbone.pos_embed_z0\":\n            print(param_dict['data'])\n        ms_params_list.append(param_dict)\n    # 要生成转换文件的话，就执行下面这句注释代码\n    # save_checkpoint(ms_params_list, \"/home/djh/python-code/Artrackv2/checkpoint1.ckpt\")\npth_file = \"/mnt/d/Download/OSTrack_ep0030.pth.tar\"\nget_keymap_txt(pth_file)\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/ostrack/ostrack.py",
    "content": "\"\"\"\r\nBasic OSTrack model.\r\n\"\"\"\r\nimport sys\r\nfrom copy import deepcopy\r\nimport math\r\nimport os\r\nfrom typing import List\r\n\r\nimport mindspore as ms\r\nfrom mindspore import nn\r\nfrom lib.models.timm import *\r\n\r\nfrom lib.models.ostrack.vit import vit_base_patch16_224, vit_large_patch16_224\r\nfrom lib.models.ostrack.vit_ce import vit_large_patch16_224_ce, vit_base_patch16_224_ce\r\nfrom lib.models.layers.mask_decoder import build_maskdecoder\r\nfrom lib.models.layers.head import build_decoder, MLP, DropPathAllocator\r\n\r\n\r\nclass OSTrack(nn.Cell):\r\n    \"\"\" This is the base class for OSTrack \"\"\"\r\n\r\n    def __init__(self, transformer,\r\n                 #decoder,\r\n                 cross_2_decoder,\r\n                 score_mlp,\r\n                 #cover_mlp,\r\n                 ):\r\n        \"\"\" Initializes the model.\r\n        Parameters:\r\n            transformer: torch module of the transformer architecture.\r\n            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\r\n        \"\"\"\r\n        super().__init__()\r\n        self.backbone = transformer\r\n        self.score_mlp = score_mlp\r\n            \r\n        self.identity = ms.Parameter(ops.zeros((1, 3, 768)))\r\n        self.identity = trunc_normal_(self.identity, std=.02)\r\n\r\n        self.cross_2_decoder = cross_2_decoder\r\n\r\n\r\n    def construct(self, template: ms.Tensor,\r\n                dz_feat: ms.Tensor,\r\n                search: ms.Tensor,\r\n                ce_template_mask=None,\r\n                ce_keep_rate=None,\r\n                return_last_attn=False,\r\n                seq_input=None,\r\n                head_type=None,\r\n                stage=None,\r\n                search_feature=None,\r\n                target_in_search_img=None,\r\n                gt_bboxes=None,\r\n                ):\r\n        template_0 = template[:, 0]\r\n        out, z_0_feat, z_1_feat, x_feat, score_feat = self.backbone(z_0=template_0, z_1_feat=dz_feat, x=search, identity=self.identity, seqs_input=seq_input,\r\n                                    ce_template_mask=ce_template_mask,\r\n                                    ce_keep_rate=ce_keep_rate,\r\n                                    return_last_attn=return_last_attn,)\r\n        seq_feat = out['seq_feat'].permute(1, 0 ,2)\r\n        share_weight = self.backbone.word_embeddings.embedding_table[:800, :].unsqueeze(0).tile((seq_feat.shape[1], 1, 1))\r\n        pos = self.backbone.position_embeddings.embedding_table.unsqueeze(0).tile((seq_feat.shape[1], 1, 1)).permute(1, 0 ,2)\r\n        score = self.score_mlp(score_feat)\r\n        ops.clamp(score, min=0.0, max=1.0)\r\n        out['score'] = score\r\n\r\n        loss = ms.tensor(0.0, dtype=ms.float32)\r\n        if target_in_search_img != None:\r\n            target_in_search_gt = self.backbone.patch_embed(target_in_search_img)\r\n            z_1_feat = z_1_feat.reshape(z_1_feat.shape[0], int(z_1_feat.shape[1] ** 0.5), int(z_1_feat.shape[1] ** 0.5),\r\n                                        z_1_feat.shape[2]).permute(0, 3, 1, 2)\r\n            target_in_search_gt = self.cross_2_decoder.unpatchify(target_in_search_gt)               \r\n\r\n            update_img, loss_temp = self.cross_2_decoder(z_1_feat, target_in_search_gt)\r\n            update_feat = self.cross_2_decoder.patchify(update_img)\r\n            out['dz_feat'] = update_feat\r\n            loss += loss_temp\r\n\r\n            out['renew_loss'] = loss\r\n\r\n 
       else:\r\n            z_1_feat = z_1_feat.reshape(z_1_feat.shape[0], int(z_1_feat.shape[1] ** 0.5), int(z_1_feat.shape[1] ** 0.5),\r\n                                        z_1_feat.shape[2]).permute(0, 3, 1, 2)\r\n            update_feat = self.cross_2_decoder(z_1_feat, eval=True)\r\n            update_feat = self.cross_2_decoder.patchify(update_feat)\r\n            out['dz_feat'] = update_feat\r\n\r\n        return out\r\n\r\n    def forward_head(self, cat_feature, pos_z, pos_x, identity, seq_input=None, gt_score_map=None, head_type=None, stage=None, search_feature=None):\r\n        \"\"\"\r\n        cat_feature: output embeddings of the backbone, it can be (HW1+HW2, B, C) or (HW2, B, C)\r\n        \"\"\"\r\n        if self.head_type == \"CORNER\":\r\n            # run the corner head\r\n            pred_box, score_map = self.box_head(opt_feat, True)\r\n            outputs_coord = box_xyxy_to_cxcywh(pred_box)\r\n            outputs_coord_new = outputs_coord.view(bs, Nq, 4)\r\n            out = {'pred_boxes': outputs_coord_new,\r\n                   'score_map': score_map,\r\n                   }\r\n            return out\r\n\r\n        elif self.head_type == \"CENTER\":\r\n            # run the center head\r\n            score_map_ctr, bbox, size_map, offset_map = self.box_head(opt_feat, gt_score_map)\r\n            # outputs_coord = box_xyxy_to_cxcywh(bbox)\r\n            outputs_coord = bbox\r\n            outputs_coord_new = outputs_coord.view(bs, Nq, 4)\r\n            out = {'pred_boxes': outputs_coord_new,\r\n                   'score_map': score_map_ctr,\r\n                   'size_map': size_map,\r\n                   'offset_map': offset_map}\r\n            return out\r\n        elif self.head_type == \"PIX\":\r\n            output_dict = self.box_head(cat_feature, pos_z, pos_x, identity, seq_input, head_type, stage, search_feature)\r\n            return output_dict\r\n        else:\r\n            raise NotImplementedError\r\n\r\nclass MlpScoreDecoder(nn.Cell):\r\n    def __init__(self, in_dim, hidden_dim, num_layers, bn=False):\r\n        super().__init__()\r\n        self.num_layers = num_layers\r\n        h = [hidden_dim] * (num_layers - 1)\r\n        out_dim = 1 # score\r\n        if bn:\r\n            self.layers = nn.SequentialCell(*[nn.SequentialCell(nn.Dense(n, k), nn.BatchNorm1d(k), nn.ReLU())\r\n                                          if i < num_layers - 1\r\n                                          else nn.SequentialCell(nn.Dense(n, k), nn.BatchNorm1d(k))\r\n                                          for i, (n, k) in enumerate(zip([in_dim] + h, h + [out_dim]))])\r\n        else:\r\n            self.layers = nn.SequentialCell(*[nn.SequentialCell(nn.Dense(n, k), nn.ReLU())\r\n                                          if i < num_layers - 1\r\n                                          else nn.Dense(n, k)\r\n                                          for i, (n, k) in enumerate(zip([in_dim] + h, h + [out_dim]))])\r\n\r\n    def construct(self, reg_tokens):\r\n        \"\"\"\r\n        reg tokens shape: (b, 4, embed_dim)\r\n        \"\"\"\r\n        x = self.layers(reg_tokens) # (b, 4, 1)\r\n        x = ops.mean(x,axis=1)   # (b, 1)\r\n        return x\r\n\r\ndef build_score_decoder(cfg):\r\n    return MlpScoreDecoder(\r\n        in_dim=cfg.MODEL.BACKBONE.EMBEDDIM,\r\n        hidden_dim=cfg.MODEL.BACKBONE.EMBEDDIM,\r\n        num_layers=2,\r\n        bn=False\r\n    )\r\n\r\ndef build_ostrack(cfg, training=True):\r\n    current_dir = 
os.path.dirname(os.path.abspath(__file__))  # This is your Project Root\r\n    pretrained_path = \"/home/baiyifan/code/vitrack/\"\r\n    if cfg.MODEL.PRETRAIN_FILE and ('OSTrack' not in cfg.MODEL.PRETRAIN_FILE) and training:\r\n        pretrained = os.path.join(pretrained_path, cfg.MODEL.PRETRAIN_FILE)\r\n    else:\r\n        pretrained = ''\r\n\r\n    if cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224':\r\n        backbone = vit_base_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE)\r\n        hidden_dim = backbone.embed_dim\r\n        patch_start_index = 1\r\n    elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224':\r\n        print(\"i use vit_large\")\r\n        backbone = vit_large_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE)\r\n        hidden_dim = backbone.embed_dim\r\n        patch_start_index = 1\r\n\r\n    elif cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224_ce':\r\n        backbone = vit_base_patch16_224_ce(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE,\r\n                                           ce_loc=cfg.MODEL.BACKBONE.CE_LOC,\r\n                                           ce_keep_ratio=cfg.MODEL.BACKBONE.CE_KEEP_RATIO,\r\n                                           )\r\n        hidden_dim = backbone.embed_dim\r\n        patch_start_index = 1\r\n\r\n    elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224_ce':\r\n        backbone = vit_large_patch16_224_ce(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE,\r\n                                            ce_loc=cfg.MODEL.BACKBONE.CE_LOC,\r\n                                            ce_keep_ratio=cfg.MODEL.BACKBONE.CE_KEEP_RATIO,\r\n                                            )\r\n\r\n        hidden_dim = backbone.embed_dim\r\n        patch_start_index = 1\r\n\r\n    else:\r\n        raise NotImplementedError\r\n\r\n    backbone.finetune_track(cfg=cfg, patch_start_index=patch_start_index)\r\n\r\n    #decoder = build_maskdecoder(cfg)\r\n    cross_2_decoder = build_maskdecoder(cfg)\r\n\r\n    drop_path = cfg.MODEL.DROP_PATH\r\n    drop_path_allocator = DropPathAllocator(drop_path)\r\n    num_heads = cfg.MODEL.NUM_HEADS\r\n    mlp_ratio = cfg.MODEL.MLP_RATIO\r\n    qkv_bias = cfg.MODEL.QKV_BIAS\r\n    drop_rate = cfg.MODEL.DROP_RATE\r\n    attn_drop = cfg.MODEL.ATTN_DROP\r\n    score_mlp = build_score_decoder(cfg)\r\n    cover_mlp = build_score_decoder(cfg)\r\n\r\n    model = OSTrack(\r\n        backbone,\r\n        #decoder,\r\n        cross_2_decoder,\r\n        score_mlp,\r\n        #cover_mlp,\r\n    )\r\n\r\n    from mindspore.amp import auto_mixed_precision\r\n    model = auto_mixed_precision(model, 'O0')\r\n    load_from = cfg.MODEL.PRETRAIN_FILE\r\n    param_dict = ms.load_checkpoint(load_from)\r\n    param_not_load, _ = ms.load_param_into_net(model, param_dict)\r\n    print(\"未加载权重：\",param_not_load)\r\n    print('Load pretrained model from: ' + load_from)\r\n    model.backbone.pos_embed_z0 = model.backbone.pos_embed_z1\r\n\r\n    return model\r\n"
  },
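  {
    "path": "artrackv2_mindspore/examples/score_decoder_sketch.py",
    "content": "\"\"\"Illustrative sketch, not part of the original repo: shows how the\nMlpScoreDecoder returned by build_score_decoder collapses the (b, 4, embed_dim)\nregister tokens into one confidence score, plus the [0, 1] clamp applied in\nOSTrack.construct. The import path, the _Cfg stand-in, and the random inputs\nare assumptions for demonstration only.\"\"\"\nimport numpy as np\nimport mindspore as ms\nfrom mindspore import ops\nfrom lib.models.ostrack.ostrack import build_score_decoder  # assumed module path\n\n\nclass _Cfg:  # minimal stand-in for the yacs config consumed by build_score_decoder\n    class MODEL:\n        class BACKBONE:\n            EMBEDDIM = 768\n\n\ndecoder = build_score_decoder(_Cfg)\nreg_tokens = ms.Tensor(np.random.randn(2, 4, 768).astype(np.float32))  # (b, 4, embed_dim)\nscore = decoder(reg_tokens)                 # (b, 1): per-token MLP, then mean over the 4 tokens\nscore = ops.clamp(score, min=0.0, max=1.0)  # as in OSTrack.construct\nprint(score.shape)\n"
  },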
  {
    "path": "artrackv2_mindspore/lib/models/ostrack/ostrack_test.py",
    "content": "import sys\nsys.path.append(\"/home/djh/python-code/Artrackv2/2stage\")\nfrom lib.models.ostrack.vit import *\nfrom lib.test.evaluation.tracker import Tracker\nfrom lib.models.ostrack import *\ntracker = Tracker('ostrack', '2stage_256_got', 'got10k_test', None)\nparam = tracker.get_parameters()\ncfg = param.cfg\nmodel = build_ostrack(cfg,training=False)\n\n"
  },
  {
    "path": "artrackv2_mindspore/lib/models/ostrack/utils.py",
    "content": "import math\r\n\r\nimport mindspore as ms\r\nfrom mindspore import ops\r\n\r\ndef combine_tokens(template_tokens, search_tokens, mode='direct', return_res=False):\r\n    # [B, HW, C]\r\n    len_t = template_tokens.shape[1]\r\n    len_s = search_tokens.shape[1]\r\n\r\n    if mode == 'direct':\r\n        merged_feature = ops.cat((template_tokens, search_tokens), axis=1)\r\n    elif mode == 'template_central':\r\n        central_pivot = len_s // 2\r\n        first_half = search_tokens[:, :central_pivot, :]\r\n        second_half = search_tokens[:, central_pivot:, :]\r\n        merged_feature = ops.cat((first_half, template_tokens, second_half), axis=1)\r\n    elif mode == 'partition':\r\n        feat_size_s = int(math.sqrt(len_s))\r\n        feat_size_t = int(math.sqrt(len_t))\r\n        window_size = math.ceil(feat_size_t / 2.)\r\n        # pad feature maps to multiples of window size\r\n        B, _, C = template_tokens.shape\r\n        H = W = feat_size_t\r\n        template_tokens = template_tokens.view(B, H, W, C)\r\n        pad_l = pad_b = pad_r = 0\r\n        # pad_r = (window_size - W % window_size) % window_size\r\n        pad_t = (window_size - H % window_size) % window_size\r\n        template_tokens = ops.pad(template_tokens, (0, 0, pad_l, pad_r, pad_t, pad_b))\r\n        _, Hp, Wp, _ = template_tokens.shape\r\n        template_tokens = template_tokens.view(B, Hp // window_size, window_size, W, C)\r\n        template_tokens = ops.cat([template_tokens[:, 0, ...], template_tokens[:, 1, ...]], axis=2)\r\n        _, Hc, Wc, _ = template_tokens.shape\r\n        template_tokens = template_tokens.view(B, -1, C)\r\n        merged_feature = ops.cat([template_tokens, search_tokens], axis=1)\r\n\r\n        # calculate new h and w, which may be useful for SwinT or others\r\n        merged_h, merged_w = feat_size_s + Hc, feat_size_s\r\n        if return_res:\r\n            return merged_feature, merged_h, merged_w\r\n\r\n    else:\r\n        raise NotImplementedError\r\n\r\n    return merged_feature\r\n\r\n\r\ndef recover_tokens(merged_tokens, len_template_token, len_search_token, mode='direct'):\r\n    if mode == 'direct':\r\n        recovered_tokens = merged_tokens\r\n    elif mode == 'template_central':\r\n        central_pivot = len_search_token // 2\r\n        len_remain = len_search_token - central_pivot\r\n        len_half_and_t = central_pivot + len_template_token\r\n\r\n        first_half = merged_tokens[:, :central_pivot, :]\r\n        second_half = merged_tokens[:, -len_remain:, :]\r\n        template_tokens = merged_tokens[:, central_pivot:len_half_and_t, :]\r\n\r\n        recovered_tokens = ops.cat((template_tokens, first_half, second_half), axis=1)\r\n    elif mode == 'partition':\r\n        recovered_tokens = merged_tokens\r\n    else:\r\n        raise NotImplementedError\r\n\r\n    return recovered_tokens\r\n\r\n\r\ndef window_partition(x, window_size: int):\r\n    \"\"\"\r\n    Args:\r\n        x: (B, H, W, C)\r\n        window_size (int): window size\r\n\r\n    Returns:\r\n        windows: (num_windows*B, window_size, window_size, C)\r\n    \"\"\"\r\n    B, H, W, C = x.shape\r\n    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)\r\n    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\r\n    return windows\r\n\r\n\r\ndef window_reverse(windows, window_size: int, H: int, W: int):\r\n    \"\"\"\r\n    Args:\r\n        windows: (num_windows*B, window_size, window_size, C)\r\n        window_size 
(int): Window size\r\n        H (int): Height of image\r\n        W (int): Width of image\r\n\r\n    Returns:\r\n        x: (B, H, W, C)\r\n    \"\"\"\r\n    B = int(windows.shape[0] / (H * W / window_size / window_size))\r\n    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)\r\n    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\r\n    return x\r\n"
  },
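  {
    "path": "artrackv2_mindspore/examples/combine_tokens_sketch.py",
    "content": "\"\"\"Illustrative sketch, not part of the original repo: round-trips template and\nsearch tokens through combine_tokens/recover_tokens from\nlib/models/ostrack/utils.py in 'direct' mode, where merging is a plain\nconcatenation along the token axis and recovery is the identity. Shapes are\narbitrary assumptions.\"\"\"\nimport numpy as np\nimport mindspore as ms\nfrom lib.models.ostrack.utils import combine_tokens, recover_tokens\n\nz = ms.Tensor(np.random.randn(2, 64, 768).astype(np.float32))   # template tokens (B, HW1, C)\nx = ms.Tensor(np.random.randn(2, 256, 768).astype(np.float32))  # search tokens (B, HW2, C)\n\nmerged = combine_tokens(z, x, mode='direct')               # (2, 64 + 256, 768)\nrestored = recover_tokens(merged, 64, 256, mode='direct')  # identity for 'direct'\nassert restored.shape == (2, 320, 768)\nprint(merged.shape, restored.shape)\n"
  },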
  {
    "path": "artrackv2_mindspore/lib/models/ostrack/vit.py",
    "content": "\"\"\" Vision Transformer (ViT) in PyTorch\r\nA PyTorch implement of Vision Transformers as described in:\r\n'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'\r\n    - https://arxiv.org/abs/2010.11929\r\n`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`\r\n    - https://arxiv.org/abs/2106.10270\r\nThe official jax code is released and available at https://github.com/google-research/vision_transformer\r\nDeiT model defs and weights from https://github.com/facebookresearch/deit,\r\npaper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877\r\nAcknowledgments:\r\n* The paper authors for releasing code and weights, thanks!\r\n* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out\r\nfor some einops/einsum fun\r\n* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT\r\n* Bert reference code checks against Huggingface Transformers and Tensorflow Bert\r\nHacked together by / Copyright 2021 Ross Wightman\r\n\r\nModified by Botao Ye\r\n\"\"\"\r\nimport sys\r\nsys.path.append(\"/home/baiyifan/code/AR2_mindspore_cp/2stage\")\r\nimport math\r\nimport logging\r\nfrom functools import partial\r\nfrom collections import OrderedDict\r\nfrom copy import deepcopy\r\nfrom lib.models.timm import *\r\n\r\nfrom lib.models.layers.patch_embed import PatchEmbed\r\nfrom lib.models.ostrack.base_backbone import BaseBackbone\r\nimport mindspore as ms\r\nfrom mindspore import nn as  msnn\r\nimport mindspore.ops as ops\r\nimport mindspore.numpy as np\r\nfrom mindspore.common.initializer import initializer\r\nimport numpy \r\n\r\n\r\n\r\nclass Attention(msnn.Cell):\r\n    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):\r\n        super().__init__()\r\n        self.num_heads = num_heads\r\n        head_dim = dim // num_heads\r\n        self.scale = head_dim ** -0.5\r\n\r\n        self.qkv = msnn.Dense(dim, dim * 3, has_bias=qkv_bias)\r\n        self.attn_drop = msnn.Dropout(p=attn_drop)\r\n        self.proj = msnn.Dense(dim, dim)\r\n        self.proj_drop = msnn.Dropout(p=proj_drop)\r\n\r\n    def construct(self, x, return_attention=False,padding_mask=None):\r\n        B, N, C = x.shape\r\n        # print(\"x:\",x)\r\n        # weight = ms.tensor(self.qkv.weight.value(),dtype=ms.float32).numpy()\r\n        # bias = ms.tensor(self.qkv.bias.value(),dtype=ms.float32).numpy()\r\n        # m = ops.ones_like(x,dtype=ms.float32)\r\n        # test = self.qkv(m).numpy()\r\n        # with numpy.printoptions (precision=12):\r\n        #     print(\"weight:\",weight)\r\n        #     print(\"bias:\",bias)\r\n        #     print(\"test:\",test)\r\n        # numpy.savez(\"/home/baiyifan/code/Artrackv2_mindspore/2stage/outputv1.npz\",weight,bias,test)\r\n        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\r\n        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (camsnnot use tensor as tuple)\r\n\r\n        attn = (q @ k.swapaxes(-2, -1)) * self.scale\r\n        if padding_mask != None:\r\n            attn = attn.masked_fill(padding_mask, float(\"-inf\"))\r\n        attn = ops.softmax(attn,axis=-1)\r\n        attn = self.attn_drop(attn)\r\n\r\n        x = (attn @ v).swapaxes(1, 2).reshape(B, N, C)\r\n        x = self.proj(x)\r\n        x = self.proj_drop(x)\r\n        if return_attention:\r\n            return x, attn\r\n        return 
x\r\n\r\n\r\nclass Block(msnn.Cell):\r\n\r\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\r\n                 drop_path=0., act_layer=msnn.GELU, norm_layer=msnn.LayerNorm):\r\n        super().__init__()\r\n        norm_layer_dim=[]\r\n        norm_layer_dim.append(dim)\r\n        norm_layer_dim = tuple(norm_layer_dim)\r\n        self.norm1 = norm_layer(norm_layer_dim)\r\n        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\r\n        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\r\n        self.drop_path = DropPath(drop_path) if drop_path > 0. else msnn.Identity()\r\n        self.norm2 = norm_layer(norm_layer_dim)\r\n        mlp_hidden_dim = int(dim * mlp_ratio)\r\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\r\n\r\n    def construct(self, x, return_attention=False,padding_mask=None):\r\n        if return_attention:\r\n            feat, attn = self.attn(self.norm1(x), True,padding_mask)\r\n            x = x + self.drop_path(feat)\r\n            x = x + self.drop_path(self.mlp(self.norm2(x)))\r\n            return x, attn\r\n        else:\r\n            x = x + self.drop_path(self.attn(self.norm1(x),padding_mask=padding_mask))\r\n            x = x + self.drop_path(self.mlp(self.norm2(x)))\r\n            return x\r\n\r\n\r\nclass VisionTransformer(BaseBackbone):\r\n    \"\"\" Vision Transformer\r\n    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\r\n        - https://arxiv.org/abs/2010.11929\r\n    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`\r\n        - https://arxiv.org/abs/2012.12877\r\n    \"\"\"\r\n\r\n    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\r\n                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,\r\n                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,\r\n                 act_layer=None, weight_init=''):\r\n        \"\"\"\r\n        Args:\r\n            img_size (int, tuple): input image size\r\n            patch_size (int, tuple): patch size\r\n            in_chans (int): number of input chamsnnels\r\n            num_classes (int): number of classes for classification head\r\n            embed_dim (int): embedding dimension\r\n            depth (int): depth of transformer\r\n            num_heads (int): number of attention heads\r\n            mlp_ratio (int): ratio of mlp hidden dim to embedding dim\r\n            qkv_bias (bool): enable bias for qkv if True\r\n            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\r\n            distilled (bool): model includes a distillation token and head as in DeiT models\r\n            drop_rate (float): dropout rate\r\n            attn_drop_rate (float): attention dropout rate\r\n            drop_path_rate (float): stochastic depth rate\r\n            embed_layer (msnn.Cell): patch embedding layer\r\n            norm_layer: (msnn.Cell): normalization layer\r\n            weight_init: (str): weight init scheme\r\n        \"\"\"\r\n        super().__init__()\r\n        self.num_classes = num_classes\r\n        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models\r\n        
self.num_tokens = 2 if distilled else 1\r\n        norm_layer = norm_layer or partial(msnn.LayerNorm, epsilon=1e-6)\r\n        act_layer = act_layer or msnn.GELU\r\n\r\n        self.patch_embed = embed_layer(\r\n            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\r\n        num_patches = self.patch_embed.num_patches\r\n        self.cls_token = ms.Parameter(ops.zeros((1, 1, embed_dim)))\r\n        self.dist_token = ms.Parameter(ops.zeros((1, 1, embed_dim))) if distilled else None\r\n        self.pos_embed = ms.Parameter(ops.zeros((1, num_patches + self.num_tokens, embed_dim)))\r\n        self.pos_drop = msnn.Dropout(p=drop_rate)\r\n\r\n        dpr = [x.item() for x in ops.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule\r\n        self.blocks = msnn.SequentialCell(*[\r\n            Block(\r\n                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\r\n                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)\r\n            for i in range(depth)])\r\n\r\n        self.extension = msnn.SequentialCell(*[\r\n            Block(\r\n                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\r\n                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)\r\n            for i in range(3)])\r\n        norm_layer_dim=[]\r\n        norm_layer_dim.append(embed_dim)\r\n        self.norm = norm_layer(norm_layer_dim)\r\n\r\n        # # Representation layer\r\n        # if representation_size and not distilled:\r\n        #     self.num_features = representation_size\r\n        #     self.pre_logits = msnn.SequentialCell(OrderedDict([\r\n        #         ('fc', msnn.Linear(embed_dim, representation_size)),\r\n        #         ('act', msnn.Tanh())\r\n        #     ]))\r\n        # else:\r\n        #     self.pre_logits = msnn.Identity()\r\n        #\r\n        # # Classifier head(s)\r\n        # self.head = msnn.Linear(self.num_features, num_classes) if num_classes > 0 else msnn.Identity()\r\n        # self.head_dist = None\r\n        # if distilled:\r\n        #     self.head_dist = msnn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else msnn.Identity()\r\n\r\n        self.init_weights(weight_init)\r\n\r\n    def init_weights(self, mode=''):\r\n        assert mode in ('jax', 'jax_nlhb', 'nlhb', '')\r\n        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.\r\n        trunc_normal_(self.pos_embed, std=.02)\r\n        if self.dist_token is not None:\r\n            trunc_normal_(self.dist_token, std=.02)\r\n        if mode.startswith('jax'):\r\n            # leave cls token as zeros to match jax impl\r\n            named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)\r\n        else:\r\n            trunc_normal_(self.cls_token, std=.02)\r\n            self.apply(_init_vit_weights)\r\n\r\n    def _init_weights(self, m):\r\n        # this fn left here for compat with downstream users\r\n        _init_vit_weights(m)\r\n\r\n    # @torch.jit.ignore()\r\n    def load_pretrained(self, checkpoint_path, prefix=''):\r\n        _load_weights(self, checkpoint_path, prefix)\r\n\r\n    # @torch.jit.ignore\r\n    def no_weight_decay(self):\r\n        return {'pos_embed', 'cls_token', 'dist_token'}\r\n\r\n    def get_classifier(self):\r\n        if self.dist_token is None:\r\n            return self.head\r\n        
else:\r\n            return self.head, self.head_dist\r\n\r\n    def reset_classifier(self, num_classes, global_pool=''):\r\n        self.num_classes = num_classes\r\n        self.head = msnn.Dense(self.embed_dim, num_classes) if num_classes > 0 else msnn.Identity()\r\n        if self.num_tokens == 2:\r\n            self.head_dist = msnn.Dense(self.embed_dim, self.num_classes) if num_classes > 0 else msnn.Identity()\r\n\r\n\r\ndef _init_vit_weights(Cell: msnn.Cell, name: str = '', head_bias: float = 0., jax_impl: bool = False):\r\n    \"\"\" ViT weight initialization\r\n    * When called without n, head_bias, jax_impl args it will behave exactly the same\r\n      as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).\r\n    * When called w/ valid n (Cell name) and jax_impl=True, will (hopefully) match JAX impl\r\n    \"\"\"\r\n    if isinstance(Cell, msnn.Dense):\r\n        if name.startswith('head'):\r\n            bias_shape = Cell.bias.shape\r\n            weight_shape = Cell.weight.shape\r\n            Cell.weight=initializer('zeros',weight_shape)\r\n            Cell.bias=initializer(Constant(head_bias),bias_shape)\r\n        elif name.startswith('pre_logits'):\r\n            bias_shape = Cell.bias.shape\r\n            weight_shape = Cell.weight.shape\r\n            lecun_normal_(Cell.weight)\r\n            Cell.bias=initializer('zeros',bias_shape)\r\n        else:\r\n            weight_shape = Cell.weight.shape\r\n            if jax_impl:\r\n                msnn.init.xavier_uniform_(Cell.weight)\r\n                if Cell.bias is not None:\r\n                    bias_shape = Cell.bias.shape\r\n                    if 'mlp' in name:\r\n                        Cell.bias=initializer('normal',bias_shape,sigma=1e-6)\r\n                    else:\r\n                        Cell.bias=initializer('zeros',bias_shape)\r\n            else:\r\n                trunc_normal_(Cell.weight, std=.02)\r\n                if Cell.bias is not None:\r\n                    bias_shape = Cell.bias.shape\r\n                    Cell.bias=initializer('zeros',bias_shape)\r\n    elif jax_impl and isinstance(Cell, msnn.Conv2d):\r\n        # NOTE conv was left to pytorch default in my original init\r\n        weight_shape = Cell.weight.shape\r\n        lecun_normal_(Cell.weight)\r\n        if Cell.bias is not None:\r\n            bias_shape = Cell.bias.shape\r\n            Cell.bias=initializer('zeros',bias_shape)\r\n\r\n\r\ndef _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):\r\n    \"\"\" Load weights from .npz checkpoints for official Google Brain Flax implementation\r\n    \"\"\"\r\n    import numpy as np\r\n\r\n    def _n2p(w, t=True):\r\n        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:\r\n            w = w.flatten()\r\n        if t:\r\n            if w.ndim == 4:\r\n                w = w.swapaxes([3, 2, 0, 1])\r\n            elif w.ndim == 3:\r\n                w = w.swapaxes([2, 0, 1])\r\n            elif w.ndim == 2:\r\n                w = w.swapaxes([1, 0])\r\n        return ops.from_numpy(w)\r\n\r\n    w = np.load(checkpoint_path)\r\n    if not prefix and 'opt/target/embedding/kernel' in w:\r\n        prefix = 'opt/target/'\r\n\r\n    if hasattr(model.patch_embed, 'backbone'):\r\n        # hybrid\r\n        backbone = model.patch_embed.backbone\r\n        stem_only = not hasattr(backbone, 'stem')\r\n        stem = backbone if stem_only else backbone.stem\r\n        
stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))\r\n        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))\r\n        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))\r\n        if not stem_only:\r\n            for i, stage in enumerate(backbone.stages):\r\n                for j, block in enumerate(stage.blocks):\r\n                    bp = f'{prefix}block{i + 1}/unit{j + 1}/'\r\n                    for r in range(3):\r\n                        getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))\r\n                        getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))\r\n                        getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))\r\n                    if block.downsample is not None:\r\n                        block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))\r\n                        block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))\r\n                        block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))\r\n        embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])\r\n    else:\r\n        embed_conv_w = adapt_input_conv(\r\n            model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))\r\n    model.patch_embed.proj.weight.copy_(embed_conv_w)\r\n    model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))\r\n    model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))\r\n    pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)\r\n    if pos_embed_w.shape != model.pos_embed.shape:\r\n        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights\r\n            pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)\r\n    model.pos_embed.copy_(pos_embed_w)\r\n    model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))\r\n    model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))\r\n    if isinstance(model.head, msnn.Dense) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:\r\n        model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))\r\n        model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))\r\n    if isinstance(getattr(model.pre_logits, 'fc', None), msnn.Dense) and f'{prefix}pre_logits/bias' in w:\r\n        model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))\r\n        model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))\r\n    for i, block in enumerate(model.blocks.children()):\r\n        block_prefix = f'{prefix}Transformer/encoderblock_{i}/'\r\n        mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'\r\n        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))\r\n        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))\r\n        block.attn.qkv.weight.copy_(ops.cat([\r\n            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))\r\n        block.attn.qkv.bias.copy_(ops.cat([\r\n            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))\r\n        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))\r\n        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))\r\n        for r in range(2):\r\n            getattr(block.mlp, f'fc{r + 
1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))\r\n            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))\r\n        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))\r\n        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))\r\n\r\n\r\ndef resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):\r\n    # Rescale the grid of position embeddings when loading from state_dict. Adapted from\r\n    # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224\r\n    print('Resized position embedding: %s to %s', posemb.shape, posemb_new.shape)\r\n    ntok_new = posemb_new.shape[1]\r\n    if num_tokens:\r\n        posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]\r\n        ntok_new -= num_tokens\r\n    else:\r\n        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]\r\n    gs_old = int(math.sqrt(len(posemb_grid)))\r\n    if not len(gs_new):  # backwards compatibility\r\n        gs_new = [int(math.sqrt(ntok_new))] * 2\r\n    assert len(gs_new) >= 2\r\n    print('Position embedding grid-size from %s to %s', [gs_old, gs_old], gs_new)\r\n    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)\r\n    posemb_grid = ops.interpolate(posemb_grid, size=gs_new, mode='bilinear')\r\n    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)\r\n    posemb = ops.cat([posemb_tok, posemb_grid], axis=1)\r\n    return posemb\r\n\r\n\r\ndef checkpoint_filter_fn(state_dict, model):\r\n    \"\"\" convert patch embedding weight from manual patchify + linear proj to conv\"\"\"\r\n    out_dict = {}\r\n    if 'model' in state_dict:\r\n        # For deit models\r\n        state_dict = state_dict['model']\r\n    for k, v in state_dict.items():\r\n        if 'patch_embed.proj.weight' in k and len(v.shape) < 4:\r\n            # For old models that I trained prior to conv based patchification\r\n            O, I, H, W = model.patch_embed.proj.weight.shape\r\n            v = v.reshape(O, -1, H, W)\r\n        elif k == 'pos_embed' and v.shape != model.pos_embed.shape:\r\n            # To resize pos embedding when using model at different size from pretrained weights\r\n            v = resize_pos_embed(\r\n                v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)\r\n        out_dict[k] = v\r\n    return out_dict\r\n\r\n\r\ndef _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):\r\n    if kwargs.get('features_only', None):\r\n        raise RuntimeError('features_only not implemented for Vision Transformer models.')\r\n\r\n    model = VisionTransformer(**kwargs)\r\n    if pretrained:\r\n        if 'npz' in pretrained:\r\n            model.load_pretrained(pretrained, prefix='')\r\n        else:\r\n            checkpoint = ms.load_checkpoint(pretrained, map_location=\"cpu\")\r\n            missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"model\"], strict=False)\r\n            print('Load pretrained model from: ' + pretrained)\r\n\r\n    return model\r\n\r\n\r\ndef vit_base_patch16_224(pretrained=False, **kwargs):\r\n    \"\"\"\r\n    ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).\r\n    \"\"\"\r\n    model_kwargs = dict(\r\n        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)\r\n    model = 
_create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs)\r\n    return model\r\n    \r\ndef vit_large_patch16_224(pretrained=False, **kwargs):\r\n    model_kwargs = dict(\r\n        patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)\r\n    model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs)\r\n    return model\r\n"
  },
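  {
    "path": "artrackv2_mindspore/examples/resize_pos_embed_sketch.py",
    "content": "\"\"\"Illustrative sketch, not part of the original repo: exercises resize_pos_embed\nfrom lib/models/ostrack/vit.py, which splits off the class token and bilinearly\nre-grids the patch position embeddings (here 14x14 -> 16x16, e.g. when\nfine-tuning at a larger input resolution). Sizes are assumptions.\"\"\"\nimport numpy as np\nimport mindspore as ms\nfrom lib.models.ostrack.vit import resize_pos_embed\n\nold = ms.Tensor(np.random.randn(1, 1 + 14 * 14, 768).astype(np.float32))  # pretrained (1, 197, 768)\nnew_like = ms.Tensor(np.zeros((1, 1 + 16 * 16, 768), dtype=np.float32))   # target layout (1, 257, 768)\n\nresized = resize_pos_embed(old, new_like, num_tokens=1, gs_new=[16, 16])\nassert resized.shape == (1, 257, 768)\nprint(resized.shape)\n"
  },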
  {
    "path": "artrackv2_mindspore/lib/models/ostrack/vit_ce.py",
    "content": "import math\r\nimport logging\r\nfrom functools import partial\r\nfrom collections import OrderedDict\r\nfrom copy import deepcopy\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom timm.models.layers import to_2tuple\r\n\r\nfrom lib.models.layers.patch_embed import PatchEmbed\r\nfrom .utils import combine_tokens, recover_tokens\r\nfrom .vit import VisionTransformer\r\nfrom ..layers.attn_blocks import CEBlock\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\nclass VisionTransformerCE(VisionTransformer):\r\n    \"\"\" Vision Transformer with candidate elimination (CE) module\r\n\r\n    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\r\n        - https://arxiv.org/abs/2010.11929\r\n\r\n    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`\r\n        - https://arxiv.org/abs/2012.12877\r\n    \"\"\"\r\n\r\n    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\r\n                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,\r\n                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,\r\n                 act_layer=None, weight_init='',\r\n                 ce_loc=None, ce_keep_ratio=None):\r\n        \"\"\"\r\n        Args:\r\n            img_size (int, tuple): input image size\r\n            patch_size (int, tuple): patch size\r\n            in_chans (int): number of input channels\r\n            num_classes (int): number of classes for classification head\r\n            embed_dim (int): embedding dimension\r\n            depth (int): depth of transformer\r\n            num_heads (int): number of attention heads\r\n            mlp_ratio (int): ratio of mlp hidden dim to embedding dim\r\n            qkv_bias (bool): enable bias for qkv if True\r\n            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\r\n            distilled (bool): model includes a distillation token and head as in DeiT models\r\n            drop_rate (float): dropout rate\r\n            attn_drop_rate (float): attention dropout rate\r\n            drop_path_rate (float): stochastic depth rate\r\n            embed_layer (nn.Module): patch embedding layer\r\n            norm_layer: (nn.Module): normalization layer\r\n            weight_init: (str): weight init scheme\r\n        \"\"\"\r\n        # super().__init__()\r\n        super().__init__()\r\n        if isinstance(img_size, tuple):\r\n            self.img_size = img_size\r\n        else:\r\n            self.img_size = to_2tuple(img_size)\r\n        self.patch_size = patch_size\r\n        self.in_chans = in_chans\r\n\r\n        self.num_classes = num_classes\r\n        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models\r\n        self.num_tokens = 2 if distilled else 1\r\n        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\r\n        act_layer = act_layer or nn.GELU\r\n\r\n        self.patch_embed = embed_layer(\r\n            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\r\n        num_patches = self.patch_embed.num_patches\r\n\r\n        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\r\n        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\r\n        self.pos_embed = 
nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\r\n        self.pos_drop = nn.Dropout(p=drop_rate)\r\n\r\n        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule\r\n        blocks = []\r\n        ce_index = 0\r\n        self.ce_loc = ce_loc\r\n        for i in range(depth):\r\n            ce_keep_ratio_i = 1.0\r\n            if ce_loc is not None and i in ce_loc:\r\n                ce_keep_ratio_i = ce_keep_ratio[ce_index]\r\n                ce_index += 1\r\n\r\n            blocks.append(\r\n                CEBlock(\r\n                    dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\r\n                    attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer,\r\n                    keep_ratio_search=ce_keep_ratio_i)\r\n            )\r\n\r\n        self.blocks = nn.Sequential(*blocks)\r\n        self.norm = norm_layer(embed_dim)\r\n\r\n        self.init_weights(weight_init)\r\n\r\n    def forward_features(self, z, x, mask_z=None, mask_x=None,\r\n                         ce_template_mask=None, ce_keep_rate=None,\r\n                         return_last_attn=False\r\n                         ):\r\n        B, H, W = x.shape[0], x.shape[2], x.shape[3]\r\n\r\n        x = self.patch_embed(x)\r\n        z = self.patch_embed(z)\r\n\r\n        # attention mask handling\r\n        # B, H, W\r\n        if mask_z is not None and mask_x is not None:\r\n            mask_z = F.interpolate(mask_z[None].float(), scale_factor=1. / self.patch_size).to(torch.bool)[0]\r\n            mask_z = mask_z.flatten(1).unsqueeze(-1)\r\n\r\n            mask_x = F.interpolate(mask_x[None].float(), scale_factor=1. / self.patch_size).to(torch.bool)[0]\r\n            mask_x = mask_x.flatten(1).unsqueeze(-1)\r\n\r\n            mask_x = combine_tokens(mask_z, mask_x, mode=self.cat_mode)\r\n            mask_x = mask_x.squeeze(-1)\r\n\r\n        if self.add_cls_token:\r\n            cls_tokens = self.cls_token.expand(B, -1, -1)\r\n            cls_tokens = cls_tokens + self.cls_pos_embed\r\n\r\n        z += self.pos_embed_z\r\n        x += self.pos_embed_x\r\n\r\n        if self.add_sep_seg:\r\n            x += self.search_segment_pos_embed\r\n            z += self.template_segment_pos_embed\r\n\r\n        x = combine_tokens(z, x, mode=self.cat_mode)\r\n        if self.add_cls_token:\r\n            x = torch.cat([cls_tokens, x], dim=1)\r\n\r\n        x = self.pos_drop(x)\r\n\r\n        lens_z = self.pos_embed_z.shape[1]\r\n        lens_x = self.pos_embed_x.shape[1]\r\n\r\n        global_index_t = torch.linspace(0, lens_z - 1, lens_z).to(x.device)\r\n        global_index_t = global_index_t.repeat(B, 1)\r\n\r\n        global_index_s = torch.linspace(0, lens_x - 1, lens_x).to(x.device)\r\n        global_index_s = global_index_s.repeat(B, 1)\r\n        removed_indexes_s = []\r\n        for i, blk in enumerate(self.blocks):\r\n            x, global_index_t, global_index_s, removed_index_s, attn = \\\r\n                blk(x, global_index_t, global_index_s, mask_x, ce_template_mask, ce_keep_rate)\r\n\r\n            if self.ce_loc is not None and i in self.ce_loc:\r\n                removed_indexes_s.append(removed_index_s)\r\n\r\n        x = self.norm(x)\r\n        lens_x_new = global_index_s.shape[1]\r\n        lens_z_new = global_index_t.shape[1]\r\n\r\n        z = x[:, :lens_z_new]\r\n        x = x[:, lens_z_new:]\r\n\r\n        if removed_indexes_s and removed_indexes_s[0] is not 
None:\r\n            removed_indexes_cat = torch.cat(removed_indexes_s, dim=1)\r\n\r\n            pruned_lens_x = lens_x - lens_x_new\r\n            pad_x = torch.zeros([B, pruned_lens_x, x.shape[2]], device=x.device)\r\n            x = torch.cat([x, pad_x], dim=1)\r\n            index_all = torch.cat([global_index_s, removed_indexes_cat], dim=1)\r\n            # recover original token order\r\n            C = x.shape[-1]\r\n            # x = x.gather(1, index_all.unsqueeze(-1).expand(B, -1, C).argsort(1))\r\n            x = torch.zeros_like(x).scatter_(dim=1, index=index_all.unsqueeze(-1).expand(B, -1, C).to(torch.int64), src=x)\r\n\r\n        x = recover_tokens(x, lens_z_new, lens_x, mode=self.cat_mode)\r\n\r\n        # re-concatenate with the template, which may be further used by other modules\r\n        x = torch.cat([z, x], dim=1)\r\n\r\n        aux_dict = {\r\n            \"attn\": attn,\r\n            \"removed_indexes_s\": removed_indexes_s,  # used for visualization\r\n        }\r\n\r\n        return x, aux_dict\r\n\r\n    def forward(self, z, x, ce_template_mask=None, ce_keep_rate=None,\r\n                tnc_keep_rate=None,\r\n                return_last_attn=False):\r\n\r\n        x, aux_dict = self.forward_features(z, x, ce_template_mask=ce_template_mask, ce_keep_rate=ce_keep_rate,)\r\n\r\n        return x, aux_dict\r\n\r\n\r\ndef _create_vision_transformer(pretrained=False, **kwargs):\r\n    model = VisionTransformerCE(**kwargs)\r\n\r\n    if pretrained:\r\n        if 'npz' in pretrained:\r\n            model.load_pretrained(pretrained, prefix='')\r\n        else:\r\n            checkpoint = torch.load(pretrained, map_location=\"cpu\")\r\n            missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"model\"], strict=False)\r\n            print('Load pretrained model from: ' + pretrained)\r\n\r\n    return model\r\n\r\n\r\ndef vit_base_patch16_224_ce(pretrained=False, **kwargs):\r\n    \"\"\" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).\r\n    \"\"\"\r\n    model_kwargs = dict(\r\n        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)\r\n    model = _create_vision_transformer(pretrained=pretrained, **model_kwargs)\r\n    return model\r\n\r\n\r\ndef vit_large_patch16_224_ce(pretrained=False, **kwargs):\r\n    \"\"\" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).\r\n    \"\"\"\r\n    model_kwargs = dict(\r\n        patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)\r\n    model = _create_vision_transformer(pretrained=pretrained, **model_kwargs)\r\n    return model\r\n"
  },
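  {
    "path": "artrackv2_mindspore/examples/ce_token_recovery_sketch.py",
    "content": "\"\"\"Illustrative sketch, not part of the original repo (PyTorch, matching\nvit_ce.py): reproduces the scatter trick at the end of forward_features that\nrestores the original search-token order after candidate elimination, leaving\nzeros at the pruned slots. Indices and payload values are toy assumptions.\"\"\"\nimport torch\n\nB, L, C = 1, 6, 2\nkept_idx = torch.tensor([[0, 2, 5]])     # global indices that survived CE pruning\nremoved_idx = torch.tensor([[1, 3, 4]])  # indices dropped by the CE blocks\nkept = torch.tensor([[[1., 1.], [2., 2.], [3., 3.]]])  # surviving token payloads\n\npad = torch.zeros(B, L - kept.shape[1], C)  # zero padding for the pruned tokens\nx = torch.cat([kept, pad], dim=1)\nindex_all = torch.cat([kept_idx, removed_idx], dim=1)\nrestored = torch.zeros_like(x).scatter_(\n    dim=1, index=index_all.unsqueeze(-1).expand(B, -1, C).to(torch.int64), src=x)\nprint(restored[0, :, 0].tolist())  # [1.0, 0.0, 2.0, 0.0, 0.0, 3.0]\n"
  },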
  {
    "path": "artrackv2_mindspore/lib/models/timm.py",
    "content": "import mindspore as ms\nimport mindspore.nn as nn\nimport mindspore.ops as ops\nimport math\nimport numpy as np\ndef to_2tuple(input):\n    if isinstance(input,tuple):\n        if len(input)==2:\n            return input\n        else:\n            raise ValueError(\"The tuple's length is not 2!\")\n    elif isinstance(input,list):\n        if len(input)==2:\n            return tuple(x for x in input)\n        else:\n            raise ValueError(\"The List's length is not 2!\")\n    else:\n        return (input,input)\nclass Mlp(nn.Cell):\n    \"\"\" MLP as used in Vision Transformer, MLP-Mixer and related networks\n    \"\"\"\n    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        drop_probs = to_2tuple(drop)\n\n        self.fc1 = nn.Dense(in_features, hidden_features,has_bias=True)\n        if act_layer==nn.GELU:\n            self.act = act_layer(approximate=False)\n        self.drop1 = nn.Dropout(p=drop_probs[0])\n        self.fc2 = nn.Dense(hidden_features, out_features,has_bias=True)\n        self.drop2 = nn.Dropout(p=drop_probs[1])\n\n    def construct(self, x):\n        x = self.fc1(x)\n        x = self.act(x)\n        x = self.drop1(x)\n        x = self.fc2(x)\n        x = self.drop2(x)\n        return x\n\ndef drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):\n    \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n\n    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,\n    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...\n    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for\n    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use\n    'survival rate' as the argument.\n\n    \"\"\"\n    if drop_prob == 0. or not training:\n        return x\n    keep_prob = 1 - drop_prob\n    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets\n    random_tensor = x.new_empty(shape).bernoulli_(keep_prob)\n    if keep_prob > 0.0 and scale_by_keep:\n        random_tensor.div_(keep_prob)\n    return x * random_tensor\n\n\nclass DropPath(nn.Cell):\n    \"\"\"Drop paths (Stochastic Depth) per sample  (when applied in main path of residual blocks).\n    \"\"\"\n    def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):\n        super(DropPath, self).__init__()\n        self.drop_prob = drop_prob\n        self.scale_by_keep = scale_by_keep\n\n    def construct(self, x):\n        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)\n\n    def extra_repr(self):\n        return f'drop_prob={round(self.drop_prob,3):0.3f}'\ndef _trunc_normal_(tensor, mean, std, a, b):\n    # Cut & paste from PyTorch official master until it's in a few official releases - RW\n    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n    def norm_cdf(x):\n        # Computes standard normal cumulative distribution function\n        return (1. + math.erf(x / math.sqrt(2.))) / 2.\n\n    if (mean < a - 2 * std) or (mean > b + 2 * std):\n        warnings.warn(\"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. 
\"\n                      \"The distribution of values may be incorrect.\",\n                      stacklevel=2)\n\n    # Values are generated by using a truncated uniform distribution and\n    # then using the inverse CDF for the normal distribution.\n    # Get upper and lower cdf values\n    l = norm_cdf((a - mean) / std)\n    u = norm_cdf((b - mean) / std)\n\n    # Uniformly fill tensor with values from [l, u], then translate to\n    # [2l-1, 2u-1].\n    shape = tensor.shape\n    minval = ms.Tensor(2*l-1)\n    maxval = ms.Tensor(2*u-1)\n    x = ops.uniform(shape,minval, maxval)\n    tensor = x\n\n    # Use inverse cdf transform for normal distribution to get truncated\n    # standard normal\n    tensor = tensor.erfinv()\n\n    # Transform to proper mean, std\n    tenspr = tensor.mul(std * math.sqrt(2.))\n    tensor = tensor.add(mean)\n\n    # Clamp to ensure it's in the proper range\n    tensor = tensor.clamp(min=a, max=b)\n    return tensor\n\n\ndef trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):\n    # type: (Tensor, float, float, float, float) -> Tensor\n    r\"\"\"Fills the input Tensor with values drawn from a truncated\n    normal distribution. The values are effectively drawn from the\n    normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n    with values outside :math:`[a, b]` redrawn until they are within\n    the bounds. The method used for generating the random values works\n    best when :math:`a \\leq \\text{mean} \\leq b`.\n\n    NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are\n    applied while sampling the normal with mean/std applied, therefore a, b args\n    should be adjusted to match the range of mean, std args.\n\n    Args:\n        tensor: an n-dimensional `torch.Tensor`\n        mean: the mean of the normal distribution\n        std: the standard deviation of the normal distribution\n        a: the minimum cutoff value\n        b: the maximum cutoff value\n    Examples:\n        >>> w = torch.empty(3, 5)\n        >>> nn.init.trunc_normal_(w)\n    \"\"\"\n    return _trunc_normal_(tensor, mean, std, a, b)\n\ndef trunc_normal_tf_(tensor, mean=0., std=1., a=-2., b=2.):\n    # type: (Tensor, float, float, float, float) -> Tensor\n    r\"\"\"Fills the input Tensor with values drawn from a truncated\n    normal distribution. The values are effectively drawn from the\n    normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n    with values outside :math:`[a, b]` redrawn until they are within\n    the bounds. 
The method used for generating the random values works\n    best when :math:`a \\leq \\text{mean} \\leq b`.\n\n    NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the\n    bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0\n    and the result is subsquently scaled and shifted by the mean and std args.\n\n    Args:\n        tensor: an n-dimensional `torch.Tensor`\n        mean: the mean of the normal distribution\n        std: the standard deviation of the normal distribution\n        a: the minimum cutoff value\n        b: the maximum cutoff value\n    Examples:\n        >>> w = torch.empty(3, 5)\n        >>> nn.init.trunc_normal_(w)\n    \"\"\"\n    _trunc_normal_(tensor, 0, 1.0, a, b)\n    tensor.mul_(std).add_(mean)\n    return tensor\ndef variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):\n    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n    if mode == 'fan_in':\n        denom = fan_in\n    elif mode == 'fan_out':\n        denom = fan_out\n    elif mode == 'fan_avg':\n        denom = (fan_in + fan_out) / 2\n\n    variance = scale / denom\n\n    if distribution == \"truncated_normal\":\n        # constant is stddev of standard normal truncated to (-2, 2)\n        trunc_normal_tf_(tensor, std=math.sqrt(variance) / .87962566103423978)\n    elif distribution == \"normal\":\n        tensor.normal_(std=math.sqrt(variance))\n    elif distribution == \"uniform\":\n        bound = math.sqrt(3 * variance)\n        tensor.uniform_(-bound, bound)\n    else:\n        raise ValueError(f\"invalid distribution {distribution}\")\ndef lecun_normal_(tensor):\n    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')\ndef adapt_input_conv(in_chans, conv_weight):\n    conv_type = conv_weight.dtype\n    conv_weight = conv_weight.float()  # Some weights are in torch.half, ensure it's float for sum on CPU\n    O, I, J, K = conv_weight.shape\n    if in_chans == 1:\n        if I > 3:\n            assert conv_weight.shape[1] % 3 == 0\n            # For models with space2depth stems\n            conv_weight = conv_weight.reshape(O, I // 3, 3, J, K)\n            conv_weight = conv_weight.sum(dim=2, keepdim=False)\n        else:\n            conv_weight = conv_weight.sum(dim=1, keepdim=True)\n    elif in_chans != 3:\n        if I != 3:\n            raise NotImplementedError('Weight format not supported by conversion.')\n        else:\n            # NOTE this strategy should be better than random init, but there could be other combinations of\n            # the original RGB input layer weights that'd work better for specific cases.\n            repeat = int(math.ceil(in_chans / 3))\n            conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :]\n            conv_weight *= (3 / float(in_chans))\n    conv_weight = conv_weight.to(conv_type)\n    return conv_weight\n\n"
  },
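  {
    "path": "artrackv2_mindspore/examples/drop_path_sketch.py",
    "content": "\"\"\"Illustrative sketch, not part of the original repo: sanity-checks the\nstochastic-depth helper in lib/models/timm.py. With scale_by_keep=True the\nsurviving samples are rescaled by 1/keep_prob, so the output mean stays close\nto the input mean. Batch size and tolerance are arbitrary assumptions.\"\"\"\nimport numpy as np\nimport mindspore as ms\nfrom lib.models.timm import DropPath\n\ndp = DropPath(drop_prob=0.25)\ndp.set_train(True)  # drop_path is the identity unless the cell is in training mode\n\nx = ms.Tensor(np.ones((512, 4, 8), dtype=np.float32))\ny = dp(x)  # each sample row is either all zeros or scaled by 1/0.75\nprint(abs(float(y.mean()) - 1.0) < 0.1)  # True with high probability\n"
  },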
  {
    "path": "artrackv2_mindspore/lib/train/__init__.py",
    "content": "# from .admin.multigpu import MultiGPU\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/_init_paths.py",
    "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path as osp\nimport sys\n\n\ndef add_path(path):\n    if path not in sys.path:\n        sys.path.insert(0, path)\n\n\nthis_dir = osp.dirname(__file__)\n\nprj_path = osp.join(this_dir, '../..')\nadd_path(prj_path)\n"
  },
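  {
    "path": "artrackv2_mindspore/examples/init_paths_usage_sketch.py",
    "content": "\"\"\"Illustrative sketch, not part of the original repo: scripts that live next to\nlib/train/_init_paths.py import it purely for its side effect of prepending the\nproject root to sys.path, after which project-level imports resolve. Assumes the\nscript is run from the lib/train directory.\"\"\"\nimport _init_paths  # noqa: F401  side effect: adds the project root to sys.path\nfrom lib.train.actors import BaseActor\n\nprint(BaseActor)\n"
  },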
  {
    "path": "artrackv2_mindspore/lib/train/actors/__init__.py",
    "content": "from .base_actor import BaseActor\nfrom .ostrack import OSTrackActor\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/actors/base_actor.py",
    "content": "from lib.utils import TensorDict\n\n\nclass BaseActor:\n    \"\"\" Base class for actor. The actor class handles the passing of the data through the network\n    and calculation the loss\"\"\"\n    def __init__(self, net, objective):\n        \"\"\"\n        args:\n            net - The network to train\n            objective - The loss function\n        \"\"\"\n        self.net = net\n        self.objective = objective\n\n    def __call__(self, data: TensorDict):\n        \"\"\" Called in each training iteration. Should pass in input data through the network, calculate the loss, and\n        return the training stats for the input data\n        args:\n            data - A TensorDict containing all the necessary data blocks.\n\n        returns:\n            loss    - loss for the input data\n            stats   - a dict containing detailed losses\n        \"\"\"\n        raise NotImplementedError\n\n    def to(self, device):\n        \"\"\" Move the network to device\n        args:\n            device - device to use. 'cpu' or 'cuda'\n        \"\"\"\n        self.net.to(device)\n\n    def train(self, mode=True):\n        \"\"\" Set whether the network is in train mode.\n        args:\n            mode (True) - Bool specifying whether in training mode.\n        \"\"\"\n        self.net.train(mode)\n\n    def eval(self):\n        \"\"\" Set network to eval mode\"\"\"\n        self.train(False)"
  },
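  {
    "path": "artrackv2_mindspore/examples/base_actor_subclass_sketch.py",
    "content": "\"\"\"Illustrative sketch, not part of the original repo: the BaseActor contract is\nthat __call__ runs the network on a TensorDict and returns (loss, stats). The\nfield names, network call, and criterion below are assumptions, not the repo's\nactual OSTrackActor.\"\"\"\nfrom lib.utils import TensorDict\nfrom lib.train.actors import BaseActor\n\n\nclass ToyActor(BaseActor):  # hypothetical example subclass\n    def __call__(self, data: TensorDict):\n        pred = self.net(data['search_images'])        # forward pass through self.net\n        loss = self.objective(pred, data['gt_bbox'])  # self.objective is the loss criterion\n        stats = {'Loss/total': float(loss)}           # detailed losses for logging\n        return loss, stats\n"
  },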
  {
    "path": "artrackv2_mindspore/lib/train/actors/ostrack.py",
    "content": "from . import BaseActor\nfrom lib.utils.misc import NestedTensor\nfrom lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy\nimport torch\nimport math\nimport numpy as np\nimport numpy\nimport cv2\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as tvisf\nimport lib.train.data.bounding_box_utils as bbutils\nfrom lib.utils.merge import merge_template_search\nfrom torch.distributions.categorical import Categorical\nfrom ...utils.heapmap_utils import generate_heatmap\nfrom ...utils.ce_utils import generate_mask_cond, adjust_keep_rate\n\ndef IoU(rect1, rect2):\n    \"\"\" caculate interection over union\n    Args:\n        rect1: (x1, y1, x2, y2)\n        rect2: (x1, y1, x2, y2)\n    Returns:\n        iou\n    \"\"\"\n    # overlap\n    x1, y1, x2, y2 = rect1[0], rect1[1], rect1[2], rect1[3]\n    tx1, ty1, tx2, ty2 = rect2[0], rect2[1], rect2[2], rect2[3]\n\n    xx1 = np.maximum(tx1, x1)\n    yy1 = np.maximum(ty1, y1)\n    xx2 = np.minimum(tx2, x2)\n    yy2 = np.minimum(ty2, y2)\n\n    ww = np.maximum(0, xx2 - xx1)\n    hh = np.maximum(0, yy2 - yy1)\n\n    area = (x2-x1) * (y2-y1)\n    target_a = (tx2-tx1) * (ty2 - ty1)\n    inter = ww * hh\n    iou = inter / (area + target_a - inter)\n    return iou\n\ndef fp16_clamp(x, min=None, max=None):\n    if not x.is_cuda and x.dtype == torch.float16:\n        # clamp for cpu float16, tensor fp16 has no clamp implementation\n        return x.float().clamp(min, max).half()\n\n    return x.clamp(min, max)\n    \ndef generate_sa_simdr(joints):\n    '''\n    :param joints:  [num_joints, 3]\n    :param joints_vis: [num_joints, 3]\n    :return: target, target_weight(1: visible, 0: invisible)\n    '''\n    num_joints = 48\n    image_size = [256, 256]\n    simdr_split_ratio = 1.5625\n    sigma = 6\n\n    target_x1 = np.zeros((num_joints,\n                              int(image_size[0] * simdr_split_ratio)),\n                             dtype=np.float32)\n    target_y1 = np.zeros((num_joints,\n                              int(image_size[1] * simdr_split_ratio)),\n                             dtype=np.float32)\n    target_x2 = np.zeros((num_joints,\n                              int(image_size[0] * simdr_split_ratio)),\n                             dtype=np.float32)\n    target_y2 = np.zeros((num_joints,\n                              int(image_size[1] * simdr_split_ratio)),\n                             dtype=np.float32)\n    zero_4_begin = np.zeros((num_joints, 1), dtype=np.float32)\n\n    tmp_size = sigma * 3\n\n    for joint_id in range(num_joints):\n\n        mu_x1 = joints[joint_id][0]\n        mu_y1 = joints[joint_id][1]\n        mu_x2 = joints[joint_id][2]\n        mu_y2 = joints[joint_id][3]\n\n        x1 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32)\n        y1 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32)\n        x2 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32)\n        y2 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32)\n\n        target_x1[joint_id] = (np.exp(- ((x1 - mu_x1) ** 2) / (2 * sigma ** 2))) / (\n                        sigma * np.sqrt(np.pi * 2))\n        target_y1[joint_id] = (np.exp(- ((y1 - mu_y1) ** 2) / (2 * sigma ** 2))) / (\n                        sigma * np.sqrt(np.pi * 2))\n        target_x2[joint_id] = (np.exp(- ((x2 - mu_x2) ** 2) / (2 * sigma ** 2))) / (\n                        sigma * np.sqrt(np.pi * 2))\n        target_y2[joint_id] = (np.exp(- ((y2 - mu_y2) ** 2) / (2 * sigma 
** 2))) / (\n                        sigma * np.sqrt(np.pi * 2))\n    return target_x1, target_y1, target_x2, target_y2\n\n# angle cost\ndef SIoU_loss(test1, test2, theta=4):\n    eps = 1e-7\n    cx_pred = (test1[:, 0] + test1[:, 2]) / 2\n    cy_pred = (test1[:, 1] + test1[:, 3]) / 2\n    cx_gt = (test2[:, 0] + test2[:, 2]) / 2\n    cy_gt = (test2[:, 1] + test2[:, 3]) / 2\n\n    dist = ((cx_pred - cx_gt)**2 + (cy_pred - cy_gt)**2) ** 0.5\n    ch = torch.max(cy_gt, cy_pred) - torch.min(cy_gt, cy_pred)\n    x = ch / (dist + eps)\n\n    angle = 1 - 2*torch.sin(torch.arcsin(x)-torch.pi/4)**2\n    # distance cost\n    xmin = torch.min(test1[:, 0], test2[:, 0])\n    xmax = torch.max(test1[:, 2], test2[:, 2])\n    ymin = torch.min(test1[:, 1], test2[:, 1])\n    ymax = torch.max(test1[:, 3], test2[:, 3])\n    cw = xmax - xmin\n    ch = ymax - ymin\n    px = ((cx_gt - cx_pred) / (cw+eps))**2\n    py = ((cy_gt - cy_pred) / (ch+eps))**2\n    gama = 2 - angle\n    dis = (1 - torch.exp(-1 * gama * px)) + (1 - torch.exp(-1 * gama * py))\n\n    #shape cost\n    w_pred = test1[:, 2] - test1[:, 0]\n    h_pred = test1[:, 3] - test1[:, 1]\n    w_gt = test2[:, 2] - test2[:, 0]\n    h_gt = test2[:, 3] - test2[:, 1]\n    ww = torch.abs(w_pred - w_gt) / (torch.max(w_pred, w_gt) + eps)\n    wh = torch.abs(h_gt - h_pred) / (torch.max(h_gt, h_pred) + eps)\n    omega = (1 - torch.exp(-1 * wh)) ** theta + (1 - torch.exp(-1 * ww)) ** theta\n\n    #IoU loss\n    lt = torch.max(test1[..., :2], test2[..., :2])  # [B, rows, 2]\n    rb = torch.min(test1[..., 2:], test2[..., 2:])  # [B, rows, 2]\n\n    wh = fp16_clamp(rb - lt, min=0)\n    overlap = wh[..., 0] * wh[..., 1]\n    area1 = (test1[..., 2] - test1[..., 0]) * (\n            test1[..., 3] - test1[..., 1])\n    area2 = (test2[..., 2] - test2[..., 0]) * (\n            test2[..., 3] - test2[..., 1])\n    iou = overlap / (area1 + area2 - overlap)\n\n    SIoU = 1 - iou + (omega + dis) / 2\n    return SIoU, iou\n    \ndef ciou(pred, target, eps=1e-7):\n    # overlap\n    lt = torch.max(pred[:, :2], target[:, :2])\n    rb = torch.min(pred[:, 2:], target[:, 2:])\n    wh = (rb - lt).clamp(min=0)\n    overlap = wh[:, 0] * wh[:, 1]\n\n    # union\n    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])\n    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])\n    union = ap + ag - overlap + eps\n\n    # IoU\n    ious = overlap / union\n\n    # enclose area\n    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])\n    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])\n    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)\n\n    cw = enclose_wh[:, 0]\n    ch = enclose_wh[:, 1]\n\n    c2 = cw**2 + ch**2 + eps\n\n    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]\n    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]\n    b2_x1, b2_y1 = target[:, 0], target[:, 1]\n    b2_x2, b2_y2 = target[:, 2], target[:, 3]\n\n    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\n    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\n\n    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4\n    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4\n    rho2 = left + right\n\n    factor = 4 / math.pi**2\n    v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\n\n    # CIoU\n    cious = ious - (rho2 / c2 + v**2 / (1 - ious + v))\n    return cious, ious\n\nclass OSTrackActor(BaseActor):\n    \"\"\" Actor for training OSTrack models \"\"\"\n\n    def __init__(self, net, objective, loss_weight, settings, bins, search_size, cfg=None):\n        super().__init__(net, objective)\n        
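\n        # Descriptive note (added; inferred from how these fields are used in\n        # this actor): box coordinates are quantized into 'bins' tokens per unit\n        # interval and 'range' widens the span representable around the search\n        # crop, so the coordinate vocabulary holds bins*range slots plus a few\n        # special tokens; 'pre_num' is the number of previous-frame boxes kept\n        # as motion context for the sequence decoder.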
\n        self.loss_weight = loss_weight\n        self.settings = settings\n        self.bs = self.settings.batchsize  # batch size\n        self.cfg = cfg\n        self.bins = bins\n        self.search_size = search_size\n        self.logsoftmax = torch.nn.LogSoftmax(dim=1)\n        self.focal = None\n        self.range = 2\n        self.loss_weight['KL'] = 0\n        self.loss_weight['focal'] = 0\n        self.pre_num = 7\n        self.pre_bbox = None\n        self.x_feat_rem = None\n\n    def __call__(self, data):\n        \"\"\"\n        args:\n            data - The input data, should contain the fields 'template', 'search', 'gt_bbox'.\n            template_images: (N_t, batch, 3, H, W)\n            search_images: (N_s, batch, 3, H, W)\n        returns:\n            loss    - the training loss\n            status  - dict containing detailed losses\n        \"\"\"\n        # forward pass\n        out_dict = self.forward_pass(data)\n\n        # compute losses\n        loss, status = self.compute_losses(out_dict, data)\n\n        return loss, status\n\n    def _bbox_clip(self, cx, cy, width, height, boundary):\n        cx = max(0, min(cx, boundary[1]))\n        cy = max(0, min(cy, boundary[0]))\n        width = max(10, min(width, boundary[1]))\n        height = max(10, min(height, boundary[0]))\n        return cx, cy, width, height\n\n    def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans):\n        \"\"\"\n        args:\n            im: bgr based image\n            pos: center position\n            model_sz: exemplar size\n            original_sz: original size\n            avg_chans: channel average\n        \"\"\"\n        if isinstance(pos, float):\n            pos = [pos, pos]\n        sz = original_sz\n        im_sz = im.shape\n        c = (original_sz + 1) / 2\n        # context_xmin = round(pos[0] - c) # py2 and py3 round\n        context_xmin = np.floor(pos[0] - c + 0.5)\n        context_xmax = context_xmin + sz - 1\n        # context_ymin = round(pos[1] - c)\n        context_ymin = np.floor(pos[1] - c + 0.5)\n        context_ymax = context_ymin + sz - 1\n        left_pad = int(max(0., -context_xmin))\n        top_pad = int(max(0., -context_ymin))\n        right_pad = int(max(0., context_xmax - im_sz[1] + 1))\n        bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))\n\n        context_xmin = context_xmin + left_pad\n        context_xmax = context_xmax + left_pad\n        context_ymin = context_ymin + top_pad\n        context_ymax = context_ymax + top_pad\n\n        r, c, k = im.shape\n        if any([top_pad, bottom_pad, left_pad, right_pad]):\n            size = (r + top_pad + bottom_pad, c + left_pad + right_pad, k)\n            te_im = np.zeros(size, np.uint8)\n            te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im\n            if top_pad:\n                te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans\n            if bottom_pad:\n                te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans\n            if left_pad:\n                te_im[:, 0:left_pad, :] = avg_chans\n            if right_pad:\n                te_im[:, c + left_pad:, :] = avg_chans\n            im_patch = te_im[int(context_ymin):int(context_ymax + 1),\n                       int(context_xmin):int(context_xmax + 1), :]\n        else:\n            im_patch = im[int(context_ymin):int(context_ymax + 1),\n                       int(context_xmin):int(context_xmax + 1), :]\n\n        if not np.array_equal(model_sz, original_sz):\n            try:\n                im_patch = cv2.resize(im_patch, (model_sz, model_sz))\n            except cv2.error:\n                return None\n        im_patch = im_patch.transpose(2, 0, 1)\n        im_patch = im_patch[np.newaxis, :, :, :]\n        im_patch = im_patch.astype(np.float32)\n        im_patch = torch.from_numpy(im_patch)\n        im_patch = im_patch.cuda()\n        return im_patch\n\n    def batch_init(self, images, template_bbox, initial_bbox) -> dict:\n        self.frame_num = 1\n        self.device = 'cuda'\n        # Convert bbox (x1, y1, w, h) -> (cx, cy, w, h)\n\n        template_bbox_1 = template_bbox[:, 0]\n        template_bbox_2 = template_bbox[:, 1]\n\n        template_bbox_1 = bbutils.batch_xywh2center2(template_bbox_1) # ndarray:(2*num_seq,4)\n        template_bbox_2 = bbutils.batch_xywh2center2(template_bbox_2) # ndarray:(2*num_seq,4)\n\n        initial_bbox = bbutils.batch_xywh2center2(initial_bbox) # ndarray:(2*num_seq,4)\n        self.center_pos = initial_bbox[:, :2] # ndarray:(2*num_seq,2)\n        self.size = initial_bbox[:, 2:] # ndarray:(2*num_seq,2)\n        self.pre_bbox = initial_bbox\n        for i in range(self.pre_num - 1):\n            self.pre_bbox = numpy.concatenate((self.pre_bbox, initial_bbox), axis=1)\n        #print(self.pre_bbox.shape)\n\n        template_factor = self.cfg.DATA.TEMPLATE.FACTOR\n        w_z_1 = template_bbox_1[:, 2] * template_factor  # ndarray:(2*num_seq)\n        h_z_1 = template_bbox_1[:, 3] * template_factor  # ndarray:(2*num_seq)\n        s_z_1 = np.ceil(np.sqrt(w_z_1 * h_z_1))  # ndarray:(2*num_seq)\n\n        w_z_2 = template_bbox_2[:, 2] * template_factor  # ndarray:(2*num_seq)\n        h_z_2 = template_bbox_2[:, 3] * template_factor  # ndarray:(2*num_seq)\n        s_z_2 = np.ceil(np.sqrt(w_z_2 * h_z_2))  # ndarray:(2*num_seq)\n\n        self.channel_average = []\n        for img in images:\n            self.channel_average.append(np.mean(img[0], axis=(0, 1)))\n            self.channel_average.append(np.mean(img[1], axis=(0, 1)))\n        self.channel_average = np.array(self.channel_average)  # ndarray:(2*num_seq,3)\n\n        # get crop\n        z_crop_list = []\n        z_1_list = []\n        z_2_list = []\n        for i in range(len(images)):\n            here_crop_1 = self.get_subwindow(images[i][0], template_bbox_1[i, :2],\n                                           self.cfg.DATA.TEMPLATE.SIZE, s_z_1[i], self.channel_average[2*i])\n            here_crop_2 = self.get_subwindow(images[i][1], template_bbox_2[i, :2],\n                                           self.cfg.DATA.TEMPLATE.SIZE, s_z_2[i], self.channel_average[2*i+1])\n            z_crop_1 = here_crop_1.float().mul(1.0 / 255.0).clamp(0.0, 1.0)\n            z_crop_2 = here_crop_2.float().mul(1.0 / 255.0).clamp(0.0, 1.0)\n            self.mean = [0.485, 0.456, 0.406]\n            self.std = [0.229, 0.224, 0.225]\n            self.inplace = False\n            z_crop_1[0] = tvisf.normalize(z_crop_1[0], self.mean, self.std, self.inplace)\n            z_crop_2[0] = tvisf.normalize(z_crop_2[0], self.mean, self.std, self.inplace)\n            z_1_list.append(z_crop_1.unsqueeze(1).clone())\n            z_2_list.append(z_crop_2.unsqueeze(1).clone())\n            z_crop = torch.concat([z_crop_1.unsqueeze(1), z_crop_2.unsqueeze(1)], dim=1)\n            z_crop_list.append(z_crop.clone())\n        z_crop = torch.cat(z_crop_list, dim=0)  # Tensor(2*num_seq,3,128,128)\n        z_1_crop = torch.cat(z_1_list, dim=0)\n        z_2_crop = torch.cat(z_2_list, dim=0)\n        z_2_crop = z_2_crop.squeeze(1).to(self.net.module.backbone.word_embeddings.weight)\n        z_2_feat = self.net.module.backbone.patch_embed(z_2_crop)\n\n        out = {'template_images': z_crop, \"z_1\":z_1_crop, \"z_2\":z_2_crop, \"z_2_feat\":z_2_feat}\n        return out\n\n    def batch_track(self, img, gt_boxes, template, dz_feat, action_mode='max') -> dict:\n        search_factor = self.cfg.DATA.SEARCH.FACTOR\n        w_x = self.size[:, 0] * search_factor\n        h_x = self.size[:, 1] * search_factor\n        s_x = np.ceil(np.sqrt(w_x * h_x))\n\n        gt_boxes_corner = bbutils.batch_xywh2corner(gt_boxes)  # ndarray:(2*num_seq,4)\n        initial_bbox = bbutils.batch_xywh2center2(gt_boxes)\n\n        x_crop_list = []\n        gt_in_crop_list = []\n        pre_seq_list = []\n        pre_seq_in_list = []\n        x_feat_list = []\n        target_in_search_list = []\n        update_feat_list = []\n        for i in range(len(img)):\n\n            template_factor = self.cfg.DATA.TEMPLATE.FACTOR\n            w_z_1 = initial_bbox[:, 2] * template_factor  # ndarray:(2*num_seq)\n            h_z_1 = initial_bbox[:, 3] * template_factor  # ndarray:(2*num_seq)\n            s_z_1 = np.ceil(np.sqrt(w_z_1 * h_z_1))  # ndarray:(2*num_seq)\n\n            channel_avg = np.mean(img[i], axis=(0, 1))\n            target_in_search = self.get_subwindow(img[i], initial_bbox[i, :2], self.cfg.DATA.TEMPLATE.SIZE,\n                                                  round(s_z_1[i]), channel_avg)\n\n            x_crop = self.get_subwindow(img[i], self.center_pos[i], self.cfg.DATA.SEARCH.SIZE,\n                                        round(s_x[i]), channel_avg)\n            if x_crop is None:\n                return None\n            if target_in_search is None:\n                return None\n            for q in range(self.pre_num):\n                pre_seq_temp = bbutils.batch_center2corner(self.pre_bbox[:, 0+4*q:4+4*q])\n                if q == 0:\n                    pre_seq = pre_seq_temp\n                else:\n                    pre_seq = numpy.concatenate((pre_seq, pre_seq_temp), axis=1)\n            #pre_seq = bbutils.batch_center2corner(self.pre_bbox) #ndarray:(x1 y1 x2 y2)\n\n            if gt_boxes_corner is not None and np.sum(np.abs(gt_boxes_corner[i] - np.zeros(4))) > 10:\n                pre_in = np.zeros(4 * self.pre_num)\n                for w in range(self.pre_num):\n                    #print(pre_seq[i, 0+w*4:2+w*4].shape)\n                    #print(self.center_pos[i].shape)\n                    #print(pre_in[0+w*4:2+w*4].shape)\n                    pre_in[0+w*4:2+w*4] = pre_seq[i, 0+w*4:2+w*4] - self.center_pos[i]\n                    pre_in[2+w*4:4+w*4] = pre_seq[i, 2+w*4:4+w*4] - self.center_pos[i]\n                    pre_in[0+w*4:4+w*4] = pre_in[0+w*4:4+w*4] * (self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2\n                    #print(pre_in)\n                    pre_in[0+w*4:4+w*4] = pre_in[0+w*4:4+w*4] / self.cfg.DATA.SEARCH.SIZE\n\n                pre_seq_list.append(pre_in)\n                gt_in_crop = np.zeros(4)\n                gt_in_crop[:2] = gt_boxes_corner[i, :2] - self.center_pos[i]\n                gt_in_crop[2:] = gt_boxes_corner[i, 2:] - self.center_pos[i]\n                gt_in_crop = gt_in_crop * (self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2\n                gt_in_crop[2:] = gt_in_crop[2:] - gt_in_crop[:2]  # (x1,y1,x2,y2) to (x1,y1,w,h)\n                gt_in_crop_list.append(gt_in_crop)\n            else:\n                pre_in 
= np.zeros(4 * self.pre_num)\n                #pre_in[:2] = pre_seq[i, :2] - self.center_pos[i]\n                #pre_in[2:] = pre_seq[i, 2:] - self.center_pos[i]\n                #pre_in = pre_in * (self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2\n                pre_seq_list.append(pre_in)\n                gt_in_crop_list.append(np.zeros(4))\n            #print(gt_in_crop)\n            pre_seq_input = torch.from_numpy(pre_in).clamp(-0.5 * self.range + 0.5, 0.5 + self.range * 0.5)\n            pre_seq_input = (pre_seq_input + (0.5 * self.range - 0.5)) * (self.bins - 1)\n            pre_seq_in_list.append(pre_seq_input.clone())\n            x_crop = x_crop.float().mul(1.0 / 255.0).clamp(0.0, 1.0)\n            target_in_search = target_in_search.float().mul(1.0 / 255.0).clamp(0.0, 1.0)\n            rem_x = x_crop\n            x_crop[0] = tvisf.normalize(x_crop[0], self.mean, self.std, self.inplace)\n            target_in_search[0] = tvisf.normalize(target_in_search[0], self.mean, self.std, self.inplace)\n            x_crop_list.append(x_crop.clone())\n            target_in_search_list.append(target_in_search.clone())\n\n        x_crop = torch.cat(x_crop_list, dim=0)\n        target_in_search = torch.cat(target_in_search_list, dim=0)\n        pre_seq_output = torch.cat(pre_seq_in_list, dim=0).reshape(-1, 4*self.pre_num)\n        pre = torch.zeros_like(pre_seq_output)\n        #print(\"this is x_feat_rem\")\n        #print(self.x_feat_rem)\n        #print(\"i do this\")\n        outputs = self.net(template, dz_feat.cuda(), x_crop, seq_input=pre_seq_output, head_type=None, stage=\"batch_track\",\n                           search_feature=self.x_feat_rem, target_in_search_img=target_in_search,\n                           gt_bboxes=gt_boxes[-1])\n\n        selected_indices = outputs['seqs'].detach()\n        x_feat = outputs['x_feat'].detach().cpu()\n        self.x_feat_rem = x_feat.clone()\n        x_feat_list.append(x_feat.clone())\n\n        update_feat = outputs['dz_feat'].detach().cpu()\n        update_feat_list.append(update_feat.clone())\n\n        pred_bbox = selected_indices[:, 0:4].data.cpu().numpy()\n        bbox = (pred_bbox / (self.bins-1) - (self.range * 0.5 - 0.5)) * s_x.reshape(-1, 1)\n        cx = bbox[:, 0] + self.center_pos[:, 0] - s_x/2\n        cy = bbox[:, 1] + self.center_pos[:, 1] - s_x/2\n        width = bbox[:, 2] - bbox[:, 0]\n        height = bbox[:, 3] - bbox[:, 1]\n        cx = cx + width/2\n        cy = cy + height/2\n\n        for i in range(len(img)):\n            cx[i], cy[i], width[i], height[i] = self._bbox_clip(cx[i], cy[i], width[i],\n                                                    height[i], img[i].shape[:2])\n        self.center_pos = np.stack([cx, cy], 1)\n        self.size = np.stack([width, height], 1)\n        #self.pre_bbox = np.stack([cx, cy, width, height], 1)\n        for e in range(self.pre_num):\n            if e != self.pre_num-1 :\n                self.pre_bbox[:, 0+e*4:4+e*4] = self.pre_bbox[:, 4+e*4:8+e*4]\n            else:\n                self.pre_bbox[:, 0+e*4:4+e*4] = numpy.stack([cx, cy, width, height], 1)\n        #print(self.pre_bbox)\n        #print(self.pre_bbox)\n        #print(gt_boxes)\n        #print(gt_in_crop)\n\n        bbox = np.stack([cx - width / 2, cy - height / 2, width, height], 1)\n        #print(pre_seq_output)\n        #print(bbox)\n        #print(gt_boxes)\n\n        out = {\n            'dz_feat': update_feat,\n            'search_images': x_crop,\n            'target_in_search': 
target_in_search,\n            'pred_bboxes': bbox,\n            'selected_indices': selected_indices.cpu(),\n            'gt_in_crop': torch.tensor(np.stack(gt_in_crop_list, axis=0), dtype=torch.float),\n            'pre_seq': torch.tensor(np.stack(pre_seq_list, axis=0), dtype=torch.float),\n            'x_feat': torch.tensor([item.cpu().detach().numpy() for item in x_feat_list], dtype=torch.float),\n        }\n        #print(\"i want to see this\")\n        #print(out['x_feat'].shape)\n        #print(out['pre_seq'].shape)\n\n            # import matplotlib.pyplot as plt\n            # print(gt_in_crop)\n            # rem_x_plt = x_crop[0].permute(1, 2, 0).cpu()\n            # plt.imshow(rem_x_plt)\n            # ax = plt.gca()\n            # ax.add_patch(plt.Rectangle(gt_in_crop[0:2], gt_in_crop[2], gt_in_crop[3], color=\"blue\", fill=False, linewidth=1))\n            # plt.show()\n            # input()\n        return out\n\n    def explore(self, data):\n        results = {}\n        search_images_list = []\n        search_anno_list = []\n        action_tensor_list = []\n        iou_list = []\n        # cover_list = []\n        pre_seq_list = []\n        x_feat_list = []\n        target_in_search_list = []\n        template_all_list = []\n        dz_feat_update_list = []\n\n        num_frames = data['num_frames']\n        images = data['search_images']\n        gt_bbox = data['search_annos']\n        template = data['template_images']\n        template_bbox = data['template_annos']\n        visible_ratio = data['visible_ratio']\n\n        template_bbox = np.array(template_bbox)\n        num_seq = len(num_frames)\n\n        for idx in range(np.max(num_frames)):\n            here_images = [img[idx] for img in images] #S, N\n            here_gt_bbox = np.array([gt[idx] for gt in gt_bbox])\n\n            here_gt_bbox = np.concatenate([here_gt_bbox], 0)\n\n            if idx == 0:\n                outputs_template = self.batch_init(template, template_bbox, here_gt_bbox)\n                results['template_images'] = outputs_template['z_1']\n                self.template_temp = outputs_template['z_1'].clone()\n                z_all = [outputs_template['z_1'], outputs_template['z_2']]\n                results['z_all'] = z_all\n                self.dz_feat_update = outputs_template['z_2_feat']\n\n            else:\n                outputs = self.batch_track(here_images, here_gt_bbox, self.template_temp, self.dz_feat_update, action_mode='half')\n                if outputs is None:\n                    return None\n                template_all_list.append(self.template_temp.clone())\n                dz_feat_update_list.append(self.dz_feat_update.clone().to(outputs['dz_feat']))\n\n                x_feat = outputs['x_feat']\n                self.dz_feat_update = outputs['dz_feat']\n                #print(x_feat.shape)\n                pred_bbox = outputs['pred_bboxes']\n                search_images_list.append(outputs['search_images'])\n                target_in_search_list.append(outputs['target_in_search'])\n                search_anno_list.append(outputs['gt_in_crop'])\n                #action_tensor_list.append(outputs['selected_indices'])\n                if len(outputs['pre_seq']) != 8:\n                    print(outputs['pre_seq'])\n                    print(len(outputs['pre_seq']))\n                    print(idx)\n                    print(data['num_frames'])\n                    print(data['search_annos'])\n                    return None\n                pre_seq_list.append(outputs['pre_seq'])\n                pred_bbox_corner = bbutils.batch_xywh2corner(pred_bbox)\n                gt_bbox_corner = bbutils.batch_xywh2corner(here_gt_bbox)\n                here_iou = []\n                for i in range(num_seq):\n                    bbox_iou = IoU(pred_bbox_corner[i], gt_bbox_corner[i])\n                    here_iou.append(bbox_iou)\n                iou_list.append(here_iou)\n                x_feat_list.append(x_feat.clone())\n\n        search_images_reverse_list = []\n        search_anno_reverse_list = []\n        action_tensor_reverse_list = []\n        iou_reverse_list = []\n        pre_seq_reverse_list = []\n        x_feat_reverse_list = []\n        target_in_search_reverse_list = []\n        dz_feat_update_reverse_list = []\n        template_all_reverse_list = []\n        for idx in range(np.max(num_frames)):\n            real_idx = np.max(num_frames) - 1 - idx\n            here_images = [img[real_idx] for img in images]  # S, N\n            here_gt_bbox = np.array([gt[real_idx] for gt in gt_bbox])\n\n            here_gt_bbox = np.concatenate([here_gt_bbox], 0)\n\n            if idx == 0:\n                outputs_template = self.batch_init(template, template_bbox, here_gt_bbox)\n                results['template_images'] = outputs_template['z_1']\n                self.template_temp = outputs_template['z_1'].clone()\n                z_all = [outputs_template['z_1'], outputs_template['z_2']]\n                results['z_all'] = z_all\n                self.dz_feat_update = outputs_template['z_2_feat'].clone()\n\n            else:\n                outputs = self.batch_track(here_images, here_gt_bbox, self.template_temp, self.dz_feat_update, action_mode='half')\n                if outputs is None:\n                    return None\n                template_all_reverse_list.append(self.template_temp.clone())\n                dz_feat_update_reverse_list.append(self.dz_feat_update.clone().to(outputs['dz_feat']))\n\n                x_feat = outputs['x_feat']\n                self.dz_feat_update = outputs['dz_feat']\n                # print(x_feat.shape)\n                pred_bbox = outputs['pred_bboxes']\n                search_images_reverse_list.append(outputs['search_images'])\n                target_in_search_reverse_list.append(outputs['target_in_search'])\n                search_anno_reverse_list.append(outputs['gt_in_crop'])\n                # action_tensor_list.append(outputs['selected_indices'])\n                if len(outputs['pre_seq']) != 8:\n                    print(outputs['pre_seq'])\n                    print(len(outputs['pre_seq']))\n                    print(idx)\n                    print(data['num_frames'])\n                    print(data['search_annos'])\n                    return None\n                pre_seq_reverse_list.append(outputs['pre_seq'])\n                pred_bbox_corner = bbutils.batch_xywh2corner(pred_bbox)\n                gt_bbox_corner = bbutils.batch_xywh2corner(here_gt_bbox)\n                here_iou = []\n                for i in range(num_seq):\n                    bbox_iou = IoU(pred_bbox_corner[i], gt_bbox_corner[i])\n                    here_iou.append(bbox_iou)\n                iou_reverse_list.append(here_iou)\n                x_feat_reverse_list.append(x_feat.clone())\n\n        results['x_feat'] = torch.cat([torch.stack(x_feat_list), torch.stack(x_feat_reverse_list)], dim=2)\n        results['search_images'] = torch.cat([torch.stack(search_images_list), torch.stack(search_images_reverse_list)], dim=1)\n        results['template_images_z0'] = torch.cat([torch.stack(template_all_list), torch.stack(template_all_reverse_list)], dim=1)\n        results['dz_feat_update'] = torch.cat([torch.stack(dz_feat_update_list), torch.stack(dz_feat_update_reverse_list)], dim=1)\n        results['search_anno'] = torch.cat([torch.stack(search_anno_list), torch.stack(search_anno_reverse_list)], dim=1)\n        results['pre_seq'] = torch.cat([torch.stack(pre_seq_list), torch.stack(pre_seq_reverse_list)], dim=1)\n        results['target_in_search'] = torch.cat([torch.stack(target_in_search_list), torch.stack(target_in_search_reverse_list)], dim=1)\n\n        iou_tensor = torch.tensor(iou_list, dtype=torch.float)\n        iou_tensor_reverse = torch.tensor(iou_reverse_list, dtype=torch.float)\n        results['baseline_iou'] = torch.cat([iou_tensor[:, :num_seq], iou_tensor_reverse[:, :num_seq]], dim=1)\n        #results['explore_iou'] = iou_tensor[:, num_seq:]\n        #results['action_tensor'] = torch.stack(action_tensor_list)\n\n        return results\n\n    def forward_pass(self, data):\n        # currently only support 1 template and 1 search region\n        assert len(data['template_images']) == 1\n        assert len(data['search_images']) == 1\n        #print(data['dataset'])\n\n        template_list = []\n        for i in range(self.settings.num_template):\n            template_img_i = data['template_images'][i].view(-1,\n                                                             *data['template_images'].shape[2:])  # (batch, 3, 128, 128)\n            # template_att_i = data['template_att'][i].view(-1, *data['template_att'].shape[2:])  # (batch, 128, 128)\n            template_list.append(template_img_i)\n\n        search_img = data['search_images'][0].view(-1, *data['search_images'].shape[2:])  # (batch, 3, 320, 320)\n        # search_att = data['search_att'][0].view(-1, *data['search_att'].shape[2:])  # (batch, 320, 320)\n\n        box_mask_z = None\n        ce_keep_rate = None\n        if self.cfg.MODEL.BACKBONE.CE_LOC:\n            box_mask_z = generate_mask_cond(self.cfg, template_list[0].shape[0], template_list[0].device,\n                                            data['template_anno'][0])\n\n            ce_start_epoch = self.cfg.TRAIN.CE_START_EPOCH\n            ce_warm_epoch = self.cfg.TRAIN.CE_WARM_EPOCH\n            ce_keep_rate = adjust_keep_rate(data['epoch'], warmup_epochs=ce_start_epoch,\n                                                total_epochs=ce_start_epoch + ce_warm_epoch,\n                                                ITERS_PER_EPOCH=1,\n                                                base_keep_rate=self.cfg.MODEL.BACKBONE.CE_KEEP_RATIO[0])\n\n        if len(template_list) == 1:\n            template_list = template_list[0]\n        gt_bbox = data['search_anno'][-1]\n        begin = self.bins\n        end = self.bins + 1\n        gt_bbox[:, 2] = gt_bbox[:, 0] + gt_bbox[:, 2]\n        gt_bbox[:, 3] = gt_bbox[:, 1] + gt_bbox[:, 3]\n        gt_bbox = gt_bbox.clamp(min=0.0, max=1.0)\n        data['real_bbox'] = gt_bbox\n        seq_ori = gt_bbox * (self.bins - 1)\n        seq_ori = seq_ori.int().to(search_img)\n        B = seq_ori.shape[0]\n        seq_ori_4_4 = seq_ori[:, 0:3]\n        #seq_input = torch.cat([torch.ones((B, 1)).to(search_img) * begin, seq_ori_4_4], dim=1)
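\n        # Sequence layout (descriptive comment, inferred from the construction\n        # below): teacher forcing for the autoregressive head -- the decoder is\n        # fed [begin, x1, y1, x2, y2] and supervised with [x1, y1, x2, y2, end],\n        # where begin = self.bins and end = self.bins + 1 are special tokens.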
\n        # seq_input = torch.cat([torch.ones(1).to(target_bounding_box_label_matrix) * begin, seq_shuffle])\n        #seq_output = torch.cat([seq_ori], dim=1)\n        seq_input = torch.cat([torch.ones((B, 1)).to(search_img) * begin, seq_ori], dim=1)\n        seq_output = torch.cat([seq_ori, torch.ones((B, 1)).to(search_img) * end], dim=1)\n        data['seq_input'] = seq_input\n        data['seq_output'] = seq_output\n        out_dict = self.net(template=template_list,\n                            search=search_img,\n                            ce_template_mask=box_mask_z,\n                            ce_keep_rate=ce_keep_rate,\n                            return_last_attn=False,\n                            seq_input=seq_input)\n\n        return out_dict\n\n    def compute_sequence_losses(self, data):\n        num_frames = data['search_images'].shape[0]\n        #template_images = data['template_images'].repeat(num_frames,1,1,1,1)\n        template_images_for = data['template_images_z0'].reshape(-1, *data['template_images_z0'].size()[2:])\n        dz_feat = data['dz_feat_update'].reshape(-1, *data['dz_feat_update'].size()[2:])\n        target_in_search = data['target_in_search'].reshape(-1, *data['target_in_search'].size()[2:])\n        #template_images = template_images.view(-1, *template_images.size()[2:])\n        search_images = data['search_images'].reshape(-1, *data['search_images'].size()[2:])\n        search_anno = data['search_anno'].reshape(-1, *data['search_anno'].size()[2:])\n        # cover_truth = data['cover'].reshape(-1, *data['cover'].size()[2:])\n        #print(data['pre_seq'].shape)\n        #print(data['x_feat'].shape)\n        pre_seq = data['pre_seq'].reshape(-1, 4*self.pre_num)\n        x_feat = data['x_feat'].reshape(-1, *data['x_feat'].size()[2:])\n        # print(\"begin\")\n        # print(template_images.shape)\n        # print(template_images_for.shape)\n        # print(target_in_search.shape)\n        # print(\"end\")\n        epoch = data['epoch']\n        if epoch < 11:\n            self.loss_weight['focal'] = 2\n            plus = 1\n            rem_p = 1\n        else:\n            self.loss_weight['focal'] = 0\n            plus = 1\n            rem_p = 0\n        #print(\"this is looking for\")\n        #print(x_feat.shape)\n        #print(pre_seq.shape)\n        pre_seq = pre_seq.clamp(-0.5 * self.range + 0.5, 0.5 + self.range * 0.5)\n        pre_seq = (pre_seq + (self.range * 0.5 - 0.5)) * (self.bins - 1)\n        #print(pre_seq)\n        outputs = self.net(template_images_for, dz_feat, search_images, seq_input=pre_seq, stage=\"forward_pass\", search_feature=x_feat, target_in_search_img=target_in_search)\n\n        score = outputs['score']\n\n        # cover = outputs['cover']\n\n        renew_loss = outputs['renew_loss']\n\n        pred_feat = outputs[\"feat\"]\n        # generate labels\n        if self.focal is None:\n            weight = torch.ones(self.bins*self.range + 6)\n            weight[self.bins * self.range + 4] = 0.1\n            weight[self.bins * self.range + 3] = 0.1\n            weight[self.bins * self.range + 2] = 0.1\n            weight[self.bins*self.range+1] = 0.1\n            weight[self.bins*self.range] = 0.1\n            weight = weight.to(pred_feat)\n            self.focal = torch.nn.CrossEntropyLoss(weight=weight, reduction='mean').to(pred_feat)\n        # target[:, 2] = target[:, 2] + target[:, 0]\n        # target[:, 3] = target[:, 3] + target[:, 1]\n        # real_target = torch.zeros_like(target)\n        # real_target[: ,0] = target[: ,0]\n        # real_target[:, 1] = target[:, 1]\n        # real_target[:, 2] = target[:, 2] + target[:, 0]\n        # real_target[:, 3] = target[:, 3] + target[:, 1]\n        # target = target.reshape(-1).to(torch.int64)\n        # real_target = real_target.reshape(-1).to(torch.int64)\n        # pred = pred_feat.permute(1, 0, 2)[:, 0:4, :].reshape(-1, self.bins + 2)\n        #print(search_anno)\n        search_anno[:, 2] = search_anno[:, 2] + search_anno[:, 0]\n        search_anno[:, 3] = search_anno[:, 3] + search_anno[:, 1]\n        target = (search_anno / self.cfg.DATA.SEARCH.SIZE + (self.range * 0.5 - 0.5)) * (self.bins - 1)\n        #target[:, 2] = target[:, 2] + target[:, 0]\n        #target[:, 3] = target[:, 3] + target[:, 1]\n        target = target.clamp(min=0.0, max=(self.bins * self.range-0.0001))\n        #print(target)\n        target_iou = target\n        end_flag = torch.ones((target.shape[0], 1)) * (self.bins * self.range + 1)\n        end_flag = end_flag.to(target)\n        target = torch.cat([target], dim=1)\n        target = target.reshape(-1).to(torch.int64)\n        pred = pred_feat.permute(1, 0, 2).reshape(-1, self.bins*self.range+6)\n        #print(target)\n        varifocal_loss = self.focal(pred, target)\n        pred = pred_feat[0:4, :, 0:self.bins*self.range]\n        target = target_iou[:, 0:4].to(pred_feat) / (self.bins - 1) - (self.range * 0.5 - 0.5)\n        #print(target)\n        out = pred.softmax(-1).to(pred)\n        # torch.linspace replaces the deprecated torch.range; it yields the same\n        # self.bins*self.range bin centers for the hard-coded bins=400, range=2\n        mul = torch.linspace((-1*self.range * 0.5 + 0.5)+1/800, (self.range * 0.5 + 0.5)-1/800, self.bins*self.range).to(pred)\n        #print(mul)\n        ans = out * mul\n        ans = ans.sum(dim=-1)\n        ans = ans.permute(1, 0).to(pred)\n        extra_seq = ans\n        #print(extra_seq)\n        extra_seq = extra_seq.to(pred)\n        # print(extra_seq)\n\n        cious, iou = SIoU_loss(extra_seq, target, 4)\n        cious = cious.mean()\n\n        score_real = score\n\n        score_loss = self.objective['l1'](score_real, iou)\n\n        #cover_loss = self.objective['l1'](cover, cover_truth)\n\n        giou_loss = cious\n        l1_loss = self.objective['l1'](extra_seq, target)\n        #self.loss_weight['giou'] = 0\n        loss_bb = (self.loss_weight['giou'] * giou_loss + self.loss_weight['l1'] * l1_loss + self.loss_weight[\n            'focal'] * varifocal_loss) * plus\n\n        #pred = pred_feat[0:4, :, :].permute(1, 0, 2)\n        #log_softmax_score = F.log_softmax(pred * self.cfg.DATA.TEMP, dim=-1)\n        #log_softmax_score = log_softmax_score.reshape(-1, self.bins+2)\n        #log = torch.argmax(log_softmax_score, dim=-1)\n        #action_tensor = data['action_tensor'][:, :, 0:4].reshape(-1)\n        #bs = log_softmax_score.shape[0]\n        #selected_logprobs = log_softmax_score[range(bs), action_tensor.view(-1)]\n        #print(\"this is true!\")\n        #print(selected_logprobs)\n        #selected_logprobs = selected_logprobs.reshape(-1, 4)\n        #print(selected_logprobs)\n        #selected_logprobs = torch.mean(selected_logprobs, dim=1)\n        #print(selected_logprobs)\n        #selected_logprobs = selected_logprobs.reshape(-1)\n        #cls_loss = - selected_logprobs * data['reward_tensor'].view(-1)\n        #loss_sl = cls_loss.mean()\n        #print(self.net.box_head.encoder.layers[0].z_self_attn.qkv.weight)\n        #total_losses = loss_bb\n        #total_losses = loss_bb + renew_loss * 0.3 + score_loss * 1
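\n        # Loss assembly (descriptive comment): 'loss_bb' combines the SIoU, L1\n        # and token cross-entropy terms; 'rem_p' (set from the epoch above)\n        # keeps the auxiliary renew/score terms only while epoch < 11.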
\n        total_losses = loss_bb + renew_loss * 0.3 * rem_p + score_loss * 1 * rem_p  # + cover_loss * 1 * rem_p\n        #total_losses = loss_bb * 1 + renew_loss * 0.3  + score_loss * 0\n\n        mean_iou = iou.detach().mean()\n        status = {\"Loss/total\": total_losses.item()/2,\n                 # \"Loss/cover\": cover_loss.item()/2,\n                  #\"Loss/sl\": loss_sl.item(),\n                  \"Loss/score\": score_loss.item()/2,\n                  \"Loss/giou\": giou_loss.item()/2,\n                  \"Loss/l1\": l1_loss.item()/2,\n                  \"Loss/location\": varifocal_loss.item()/2,\n                  \"Loss/renew\": renew_loss.item()/2,\n                  \"IoU\": mean_iou.item()/2}\n\n        return total_losses, status\n\n    def compute_losses(self, pred_dict, gt_dict, return_status=True):\n        # gt gaussian map\n        bins = self.bins\n        gt_bbox = gt_dict['search_anno'][-1]  # (Ns, batch, 4) (x1,y1,w,h) -> (batch, 4)\n        real_bbox = gt_dict['real_bbox']\n        seq_output = gt_dict['seq_output']\n        pred_feat = pred_dict[\"feat\"]\n        if self.focal is None:\n            # vocabulary = bins coordinate tokens + begin (bins) / end (bins+1)\n            # special tokens; torch.ones(bins + 1) would make weight[bins + 1]\n            # an out-of-bounds index\n            weight = torch.ones(bins + 2)\n            weight[bins + 1] = 0.1\n            weight[bins] = 0.1\n            weight = weight.to(pred_feat)\n            self.klloss = torch.nn.KLDivLoss(reduction='none').to(pred_feat)\n\n            self.focal = torch.nn.CrossEntropyLoss(weight=weight, reduction='mean').to(pred_feat)\n        # compute varifocal loss\n        pred = pred_feat.permute(1, 0, 2).reshape(-1, bins + 2)\n        #print(pred)\n        target = seq_output.reshape(-1).to(torch.int64)\n        #print(target)\n        varifocal_loss = self.focal(pred, target)\n        # compute giou and L1 loss\n        beta = 1\n        pred = pred_feat[0:4, :, 0:bins] * beta\n        target = seq_output[:, 0:4].to(pred_feat)\n        target_box = seq_output[:, 0:4].cpu().numpy()\n        x1, y1, x2, y2 = generate_sa_simdr(target_box)\n        # x1_ = torch.Tensor(x1).to(pred).unsqueeze(1)\n        # y1_ = torch.Tensor(y1).to(pred).unsqueeze(1)\n        # x2_ = torch.Tensor(x2).to(pred).unsqueeze(1)\n        # y2_ = torch.Tensor(y2).to(pred).unsqueeze(1)\n        # KL_target = torch.concat((x1_, y1_, x2_, y2_), dim=1)\n\n        out = pred.softmax(-1).to(pred)\n        mul = torch.arange(0, bins, 1).to(pred)\n        ans = out * mul\n        ans = ans.sum(dim=-1)\n        ans = ans.permute(1, 0).to(pred)\n        target = target / (bins - 1)\n        extra_seq = ans / (bins - 1)\n        extra_seq = extra_seq.to(pred)\n        #cious, iou = ciou(extra_seq, target)\n        cious, iou = SIoU_loss(extra_seq, target, 4)\n        cious = cious.mean()\n        #cious, iou = self.objective['giou'](extra_seq, target)\n        giou_loss = cious\n        #giou_loss = 1 - cious\n        #giou_loss, iou = self.objective['giou'](extra_seq, target)\n        l1_loss = self.objective['l1'](extra_seq, target)\n        #print(giou_loss)\n        #print(l1_loss)\n        #print(varifocal_loss)\n\n        # gt_gaussian_maps = generate_heatmap(gt_dict['search_anno'], self.cfg.DATA.SEARCH.SIZE, self.cfg.MODEL.BACKBONE.STRIDE)\n        # gt_gaussian_maps = gt_gaussian_maps[-1].unsqueeze(1)\n        # #\n        # # Get boxes\n        # pred_boxes = pred_dict['pred_boxes']\n        # if torch.isnan(pred_boxes).any():\n        #     raise ValueError(\"Network outputs is NAN! 
Stop Training\")\n        # num_queries = pred_boxes.size(1)\n        # pred_boxes_vec = box_cxcywh_to_xyxy(pred_boxes).view(-1, 4)  # (B,N,4) --> (BN,4) (x1,y1,x2,y2)\n        # gt_boxes_vec = box_xywh_to_xyxy(gt_bbox)[:, None, :].repeat((1, num_queries, 1)).view(-1, 4).clamp(min=0.0,\n        #                                                                                                    max=1.0)  # (B,4) --> (B,1,4) --> (B,N,4)\n\n        # compute giou and iou\n        # try:\n        #     giou_loss, iou = self.objective['giou'](pred_boxes_vec, gt_boxes_vec)  # (BN,4) (BN,4)\n        # except:\n        #     giou_loss, iou = torch.tensor(0.0).cuda(), torch.tensor(0.0).cuda()\n        # # compute l1 loss\n        # l1_loss = self.objective['l1'](pred_boxes_vec, gt_boxes_vec)  # (BN,4) (BN,4)\n        # # compute location loss\n        # if 'score_map' in pred_dict:\n        #     location_loss = self.objective['focal'](pred_dict['score_map'], gt_gaussian_maps)\n        # else:\n        #     location_loss = torch.tensor(0.0, device=l1_loss.device)\n        # weighted sum\n        loss = self.loss_weight['giou'] * giou_loss + self.loss_weight['l1'] * l1_loss + self.loss_weight['focal'] * varifocal_loss\n\n        if return_status:\n            # status for log\n            mean_iou = iou.detach().mean()\n            status = {\"Loss/total\": loss.item(),\n                      \"Loss/giou\": giou_loss.item(),\n                      \"Loss/l1\": l1_loss.item(),\n                      \"Loss/location\": varifocal_loss.item(),\n                      \"IoU\": mean_iou.item()}\n            return loss, status\n        else:\n            return loss\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/admin/__init__.py",
    "content": "from .environment import env_settings, create_default_local_file_ITP_train\nfrom .stats import AverageMeter, StatValue\n#from .tensorboard import TensorboardWriter\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/admin/environment.py",
    "content": "import importlib\nimport os\nfrom collections import OrderedDict\n\n\ndef create_default_local_file():\n    path = os.path.join(os.path.dirname(__file__), 'local.py')\n\n    empty_str = '\\'\\''\n    default_settings = OrderedDict({\n        'workspace_dir': empty_str,\n        'tensorboard_dir': 'self.workspace_dir + \\'/tensorboard/\\'',\n        'pretrained_networks': 'self.workspace_dir + \\'/pretrained_networks/\\'',\n        'lasot_dir': empty_str,\n        'got10k_dir': empty_str,\n        'trackingnet_dir': empty_str,\n        'coco_dir': empty_str,\n        'lvis_dir': empty_str,\n        'sbd_dir': empty_str,\n        'imagenet_dir': empty_str,\n        'imagenetdet_dir': empty_str,\n        'ecssd_dir': empty_str,\n        'hkuis_dir': empty_str,\n        'msra10k_dir': empty_str,\n        'davis_dir': empty_str,\n        'youtubevos_dir': empty_str})\n\n    comment = {'workspace_dir': 'Base directory for saving network checkpoints.',\n               'tensorboard_dir': 'Directory for tensorboard files.'}\n\n    with open(path, 'w') as f:\n        f.write('class EnvironmentSettings:\\n')\n        f.write('    def __init__(self):\\n')\n\n        for attr, attr_val in default_settings.items():\n            comment_str = None\n            if attr in comment:\n                comment_str = comment[attr]\n            if comment_str is None:\n                f.write('        self.{} = {}\\n'.format(attr, attr_val))\n            else:\n                f.write('        self.{} = {}    # {}\\n'.format(attr, attr_val, comment_str))\n\n\ndef create_default_local_file_ITP_train(workspace_dir, data_dir):\n    path = os.path.join(os.path.dirname(__file__), 'local.py')\n\n    empty_str = '\\'\\''\n    default_settings = OrderedDict({\n        'workspace_dir': workspace_dir,\n        'tensorboard_dir': os.path.join(workspace_dir, 'tensorboard'),    # Directory for tensorboard files.\n        'pretrained_networks': os.path.join(workspace_dir, 'pretrained_networks'),\n        'lasot_dir': os.path.join(data_dir, 'lasot'),\n        'got10k_dir': os.path.join(data_dir, 'got10k/train'),\n        'got10k_val_dir': os.path.join(data_dir, 'got10k/val'),\n        'lasot_lmdb_dir': os.path.join(data_dir, 'lasot_lmdb'),\n        'got10k_lmdb_dir': os.path.join(data_dir, 'got10k_lmdb'),\n        'trackingnet_dir': os.path.join(data_dir, 'trackingnet'),\n        'trackingnet_lmdb_dir': os.path.join(data_dir, 'trackingnet_lmdb'),\n        'coco_dir': os.path.join(data_dir, 'coco'),\n        'coco_lmdb_dir': os.path.join(data_dir, 'coco_lmdb'),\n        'lvis_dir': empty_str,\n        'sbd_dir': empty_str,\n        'imagenet_dir': os.path.join(data_dir, 'vid'),\n        'imagenet_lmdb_dir': os.path.join(data_dir, 'vid_lmdb'),\n        'imagenetdet_dir': empty_str,\n        'ecssd_dir': empty_str,\n        'hkuis_dir': empty_str,\n        'msra10k_dir': empty_str,\n        'davis_dir': empty_str,\n        'youtubevos_dir': empty_str})\n\n    comment = {'workspace_dir': 'Base directory for saving network checkpoints.',\n               'tensorboard_dir': 'Directory for tensorboard files.'}\n\n    with open(path, 'w') as f:\n        f.write('class EnvironmentSettings:\\n')\n        f.write('    def __init__(self):\\n')\n\n        for attr, attr_val in default_settings.items():\n            comment_str = None\n            if attr in comment:\n                comment_str = comment[attr]\n            if comment_str is None:\n                if attr_val == empty_str:\n                    f.write('       
 self.{} = {}\\n'.format(attr, attr_val))\n                else:\n                    f.write('        self.{} = \\'{}\\'\\n'.format(attr, attr_val))\n            else:\n                f.write('        self.{} = \\'{}\\'    # {}\\n'.format(attr, attr_val, comment_str))\n\n\ndef env_settings():\n    env_module_name = 'lib.train.admin.local'\n    try:\n        env_module = importlib.import_module(env_module_name)\n        return env_module.EnvironmentSettings()\n    except Exception:\n        env_file = os.path.join(os.path.dirname(__file__), 'local.py')\n\n        create_default_local_file()\n        raise RuntimeError('YOU HAVE NOT SET UP YOUR local.py!!!\\n Go to \"{}\" and set all the paths you need. Then try to run again.'.format(env_file))\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/admin/local.py",
    "content": "class EnvironmentSettings:\n    def __init__(self):\n        self.workspace_dir = '/home/baiyifan/code/prev_for_2stage'    # Base directory for saving network checkpoints.\n        self.tensorboard_dir = '/home/baiyifan/code/detrack/tensorboard'    # Directory for tensorboard files.\n        self.pretrained_networks = '/home/baiyifan/code/OSTrack/pretrained_networks'\n        self.lasot_dir = '/home/baiyifan/LaSOT/LaSOTBenchmark'\n        self.got10k_dir = '/home/baiyifan/GOT-10k/train'\n        self.got10k_val_dir = '/home/baiyifan/GOT-10k/val'\n        self.lasot_lmdb_dir = '/home/baiyifan/LaSOT/LaSOTBenchmark'\n        self.got10k_lmdb_dir = ''\n        self.trackingnet_dir = '/ssddata/TrackingNet/all_zip'\n        self.trackingnet_lmdb_dir = '/ssddata/TrackingNet/all_zip'\n        self.coco_dir = '/home/baiyifan/coco'\n        self.coco_lmdb_dir = ''\n        self.lvis_dir = ''\n        self.sbd_dir = ''\n        self.imagenet_dir = '/home/baiyifan/code/OSTrack/data/vid'\n        self.imagenet_lmdb_dir = '/home/baiyifan/code/OSTrack/data/vid_lmdb'\n        self.imagenetdet_dir = ''\n        self.ecssd_dir = ''\n        self.hkuis_dir = ''\n        self.msra10k_dir = ''\n        self.davis_dir = ''\n        self.youtubevos_dir = ''\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/admin/settings.py",
    "content": "from lib.train.admin.environment import env_settings\n\n\nclass Settings:\n    \"\"\" Training settings, e.g. the paths to datasets and networks.\"\"\"\n    def __init__(self):\n        self.set_default()\n\n    def set_default(self):\n        self.env = env_settings()\n        self.use_gpu = True\n\n\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/admin/stats.py",
    "content": "\n\nclass StatValue:\n    def __init__(self):\n        self.clear()\n\n    def reset(self):\n        self.val = 0\n\n    def clear(self):\n        self.reset()\n        self.history = []\n\n    def update(self, val):\n        self.val = val\n        self.history.append(self.val)\n\n\nclass AverageMeter(object):\n    \"\"\"Computes and stores the average and current value\"\"\"\n    def __init__(self):\n        self.clear()\n        self.has_new_data = False\n\n    def reset(self):\n        self.avg = 0\n        self.val = 0\n        self.sum = 0\n        self.count = 0\n\n    def clear(self):\n        self.reset()\n        self.history = []\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n\n    def new_epoch(self):\n        if self.count > 0:\n            self.history.append(self.avg)\n            self.reset()\n            self.has_new_data = True\n        else:\n            self.has_new_data = False\n\n\ndef topk_accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    single_input = not isinstance(topk, (tuple, list))\n    if single_input:\n        topk = (topk,)\n\n    maxk = max(topk)\n    batch_size = target.size(0)\n\n    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)[0]\n        res.append(correct_k * 100.0 / batch_size)\n\n    if single_input:\n        return res[0]\n\n    return res\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/admin/tensorboard.py",
    "content": "#import os\n#from collections import OrderedDict\n#try:\n#    from torch.utils.tensorboard import SummaryWriter\n#except:\n#    print('WARNING: You are using tensorboardX instead sis you have a too old pytorch version.')\n#    from tensorboardX import SummaryWriter\n\n\n#class TensorboardWriter:\n#    def __init__(self, directory, loader_names):\n#        self.directory = directory\n#        self.writer = OrderedDict({name: SummaryWriter(os.path.join(self.directory, name)) for name in loader_names})\n\n#    def write_info(self, script_name, description):\n#        tb_info_writer = SummaryWriter(os.path.join(self.directory, 'info'))\n#        tb_info_writer.add_text('Script_name', script_name)\n#        tb_info_writer.add_text('Description', description)\n#        tb_info_writer.close()\n\n#    def write_epoch(self, stats: OrderedDict, epoch: int, ind=-1):\n#        for loader_name, loader_stats in stats.items():\n#            if loader_stats is None:\n#                continue\n#            for var_name, val in loader_stats.items():\n#                if hasattr(val, 'history') and getattr(val, 'has_new_data', True):\n#                    self.writer[loader_name].add_scalar(var_name, val.history[ind], epoch)"
  },
  {
    "path": "artrackv2_mindspore/lib/train/base_functions.py",
    "content": "import torch\nfrom torch.utils.data.distributed import DistributedSampler\n# datasets related\nfrom lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet\nfrom lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb\nfrom lib.train.data import sampler, opencv_loader, processing, LTRLoader\nimport lib.train.data.transforms as tfm\nfrom lib.utils.misc import is_main_process\n\n\ndef update_settings(settings, cfg):\n    settings.print_interval = cfg.TRAIN.PRINT_INTERVAL\n    settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR,\n                                   'search': cfg.DATA.SEARCH.FACTOR}\n    settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE,\n                          'search': cfg.DATA.SEARCH.SIZE}\n    settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER,\n                                     'search': cfg.DATA.SEARCH.CENTER_JITTER}\n    settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER,\n                                    'search': cfg.DATA.SEARCH.SCALE_JITTER}\n    settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM\n    settings.print_stats = None\n    settings.batchsize = cfg.TRAIN.BATCH_SIZE\n    settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE\n\n\ndef names2datasets(name_list: list, settings, image_loader):\n    assert isinstance(name_list, list)\n    datasets = []\n    #settings.use_lmdb = True\n    for name in name_list:\n        assert name in [\"LASOT\", \"GOT10K_vottrain\", \"GOT10K_votval\", \"GOT10K_train_full\", \"GOT10K_official_val\",\n                        \"COCO17\", \"VID\", \"TRACKINGNET\"]\n        if name == \"LASOT\":\n            if settings.use_lmdb:\n                print(\"Building lasot dataset from lmdb\")\n                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))\n            else:\n                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))\n        if name == \"GOT10K_vottrain\":\n            if settings.use_lmdb:\n                print(\"Building got10k from lmdb\")\n                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))\n            else:\n                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))\n        if name == \"GOT10K_train_full\":\n            if settings.use_lmdb:\n                print(\"Building got10k_train_full from lmdb\")\n                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader))\n            else:\n                datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader))\n        if name == \"GOT10K_votval\":\n            if settings.use_lmdb:\n                print(\"Building got10k from lmdb\")\n                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader))\n            else:\n                datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader))\n        if name == \"GOT10K_official_val\":\n            if settings.use_lmdb:\n                raise ValueError(\"Not implement\")\n            else:\n                datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader))\n        if name == \"COCO17\":\n            if 
settings.use_lmdb:\n                print(\"Building COCO2017 from lmdb\")\n                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version=\"2017\", image_loader=image_loader))\n            else:\n                datasets.append(MSCOCOSeq(settings.env.coco_dir, version=\"2017\", image_loader=image_loader))\n        if name == \"VID\":\n            if settings.use_lmdb:\n                print(\"Building VID from lmdb\")\n                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader))\n            else:\n                datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader))\n        if name == \"TRACKINGNET\":\n            if settings.use_lmdb:\n                print(\"Building TrackingNet from lmdb\")\n                datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader))\n            else:\n                # raise ValueError(\"NOW WE CAN ONLY USE TRACKINGNET FROM LMDB\")\n                datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader))\n    return datasets\n\n\ndef build_dataloaders(cfg, settings):\n    # Data transform\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05),\n                                    tfm.RandomHorizontalFlip(probability=0.5))\n\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.RandomHorizontalFlip_Norm(probability=0.5),\n                                    tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))\n\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))\n\n    # The tracking pairs processing module\n    output_sz = settings.output_sz\n    search_area_factor = settings.search_area_factor\n\n    data_processing_train = processing.STARKProcessing(search_area_factor=search_area_factor,\n                                                       output_sz=output_sz,\n                                                       center_jitter_factor=settings.center_jitter_factor,\n                                                       scale_jitter_factor=settings.scale_jitter_factor,\n                                                       mode='sequence',\n                                                       transform=transform_train,\n                                                       joint_transform=transform_joint,\n                                                       settings=settings)\n\n    data_processing_val = processing.STARKProcessing(search_area_factor=search_area_factor,\n                                                     output_sz=output_sz,\n                                                     center_jitter_factor=settings.center_jitter_factor,\n                                                     scale_jitter_factor=settings.scale_jitter_factor,\n                                                     mode='sequence',\n                                                     transform=transform_val,\n                                                     joint_transform=transform_joint,\n                                                     settings=settings)\n\n    # Train sampler and loader\n    settings.num_template = getattr(cfg.DATA.TEMPLATE, \"NUMBER\", 1)\n    settings.num_search = getattr(cfg.DATA.SEARCH, \"NUMBER\", 1)\n    sampler_mode = getattr(cfg.DATA, \"SAMPLER_MODE\", \"causal\")\n    train_cls = getattr(cfg.TRAIN, 
\"TRAIN_CLS\", False)\n    print(\"sampler_mode\", sampler_mode)\n    dataset_train = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader),\n                                            p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO,\n                                            samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH,\n                                            max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL, num_search_frames=settings.num_search,\n                                            num_template_frames=settings.num_template, processing=data_processing_train,\n                                            frame_sample_mode=sampler_mode, train_cls=train_cls)\n\n    train_sampler = DistributedSampler(dataset_train) if settings.local_rank != -1 else None\n    shuffle = False if settings.local_rank != -1 else True\n\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=shuffle,\n                             num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1, sampler=train_sampler)\n\n    # Validation samplers and loaders\n    dataset_val = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.VAL.DATASETS_NAME, settings, opencv_loader),\n                                          p_datasets=cfg.DATA.VAL.DATASETS_RATIO,\n                                          samples_per_epoch=cfg.DATA.VAL.SAMPLE_PER_EPOCH,\n                                          max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL, num_search_frames=settings.num_search,\n                                          num_template_frames=settings.num_template, processing=data_processing_val,\n                                          frame_sample_mode=sampler_mode, train_cls=train_cls)\n    val_sampler = DistributedSampler(dataset_val) if settings.local_rank != -1 else None\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=cfg.TRAIN.BATCH_SIZE,\n                           num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1, sampler=val_sampler,\n                           epoch_interval=cfg.TRAIN.VAL_EPOCH_INTERVAL)\n\n    return loader_train, loader_val\n\n\ndef get_optimizer_scheduler(net, cfg):\n    train_cls = getattr(cfg.TRAIN, \"TRAIN_CLS\", False)\n    if train_cls:\n        print(\"Only training classification head. 
Learnable parameters are shown below.\")\n        param_dicts = [\n            {\"params\": [p for n, p in net.named_parameters() if \"cls\" in n and p.requires_grad]}\n        ]\n\n        for n, p in net.named_parameters():\n            if \"cls\" not in n:\n                p.requires_grad = False\n            else:\n                print(n)\n    else:\n        param_dicts = [\n            {\"params\": [p for n, p in net.named_parameters() if \"backbone\" not in n and p.requires_grad]},\n            {\n                \"params\": [p for n, p in net.named_parameters() if \"backbone\" in n and p.requires_grad],\n                \"lr\": cfg.TRAIN.LR * cfg.TRAIN.BACKBONE_MULTIPLIER,\n            },\n        ]\n        if is_main_process():\n            print(\"Learnable parameters are shown below.\")\n            for n, p in net.named_parameters():\n                if p.requires_grad:\n                    print(n)\n\n    if cfg.TRAIN.OPTIMIZER == \"ADAMW\":\n        optimizer = torch.optim.AdamW(param_dicts, lr=cfg.TRAIN.LR,\n                                      weight_decay=cfg.TRAIN.WEIGHT_DECAY)\n    else:\n        raise ValueError(\"Unsupported Optimizer\")\n    if cfg.TRAIN.SCHEDULER.TYPE == 'step':\n        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, cfg.TRAIN.LR_DROP_EPOCH)\n    elif cfg.TRAIN.SCHEDULER.TYPE == \"Mstep\":\n        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n                                                            milestones=cfg.TRAIN.SCHEDULER.MILESTONES,\n                                                            gamma=cfg.TRAIN.SCHEDULER.GAMMA)\n    else:\n        raise ValueError(\"Unsupported scheduler\")\n    return optimizer, lr_scheduler\n"
  },
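  {
    "path": "artrackv2_mindspore/examples/optimizer_grouping_sketch.py",
    "content": "'''Hedged usage sketch, NOT part of the original ARTrack repo: this file and its\npath are illustrative only. It mirrors the parameter grouping performed by\nget_optimizer_scheduler with a toy model and made-up cfg values (LR,\nBACKBONE_MULTIPLIER, WEIGHT_DECAY), to show how the backbone learning-rate\nmultiplier and the MultiStepLR schedule interact.'''\nimport torch\nimport torch.nn as nn\n\n# Toy network: the 'backbone' name prefix is what the grouping logic keys on.\nnet = nn.Sequential()\nnet.add_module('backbone', nn.Linear(8, 8))\nnet.add_module('head', nn.Linear(8, 4))\n\nLR, BACKBONE_MULTIPLIER, WEIGHT_DECAY = 1e-4, 0.1, 1e-4  # made-up cfg values\n\nparam_dicts = [\n    {'params': [p for n, p in net.named_parameters() if 'backbone' not in n and p.requires_grad]},\n    {'params': [p for n, p in net.named_parameters() if 'backbone' in n and p.requires_grad],\n     'lr': LR * BACKBONE_MULTIPLIER},\n]\noptimizer = torch.optim.AdamW(param_dicts, lr=LR, weight_decay=WEIGHT_DECAY)\nlr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[2], gamma=0.1)\n\nfor epoch in range(3):\n    optimizer.step()  # a real loop would compute a loss and call backward() first\n    lr_scheduler.step()\n    print(epoch, [g['lr'] for g in optimizer.param_groups])\n# The head group starts at 1e-4 and the backbone group at 1e-5; both are scaled\n# by gamma=0.1 once the scheduler reaches the milestone.\n"
  },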
  {
    "path": "artrackv2_mindspore/lib/train/data/__init__.py",
    "content": "# from .loader import LTRLoader\nfrom .image_loader import jpeg4py_loader, opencv_loader, jpeg4py_loader_w_failsafe, default_image_loader\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/data/bounding_box_utils.py",
    "content": "import torch\nimport numpy as np\n\ndef batch_center2corner(boxes):\n    xmin = boxes[:, 0] - boxes[:, 2] * 0.5\n    ymin = boxes[:, 1] - boxes[:, 3] * 0.5\n    xmax = boxes[:, 0] + boxes[:, 2] * 0.5\n    ymax = boxes[:, 1] + boxes[:, 3] * 0.5\n\n    if isinstance(boxes, np.ndarray):\n        return np.stack([xmin, ymin, xmax, ymax], 1)\n    else:\n        return torch.stack([xmin, ymin, xmax, ymax], 1)\n\ndef batch_corner2center(boxes):\n    cx = (boxes[:, 0] + boxes[:, 2]) * 0.5\n    cy = (boxes[:, 1] + boxes[:, 3]) * 0.5\n    w = (boxes[:, 2] - boxes[:, 0])\n    h = (boxes[:, 3] - boxes[:, 1])\n\n    if isinstance(boxes, np.ndarray):\n        return np.stack([cx, cy, w, h], 1)\n    else:\n        return torch.stack([cx, cy, w, h], 1)\n\ndef batch_xywh2center(boxes):\n    cx = boxes[:, 0] + (boxes[:, 2] - 1) / 2\n    cy = boxes[:, 1] + (boxes[:, 3] - 1) / 2\n    w = boxes[:, 2]\n    h = boxes[:, 3]\n\n    if isinstance(boxes, np.ndarray):\n        return np.stack([cx, cy, w, h], 1)\n    else:\n        return torch.stack([cx, cy, w, h], 1)\n\ndef batch_xywh2center2(boxes):\n    cx = boxes[:, 0] + boxes[:, 2] / 2\n    cy = boxes[:, 1] + boxes[:, 3] / 2\n    w = boxes[:, 2]\n    h = boxes[:, 3]\n\n    if isinstance(boxes, np.ndarray):\n        return np.stack([cx, cy, w, h], 1)\n    else:\n        return torch.stack([cx, cy, w, h], 1)\n\n\ndef batch_xywh2corner(boxes):\n    xmin = boxes[:, 0]\n    ymin = boxes[:, 1]\n    xmax = boxes[:, 0] + boxes[:, 2]\n    ymax = boxes[:, 1] + boxes[:, 3]\n\n    if isinstance(boxes, np.ndarray):\n        return np.stack([xmin, ymin, xmax, ymax], 1)\n    else:\n        return torch.stack([xmin, ymin, xmax, ymax], 1)\n\ndef rect_to_rel(bb, sz_norm=None):\n    \"\"\"Convert standard rectangular parametrization of the bounding box [x, y, w, h]\n    to relative parametrization [cx/sw, cy/sh, log(w), log(h)], where [cx, cy] is the center coordinate.\n    args:\n        bb  -  N x 4 tensor of boxes.\n        sz_norm  -  [N] x 2 tensor of value of [sw, sh] (optional). sw=w and sh=h if not given.\n    \"\"\"\n\n    c = bb[...,:2] + 0.5 * bb[...,2:]\n    if sz_norm is None:\n        c_rel = c / bb[...,2:]\n    else:\n        c_rel = c / sz_norm\n    sz_rel = torch.log(bb[...,2:])\n    return torch.cat((c_rel, sz_rel), dim=-1)\n\n\ndef rel_to_rect(bb, sz_norm=None):\n    \"\"\"Inverts the effect of rect_to_rel. See above.\"\"\"\n\n    sz = torch.exp(bb[...,2:])\n    if sz_norm is None:\n        c = bb[...,:2] * sz\n    else:\n        c = bb[...,:2] * sz_norm\n    tl = c - 0.5 * sz\n    return torch.cat((tl, sz), dim=-1)\n\n\ndef masks_to_bboxes(mask, fmt='c'):\n\n    \"\"\" Convert a mask tensor to one or more bounding boxes.\n    Note: This function is a bit new, make sure it does what it says.  /Andreas\n    :param mask: Tensor of masks, shape = (..., H, W)\n    :param fmt: bbox layout. 
'c' => \"center + size\" or (x_center, y_center, width, height)\n                             't' => \"top left + size\" or (x_left, y_top, width, height)\n                             'v' => \"vertices\" or (x_left, y_top, x_right, y_bottom)\n    :return: tensor containing a batch of bounding boxes, shape = (..., 4)\n    \"\"\"\n    batch_shape = mask.shape[:-2]\n    mask = mask.reshape((-1, *mask.shape[-2:]))\n    bboxes = []\n\n    for m in mask:\n        mx = m.sum(dim=-2).nonzero()\n        my = m.sum(dim=-1).nonzero()\n        bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0]\n        bboxes.append(bb)\n\n    bboxes = torch.tensor(bboxes, dtype=torch.float32, device=mask.device)\n    bboxes = bboxes.reshape(batch_shape + (4,))\n\n    if fmt == 'v':\n        return bboxes\n\n    x1 = bboxes[..., :2]\n    s = bboxes[..., 2:] - x1 + 1\n\n    if fmt == 'c':\n        return torch.cat((x1 + 0.5 * s, s), dim=-1)\n    elif fmt == 't':\n        return torch.cat((x1, s), dim=-1)\n\n    raise ValueError(\"Undefined bounding box layout '%s'\" % fmt)\n\n\ndef masks_to_bboxes_multi(mask, ids, fmt='c'):\n    assert mask.dim() == 2\n    bboxes = []\n\n    for id in ids:\n        mx = (mask == id).sum(dim=-2).nonzero()\n        my = (mask == id).float().sum(dim=-1).nonzero()\n        bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0]\n\n        bb = torch.tensor(bb, dtype=torch.float32, device=mask.device)\n\n        x1 = bb[:2]\n        s = bb[2:] - x1 + 1\n\n        if fmt == 'v':\n            pass\n        elif fmt == 'c':\n            bb = torch.cat((x1 + 0.5 * s, s), dim=-1)\n        elif fmt == 't':\n            bb = torch.cat((x1, s), dim=-1)\n        else:\n            raise ValueError(\"Undefined bounding box layout '%s'\" % fmt)\n        bboxes.append(bb)\n\n    return bboxes\n"
  },
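  {
    "path": "artrackv2_mindspore/examples/bbox_utils_sketch.py",
    "content": "'''Hedged usage sketch, NOT part of the original ARTrack repo: this file and its\npath are illustrative only. It exercises the conversions in\nlib/train/data/bounding_box_utils.py and checks that rect_to_rel and\nrel_to_rect are mutual inverses. It assumes it is run from the repo root so\nthe 'lib' package (and its cv2/jpeg4py dependencies) resolves.'''\nimport torch\n\nfrom lib.train.data.bounding_box_utils import (batch_corner2center, batch_xywh2corner,\n                                               rect_to_rel, rel_to_rect)\n\nboxes_xywh = torch.tensor([[10.0, 20.0, 30.0, 40.0],\n                           [0.0, 0.0, 5.0, 5.0]])\n\ncorners = batch_xywh2corner(boxes_xywh)  # (x1, y1, x2, y2)\ncenters = batch_corner2center(corners)   # (cx, cy, w, h)\nprint(corners)\nprint(centers)\n\nrel = rect_to_rel(boxes_xywh)  # (cx/w, cy/h, log w, log h)\nback = rel_to_rect(rel)\nassert torch.allclose(back, boxes_xywh, atol=1e-5)\nprint('rect_to_rel / rel_to_rect round-trip OK')\n"
  },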
  {
    "path": "artrackv2_mindspore/lib/train/data/image_loader.py",
    "content": "import jpeg4py\nimport cv2 as cv\nfrom PIL import Image\nimport numpy as np\n\ndavis_palette = np.repeat(np.expand_dims(np.arange(0,256), 1), 3, 1).astype(np.uint8)\ndavis_palette[:22, :] = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],\n                         [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],\n                         [64, 0, 0], [191, 0, 0], [64, 128, 0], [191, 128, 0],\n                         [64, 0, 128], [191, 0, 128], [64, 128, 128], [191, 128, 128],\n                         [0, 64, 0], [128, 64, 0], [0, 191, 0], [128, 191, 0],\n                         [0, 64, 128], [128, 64, 128]]\n\n\ndef default_image_loader(path):\n    \"\"\"The default image loader, reads the image from the given path. It first tries to use the jpeg4py_loader,\n    but reverts to the opencv_loader if the former is not available.\"\"\"\n    if default_image_loader.use_jpeg4py is None:\n        # Try using jpeg4py\n        im = jpeg4py_loader(path)\n        if im is None:\n            default_image_loader.use_jpeg4py = False\n            print('Using opencv_loader instead.')\n        else:\n            default_image_loader.use_jpeg4py = True\n            return im\n    if default_image_loader.use_jpeg4py:\n        return jpeg4py_loader(path)\n    return opencv_loader(path)\n\ndefault_image_loader.use_jpeg4py = None\n\n\ndef jpeg4py_loader(path):\n    \"\"\" Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py\"\"\"\n    try:\n        return jpeg4py.JPEG(path).decode()\n    except Exception as e:\n        print('ERROR: Could not read image \"{}\"'.format(path))\n        print(e)\n        return None\n\n\ndef opencv_loader(path):\n    \"\"\" Read image using opencv's imread function and returns it in rgb format\"\"\"\n    try:\n        im = cv.imread(path, cv.IMREAD_COLOR)\n\n        # convert to rgb and return\n        return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n    except Exception as e:\n        print('ERROR: Could not read image \"{}\"'.format(path))\n        print(e)\n        return None\n\n\ndef jpeg4py_loader_w_failsafe(path):\n    \"\"\" Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py\"\"\"\n    try:\n        return jpeg4py.JPEG(path).decode()\n    except:\n        try:\n            im = cv.imread(path, cv.IMREAD_COLOR)\n\n            # convert to rgb and return\n            return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n        except Exception as e:\n            print('ERROR: Could not read image \"{}\"'.format(path))\n            print(e)\n            return None\n\n\ndef opencv_seg_loader(path):\n    \"\"\" Read segmentation annotation using opencv's imread function\"\"\"\n    try:\n        return cv.imread(path)\n    except Exception as e:\n        print('ERROR: Could not read image \"{}\"'.format(path))\n        print(e)\n        return None\n\n\ndef imread_indexed(filename):\n    \"\"\" Load indexed image with given filename. Used to read segmentation annotations.\"\"\"\n\n    im = Image.open(filename)\n\n    annotation = np.atleast_3d(im)[...,0]\n    return annotation\n\n\ndef imwrite_indexed(filename, array, color_palette=None):\n    \"\"\" Save indexed image as png. Used to save segmentation annotation.\"\"\"\n\n    if color_palette is None:\n        color_palette = davis_palette\n\n    if np.atleast_3d(array).shape[2] != 1:\n        raise Exception(\"Saving indexed PNGs requires 2D array.\")\n\n    im = Image.fromarray(array)\n    im.putpalette(color_palette.ravel())\n    im.save(filename, format='PNG')"
  },
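  {
    "path": "artrackv2_mindspore/examples/image_loader_sketch.py",
    "content": "'''Hedged usage sketch, NOT part of the original ARTrack repo: this file and its\npath are illustrative only. It writes a dummy BGR image to a temporary file\nand reads it back with opencv_loader to show the BGR->RGB conversion. Run it\nfrom the repo root; importing the module also pulls in jpeg4py, which the\ntraining environment is expected to provide.'''\nimport os\nimport tempfile\n\nimport cv2 as cv\nimport numpy as np\n\nfrom lib.train.data.image_loader import opencv_loader\n\n# Dummy 4x4 image: pure blue in OpenCV's BGR channel order.\nbgr = np.zeros((4, 4, 3), dtype=np.uint8)\nbgr[..., 0] = 255\n\npath = os.path.join(tempfile.mkdtemp(), 'dummy.png')\ncv.imwrite(path, bgr)\n\nrgb = opencv_loader(path)\nprint(rgb.shape)  # (4, 4, 3)\nprint(rgb[0, 0])  # [  0   0 255]: the blue channel ends up last, i.e. RGB order\n"
  },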
  {
    "path": "artrackv2_mindspore/lib/train/data/loader.py",
    "content": "import torch\nimport torch.utils.data.dataloader\nimport importlib\nimport collections\n# from torch import string_classes\nfrom lib.utils import TensorDict, TensorList\n\nstring_classes = str\n\nif float(torch.__version__[:3]) >= 1.9 or len('.'.join((torch.__version__).split('.')[0:2])) > 3:\n    int_classes = int\nelse:\n    # from torch._six import int_classes\n    int_classes = int\n    # 原代码是没有int_classes = int，为 from torch._six import int_classes\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef _check_use_shared_memory():\n    if hasattr(torch.utils.data.dataloader, '_use_shared_memory'):\n        return getattr(torch.utils.data.dataloader, '_use_shared_memory')\n    collate_lib = importlib.import_module('torch.utils.data._utils.collate')\n    if hasattr(collate_lib, '_use_shared_memory'):\n        return getattr(collate_lib, '_use_shared_memory')\n    return torch.utils.data.get_worker_info() is not None\n\n\ndef ltr_collate(batch):\n    \"\"\"Puts each data field into a tensor with outer dimension batch size\"\"\"\n\n    error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n    elem_type = type(batch[0])\n    if isinstance(batch[0], torch.Tensor):\n        out = None\n        if _check_use_shared_memory():\n            # If we're in a background process, concatenate directly into a\n            # shared memory tensor to avoid an extra copy\n            numel = sum([x.numel() for x in batch])\n            storage = batch[0].storage()._new_shared(numel)\n            out = batch[0].new(storage)\n        return torch.stack(batch, 0, out=out)\n        # if batch[0].dim() < 4:\n        #     return torch.stack(batch, 0, out=out)\n        # return torch.cat(batch, 0, out=out)\n    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n            and elem_type.__name__ != 'string_':\n        elem = batch[0]\n        if elem_type.__name__ == 'ndarray':\n            # array of string classes and object\n            if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None:\n                raise TypeError(error_msg.format(elem.dtype))\n\n            return torch.stack([torch.from_numpy(b) for b in batch], 0)\n        if elem.shape == ():  # scalars\n            py_type = float if elem.dtype.name.startswith('float') else int\n            return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n    elif isinstance(batch[0], int_classes):\n        return torch.LongTensor(batch)\n    elif isinstance(batch[0], float):\n        return torch.DoubleTensor(batch)\n    elif isinstance(batch[0], string_classes):\n        return batch\n    elif isinstance(batch[0], TensorDict):\n        return TensorDict({key: ltr_collate([d[key] for d in batch]) for key in batch[0]})\n    elif isinstance(batch[0], collections.Mapping):\n        return {key: ltr_collate([d[key] for d in batch]) for key in batch[0]}\n    elif isinstance(batch[0], TensorList):\n        transposed = zip(*batch)\n        return TensorList([ltr_collate(samples) for samples in transposed])\n    elif isinstance(batch[0], collections.Sequence):\n        transposed = zip(*batch)\n        return [ltr_collate(samples) for samples in transposed]\n    elif batch[0] is None:\n        return batch\n\n    raise TypeError((error_msg.format(type(batch[0]))))\n\n\ndef ltr_collate_stack1(batch):\n    \"\"\"Puts each data field into a tensor. 
The tensors are stacked at dim=1 to form the batch\"\"\"\n\n    error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n    elem_type = type(batch[0])\n    if isinstance(batch[0], torch.Tensor):\n        out = None\n        if _check_use_shared_memory():\n            # If we're in a background process, concatenate directly into a\n            # shared memory tensor to avoid an extra copy\n            numel = sum([x.numel() for x in batch])\n            storage = batch[0].storage()._new_shared(numel)\n            out = batch[0].new(storage)\n        return torch.stack(batch, 1, out=out)\n        # if batch[0].dim() < 4:\n        #     return torch.stack(batch, 0, out=out)\n        # return torch.cat(batch, 0, out=out)\n    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n            and elem_type.__name__ != 'string_':\n        elem = batch[0]\n        if elem_type.__name__ == 'ndarray':\n            # array of string classes and object\n            if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None:\n                raise TypeError(error_msg.format(elem.dtype))\n\n            return torch.stack([torch.from_numpy(b) for b in batch], 1)\n        if elem.shape == ():  # scalars\n            py_type = float if elem.dtype.name.startswith('float') else int\n            return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n    elif isinstance(batch[0], int_classes):\n        return torch.LongTensor(batch)\n    elif isinstance(batch[0], float):\n        return torch.DoubleTensor(batch)\n    elif isinstance(batch[0], string_classes):\n        return batch\n    elif isinstance(batch[0], TensorDict):\n        return TensorDict({key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]})\n    elif isinstance(batch[0], collections.Mapping):\n        return {key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]}\n    elif isinstance(batch[0], TensorList):\n        transposed = zip(*batch)\n        return TensorList([ltr_collate_stack1(samples) for samples in transposed])\n    elif isinstance(batch[0], collections.Sequence):\n        transposed = zip(*batch)\n        return [ltr_collate_stack1(samples) for samples in transposed]\n    elif batch[0] is None:\n        return batch\n\n    raise TypeError((error_msg.format(type(batch[0]))))\n\n\nclass LTRLoader(torch.utils.data.dataloader.DataLoader):\n    \"\"\"\n    Data loader. Combines a dataset and a sampler, and provides\n    single- or multi-process iterators over the dataset.\n\n    Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\n            select along which dimension the data should be stacked to form a batch.\n\n    Arguments:\n        dataset (Dataset): dataset from which to load the data.\n        batch_size (int, optional): how many samples per batch to load\n            (default: 1).\n        shuffle (bool, optional): set to ``True`` to have the data reshuffled\n            at every epoch (default: False).\n        sampler (Sampler, optional): defines the strategy to draw samples from\n            the dataset. If specified, ``shuffle`` must be False.\n        batch_sampler (Sampler, optional): like sampler, but returns a batch of\n            indices at a time. Mutually exclusive with batch_size, shuffle,\n            sampler, and drop_last.\n        num_workers (int, optional): how many subprocesses to use for data\n            loading. 
0 means that the data will be loaded in the main process.\n            (default: 0)\n        collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n        stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\n        pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n            into CUDA pinned memory before returning them.\n        drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n            if the dataset size is not divisible by the batch size. If ``False`` and\n            the size of dataset is not divisible by the batch size, then the last batch\n            will be smaller. (default: False)\n        timeout (numeric, optional): if positive, the timeout value for collecting a batch\n            from workers. Should always be non-negative. (default: 0)\n        worker_init_fn (callable, optional): If not None, this will be called on each\n            worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n            input, after seeding and before data loading. (default: None)\n\n    .. note:: By default, each worker will have its PyTorch seed set to\n              ``base_seed + worker_id``, where ``base_seed`` is a long generated\n              by main process using its RNG. However, seeds for other libraries\n              may be duplicated upon initializing workers (w.g., NumPy), causing\n              each worker to return identical random numbers. (See\n              :ref:`dataloader-workers-random-seed` section in FAQ.) You may\n              use ``torch.initial_seed()`` to access the PyTorch seed for each\n              worker in :attr:`worker_init_fn`, and use it to set other seeds\n              before data loading.\n\n    .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\n                 unpicklable object, e.g., a lambda function.\n    \"\"\"\n\n    __initialized = False\n\n    def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n                 num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=True,\n                 timeout=0, worker_init_fn=None):\n        print(\"pin_memory is\", pin_memory)\n        if collate_fn is None:\n            if stack_dim == 0:\n                collate_fn = ltr_collate\n            elif stack_dim == 1:\n                collate_fn = ltr_collate_stack1\n            else:\n                raise ValueError('Stack dim no supported. Must be 0 or 1.')\n\n        super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n                 num_workers, collate_fn, pin_memory, drop_last,\n                 timeout, worker_init_fn)\n\n        self.name = name\n        self.training = training\n        self.epoch_interval = epoch_interval\n        self.stack_dim = stack_dim\n"
  },
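  {
    "path": "artrackv2_mindspore/examples/stack_dim_sketch.py",
    "content": "'''Hedged sketch, NOT part of the original ARTrack repo: this file and its path\nare illustrative only. It is a standalone illustration of what LTRLoader's\nstack_dim option changes: ltr_collate stacks samples at dim 0 (batch-major),\nwhile ltr_collate_stack1 stacks at dim 1 (frame-major), the layout the\ntraining code requests with stack_dim=1.'''\nimport torch\n\n# 4 samples, each carrying 2 frames of shape (3, 8, 8).\nbatch = [torch.randn(2, 3, 8, 8) for _ in range(4)]\n\nprint(torch.stack(batch, 0).shape)  # torch.Size([4, 2, 3, 8, 8]) -> stack_dim=0\nprint(torch.stack(batch, 1).shape)  # torch.Size([2, 4, 3, 8, 8]) -> stack_dim=1\n"
  },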
  {
    "path": "artrackv2_mindspore/lib/train/data/processing.py",
    "content": "import torch\nimport torchvision.transforms as transforms\nfrom lib.utils import TensorDict\nimport lib.train.data.processing_utils as prutils\nimport torch.nn.functional as F\n\n\ndef stack_tensors(x):\n    if isinstance(x, (list, tuple)) and isinstance(x[0], torch.Tensor):\n        return torch.stack(x)\n    return x\n\n\nclass BaseProcessing:\n    \"\"\" Base class for Processing. Processing class is used to process the data returned by a dataset, before passing it\n     through the network. For example, it can be used to crop a search region around the object, apply various data\n     augmentations, etc.\"\"\"\n    def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None, joint_transform=None):\n        \"\"\"\n        args:\n            transform       - The set of transformations to be applied on the images. Used only if template_transform or\n                                search_transform is None.\n            template_transform - The set of transformations to be applied on the template images. If None, the 'transform'\n                                argument is used instead.\n            search_transform  - The set of transformations to be applied on the search images. If None, the 'transform'\n                                argument is used instead.\n            joint_transform - The set of transformations to be applied 'jointly' on the template and search images.  For\n                                example, it can be used to convert both template and search images to grayscale.\n        \"\"\"\n        self.transform = {'template': transform if template_transform is None else template_transform,\n                          'search':  transform if search_transform is None else search_transform,\n                          'joint': joint_transform}\n\n    def __call__(self, data: TensorDict):\n        raise NotImplementedError\n\n\nclass STARKProcessing(BaseProcessing):\n    \"\"\" The processing class used for training LittleBoy. The images are processed in the following way.\n    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )\n    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is\n    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is\n    always at the center of the search region. The search region is then resized to a fixed size given by the\n    argument output_sz.\n\n    \"\"\"\n\n    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,\n                 mode='pair', settings=None, *args, **kwargs):\n        \"\"\"\n        args:\n            search_area_factor - The size of the search region  relative to the target size.\n            output_sz - An integer, denoting the size to which the search region is resized. The search region is always\n                        square.\n            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            mode - Either 'pair' or 'sequence'. 
If mode='sequence', then output has an extra dimension for frames\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self.search_area_factor = search_area_factor\n        self.output_sz = output_sz\n        self.center_jitter_factor = center_jitter_factor\n        self.scale_jitter_factor = scale_jitter_factor\n        self.mode = mode\n        self.settings = settings\n\n    def _get_jittered_box(self, box, mode):\n        \"\"\" Jitter the input box\n        args:\n            box - input bounding box\n            mode - string 'template' or 'search' indicating template or search data\n\n        returns:\n            torch.Tensor - jittered box\n        \"\"\"\n\n        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])\n        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())\n        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n\n        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n    def __call__(self, data: TensorDict):\n        \"\"\"\n        args:\n            data - The input data, should contain the following fields:\n                'template_images', search_images', 'template_anno', 'search_anno'\n        returns:\n            TensorDict - output data block with following fields:\n                'template_images', 'search_images', 'template_anno', 'search_anno', 'test_proposals', 'proposal_iou'\n        \"\"\"\n        # Apply joint transforms\n        if self.transform['joint'] is not None:\n            data['template_images'], data['template_anno'], data['template_masks'] = self.transform['joint'](\n                image=data['template_images'], bbox=data['template_anno'], mask=data['template_masks'])\n            data['search_images'], data['search_anno'], data['search_masks'] = self.transform['joint'](\n                image=data['search_images'], bbox=data['search_anno'], mask=data['search_masks'], new_roll=False)\n\n        data[\"target_in_search_images\"] = data[\"search_images\"]\n        data[\"target_in_search_anno\"] = data[\"search_anno\"]\n        data[\"target_in_search_masks\"] = data[\"search_masks\"]\n        self.scale_jitter_factor[\"target_in_search\"] = self.scale_jitter_factor[\"template\"]\n        self.center_jitter_factor[\"target_in_search\"] = self.center_jitter_factor[\"template\"]\n        self.search_area_factor[\"target_in_search\"] = self.search_area_factor[\"template\"]\n        self.output_sz[\"target_in_search\"] = self.output_sz[\"template\"]\n        self.transform[\"target_in_search\"] = self.transform[\"search\"]\n\n        for s in ['template', 'search', 'target_in_search']:\n            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \\\n                \"In pair mode, num train/test frames must be 1\"\n\n            # Add a uniform noise to the center pos\n            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]\n\n            # 2021.1.9 Check whether data is valid. Avoid too small bounding boxes\n            w, h = torch.stack(jittered_anno, dim=0)[:, 2], torch.stack(jittered_anno, dim=0)[:, 3]\n\n            crop_sz = torch.ceil(torch.sqrt(w * h) * self.search_area_factor[s])\n            if (crop_sz < 1).any():\n                data['valid'] = False\n                # print(\"Too small box is found. 
Replace it with new data.\")\n                return data\n\n            # Crop image region centered at jittered_anno box and get the attention mask\n            crops, boxes, att_mask, mask_crops = prutils.jittered_center_crop(data[s + '_images'], jittered_anno,\n                                                                              data[s + '_anno'], self.search_area_factor[s],\n                                                                              self.output_sz[s], masks=data[s + '_masks'])\n            # Apply transforms\n            data[s + '_images'], data[s + '_anno'], data[s + '_att'], data[s + '_masks'] = self.transform[s](\n                image=crops, bbox=boxes, att=att_mask, mask=mask_crops, joint=False)\n\n\n            # 2021.1.9 Check whether elements in data[s + '_att'] are all 1\n            # Note that type of data[s + '_att'] is tuple, type of ele is torch.Tensor\n            for ele in data[s + '_att']:\n                if (ele == 1).all():\n                    data['valid'] = False\n                    # print(\"Values of original attention mask are all one. Replace it with new data.\")\n                    return data\n            # 2021.1.10 stricter conditions: require the downsampled masks not to be all 1\n            for ele in data[s + '_att']:\n                feat_size = self.output_sz[s] // 16  # 16 is the backbone stride\n                # (1,1,128,128) (1,1,256,256) --> (1,1,8,8) (1,1,16,16)\n                mask_down = F.interpolate(ele[None, None].float(), size=feat_size).to(torch.bool)[0]\n                if (mask_down == 1).all():\n                    data['valid'] = False\n                    # print(\"Values of down-sampled attention mask are all one. \"\n                    #       \"Replace it with new data.\")\n                    return data\n\n        data['valid'] = True\n        # if we use copy-and-paste augmentation\n        if data[\"template_masks\"] is None or data[\"search_masks\"] is None:\n            data[\"template_masks\"] = torch.zeros((1, self.output_sz[\"template\"], self.output_sz[\"template\"]))\n            data[\"search_masks\"] = torch.zeros((1, self.output_sz[\"search\"], self.output_sz[\"search\"]))\n        # Prepare output\n        if self.mode == 'sequence':\n            data = data.apply(stack_tensors)\n        else:\n            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)\n\n        return data\n"
  },
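  {
    "path": "artrackv2_mindspore/examples/jitter_box_sketch.py",
    "content": "'''Hedged sketch, NOT part of the original ARTrack repo: this file and its path\nare illustrative only. It restates STARKProcessing._get_jittered_box as a\nstandalone function so the jitter arithmetic can be inspected in isolation.\nThe factor values below are made up; the real ones come from\ncfg.DATA.*.SCALE_JITTER and cfg.DATA.*.CENTER_JITTER.'''\nimport torch\n\n\ndef jitter_box(box, scale_jitter_factor, center_jitter_factor):\n    # box is (x, y, w, h). The size is jittered log-normally; the centre moves\n    # uniformly within +/- 0.5 * sqrt(jittered area) * center_jitter_factor,\n    # matching the expression in processing.py.\n    jittered_size = box[2:4] * torch.exp(torch.randn(2) * scale_jitter_factor)\n    max_offset = jittered_size.prod().sqrt() * torch.tensor(center_jitter_factor).float()\n    jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n    return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n\ntorch.manual_seed(0)\nbox = torch.tensor([100.0, 50.0, 40.0, 80.0])\nprint(jitter_box(box, scale_jitter_factor=0.25, center_jitter_factor=3.0))\n"
  },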
  {
    "path": "artrackv2_mindspore/lib/train/data/processing_utils.py",
    "content": "import mindspore as ms\nfrom mindspore import ops\nimport math\nimport cv2 as cv\nimport numpy as np\n\n'''modified from the original test implementation\nReplace cv.BORDER_REPLICATE with cv.BORDER_CONSTANT\nAdd a variable called att_mask for computing attention and positional encoding later'''\n\n\ndef sample_target(im, target_bb, search_area_factor, output_sz=None, mask=None):\n    \"\"\" Extracts a square crop centered at target_bb box, of area search_area_factor^2 times target_bb area\n\n    args:\n        im - cv image\n        target_bb - target box [x, y, w, h]\n        search_area_factor - Ratio of crop size to target size\n        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.\n\n    returns:\n        cv image - extracted crop\n        float - the factor by which the crop has been resized to make the crop size equal output_size\n    \"\"\"\n    if not isinstance(target_bb, list):\n        x, y, w, h = target_bb.tolist()\n    else:\n        x, y, w, h = target_bb\n    # Crop image\n    crop_sz = math.ceil(math.sqrt(w * h) * search_area_factor)\n\n    if crop_sz < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = round(x + 0.5 * w - crop_sz * 0.5)\n    x2 = x1 + crop_sz\n\n    y1 = round(y + 0.5 * h - crop_sz * 0.5)\n    y2 = y1 + crop_sz\n\n    x1_pad = max(0, -x1)\n    x2_pad = max(x2 - im.shape[1] + 1, 0)\n\n    y1_pad = max(0, -y1)\n    y2_pad = max(y2 - im.shape[0] + 1, 0)\n\n    # Crop target\n    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]\n    if mask is not None:\n        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]\n\n    # Pad\n    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_CONSTANT)\n    # deal with attention mask\n    H, W, _ = im_crop_padded.shape\n    att_mask = np.ones((H,W))\n    end_x, end_y = -x2_pad, -y2_pad\n    if y2_pad == 0:\n        end_y = None\n    if x2_pad == 0:\n        end_x = None\n    att_mask[y1_pad:end_y, x1_pad:end_x] = 0\n    if mask is not None:\n        mask_crop_padded = ops.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)\n\n    if output_sz is not None:\n        resize_factor = output_sz / crop_sz\n        im_crop_padded = cv.resize(im_crop_padded, (output_sz, output_sz))\n        att_mask = cv.resize(att_mask, (output_sz, output_sz)).astype(np.bool_)\n        if mask is None:\n            return im_crop_padded, resize_factor, att_mask\n        mask_crop_padded = \\\n        ops.interpolate(mask_crop_padded[None, None], (output_sz, output_sz), mode='bilinear', align_corners=False)[0, 0]\n        return im_crop_padded, resize_factor, att_mask, mask_crop_padded\n\n    else:\n        if mask is None:\n            return im_crop_padded, att_mask.astype(np.bool_), 1.0\n        return im_crop_padded, 1.0, att_mask.astype(np.bool_), mask_crop_padded\n\n\ndef transform_image_to_crop(box_in: ms.Tensor, box_extract: ms.Tensor, resize_factor: float,\n                            crop_sz: ms.Tensor, normalize=False) -> ms.Tensor:\n    \"\"\" Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image\n    args:\n        box_in - the box for which the co-ordinates are to be transformed\n        box_extract - the box about which the image crop has been extracted.\n        resize_factor - the ratio between the original image scale and the scale of the image crop\n        crop_sz - size of the 
cropped image\n\n    returns:\n        torch.Tensor - transformed co-ordinates of box_in\n    \"\"\"\n    box_extract_center = box_extract[0:2] + 0.5 * box_extract[2:4]\n\n    box_in_center = box_in[0:2] + 0.5 * box_in[2:4]\n\n    box_out_center = (crop_sz - 1) / 2 + (box_in_center - box_extract_center) * resize_factor\n    box_out_wh = box_in[2:4] * resize_factor\n\n    box_out = ops.cat((box_out_center - 0.5 * box_out_wh, box_out_wh))\n    if normalize:\n        return box_out / crop_sz[0]\n    else:\n        return box_out\n\n\ndef jittered_center_crop(frames, box_extract, box_gt, search_area_factor, output_sz, masks=None):\n    \"\"\" For each frame in frames, extracts a square crop centered at box_extract, of area search_area_factor^2\n    times box_extract area. The extracted crops are then resized to output_sz. Further, the co-ordinates of the box\n    box_gt are transformed to the image crop co-ordinates\n\n    args:\n        frames - list of frames\n        box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract\n        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from\n                    image co-ordinates to the crop co-ordinates\n        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area\n        output_sz - The size to which the extracted crops are resized\n\n    returns:\n        list - list of image crops\n        list - box_gt location in the crop co-ordinates\n        \"\"\"\n\n    if masks is None:\n        crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz)\n                                for f, a in zip(frames, box_extract)]\n        frames_crop, resize_factors, att_mask = zip(*crops_resize_factors)\n        masks_crop = None\n    else:\n        crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz, m)\n                                for f, a, m in zip(frames, box_extract, masks)]\n        frames_crop, resize_factors, att_mask, masks_crop = zip(*crops_resize_factors)\n    # frames_crop: tuple of ndarray (128,128,3), att_mask: tuple of ndarray (128,128)\n    crop_sz = ms.Tensor([output_sz, output_sz])\n\n    # find the bb location in the crop\n    '''Note that here we use normalized coord'''\n    box_crop = [transform_image_to_crop(a_gt, a_ex, rf, crop_sz, normalize=True)\n                for a_gt, a_ex, rf in zip(box_gt, box_extract, resize_factors)]  # (x1,y1,w,h) list of tensors\n\n    return frames_crop, box_crop, att_mask, masks_crop\n\n\ndef transform_box_to_crop(box: ms.Tensor, crop_box: ms.Tensor, crop_sz: ms.Tensor, normalize=False) -> ms.Tensor:\n    \"\"\" Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image\n    args:\n        box - the box for which the co-ordinates are to be transformed\n        crop_box - bounding box defining the crop in the original image\n        crop_sz - size of the cropped image\n\n    returns:\n        torch.Tensor - transformed co-ordinates of box_in\n    \"\"\"\n\n    box_out = box.clone()\n    box_out[:2] -= crop_box[:2]\n\n    scale_factor = crop_sz / crop_box[2:]\n\n    box_out[:2] *= scale_factor\n    box_out[2:] *= scale_factor\n    if normalize:\n        return box_out / crop_sz[0]\n    else:\n        return box_out\n\n"
  },
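  {
    "path": "artrackv2_mindspore/examples/crop_geometry_sketch.py",
    "content": "'''Hedged sketch, NOT part of the original ARTrack repo: this file and its path\nare illustrative only. It works through the crop-size arithmetic used by\nsample_target in processing_utils.py in plain Python, so it runs without\nMindSpore or OpenCV. The numbers are made up.'''\nimport math\n\ntarget_bb = [100, 50, 40, 80]  # (x, y, w, h)\nsearch_area_factor = 4.0       # ratio of crop side to sqrt(target area)\noutput_sz = 256\n\nx, y, w, h = target_bb\ncrop_sz = math.ceil(math.sqrt(w * h) * search_area_factor)  # square crop side\nx1 = round(x + 0.5 * w - crop_sz * 0.5)                     # top-left corner,\ny1 = round(y + 0.5 * h - crop_sz * 0.5)                     # may fall outside the image\nresize_factor = output_sz / crop_sz                         # crop -> output scale\n\nprint(crop_sz, (x1, y1), resize_factor)\n# sqrt(40 * 80) ~= 56.57, so crop_sz = ceil(226.27) = 227 and resize_factor ~= 1.128.\n# Negative x1/y1 correspond to the zero padding added with cv.BORDER_CONSTANT.\n"
  },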
  {
    "path": "artrackv2_mindspore/lib/train/data/sampler.py",
    "content": "import random\nimport torch.utils.data\nfrom lib.utils import TensorDict\nimport numpy as np\n\n\ndef no_processing(data):\n    return data\n\n\nclass TrackingSampler(torch.utils.data.Dataset):\n    \"\"\" Class responsible for sampling frames from training sequences to form batches. \n\n    The sampling is done in the following ways. First a dataset is selected at random. Next, a sequence is selected\n    from that dataset. A base frame is then sampled randomly from the sequence. Next, a set of 'train frames' and\n    'test frames' are sampled from the sequence from the range [base_frame_id - max_gap, base_frame_id]  and\n    (base_frame_id, base_frame_id + max_gap] respectively. Only the frames in which the target is visible are sampled.\n    If enough visible frames are not found, the 'max_gap' is increased gradually till enough frames are found.\n\n    The sampled frames are then passed through the input 'processing' function for the necessary processing-\n    \"\"\"\n\n    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n                 num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\n                 train_cls=False, pos_prob=0.5):\n        \"\"\"\n        args:\n            datasets - List of datasets to be used for training\n            p_datasets - List containing the probabilities by which each dataset will be sampled\n            samples_per_epoch - Number of training samples per epoch\n            max_gap - Maximum gap, in frame numbers, between the train frames and the test frames.\n            num_search_frames - Number of search frames to sample.\n            num_template_frames - Number of template frames to sample.\n            processing - An instance of Processing class which performs the necessary processing of the data.\n            frame_sample_mode - Either 'causal' or 'interval'. If 'causal', then the test frames are sampled in a causally,\n                                otherwise randomly within the interval.\n        \"\"\"\n        self.datasets = datasets\n        self.train_cls = train_cls  # whether we are training classification\n        self.pos_prob = pos_prob  # probability of sampling positive class when making classification\n\n        # If p not provided, sample uniformly from all videos\n        if p_datasets is None:\n            p_datasets = [len(d) for d in self.datasets]\n\n        # Normalize\n        p_total = sum(p_datasets)\n        self.p_datasets = [x / p_total for x in p_datasets]\n\n        self.samples_per_epoch = samples_per_epoch\n        self.max_gap = max_gap\n        self.num_search_frames = num_search_frames\n        self.num_template_frames = num_template_frames\n        self.processing = processing\n        self.frame_sample_mode = frame_sample_mode\n\n    def __len__(self):\n        return self.samples_per_epoch\n\n    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,\n                            allow_invisible=False, force_invisible=False):\n        \"\"\" Samples num_ids frames between min_id and max_id for which target is visible\n\n        args:\n            visible - 1d Tensor indicating whether target is visible for each frame\n            num_ids - number of frames to be samples\n            min_id - Minimum allowed frame number\n            max_id - Maximum allowed frame number\n\n        returns:\n            list - List of sampled frame numbers. 
None if not sufficient visible frames could be found.\n        \"\"\"\n        if num_ids == 0:\n            return []\n        if min_id is None or min_id < 0:\n            min_id = 0\n        if max_id is None or max_id > len(visible):\n            max_id = len(visible)\n        # get valid ids\n        if force_invisible:\n            valid_ids = [i for i in range(min_id, max_id) if not visible[i]]\n        else:\n            if allow_invisible:\n                valid_ids = [i for i in range(min_id, max_id)]\n            else:\n                valid_ids = [i for i in range(min_id, max_id) if visible[i]]\n\n        # No visible ids\n        if len(valid_ids) == 0:\n            return None\n\n        return random.choices(valid_ids, k=num_ids)\n\n    def __getitem__(self, index):\n        if self.train_cls:\n            return self.getitem_cls()\n        else:\n            return self.getitem()\n\n    def getitem(self):\n        \"\"\"\n        returns:\n            TensorDict - dict containing all the data blocks\n        \"\"\"\n        valid = False\n        while not valid:\n            # Select a dataset\n            dataset = random.choices(self.datasets, self.p_datasets)[0]\n\n            is_video_dataset = dataset.is_video_sequence()\n\n            # sample a sequence from the given dataset\n            seq_id, visible, seq_info_dict = self.sample_seq_from_dataset(dataset, is_video_dataset)\n\n            if is_video_dataset:\n                template_frame_ids = None\n                search_frame_ids = None\n                gap_increase = 0\n\n                if self.frame_sample_mode == 'causal':\n                    # Sample test and train frames in a causal manner, i.e. search_frame_ids > template_frame_ids\n                    while search_frame_ids is None:\n                        base_frame_id = self._sample_visible_ids(visible, num_ids=1, min_id=self.num_template_frames - 1,\n                                                                 max_id=len(visible) - self.num_search_frames)\n                        prev_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_template_frames - 1,\n                                                                  min_id=base_frame_id[0] - self.max_gap - gap_increase,\n                                                                  max_id=base_frame_id[0])\n                        if prev_frame_ids is None:\n                            gap_increase += 5\n                            continue\n                        template_frame_ids = base_frame_id + prev_frame_ids\n                        search_frame_ids = self._sample_visible_ids(visible, min_id=template_frame_ids[0] + 1,\n                                                                  max_id=template_frame_ids[0] + self.max_gap + gap_increase,\n                                                                  num_ids=self.num_search_frames)\n                        # Increase gap until a frame is found\n                        gap_increase += 5\n\n                elif self.frame_sample_mode == \"trident\" or self.frame_sample_mode == \"trident_pro\":\n                    template_frame_ids, search_frame_ids = self.get_frame_ids_trident(visible)\n                elif self.frame_sample_mode == \"stark\":\n                    template_frame_ids, search_frame_ids = self.get_frame_ids_stark(visible, seq_info_dict[\"valid\"])\n                else:\n                    raise ValueError(\"Illegal frame sample mode\")\n            else:\n                # In case of image 
dataset, just repeat the image to generate synthetic video\n                template_frame_ids = [1] * self.num_template_frames\n                search_frame_ids = [1] * self.num_search_frames\n            try:\n                template_frames, template_anno, meta_obj_train = dataset.get_frames(seq_id, template_frame_ids, seq_info_dict)\n                search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)\n\n                H, W, _ = template_frames[0].shape\n                template_masks = template_anno['mask'] if 'mask' in template_anno else [torch.zeros((H, W))] * self.num_template_frames\n                search_masks = search_anno['mask'] if 'mask' in search_anno else [torch.zeros((H, W))] * self.num_search_frames\n\n                data = TensorDict({'template_images': template_frames,\n                                   'template_anno': template_anno['bbox'],\n                                   'template_masks': template_masks,\n                                   'search_images': search_frames,\n                                   'search_anno': search_anno['bbox'],\n                                   'search_masks': search_masks,\n                                   'dataset': dataset.get_name(),\n                                   'test_class': meta_obj_test.get('object_class_name')})\n                # make data augmentation\n                data = self.processing(data)\n\n                # check whether data is valid\n                valid = data['valid']\n            except:\n                valid = False\n\n        return data\n\n    def getitem_cls(self):\n        # get data for classification\n        \"\"\"\n        args:\n            index (int): Index (Ignored since we sample randomly)\n            aux (bool): whether the current data is for auxiliary use (e.g. 
copy-and-paste)\n\n        returns:\n            TensorDict - dict containing all the data blocks\n        \"\"\"\n        valid = False\n        label = None\n        while not valid:\n            # Select a dataset\n            dataset = random.choices(self.datasets, self.p_datasets)[0]\n\n            is_video_dataset = dataset.is_video_sequence()\n\n            # sample a sequence from the given dataset\n            seq_id, visible, seq_info_dict = self.sample_seq_from_dataset(dataset, is_video_dataset)\n            # sample template and search frame ids\n            if is_video_dataset:\n                if self.frame_sample_mode in [\"trident\", \"trident_pro\"]:\n                    template_frame_ids, search_frame_ids = self.get_frame_ids_trident(visible)\n                elif self.frame_sample_mode == \"stark\":\n                    template_frame_ids, search_frame_ids = self.get_frame_ids_stark(visible, seq_info_dict[\"valid\"])\n                else:\n                    raise ValueError(\"illegal frame sample mode\")\n            else:\n                # In case of image dataset, just repeat the image to generate synthetic video\n                template_frame_ids = [1] * self.num_template_frames\n                search_frame_ids = [1] * self.num_search_frames\n            try:\n                # \"try\" is used to handle trackingnet data failure\n                # get images and bounding boxes (for templates)\n                template_frames, template_anno, meta_obj_train = dataset.get_frames(seq_id, template_frame_ids,\n                                                                                    seq_info_dict)\n                H, W, _ = template_frames[0].shape\n                template_masks = template_anno['mask'] if 'mask' in template_anno else [torch.zeros(\n                    (H, W))] * self.num_template_frames\n                # get images and bounding boxes (for searches)\n                # positive samples\n                if random.random() < self.pos_prob:\n                    label = torch.ones(1,)\n                    search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)\n                    search_masks = search_anno['mask'] if 'mask' in search_anno else [torch.zeros(\n                        (H, W))] * self.num_search_frames\n                # negative samples\n                else:\n                    label = torch.zeros(1,)\n                    if is_video_dataset:\n                        search_frame_ids = self._sample_visible_ids(visible, num_ids=1, force_invisible=True)\n                        if search_frame_ids is None:\n                            search_frames, search_anno, meta_obj_test = self.get_one_search()\n                        else:\n                            search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids,\n                                                                                           seq_info_dict)\n                            search_anno[\"bbox\"] = [self.get_center_box(H, W)]\n                    else:\n                        search_frames, search_anno, meta_obj_test = self.get_one_search()\n                    H, W, _ = search_frames[0].shape\n                    search_masks = search_anno['mask'] if 'mask' in search_anno else [torch.zeros(\n                        (H, W))] * self.num_search_frames\n\n                data = TensorDict({'template_images': template_frames,\n                                   'template_anno': 
template_anno['bbox'],\n                                   'template_masks': template_masks,\n                                   'search_images': search_frames,\n                                   'search_anno': search_anno['bbox'],\n                                   'search_masks': search_masks,\n                                   'dataset': dataset.get_name(),\n                                   'test_class': meta_obj_test.get('object_class_name')})\n\n                # make data augmentation\n                data = self.processing(data)\n                # add classification label\n                data[\"label\"] = label\n                # check whether data is valid\n                valid = data['valid']\n            except:\n                valid = False\n\n        return data\n\n    def get_center_box(self, H, W, ratio=1/8):\n        cx, cy, w, h = W/2, H/2, W * ratio, H * ratio\n        return torch.tensor([int(cx-w/2), int(cy-h/2), int(w), int(h)])\n\n    def sample_seq_from_dataset(self, dataset, is_video_dataset):\n\n        # Sample a sequence with enough visible frames\n        enough_visible_frames = False\n        while not enough_visible_frames:\n            # Sample a sequence\n            seq_id = random.randint(0, dataset.get_num_sequences() - 1)\n\n            # Sample frames\n            seq_info_dict = dataset.get_sequence_info(seq_id)\n            visible = seq_info_dict['visible']\n\n            enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * (\n                    self.num_search_frames + self.num_template_frames) and len(visible) >= 20\n\n            enough_visible_frames = enough_visible_frames or not is_video_dataset\n        return seq_id, visible, seq_info_dict\n\n    def get_one_search(self):\n        # Select a dataset\n        dataset = random.choices(self.datasets, self.p_datasets)[0]\n\n        is_video_dataset = dataset.is_video_sequence()\n        # sample a sequence\n        seq_id, visible, seq_info_dict = self.sample_seq_from_dataset(dataset, is_video_dataset)\n        # sample a frame\n        if is_video_dataset:\n            if self.frame_sample_mode == \"stark\":\n                search_frame_ids = self._sample_visible_ids(seq_info_dict[\"valid\"], num_ids=1)\n            else:\n                search_frame_ids = self._sample_visible_ids(visible, num_ids=1, allow_invisible=True)\n        else:\n            search_frame_ids = [1]\n        # get the image, bounding box and other info\n        search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)\n\n        return search_frames, search_anno, meta_obj_test\n\n    def get_frame_ids_trident(self, visible):\n        # get template and search ids in a 'trident' manner\n        template_frame_ids_extra = []\n        while None in template_frame_ids_extra or len(template_frame_ids_extra) == 0:\n            template_frame_ids_extra = []\n            # first randomly sample two frames from a video\n            template_frame_id1 = self._sample_visible_ids(visible, num_ids=1)  # the initial template id\n            search_frame_ids = self._sample_visible_ids(visible, num_ids=1)  # the search region id\n            # get the dynamic template id\n            for max_gap in self.max_gap:\n                if template_frame_id1[0] >= search_frame_ids[0]:\n                    min_id, max_id = search_frame_ids[0], search_frame_ids[0] + max_gap\n                else:\n                    min_id, max_id = search_frame_ids[0] - max_gap, 
search_frame_ids[0]\n                if self.frame_sample_mode == \"trident_pro\":\n                    f_id = self._sample_visible_ids(visible, num_ids=1, min_id=min_id, max_id=max_id,\n                                                    allow_invisible=True)\n                else:\n                    f_id = self._sample_visible_ids(visible, num_ids=1, min_id=min_id, max_id=max_id)\n                if f_id is None:\n                    template_frame_ids_extra += [None]\n                else:\n                    template_frame_ids_extra += f_id\n\n        template_frame_ids = template_frame_id1 + template_frame_ids_extra\n        return template_frame_ids, search_frame_ids\n\n    def get_frame_ids_stark(self, visible, valid):\n        # get template and search ids in a 'stark' manner\n        template_frame_ids_extra = []\n        while None in template_frame_ids_extra or len(template_frame_ids_extra) == 0:\n            template_frame_ids_extra = []\n            # first randomly sample two frames from a video\n            template_frame_id1 = self._sample_visible_ids(visible, num_ids=1)  # the initial template id\n            search_frame_ids = self._sample_visible_ids(visible, num_ids=1)  # the search region id\n            # get the dynamic template id\n            for max_gap in self.max_gap:\n                if template_frame_id1[0] >= search_frame_ids[0]:\n                    min_id, max_id = search_frame_ids[0], search_frame_ids[0] + max_gap\n                else:\n                    min_id, max_id = search_frame_ids[0] - max_gap, search_frame_ids[0]\n                \"\"\"we require the frame to be valid but not necessary visible\"\"\"\n                f_id = self._sample_visible_ids(valid, num_ids=1, min_id=min_id, max_id=max_id)\n                if f_id is None:\n                    template_frame_ids_extra += [None]\n                else:\n                    template_frame_ids_extra += f_id\n\n        template_frame_ids = template_frame_id1 + template_frame_ids_extra\n        return template_frame_ids, search_frame_ids"
  },
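  {
    "path": "artrackv2_mindspore/docs/examples/trident_sampling_sketch.py",
    "content": "\"\"\"Editor's illustrative sketch, not part of the original repo: this path and all\nnames in this file are hypothetical. It re-implements, in a minimal self-contained\nform, the 'trident' frame sampling used by get_frame_ids_trident above: draw one\nsearch frame, then one extra template frame per entry of max_gap, each constrained\nto lie within that gap of the search frame, retrying while any draw fails.\n\"\"\"\nimport random\n\n\ndef sample_visible_ids(visible, num_ids=1, min_id=None, max_id=None):\n    # Simplified stand-in for the sampler's _sample_visible_ids helper.\n    if min_id is None or min_id < 0:\n        min_id = 0\n    if max_id is None or max_id > len(visible):\n        max_id = len(visible)\n    valid = [i for i in range(min_id, max_id) if visible[i]]\n    if not valid:\n        return None\n    return random.choices(valid, k=num_ids)\n\n\ndef frame_ids_trident(visible, max_gaps=(50, 100, 200)):\n    extra = [None]\n    while None in extra:\n        extra = []\n        template_id = sample_visible_ids(visible)  # the initial template id\n        search_id = sample_visible_ids(visible)    # the search region id\n        for gap in max_gaps:\n            if template_id[0] >= search_id[0]:\n                lo, hi = search_id[0], search_id[0] + gap\n            else:\n                lo, hi = search_id[0] - gap, search_id[0]\n            f_id = sample_visible_ids(visible, min_id=lo, max_id=hi)\n            extra += [None] if f_id is None else f_id\n    return template_id + extra, search_id\n\n\nif __name__ == '__main__':\n    # Toy sequence: 300 frames, target occluded between frames 120 and 139.\n    visible = [not (120 <= i < 140) for i in range(300)]\n    templates, search = frame_ids_trident(visible)\n    print('templates:', templates, 'search:', search)\n"
  },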
  {
    "path": "artrackv2_mindspore/lib/train/data/sequence_sampler.py",
    "content": "import random\nimport torch.utils.data\nimport numpy as np\nfrom lib.utils import TensorDict\n\n\nclass SequenceSampler(torch.utils.data.Dataset):\n    \"\"\"\n    Sample sequence for sequence-level training\n    \"\"\"\n\n    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n                 num_search_frames, num_template_frames=1, frame_sample_mode='sequential', max_interval=10, prob=0.7):\n        \"\"\"\n        args:\n            datasets - List of datasets to be used for training\n            p_datasets - List containing the probabilities by which each dataset will be sampled\n            samples_per_epoch - Number of training samples per epoch\n            max_gap - Maximum gap, in frame numbers, between the train frames and the search frames.\\\n            max_interval - Maximum interval between sampled frames\n            num_search_frames - Number of search frames to sample.\n            num_template_frames - Number of template frames to sample.\n            processing - An instance of Processing class which performs the necessary processing of the data.\n            frame_sample_mode - Either 'causal' or 'interval'. If 'causal', then the search frames are sampled in a causally,\n                                otherwise randomly within the interval.\n            prob - sequential sampling by prob / interval sampling by 1-prob\n        \"\"\"\n        self.datasets = datasets\n\n        # If p not provided, sample uniformly from all videos\n        if p_datasets is None:\n            p_datasets = [len(d) for d in self.datasets]\n\n        # Normalize\n        p_total = sum(p_datasets)\n        self.p_datasets = [x / p_total for x in p_datasets]\n\n        self.samples_per_epoch = samples_per_epoch\n        self.max_gap = max_gap\n        self.max_interval = max_interval\n        self.num_search_frames = num_search_frames\n        self.num_template_frames = num_template_frames\n        self.frame_sample_mode = frame_sample_mode\n        self.prob=prob\n        self.extra=1\n\n    def __len__(self):\n        return self.samples_per_epoch\n\n    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None):\n        \"\"\" Samples num_ids frames between min_id and max_id for which target is visible\n\n        args:\n            visible - 1d Tensor indicating whether target is visible for each frame\n            num_ids - number of frames to be samples\n            min_id - Minimum allowed frame number\n            max_id - Maximum allowed frame number\n\n        returns:\n            list - List of sampled frame numbers. 
None if not enough visible frames could be found.\n        \"\"\"\n        if num_ids == 0:\n            return []\n        if min_id is None or min_id < 0:\n            min_id = 0\n        if max_id is None or max_id > len(visible):\n            max_id = len(visible)\n\n        valid_ids = [i for i in range(min_id, max_id) if visible[i]]\n\n        # No visible ids\n        if len(valid_ids) == 0:\n            return None\n\n        return random.choices(valid_ids, k=num_ids)\n\n\n    def _sequential_sample(self, visible):\n        # Sample frames in a sequential manner\n        template_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=0,\n                                                   max_id=len(visible) - self.num_search_frames)\n        template_another = self._sample_visible_ids(visible, num_ids=1, min_id=template_frame_ids[0],\n                                                   max_id=min(len(visible) - self.num_search_frames, template_frame_ids[0] + self.max_gap))\n        template_frame_ids.append(template_another[0])\n        template_frame_ids.sort()\n\n        if self.max_gap == -1:\n            left = template_frame_ids[1]\n        else:\n            # template frame (1) ->(max_gap) -> search frame (num_search_frames)\n            left_max = min(len(visible) - self.num_search_frames, template_frame_ids[1] + self.max_gap)\n            left = self._sample_visible_ids(visible, num_ids=1, min_id=template_frame_ids[1],\n                                            max_id=left_max)[0]\n\n        valid_ids = [i for i in range(left, len(visible)) if visible[i]]\n        search_frame_ids = valid_ids[:self.num_search_frames]\n\n        # if there are not enough frames, pad by walking forward\n        last = search_frame_ids[-1]\n        while len(search_frame_ids) < self.num_search_frames:\n            if last >= len(visible) - 1:\n                search_frame_ids.append(last)\n            else:\n                last += 1\n                if visible[last]:\n                    search_frame_ids.append(last)\n\n        return template_frame_ids, search_frame_ids\n\n\n    def _random_interval_sample(self, visible):\n        # Get valid ids\n        valid_ids = [i for i in range(len(visible)) if visible[i]]\n\n        # Sample template frame\n        avg_interval = self.max_interval\n        while avg_interval * (self.num_search_frames - 1) > len(visible):\n            avg_interval = max(avg_interval - 1, 1)\n\n        while True:\n            template_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=0,\n                                                       max_id=len(visible) - avg_interval * (self.num_search_frames - 1))\n            if template_frame_ids is None:\n                avg_interval = avg_interval - 1\n            else:\n                break\n\n            if avg_interval == 0:\n                template_frame_ids = [valid_ids[0]]\n                break\n\n        # Sample first search frame\n        if self.max_gap == -1:\n            search_frame_ids = template_frame_ids\n        else:\n            avg_interval = self.max_interval\n            while avg_interval * (self.num_search_frames - 1) > len(visible):\n                avg_interval = max(avg_interval - 1, 1)\n\n            while True:\n                left_max = min(max(len(visible) - avg_interval * (self.num_search_frames - 1), template_frame_ids[0] + 1),\n                               template_frame_ids[0] + self.max_gap)\n                search_frame_ids = self._sample_visible_ids(visible, num_ids=1, 
min_id=template_frame_ids[0],\n                                                          max_id=left_max)\n\n                if search_frame_ids is None:\n                    avg_interval = avg_interval - 1\n                else:\n                    break\n\n                if avg_interval == -1:\n                    search_frame_ids = template_frame_ids\n                    break\n\n        # Sample the rest of the search frames with random intervals\n        last = search_frame_ids[0]\n        while last <= len(visible) - 1 and len(search_frame_ids) < self.num_search_frames:\n            # sample an id within max_interval of the last one\n            max_id = min(last + self.max_interval + 1, len(visible))\n            sampled_id = self._sample_visible_ids(visible, num_ids=1, min_id=last,\n                                                  max_id=max_id)\n\n            if sampled_id is None:\n                # no visible frame in the current window; advance it and retry\n                last = last + self.max_interval\n            else:\n                search_frame_ids.append(sampled_id[0])\n                last = search_frame_ids[-1]\n\n        # if length is not enough, randomly sample new ids\n        if len(search_frame_ids) < self.num_search_frames:\n            valid_ids = [x for x in valid_ids if x > search_frame_ids[0] and x not in search_frame_ids]\n\n            if len(valid_ids) > 0:\n                new_ids = random.choices(valid_ids, k=min(len(valid_ids),\n                                                          self.num_search_frames - len(search_frame_ids)))\n                search_frame_ids = search_frame_ids + new_ids\n                search_frame_ids = sorted(search_frame_ids, key=int)\n\n        # if length is still not enough, duplicate the last frame\n        while len(search_frame_ids) < self.num_search_frames:\n            search_frame_ids.append(search_frame_ids[-1])\n\n        for i in range(1, self.num_search_frames):\n            if search_frame_ids[i] - search_frame_ids[i - 1] > self.max_interval:\n                print(\"interval exceeds max_interval:\", search_frame_ids[i] - search_frame_ids[i - 1])\n\n        return template_frame_ids, search_frame_ids\n\n\n    def __getitem__(self, index):\n        \"\"\"\n        args:\n            index (int): Index (ignored, since we sample randomly)\n\n        returns:\n            TensorDict - dict containing all the data blocks\n        \"\"\"\n\n        # Select a dataset\n        dataset = random.choices(self.datasets, self.p_datasets)[0]\n        # Save the configured gap/interval; for non-GOT10k datasets they are\n        # temporarily scaled by self.extra (1 by default) and restored below.\n        max_gap = self.max_gap\n        max_interval = self.max_interval\n        if dataset.get_name() != 'got10k':\n            self.max_gap = max_gap * self.extra\n            self.max_interval = max_interval * self.extra\n\n        is_video_dataset = dataset.is_video_sequence()\n\n        # Sample a sequence with enough visible frames\n        enough_visible_frames = False\n        while not enough_visible_frames:\n            # Sample a sequence\n            seq_id = random.randint(0, dataset.get_num_sequences() - 1)\n\n            # Sample frames\n            seq_info_dict = dataset.get_sequence_info(seq_id)\n            visible = seq_info_dict['visible']\n\n            enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * (\n                    self.num_search_frames + self.num_template_frames) and len(visible) >= (self.num_search_frames + self.num_template_frames)\n\n            enough_visible_frames = enough_visible_frames or not 
is_video_dataset\n\n        if is_video_dataset:\n            if self.frame_sample_mode == 'sequential':\n                template_frame_ids, search_frame_ids = self._sequential_sample(visible)\n\n            elif self.frame_sample_mode == 'random_interval':\n                if random.random() < self.prob:\n                    template_frame_ids, search_frame_ids = self._random_interval_sample(visible)\n                else:\n                    template_frame_ids, search_frame_ids = self._sequential_sample(visible)\n            else:\n                self.max_gap = max_gap\n                self.max_interval = max_interval\n                raise NotImplementedError\n        else:\n            # In case of image dataset, just repeat the image to generate synthetic video\n            template_frame_ids = [1] * self.num_template_frames\n            search_frame_ids = [1] * self.num_search_frames\n\n        self.max_gap = max_gap\n        self.max_interval = max_interval\n\n        # print(\"this is template_frame_ids\", template_frame_ids)\n        # print(\"this is search_frame_ids\", search_frame_ids)\n        template_frames, template_anno, meta_obj_template = dataset.get_frames(seq_id, template_frame_ids, seq_info_dict)\n        search_frames, search_anno, meta_obj_search = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)\n        visible_ratio = search_anno['visible_ratio']\n        template_bbox = [bbox.numpy() for bbox in template_anno['bbox']] # tensor -> numpy array\n        search_bbox = [bbox.numpy() for bbox in search_anno['bbox']] # tensor -> numpy array\n        \n\n        return TensorDict({'template_images': np.array(template_frames).squeeze(),    # 1 template images\n                'template_annos': np.array(template_bbox).squeeze(),\n                'search_images': np.array(search_frames),      # (num_frames) search images\n                'search_annos': np.array(search_bbox),\n                'seq_id': seq_id,\n                'dataset': dataset.get_name(),\n                'search_class': meta_obj_search.get('object_class_name'),\n                'num_frames': len(search_frames),\n                'visible_ratio': visible_ratio\n                })"
  },
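  {
    "path": "artrackv2_mindspore/docs/examples/sequential_sampling_sketch.py",
    "content": "\"\"\"Editor's illustrative sketch, not part of the original repo: this path and all\nnames here are hypothetical. It condenses what SequenceSampler._sequential_sample\nabove aims for, on toy data: two sorted template frames at most max_gap apart,\nfollowed by the next num_search_frames visible frames, repeating the last id when\nthe sequence runs out of frames.\n\"\"\"\nimport random\n\n\ndef sequential_sample(visible, num_search_frames, max_gap):\n    # Draw both template ids from the prefix that leaves room for the searches.\n    limit = len(visible) - num_search_frames\n    t1 = random.choice([i for i in range(limit) if visible[i]])\n    t2 = random.choice([i for i in range(t1, min(limit, t1 + max_gap)) if visible[i]])\n    template_ids = sorted([t1, t2])\n\n    # Search frames: the next visible ids at or after the later template.\n    search_ids = [i for i in range(template_ids[1], len(visible)) if visible[i]]\n    search_ids = search_ids[:num_search_frames]\n    # Pad by repeating the last frame if the sequence is too short.\n    while len(search_ids) < num_search_frames:\n        search_ids.append(search_ids[-1])\n    return template_ids, search_ids\n\n\nif __name__ == '__main__':\n    visible = [True] * 80 + [False] * 10 + [True] * 40\n    templates, searches = sequential_sample(visible, num_search_frames=8, max_gap=30)\n    print('templates:', templates)\n    print('searches: ', searches)\n"
  },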
  {
    "path": "artrackv2_mindspore/lib/train/data/transforms.py",
    "content": "import random\nimport numpy as np\nimport math\nimport cv2 as cv\nimport torch\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as tvisf\n\n\nclass Transform:\n    \"\"\"A set of transformations, used for e.g. data augmentation.\n    Args of constructor:\n        transforms: An arbitrary number of transformations, derived from the TransformBase class.\n                    They are applied in the order they are given.\n\n    The Transform object can jointly transform images, bounding boxes and segmentation masks.\n    This is done by calling the object with the following key-word arguments (all are optional).\n\n    The following arguments are inputs to be transformed. They are either supplied as a single instance, or a list of instances.\n        image  -  Image\n        coords  -  2xN dimensional Tensor of 2D image coordinates [y, x]\n        bbox  -  Bounding box on the form [x, y, w, h]\n        mask  -  Segmentation mask with discrete classes\n\n    The following parameters can be supplied with calling the transform object:\n        joint [Bool]  -  If True then transform all images/coords/bbox/mask in the list jointly using the same transformation.\n                         Otherwise each tuple (images, coords, bbox, mask) will be transformed independently using\n                         different random rolls. Default: True.\n        new_roll [Bool]  -  If False, then no new random roll is performed, and the saved result from the previous roll\n                            is used instead. Default: True.\n\n    Check the DiMPProcessing class for examples.\n    \"\"\"\n\n    def __init__(self, *transforms):\n        if len(transforms) == 1 and isinstance(transforms[0], (list, tuple)):\n            transforms = transforms[0]\n        self.transforms = transforms\n        self._valid_inputs = ['image', 'coords', 'bbox', 'mask', 'att']\n        self._valid_args = ['joint', 'new_roll']\n        self._valid_all = self._valid_inputs + self._valid_args\n\n    def __call__(self, **inputs):\n        var_names = [k for k in inputs.keys() if k in self._valid_inputs]\n        for v in inputs.keys():\n            if v not in self._valid_all:\n                raise ValueError('Incorrect input \\\"{}\\\" to transform. 
Only supports inputs {} and arguments {}.'.format(v, self._valid_inputs, self._valid_args))\n\n        joint_mode = inputs.get('joint', True)\n        new_roll = inputs.get('new_roll', True)\n\n        if not joint_mode:\n            out = zip(*[self(**inp) for inp in self._split_inputs(inputs)])\n            return tuple(list(o) for o in out)\n\n        out = {k: v for k, v in inputs.items() if k in self._valid_inputs}\n\n        for t in self.transforms:\n            out = t(**out, joint=joint_mode, new_roll=new_roll)\n        if len(var_names) == 1:\n            return out[var_names[0]]\n        # Make sure order is correct\n        return tuple(out[v] for v in var_names)\n\n    def _split_inputs(self, inputs):\n        var_names = [k for k in inputs.keys() if k in self._valid_inputs]\n        split_inputs = [{k: v for k, v in zip(var_names, vals)} for vals in zip(*[inputs[vn] for vn in var_names])]\n        for arg_name, arg_val in filter(lambda it: it[0]!='joint' and it[0] in self._valid_args, inputs.items()):\n            if isinstance(arg_val, list):\n                for inp, av in zip(split_inputs, arg_val):\n                    inp[arg_name] = av\n            else:\n                for inp in split_inputs:\n                    inp[arg_name] = arg_val\n        return split_inputs\n\n    def __repr__(self):\n        format_string = self.__class__.__name__ + '('\n        for t in self.transforms:\n            format_string += '\\n'\n            format_string += '    {0}'.format(t)\n        format_string += '\\n)'\n        return format_string\n\n\nclass TransformBase:\n    \"\"\"Base class for transformation objects. See the Transform class for details.\"\"\"\n    def __init__(self):\n        \"\"\"2020.12.24 Add 'att' to valid inputs\"\"\"\n        self._valid_inputs = ['image', 'coords', 'bbox', 'mask', 'att']\n        self._valid_args = ['new_roll']\n        self._valid_all = self._valid_inputs + self._valid_args\n        self._rand_params = None\n\n    def __call__(self, **inputs):\n        # Split input\n        input_vars = {k: v for k, v in inputs.items() if k in self._valid_inputs}\n        input_args = {k: v for k, v in inputs.items() if k in self._valid_args}\n\n        # Roll random parameters for the transform\n        if input_args.get('new_roll', True):\n            rand_params = self.roll()\n            if rand_params is None:\n                rand_params = ()\n            elif not isinstance(rand_params, tuple):\n                rand_params = (rand_params,)\n            self._rand_params = rand_params\n\n        outputs = dict()\n        for var_name, var in input_vars.items():\n            if var is not None:\n                transform_func = getattr(self, 'transform_' + var_name)\n                if var_name in ['coords', 'bbox']:\n                    params = (self._get_image_size(input_vars),) + self._rand_params\n                else:\n                    params = self._rand_params\n                if isinstance(var, (list, tuple)):\n                    outputs[var_name] = [transform_func(x, *params) for x in var]\n                else:\n                    outputs[var_name] = transform_func(var, *params)\n        return outputs\n\n    def _get_image_size(self, inputs):\n        im = None\n        for var_name in ['image', 'mask']:\n            if inputs.get(var_name) is not None:\n                im = inputs[var_name]\n                break\n        if im is None:\n            return None\n        if isinstance(im, (list, tuple)):\n            im = im[0]\n       
 if isinstance(im, np.ndarray):\n            return im.shape[:2]\n        if torch.is_tensor(im):\n            return (im.shape[-2], im.shape[-1])\n        raise Exception('Unknown image type')\n\n    def roll(self):\n        return None\n\n    def transform_image(self, image, *rand_params):\n        \"\"\"Must be deterministic\"\"\"\n        return image\n\n    def transform_coords(self, coords, image_shape, *rand_params):\n        \"\"\"Must be deterministic\"\"\"\n        return coords\n\n    def transform_bbox(self, bbox, image_shape, *rand_params):\n        \"\"\"Assumes [x, y, w, h]\"\"\"\n        # Check if not overloaded\n        if self.transform_coords.__code__ == TransformBase.transform_coords.__code__:\n            return bbox\n\n        coord = bbox.clone().view(-1,2).t().flip(0)\n\n        x1 = coord[1, 0]\n        x2 = coord[1, 0] + coord[1, 1]\n\n        y1 = coord[0, 0]\n        y2 = coord[0, 0] + coord[0, 1]\n\n        coord_all = torch.tensor([[y1, y1, y2, y2], [x1, x2, x2, x1]])\n\n        coord_transf = self.transform_coords(coord_all, image_shape, *rand_params).flip(0)\n        tl = torch.min(coord_transf, dim=1)[0]\n        sz = torch.max(coord_transf, dim=1)[0] - tl\n        bbox_out = torch.cat((tl, sz), dim=-1).reshape(bbox.shape)\n        return bbox_out\n\n    def transform_mask(self, mask, *rand_params):\n        \"\"\"Must be deterministic\"\"\"\n        return mask\n\n    def transform_att(self, att, *rand_params):\n        \"\"\"2020.12.24 Added to deal with attention masks\"\"\"\n        return att\n\n\nclass ToTensor(TransformBase):\n    \"\"\"Convert to a Tensor\"\"\"\n\n    def transform_image(self, image):\n        # handle numpy array\n        if image.ndim == 2:\n            image = image[:, :, None]\n\n        image = torch.from_numpy(image.transpose((2, 0, 1)))\n        # backward compatibility\n        if isinstance(image, torch.ByteTensor):\n            return image.float().div(255)\n        else:\n            return image\n\n    def transform_mask(self, mask):\n        # fixed typo ('transfrom_mask') so this override is actually picked up\n        if isinstance(mask, np.ndarray):\n            return torch.from_numpy(mask)\n        return mask\n\n    def transform_att(self, att):\n        if isinstance(att, np.ndarray):\n            return torch.from_numpy(att).to(torch.bool)\n        elif isinstance(att, torch.Tensor):\n            return att.to(torch.bool)\n        else:\n            raise ValueError(\"dtype must be np.ndarray or torch.Tensor\")\n\n\nclass ToTensorAndJitter(TransformBase):\n    \"\"\"Convert to a Tensor and jitter brightness\"\"\"\n    def __init__(self, brightness_jitter=0.0, normalize=True):\n        super().__init__()\n        self.brightness_jitter = brightness_jitter\n        self.normalize = normalize\n\n    def roll(self):\n        return np.random.uniform(max(0, 1 - self.brightness_jitter), 1 + self.brightness_jitter)\n\n    def transform_image(self, image, brightness_factor):\n        # handle numpy array\n        image = torch.from_numpy(image.transpose((2, 0, 1)))\n\n        # backward compatibility\n        if self.normalize:\n            return image.float().mul(brightness_factor/255.0).clamp(0.0, 1.0)\n        else:\n            return image.float().mul(brightness_factor).clamp(0.0, 255.0)\n\n    def transform_mask(self, mask, brightness_factor):\n        if isinstance(mask, np.ndarray):\n            return torch.from_numpy(mask)\n        else:\n            return mask\n\n    def transform_att(self, att, brightness_factor):\n        if isinstance(att, np.ndarray):\n            return 
torch.from_numpy(att).to(torch.bool)\n        elif isinstance(att, torch.Tensor):\n            return att.to(torch.bool)\n        else:\n            raise ValueError (\"dtype must be np.ndarray or torch.Tensor\")\n\n\nclass Normalize(TransformBase):\n    \"\"\"Normalize image\"\"\"\n    def __init__(self, mean, std, inplace=False):\n        super().__init__()\n        self.mean = mean\n        self.std = std\n        self.inplace = inplace\n\n    def transform_image(self, image):\n        return tvisf.normalize(image, self.mean, self.std, self.inplace)\n\n\nclass ToGrayscale(TransformBase):\n    \"\"\"Converts image to grayscale with probability\"\"\"\n    def __init__(self, probability = 0.5):\n        super().__init__()\n        self.probability = probability\n        self.color_weights = np.array([0.2989, 0.5870, 0.1140], dtype=np.float32)\n\n    def roll(self):\n        return random.random() < self.probability\n\n    def transform_image(self, image, do_grayscale):\n        if do_grayscale:\n            if torch.is_tensor(image):\n                raise NotImplementedError('Implement torch variant.')\n            img_gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY)\n            return np.stack([img_gray, img_gray, img_gray], axis=2)\n            # return np.repeat(np.sum(img * self.color_weights, axis=2, keepdims=True).astype(np.uint8), 3, axis=2)\n        return image\n\n\nclass ToBGR(TransformBase):\n    \"\"\"Converts image to BGR\"\"\"\n    def transform_image(self, image):\n        if torch.is_tensor(image):\n            raise NotImplementedError('Implement torch variant.')\n        img_bgr = cv.cvtColor(image, cv.COLOR_RGB2BGR)\n        return img_bgr\n\n\nclass RandomHorizontalFlip(TransformBase):\n    \"\"\"Horizontally flip image randomly with a probability p.\"\"\"\n    def __init__(self, probability = 0.5):\n        super().__init__()\n        self.probability = probability\n\n    def roll(self):\n        return random.random() < self.probability\n\n    def transform_image(self, image, do_flip):\n        if do_flip:\n            if torch.is_tensor(image):\n                return image.flip((2,))\n            return np.fliplr(image).copy()\n        return image\n\n    def transform_coords(self, coords, image_shape, do_flip):\n        if do_flip:\n            coords_flip = coords.clone()\n            coords_flip[1,:] = (image_shape[1] - 1) - coords[1,:]\n            return coords_flip\n        return coords\n\n    def transform_mask(self, mask, do_flip):\n        if do_flip:\n            if torch.is_tensor(mask):\n                return mask.flip((-1,))\n            return np.fliplr(mask).copy()\n        return mask\n\n    def transform_att(self, att, do_flip):\n        if do_flip:\n            if torch.is_tensor(att):\n                return att.flip((-1,))\n            return np.fliplr(att).copy()\n        return att\n\n\nclass RandomHorizontalFlip_Norm(RandomHorizontalFlip):\n    \"\"\"Horizontally flip image randomly with a probability p.\n    The difference is that the coord is normalized to [0,1]\"\"\"\n    def __init__(self, probability = 0.5):\n        super().__init__()\n        self.probability = probability\n\n    def transform_coords(self, coords, image_shape, do_flip):\n        \"\"\"we should use 1 rather than image_shape\"\"\"\n        if do_flip:\n            coords_flip = coords.clone()\n            coords_flip[1,:] = 1 - coords[1,:]\n            return coords_flip\n        return coords\n"
  },
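  {
    "path": "artrackv2_mindspore/docs/examples/flip_bbox_sketch.py",
    "content": "\"\"\"Editor's illustrative sketch, not part of the original repo: this path and the\nhelper name are hypothetical. It spells out the bbox rule implied by combining\nTransformBase.transform_bbox with RandomHorizontalFlip.transform_coords above:\nflipping x -> (W - 1) - x maps a box [x, y, w, h] on an image of width W to\n[(W - 1) - (x + w), y, w, h], i.e. the width and height are unchanged.\n\"\"\"\nimport torch\n\n\ndef flip_bbox(bbox, image_shape):\n    # Corner-based recipe: flip the two x corners, then take the min again.\n    x, y, w, h = bbox.tolist()\n    W = image_shape[1]\n    xs_flipped = (W - 1) - torch.tensor([x, x + w])\n    return torch.tensor([xs_flipped.min().item(), y, w, h])\n\n\nif __name__ == '__main__':\n    bbox = torch.tensor([10.0, 20.0, 30.0, 40.0])\n    # On a (H=200, W=100) image the flipped box starts at 99 - 40 = 59.\n    print(flip_bbox(bbox, (200, 100)))  # tensor([59., 20., 30., 40.])\n"
  },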
  {
    "path": "artrackv2_mindspore/lib/train/data/wandb_logger.py",
    "content": "from collections import OrderedDict\r\n\r\ntry:\r\n    import wandb\r\nexcept ImportError:\r\n    raise ImportError(\r\n        'Please run \"pip install wandb\" to install wandb')\r\n\r\n\r\nclass WandbWriter:\r\n    def __init__(self, exp_name, cfg, output_dir, cur_step=0, step_interval=0):\r\n        self.wandb = wandb\r\n        self.step = cur_step\r\n        self.interval = step_interval\r\n        wandb.init(project=\"tracking\", name=exp_name, config=cfg, dir=output_dir)\r\n\r\n    def write_log(self, stats: OrderedDict, epoch=-1):\r\n        self.step += 1\r\n        for loader_name, loader_stats in stats.items():\r\n            if loader_stats is None:\r\n                continue\r\n\r\n            log_dict = {}\r\n            for var_name, val in loader_stats.items():\r\n                if hasattr(val, 'avg'):\r\n                    log_dict.update({loader_name + '/' + var_name: val.avg})\r\n                else:\r\n                    log_dict.update({loader_name + '/' + var_name: val.val})\r\n\r\n                if epoch >= 0:\r\n                    log_dict.update({loader_name + '/epoch': epoch})\r\n\r\n            self.wandb.log(log_dict, step=self.step*self.interval)\r\n"
  },
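  {
    "path": "artrackv2_mindspore/docs/examples/wandb_logger_usage.py",
    "content": "\"\"\"Editor's illustrative usage sketch, not part of the original repo: this path\nand the AvgMeter stand-in are hypothetical. It shows the input shape that\nWandbWriter.write_log above expects: an OrderedDict mapping loader names to stat\ndicts whose values expose .avg or .val. Running it for real requires the repo\nroot on PYTHONPATH, `pip install wandb`, and a configured wandb account.\n\"\"\"\nfrom collections import OrderedDict\n\nfrom lib.train.data.wandb_logger import WandbWriter\n\n\nclass AvgMeter:\n    # Hypothetical stand-in for the trainer's stat objects; write_log reads .avg.\n    def __init__(self, avg):\n        self.avg = avg\n\n\nif __name__ == '__main__':\n    writer = WandbWriter(exp_name='artrack_debug', cfg={}, output_dir='.',\n                         cur_step=0, step_interval=1)\n    stats = OrderedDict(train=OrderedDict({'Loss/total': AvgMeter(0.42)}))\n    writer.write_log(stats, epoch=1)\n"
  },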
  {
    "path": "artrackv2_mindspore/lib/train/data_specs/README.md",
    "content": "# README\n\n## Description for different text files\nGOT10K\n- got10k_train_full_split.txt: the complete GOT-10K training set. (9335 videos)\n- got10k_train_split.txt: part of videos from the GOT-10K training set\n- got10k_val_split.txt: another part of videos from the GOT-10K training set\n- got10k_vot_exclude.txt: 1k videos that are forbidden from \"using to train models then testing on VOT\" (as required by [VOT Challenge](https://www.votchallenge.net/vot2020/participation.html))\n- got10k_vot_train_split.txt: part of videos from the \"VOT-permitted\" GOT-10K training set\n- got10k_vot_val_split.txt: another part of videos from the \"VOT-permitted\" GOT-10K training set\n\nLaSOT\n- lasot_train_split.txt: the complete LaSOT training set\n\nTrackingNnet\n- trackingnet_classmap.txt: The map from the sequence name to the target class for the TrackingNet"
  },
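  {
    "path": "artrackv2_mindspore/docs/examples/load_split_sketch.py",
    "content": "\"\"\"Editor's illustrative sketch, not part of the original repo: this path and the\nhelper name are hypothetical. The split files described in the README above are\nplain text with one sequence index per line (see got10k_train_full_split.txt in\nthe next entry); this minimal loader turns such a file into a list of ints.\n\"\"\"\nfrom pathlib import Path\n\n\ndef load_split(path):\n    # One integer id per line; skip blank lines.\n    return [int(line) for line in Path(path).read_text().splitlines() if line.strip()]\n\n\nif __name__ == '__main__':\n    ids = load_split('lib/train/data_specs/got10k_train_full_split.txt')\n    print(len(ids), 'sequences; first five:', ids[:5])\n"
  },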
  {
    "path": "artrackv2_mindspore/lib/train/data_specs/got10k_train_full_split.txt",
    "content": "3784\n8998\n3906\n1631\n8277\n8358\n2338\n7938\n2988\n8302\n2662\n2663\n2825\n7447\n4781\n2218\n6348\n5860\n4517\n2819\n8075\n5391\n116\n3606\n7976\n7941\n1024\n4519\n1970\n557\n8579\n6908\n993\n7204\n1991\n3674\n8781\n6840\n5\n3225\n3763\n8688\n6778\n5777\n4794\n2744\n8126\n3864\n1733\n2923\n6829\n701\n683\n2081\n1831\n2404\n1459\n2741\n5972\n3618\n7462\n2654\n103\n2174\n6224\n2989\n2506\n2766\n5912\n2699\n3295\n3986\n609\n4895\n6673\n801\n1098\n1602\n2490\n3129\n8476\n3186\n7355\n4784\n4270\n1812\n4226\n2267\n8873\n6544\n6112\n2381\n4752\n753\n3776\n6511\n6016\n731\n2559\n7369\n5866\n563\n7731\n1105\n5603\n50\n4238\n2208\n8725\n4994\n4719\n1444\n8807\n7298\n8139\n8760\n8173\n2332\n4131\n5207\n1065\n8562\n3992\n4024\n2188\n9095\n6765\n1707\n6105\n6922\n5362\n1486\n7898\n4135\n6574\n1551\n998\n6565\n8127\n8927\n2544\n4365\n510\n768\n3535\n3875\n6808\n2931\n487\n1088\n4451\n368\n2470\n8111\n3493\n7338\n8281\n6390\n1271\n4373\n3667\n3494\n3757\n2966\n3756\n7840\n6315\n7827\n3300\n6261\n4163\n2217\n6549\n94\n7236\n9136\n1857\n6691\n3470\n6271\n807\n516\n9311\n6098\n3144\n8420\n5425\n5694\n2643\n6696\n6072\n7285\n3781\n903\n8522\n6092\n5979\n2622\n2529\n855\n3420\n3261\n8953\n7866\n2492\n3157\n359\n1520\n2642\n7452\n759\n36\n8931\n1744\n4350\n1089\n9199\n4295\n1889\n1908\n4868\n4498\n1968\n9103\n3273\n8723\n7413\n4114\n5584\n4874\n1427\n5211\n7618\n1542\n1353\n8158\n4168\n3200\n6345\n8560\n5619\n5953\n3158\n8849\n5831\n1411\n7294\n8103\n6539\n7397\n1006\n5450\n3119\n4274\n5352\n4571\n2319\n4217\n4976\n902\n1814\n2651\n3299\n3398\n982\n2428\n5793\n1346\n7057\n3737\n7329\n4449\n2110\n7405\n1773\n958\n3901\n4127\n8234\n2994\n7066\n1289\n2995\n5871\n3556\n9085\n846\n2366\n585\n7032\n5516\n5230\n3481\n2732\n6658\n7423\n1855\n6384\n3554\n5823\n4948\n7058\n4667\n5377\n2503\n7694\n9191\n9144\n655\n3409\n62\n8019\n8970\n5523\n7403\n3379\n2323\n4833\n5750\n3178\n6548\n8891\n7501\n3280\n7404\n343\n2171\n8397\n1367\n8611\n6118\n6603\n3729\n7182\n9048\n7733\n5642\n7141\n3335\n4845\n5449\n3467\n6250\n163\n5168\n2040\n5339\n3609\n8352\n3426\n8567\n769\n187\n6151\n6437\n7028\n8507\n3970\n9146\n2068\n5028\n7492\n1661\n2815\n2469\n2563\n3814\n8430\n4305\n3479\n5678\n9115\n4132\n1211\n5459\n4814\n545\n4556\n238\n4296\n2724\n1260\n2581\n6087\n4632\n4313\n380\n1209\n5447\n3032\n7942\n8943\n806\n2432\n6130\n4314\n2131\n9045\n6531\n5706\n6747\n7724\n2017\n3292\n5469\n2743\n424\n4233\n7643\n8619\n5192\n4516\n9324\n3537\n9152\n8058\n7526\n8711\n1949\n5982\n1732\n6702\n7027\n6388\n7012\n328\n2130\n452\n306\n7669\n3134\n5761\n3703\n44\n4189\n695\n7672\n5224\n9215\n5644\n3143\n3704\n5443\n2348\n7177\n2328\n4725\n354\n1418\n7810\n7746\n9002\n5759\n7226\n4535\n9160\n4385\n5397\n7249\n2936\n3204\n6287\n385\n2371\n2738\n3636\n9033\n2246\n2680\n6940\n4310\n2054\n9250\n9080\n4568\n5586\n4469\n2038\n3410\n7900\n4332\n6108\n678\n3319\n9079\n1054\n4048\n4751\n1320\n6890\n7931\n1398\n4349\n5299\n5025\n7932\n5738\n7787\n4590\n4020\n1274\n2488\n8497\n3372\n8965\n3219\n799\n3664\n6500\n7093\n4362\n6205\n4244\n4652\n1964\n5945\n6434\n2031\n2684\n6632\n4588\n8271\n3232\n5782\n2904\n6789\n5636\n7200\n3632\n5435\n8203\n3480\n4786\n7579\n3351\n1921\n798\n3646\n3094\n4359\n1654\n5975\n376\n5965\n780\n7821\n9224\n6738\n3185\n2133\n6248\n5996\n2834\n531\n5688\n2448\n7925\n7974\n5924\n6401\n5778\n6594\n5442\n8336\n4522\n3770\n6340\n6328\n4946\n4161\n2954\n2588\n8465\n2885\n1606\n5787\n3407\n3121\n7310\n1413\n1932\n4787\n2579\n3325\n508\n5610\n6480\n4290\n479\n3792\n6628\n2545\n6717\n6972\n2665\n6730\n3547\n6845\n5929\n3540\n435
6\n8993\n1052\n2235\n8356\n3403\n8818\n8260\n572\n4159\n1180\n5348\n941\n7948\n2676\n3539\n4866\n6422\n8365\n3217\n1310\n2059\n9177\n1419\n2283\n8892\n8162\n1212\n6277\n3725\n7806\n6149\n7874\n718\n6888\n7118\n277\n656\n8763\n8289\n4759\n5854\n8659\n7710\n3145\n5981\n1881\n5799\n6947\n1609\n6396\n2631\n2887\n318\n2550\n6132\n1736\n2907\n7816\n48\n4304\n8133\n6698\n2760\n7779\n7732\n7642\n1154\n7242\n711\n9262\n539\n8033\n7440\n1913\n5480\n5570\n8594\n8772\n4654\n8974\n6128\n6183\n1071\n8449\n2142\n2298\n524\n1695\n820\n4053\n8241\n1856\n8641\n3981\n217\n1063\n9286\n3152\n221\n5461\n1270\n2006\n7164\n1199\n6951\n5604\n5400\n5309\n3498\n6407\n6661\n7097\n8165\n5169\n3852\n7070\n5702\n4344\n6648\n6904\n3272\n7119\n5795\n2365\n2659\n353\n5444\n6968\n2755\n1924\n2098\n2972\n6006\n5865\n8740\n2418\n3401\n7856\n5841\n598\n836\n1147\n931\n8897\n0\n6049\n1837\n865\n1871\n6116\n6831\n5773\n3587\n303\n1883\n2163\n3070\n1308\n7953\n6300\n6909\n853\n7301\n3279\n123\n7186\n3194\n5553\n5133\n1931\n4622\n6075\n4891\n5722\n5693\n8\n2339\n6596\n71\n379\n4506\n4370\n1238\n2707\n3344\n4254\n8767\n1726\n325\n4148\n5438\n5357\n548\n1332\n6824\n2290\n2335\n3146\n2594\n2315\n3389\n3885\n2621\n4116\n5389\n7412\n7222\n4894\n8595\n2000\n4978\n4721\n6444\n3796\n9321\n2236\n6409\n1523\n1468\n9249\n8270\n2341\n2874\n174\n4757\n4502\n4703\n9034\n9108\n5451\n2619\n5022\n9158\n490\n6540\n1466\n2962\n8771\n3036\n2712\n4539\n1581\n5638\n9246\n4308\n4363\n4647\n4470\n1636\n2511\n1311\n6560\n7519\n8027\n9217\n6464\n6364\n3779\n4822\n3563\n3982\n5896\n5510\n6655\n1524\n2846\n3137\n621\n141\n1887\n6567\n8921\n4671\n6052\n8445\n8699\n7349\n3553\n2117\n7651\n5034\n5383\n649\n3818\n9022\n8414\n1012\n8159\n5081\n8571\n4765\n9135\n4361\n4073\n9142\n727\n2835\n8229\n3989\n4490\n4923\n5477\n1638\n3643\n712\n9044\n2230\n499\n7166\n96\n3172\n8431\n8401\n1470\n6356\n8817\n927\n4212\n2152\n1795\n3812\n4949\n1219\n1538\n3029\n6481\n9042\n7775\n7742\n423\n2085\n7715\n4541\n9061\n5916\n3950\n7420\n4878\n7406\n7046\n7808\n4911\n8804\n6927\n8820\n3264\n300\n8670\n2979\n252\n4407\n3383\n4688\n8504\n6723\n26\n3837\n2489\n4137\n8209\n229\n6490\n2364\n9016\n1763\n1728\n338\n8335\n9063\n5280\n2791\n641\n5454\n4581\n5420\n4548\n2840\n8508\n3463\n7231\n7619\n2560\n1755\n6201\n165\n1471\n6279\n5806\n6867\n5890\n2396\n3416\n1981\n6073\n5872\n3045\n4182\n7607\n3318\n4414\n2998\n6553\n7139\n5624\n2123\n3666\n723\n5110\n6932\n8200\n2222\n8399\n1041\n4138\n1594\n3569\n9253\n393\n7940\n8004\n1475\n6759\n5393\n1107\n2597\n878\n9309\n7576\n5250\n1759\n3142\n2015\n571\n3921\n1255\n7080\n893\n2160\n1355\n82\n1562\n9153\n8583\n4085\n4644\n7196\n9165\n3558\n4550\n6374\n7826\n8602\n4146\n9257\n6083\n874\n8383\n3731\n3374\n3653\n8222\n7344\n470\n1813\n4478\n6871\n7245\n6866\n3998\n7433\n276\n1915\n1988\n8168\n2518\n2686\n831\n6143\n5205\n8718\n1703\n7729\n2077\n7983\n8450\n1195\n9232\n507\n7989\n6974\n4054\n5828\n8655\n6679\n5245\n7783\n5886\n9098\n6491\n8782\n3525\n6542\n131\n8110\n9186\n9074\n4933\n9035\n2607\n4\n2057\n6273\n2711\n5829\n3382\n2696\n3043\n2048\n619\n2499\n5295\n1162\n7807\n3694\n2194\n3149\n1940\n7934\n840\n3592\n8237\n4731\n1324\n8486\n8726\n8573\n2928\n9078\n2272\n2564\n1370\n5911\n7434\n8026\n407\n7546\n2004\n5849\n3034\n7887\n3425\n1118\n926\n3430\n1544\n5902\n2282\n1124\n2334\n129\n1372\n4842\n6473\n4382\n1028\n415\n8269\n8073\n6910\n2796\n3038\n5735\n5080\n2852\n6306\n8842\n9188\n3637\n1066\n532\n928\n5485\n2838\n6753\n9008\n7984\n2816\n8819\n7103\n5977\n5044\n2064\n2599\n4973\n382\n3249\n6446\n6638\n852\n1724\n3368\n892\n3250\n8258\n7962\n43
00\n1616\n167\n8855\n2090\n4424\n879\n5136\n5350\n2635\n7828\n8506\n63\n3004\n3847\n3676\n1184\n1705\n6745\n1263\n5020\n746\n1888\n7036\n1033\n3914\n5433\n3905\n4641\n8909\n228\n4801\n3766\n8085\n643\n6914\n9280\n3013\n5657\n3696\n1590\n2920\n8282\n2403\n416\n911\n3849\n4215\n1120\n5490\n296\n2306\n3140\n3742\n4819\n6153\n6414\n760\n3000\n7498\n7108\n6429\n3031\n5314\n751\n3357\n5808\n7505\n98\n7652\n4027\n6257\n3943\n1799\n8577\n5577\n4969\n9163\n2025\n6061\n4026\n5732\n588\n7017\n1415\n4961\n4940\n7152\n538\n706\n2802\n8983\n3375\n1246\n6593\n5837\n1789\n7939\n4997\n5939\n2411\n6133\n199\n7593\n1702\n5406\n6082\n2359\n2912\n6109\n100\n8149\n5470\n2807\n3384\n6413\n3362\n5621\n6019\n9241\n9268\n7703\n4111\n7967\n5458\n7181\n5492\n1112\n6729\n4577\n106\n8853\n3774\n979\n7082\n4610\n1853\n9003\n9292\n2867\n6262\n2245\n3460\n1557\n767\n4796\n8147\n2658\n5769\n6985\n7065\n421\n7990\n3289\n1540\n9316\n2251\n6896\n5947\n4965\n2652\n4480\n963\n9047\n7168\n7824\n3976\n6210\n7018\n7179\n5016\n7789\n6102\n6828\n7659\n9109\n9071\n8115\n7628\n7110\n16\n7513\n835\n939\n4078\n2351\n2322\n3881\n4945\n560\n6837\n6094\n6475\n7901\n3\n771\n8029\n3135\n8044\n7127\n3741\n5156\n7030\n4906\n113\n3747\n7042\n5232\n5225\n3002\n4747\n6879\n5379\n4886\n7192\n4184\n1896\n1834\n8689\n3665\n2957\n6913\n8009\n4851\n6420\n7987\n828\n3003\n8884\n8815\n3198\n8008\n194\n6251\n3303\n3934\n395\n1285\n4169\n1648\n1347\n3600\n4631\n509\n211\n6230\n7241\n8250\n2219\n2582\n8353\n7790\n7583\n4462\n3904\n9004\n6942\n1704\n5686\n8051\n2981\n5511\n6182\n7088\n1699\n1222\n3455\n6189\n1528\n5197\n6221\n7893\n3283\n2837\n7773\n8766\n2942\n8021\n614\n4102\n7362\n1786\n400\n133\n556\n3127\n5237\n3727\n1440\n3873\n6322\n8448\n6285\n8696\n8800\n4009\n3386\n454\n4847\n5685\n9093\n246\n1314\n5895\n6863\n4302\n4260\n8405\n8417\n7116\n255\n3223\n4737\n7852\n6337\n814\n710\n1094\n6103\n5809\n5882\n6336\n4974\n1499\n2806\n3744\n2664\n2436\n4482\n8665\n8918\n1076\n8676\n5725\n9248\n4755\n1447\n9328\n5500\n78\n2653\n792\n6854\n6093\n6172\n3378\n4492\n5529\n5476\n3846\n1391\n383\n4289\n3883\n2648\n3265\n2525\n5402\n4599\n6870\n6877\n4413\n2464\n8519\n2521\n1839\n5822\n5664\n7257\n5375\n6852\n6764\n5182\n8914\n3015\n8509\n3080\n4562\n8979\n6215\n6643\n8601\n6096\n4812\n5246\n7862\n527\n7849\n6737\n12\n2468\n7961\n275\n27\n5932\n3840\n7341\n4996\n8564\n2154\n3788\n6138\n7831\n4442\n757\n4464\n1170\n2568\n19\n323\n6584\n7675\n3441\n2067\n9027\n2486\n4379\n4744\n1737\n7563\n301\n3907\n4742\n6857\n1221\n9284\n8458\n8236\n2897\n4004\n1526\n5345\n4423\n6246\n8578\n1057\n3711\n4986\n4785\n3997\n7311\n4788\n107\n8387\n2041\n2608\n8628\n5830\n6031\n783\n6817\n3293\n541\n773\n8473\n2501\n7247\n5667\n804\n483\n1639\n696\n6060\n5429\n5762\n1527\n7342\n1329\n6225\n7895\n381\n8030\n8520\n8362\n4734\n3526\n9273\n2039\n4142\n5084\n875\n6905\n8968\n5275\n3052\n650\n7509\n232\n2595\n3631\n1810\n4355\n8315\n8908\n1777\n4834\n3164\n2336\n1543\n6212\n8346\n3024\n3719\n1242\n6265\n8101\n3133\n6150\n6358\n3316\n4089\n1647\n4629\n7117\n2596\n5366\n1225\n6371\n624\n2209\n1428\n1158\n7648\n466\n8765\n802\n153\n4639\n3657\n6482\n9320\n2693\n6591\n3294\n2617\n5052\n6305\n3227\n8784\n7170\n93\n5868\n6716\n1671\n178\n2703\n954\n3254\n2262\n5046\n5743\n8647\n6393\n7706\n6604\n3728\n6978\n7489\n7474\n8754\n2740\n2233\n6038\n1491\n8814\n2080\n2358\n5944\n5653\n1164\n9259\n4518\n7343\n5748\n3897\n923\n5967\n2677\n3503\n1202\n4966\n1836\n1863\n6634\n1962\n9096\n9064\n977\n4049\n1464\n658\n536\n3402\n8064\n1309\n259\n7999\n8122\n910\n224\n6152\n7142\n6070\n7523\n8411\n2408\n6766\n9214
\n9312\n8325\n6192\n626\n6025\n6240\n8708\n4630\n6777\n1075\n8906\n408\n9269\n6236\n9067\n2514\n8568\n2324\n156\n3136\n3530\n7878\n7308\n4335\n2065\n3845\n4453\n3356\n1450\n371\n7219\n5171\n201\n8642\n2099\n477\n1603\n8339\n7430\n3061\n235\n8291\n1133\n8474\n7035\n8653\n989\n4569\n9092\n8347\n3102\n1743\n9086\n5140\n7438\n1530\n4342\n2460\n7646\n5047\n5071\n5430\n6944\n610\n2803\n1448\n4696\n6156\n4386\n4248\n4256\n994\n2112\n805\n8011\n8276\n8999\n4956\n1712\n2795\n7553\n6436\n2158\n9083\n3184\n5784\n4428\n612\n5288\n6222\n1365\n5074\n6848\n575\n5213\n2175\n4240\n351\n2086\n2656\n5150\n9255\n8189\n7735\n1261\n1344\n4097\n8674\n2984\n4235\n5998\n6488\n537\n1267\n7486\n7124\n6245\n7955\n7337\n5436\n1194\n8226\n209\n1710\n7906\n4357\n4139\n5679\n2584\n2854\n1004\n8246\n8586\n5087\n1878\n4926\n6637\n3197\n7757\n8249\n4055\n6502\n1248\n990\n3928\n2770\n2751\n1020\n6426\n4190\n6839\n2671\n884\n3871\n9212\n4179\n3394\n10\n5861\n5316\n6869\n2985\n8905\n8559\n4457\n2480\n2313\n4100\n4395\n6835\n7799\n7890\n2785\n5468\n7302\n5862\n1803\n6376\n3171\n8591\n717\n7053\n1655\n4489\n2522\n2921\n8555\n1984\n895\n8949\n1305\n738\n7606\n112\n3042\n1325\n437\n3167\n3340\n511\n3689\n5813\n8982\n69\n4421\n7150\n550\n8829\n8685\n3147\n8956\n3166\n7023\n8633\n3308\n2014\n3573\n3880\n4045\n2069\n6051\n4950\n702\n6664\n8418\n2454\n6181\n4853\n4166\n7022\n7418\n3605\n9181\n7172\n5031\n4589\n7858\n6586\n6351\n8334\n7504\n634\n3759\n1890\n890\n6959\n5085\n4919\n2161\n1191\n256\n3610\n7079\n3427\n4071\n7323\n2982\n7263\n7444\n4251\n5846\n4864\n3649\n4311\n7461\n8120\n4582\n6373\n2805\n4872\n4869\n5493\n5867\n2670\n7099\n30\n8933\n930\n7919\n501\n7261\n5289\n7449\n7772\n3613\n7848\n3196\n474\n205\n841\n2611\n6185\n3088\n409\n7239\n5938\n7871\n1343\n6705\n1027\n5596\n2199\n9113\n5471\n6134\n838\n2345\n8359\n4061\n1474\n3229\n270\n4245\n1979\n5995\n1517\n8652\n4006\n4880\n6137\n4693\n2528\n6996\n2926\n5798\n2477\n2549\n1128\n3341\n6014\n4479\n2861\n4208\n5175\n5174\n5118\n3736\n5463\n1588\n2327\n8380\n7982\n1514\n1058\n4586\n6608\n7985\n3044\n1822\n3628\n6851\n549\n1811\n2184\n2601\n4608\n8922\n2540\n6659\n3859\n307\n3650\n3767\n8167\n505\n4366\n4824\n5520\n461\n1933\n2401\n8106\n2055\n7844\n8544\n8838\n4797\n7419\n6686\n7670\n6039\n5672\n5141\n6543\n206\n5252\n4718\n888\n1601\n3218\n5114\n713\n4022\n4419\n6708\n397\n425\n6612\n5057\n1729\n6573\n4729\n4080\n1034\n2961\n534\n8194\n5598\n9218\n2424\n329\n4154\n1597\n922\n109\n8823\n3578\n9038\n8437\n3307\n128\n8032\n1412\n7333\n8762\n8851\n8865\n3056\n468\n3808\n3064\n8798\n7052\n7767\n9231\n1086\n2162\n6566\n2109\n3439\n6122\n3642\n7696\n8610\n5279\n1808\n8687\n8377\n817\n8714\n6066\n4008\n3640\n6015\n1021\n7601\n4855\n6017\n87\n7071\n2730\n7268\n3614\n6084\n6117\n6924\n9102\n2829\n375\n8724\n2095\n22\n1541\n2970\n633\n139\n451\n4521\n179\n1396\n3876\n5824\n8020\n426\n4982\n4172\n1157\n190\n4859\n1455\n3110\n3323\n9104\n858\n6719\n6428\n4495\n8551\n2141\n3984\n3066\n67\n4299\n5821\n8444\n6581\n6097\n7090\n7781\n8944\n3085\n8606\n2114\n5355\n8901\n1461\n3301\n422\n7000\n4820\n5790\n1379\n7536\n4199\n8736\n8991\n5241\n1698\n1294\n1753\n196\n2987\n8680\n4658\n4144\n8639\n6441\n8255\n8156\n3677\n6385\n6520\n7700\n3760\n6001\n1144\n5478\n7394\n8057\n5018\n4232\n5235\n6844\n3111\n8802\n867\n949\n7843\n573\n2278\n6801\n7629\n2714\n5105\n6946\n2697\n5315\n1571\n8677\n2537\n4374\n3833\n7820\n3750\n2033\n6526\n3884\n8706\n7195\n417\n3603\n3001\n6284\n5873\n5718\n8576\n8457\n3589\n5839\n459\n3626\n6342\n8729\n6933\n607\n6053\n8228\n3773\n1805\n6365\n5142\n6069\n1389\n9026\n570\n4614\
n5712\n5533\n9222\n2821\n1897\n819\n766\n4060\n4902\n5905\n6842\n5446\n1277\n4303\n2836\n934\n1014\n7822\n7494\n3466\n665\n1047\n5881\n3328\n4664\n315\n1315\n1462\n8616\n7725\n2756\n5749\n1730\n8184\n4567\n5065\n7499\n8867\n1304\n3669\n9192\n410\n8177\n6710\n1210\n2329\n8443\n3911\n1899\n7686\n3315\n7190\n6180\n3116\n5341\n4394\n8337\n9182\n6969\n5715\n2172\n1742\n2782\n3715\n9195\n7960\n2517\n4890\n8294\n2337\n8014\n3353\n7475\n2193\n4843\n8831\n4200\n4653\n6196\n6957\n3063\n2996\n8959\n8973\n6529\n3457\n5274\n8002\n6823\n6154\n5561\n1780\n9318\n7657\n1758\n6503\n7678\n3274\n1625\n4327\n3236\n8575\n3155\n4707\n4331\n1494\n8756\n3174\n1074\n8116\n8295\n8311\n3048\n3752\n6050\n6483\n8003\n9175\n4674\n1642\n2556\n6166\n7165\n8441\n5413\n3990\n1640\n1778\n7500\n8304\n1395\n4315\n5949\n3364\n242\n5763\n1036\n249\n2430\n7426\n8131\n411\n6267\n2045\n6606\n899\n8065\n9052\n7507\n5779\n5616\n2107\n5408\n2980\n6310\n5776\n4328\n821\n3251\n2354\n7076\n1700\n5313\n6736\n79\n8212\n3959\n5677\n7545\n160\n6790\n6859\n3659\n6770\n1106\n8846\n956\n7472\n2050\n8099\n4795\n8053\n9293\n7037\n1646\n9307\n1069\n5322\n5332\n2708\n8977\n917\n2419\n184\n2105\n1578\n3923\n5780\n1903\n2512\n429\n5582\n493\n4972\n445\n8286\n555\n320\n8300\n322\n617\n3413\n4459\n525\n5631\n6314\n5157\n5300\n8545\n182\n1031\n4429\n2495\n7586\n1534\n3099\n3916\n3738\n1919\n535\n2119\n1299\n177\n1838\n2159\n4099\n8285\n5172\n8540\n6020\n7683\n3073\n3115\n1673\n3087\n3488\n2416\n1894\n5942\n3597\n5834\n2007\n43\n1779\n4174\n2023\n2546\n2429\n9006\n436\n4214\n4536\n3693\n5426\n6767\n5903\n4368\n2170\n5051\n7490\n7882\n2859\n5035\n7835\n5372\n7122\n925\n3253\n6338\n8393\n4093\n5848\n7588\n2683\n8049\n5403\n5894\n8745\n8550\n2941\n3484\n9029\n4461\n8022\n725\n2355\n1619\n3030\n1975\n5623\n2415\n1957\n6141\n9278\n3226\n3062\n5670\n7326\n8759\n8496\n6619\n8187\n8262\n6199\n951\n7183\n668\n2388\n4698\n5681\n8240\n2851\n871\n4988\n9084\n9089\n3162\n1167\n8244\n5227\n6461\n2831\n776\n5010\n5770\n5282\n3574\n5102\n1278\n2281\n5455\n305\n4628\n4663\n9119\n7487\n8746\n4889\n6569\n1175\n102\n2386\n8940\n2479\n5566\n53\n8833\n1918\n8001\n321\n6786\n6861\n4358\n2771\n7467\n975\n4777\n605\n3543\n2600\n7584\n9299\n4530\n6477\n7364\n7328\n183\n4761\n7543\n304\n1196\n4623\n7839\n2139\n5519\n1953\n533\n5989\n7590\n7428\n6346\n6162\n1091\n1946\n6260\n4405\n5676\n8924\n7171\n8409\n1866\n6379\n3411\n2387\n3051\n7398\n154\n1185\n6442\n6004\n1611\n2165\n9018\n8323\n616\n3995\n8952\n1533\n7853\n4194\n213\n789\n4991\n3675\n7456\n5752\n175\n7556\n4195\n907\n2248\n9057\n8467\n4594\n1017\n7968\n880\n7446\n3304\n1666\n4942\n3867\n4802\n9156\n6357\n4621\n887\n6213\n5261\n1336\n521\n8928\n1818\n7864\n4792\n6742\n157\n1593\n823\n7235\n5303\n5633\n1100\n1692\n8047\n5993\n1460\n6714\n1630\n6440\n6307\n3608\n292\n212\n401\n5974\n7107\n8301\n8342\n2720\n4583\n2757\n7315\n833\n4466\n4236\n1282\n5273\n2149\n287\n8484\n2380\n8119\n7167\n737\n5076\n6598\n3596\n5382\n2650\n8980\n3421\n1356\n1954\n7823\n1172\n2226\n1941\n6136\n7274\n2256\n4928\n324\n1407\n4410\n4579\n1061\n7113\n486\n862\n3435\n6956\n2873\n1465\n6113\n8225\n8512\n6806\n272\n6008\n1241\n88\n5662\n3555\n689\n8733\n2812\n7453\n6282\n420\n2471\n4477\n7495\n1445\n594\n6939\n1564\n8704\n8590\n7992\n7374\n5796\n9298\n4213\n5713\n5864\n326\n5513\n402\n464\n608\n1951\n8640\n8180\n3347\n3459\n4162\n2690\n7478\n5856\n5240\n2389\n3022\n602\n5547\n1798\n1345\n9276\n599\n3673\n3277\n1635\n8625\n1567\n5928\n636\n5671\n2896\n3477\n412\n7575\n4201\n685\n4760\n1229\n4275\n8960\n3123\n4471\n5941\n3355\n3999\n7157\n6354\n7741\n6850\n
8783\n1943\n6769\n7330\n8721\n8477\n1381\n848\n778\n6408\n2644\n5817\n1441\n1723\n2144\n2776\n2368\n120\n367\n8839\n8749\n5353\n4158\n3148\n9114\n1233\n9228\n8857\n2895\n1286\n200\n6755\n5125\n5857\n1657\n7658\n5097\n5000\n942\n7020\n586\n784\n7078\n6194\n8658\n8957\n9325\n1851\n8911\n4862\n7004\n1186\n8824\n1651\n2999\n561\n7639\n4316\n5086\n3187\n7912\n2624\n9183\n8487\n5089\n8475\n7554\n4031\n6297\n6059\n5329\n115\n2058\n7650\n7634\n7121\n2485\n7805\n2241\n7713\n4352\n2409\n1026\n2745\n4549\n6474\n5124\n5201\n6556\n6617\n9091\n3945\n8402\n5648\n5257\n2192\n4901\n7750\n6131\n6027\n6352\n4625\n1254\n5498\n3720\n8261\n3939\n5576\n3685\n6713\n8472\n991\n8354\n8068\n5655\n5997\n1029\n7506\n6740\n2575\n2990\n4898\n583\n7402\n3290\n5388\n6715\n8235\n5361\n4970\n1363\n3338\n5731\n9014\n5358\n2216\n2856\n635\n1193\n3705\n6334\n7666\n5270\n1384\n6368\n8604\n3564\n1937\n2481\n1341\n721\n2100\n3958\n6551\n3813\n2592\n7980\n5385\n319\n2357\n8761\n8910\n8693\n1204\n489\n4827\n8024\n7832\n6427\n3895\n89\n9068\n8067\n1708\n1111\n8963\n1902\n9251\n5719\n9143\n5537\n9169\n77\n5365\n1840\n485\n4456\n2841\n1169\n3271\n7144\n6886\n9140\n7173\n6003\n1659\n1807\n8371\n2439\n274\n4660\n3448\n6623\n347\n2103\n3400\n2106\n9073\n8169\n3687\n3305\n4416\n8454\n6635\n332\n2433\n2909\n3839\n4063\n1944\n6509\n1296\n7770\n1880\n6610\n4075\n9331\n4484\n302\n418\n4219\n1333\n2350\n6498\n8424\n4694\n4883\n5269\n6580\n5007\n6722\n1669\n8470\n2571\n513\n3810\n7049\n6332\n7363\n3532\n8456\n2097\n297\n8841\n7180\n714\n1587\n5234\n4268\n2320\n7372\n660\n8503\n1668\n8847\n1101\n7275\n3336\n6460\n722\n7782\n3947\n502\n4258\n2132\n1835\n181\n3841\n427\n3446\n2551\n8324\n6963\n4284\n7297\n7577\n3399\n9148\n8213\n5656\n8440\n851\n657\n2446\n4292\n6992\n976\n1108\n2681\n3237\n8582\n377\n5969\n5287\n9209\n8523\n7178\n7833\n6175\n2126\n3023\n5090\n7491\n6640\n6077\n2221\n2780\n1694\n4094\n144\n6161\n3203\n7123\n749\n3625\n3848\n980\n2270\n7819\n3672\n7689\n7203\n2718\n1714\n2884\n3474\n3802\n3851\n4224\n7237\n5415\n7998\n7207\n4106\n9036\n1046\n8731\n5070\n6818\n4592\n6056\n693\n1328\n3309\n5791\n2629\n2736\n202\n388\n7886\n4417\n8786\n8822\n4035\n7718\n8492\n5505\n1192\n4388\n8941\n5019\n7538\n6732\n7296\n6389\n5923\n1405\n3278\n3917\n1688\n8374\n443\n4037\n9099\n5190\n6402\n4177\n9310\n7747\n4348\n7197\n4844\n4998\n5609\n4345\n29\n3332\n8648\n4107\n346\n2577\n3941\n1215\n3782\n8252\n4706\n2675\n3790\n7459\n6164\n7316\n1149\n6687\n582\n3139\n5040\n7645\n3882\n7322\n4034\n1861\n4701\n8757\n3208\n8801\n6349\n8907\n1823\n4528\n4789\n143\n4746\n9234\n3866\n9245\n1911\n1366\n4393\n2061\n859\n1959\n6967\n3138\n7382\n9031\n6237\n845\n80\n6911\n7163\n5229\n4736\n8738\n33\n8543\n357\n3193\n7262\n4448\n6796\n6793\n3321\n7569\n6411\n7692\n7340\n1417\n5847\n3836\n2678\n1188\n8727\n223\n8615\n7417\n5771\n3170\n8061\n2935\n8263\n8257\n6883\n1276\n1239\n812\n6258\n3922\n7525\n8117\n3039\n603\n8554\n7573\n2787\n3445\n5115\n3478\n962\n3961\n6570\n7722\n216\n2797\n5154\n2530\n4904\n2405\n7542\n4021\n3252\n5370\n9302\n236\n4532\n1361\n3373\n1716\n2183\n1583\n3783\n868\n1687\n8925\n1433\n6198\n8208\n6367\n7603\n882\n3469\n1645\n7654\n1176\n4231\n150\n7997\n5456\n7031\n4375\n8840\n5634\n6945\n705\n3442\n4774\n3822\n7148\n1922\n8459\n6249\n8713\n6197\n8599\n6071\n6756\n1634\n950\n5640\n7749\n5920\n6622\n4783\n7837\n7479\n7229\n3919\n1797\n5272\n8945\n4908\n5439\n6903\n5833\n6930\n8197\n9261\n1711\n5483\n6046\n4285\n8852\n7409\n8971\n8278\n7534\n7792\n2444\n7496\n8063\n1665\n248\n3894\n4585\n1982\n66\n6651\n4850\n1240\n7511\n7524\n9258\n2075\n3979\n4714\n75
92\n965\n2919\n8239\n1842\n8013\n4750\n2344\n6155\n3468\n31\n2087\n1599\n1573\n5883\n7613\n195\n3749\n644\n2189\n8779\n8743\n9005\n8081\n1040\n7785\n5820\n8830\n5495\n4867\n2710\n3843\n491\n7153\n6217\n1148\n4741\n1761\n5484\n3423\n5474\n6916\n5876\n7252\n1739\n8930\n6647\n5198\n4903\n8488\n7366\n2774\n2726\n2385\n7625\n3179\n2211\n8845\n6600\n399\n6810\n3447\n6684\n4915\n8368\n1867\n2325\n2101\n1335\n7734\n3722\n7437\n3716\n7025\n4000\n6897\n1408\n7154\n5013\n2204\n9233\n4225\n3817\n1877\n9161\n2197\n6991\n3390\n280\n1892\n1612\n7753\n2801\n7246\n7909\n6229\n9314\n8407\n1436\n3879\n6432\n6849\n5326\n5327\n8535\n7910\n7745\n5545\n7916\n207\n1783\n6158\n8517\n7361\n8070\n6430\n119\n6146\n4183\n1083\n7385\n4497\n9133\n1686\n3765\n5099\n595\n8046\n4418\n4043\n2361\n7915\n9149\n1717\n1141\n6375\n1018\n5602\n1262\n7485\n9178\n6629\n3339\n8934\n4648\n7988\n6252\n3440\n864\n5418\n3874\n7280\n6191\n8388\n4323\n6792\n4324\n2232\n7228\n8684\n7813\n6187\n6678\n3177\n3534\n4953\n4402\n7739\n6319\n2414\n8700\n5946\n8238\n4533\n6917\n4167\n4618\n2115\n2268\n3081\n1247\n4001\n8580\n7636\n3101\n2195\n1559\n3714\n2484\n7188\n6028\n7530\n2828\n1977\n3238\n6496\n2340\n110\n3247\n7532\n7541\n924\n1632\n484\n4487\n4439\n6447\n1319\n4944\n6347\n1791\n2285\n8087\n5452\n91\n1166\n162\n5185\n7933\n4743\n1627\n7259\n8620\n8525\n8207\n5845\n9011\n5525\n4269\n4700\n1824\n8186\n8872\n8299\n3957\n8242\n4558\n6439\n2666\n5943\n6958\n8112\n5121\n8806\n6170\n7688\n3486\n2082\n7436\n2778\n1096\n786\n2206\n5170\n1443\n6030\n3312\n9151\n8485\n6404\n8498\n2883\n8961\n2280\n8341\n9137\n4337\n2809\n2445\n809\n8298\n8643\n8316\n4951\n6853\n1572\n3215\n3938\n2249\n6515\n1337\n8328\n7712\n1429\n4117\n5441\n3230\n4152\n7225\n3513\n6953\n1507\n348\n3639\n5739\n2673\n1550\n6301\n1652\n8453\n204\n6833\n8056\n2200\n5217\n1854\n4711\n7368\n4572\n4032\n7531\n1013\n3634\n2875\n6058\n8307\n7609\n1766\n904\n667\n5410\n6578\n3601\n1664\n3233\n7390\n8178\n4486\n4952\n4427\n4876\n9166\n3107\n2772\n6295\n5001\n5296\n3371\n6518\n6327\n854\n1615\n8288\n1912\n5927\n6202\n5814\n9032\n1059\n3214\n6547\n7038\n5781\n6926\n4390\n6114\n1622\n4318\n5803\n5984\n736\n3561\n6554\n5045\n4277\n7386\n9081\n8462\n2034\n4955\n2701\n932\n1298\n7758\n7176\n9205\n2276\n3077\n3803\n3562\n8054\n7946\n295\n1843\n7728\n1629\n7768\n3663\n6363\n2971\n431\n9285\n2513\n1116\n3656\n4529\n6366\n5758\n6339\n8398\n816\n4153\n648\n2536\n1826\n7870\n8113\n7730\n7101\n6555\n9256\n6774\n1072\n4578\n2598\n3604\n5880\n861\n8273\n3350\n3117\n4685\n9219\n4334\n5165\n2035\n7224\n4066\n4253\n4447\n3815\n5038\n253\n3658\n2252\n330\n3967\n6443\n2143\n7336\n6135\n593\n2734\n8390\n4655\n7800\n1399\n1173\n5618\n2822\n7905\n7503\n4431\n2443\n1568\n3909\n1974\n2496\n4772\n5164\n4105\n2138\n2864\n3799\n3924\n4882\n8245\n1585\n5528\n5692\n5730\n5832\n137\n3175\n2894\n2062\n3899\n2752\n4028\n2113\n5411\n293\n2647\n730\n3758\n1667\n8879\n9303\n6653\n3698\n3968\n3053\n503\n2150\n4645\n2257\n4627\n8303\n7966\n8742\n4692\n5901\n8547\n2277\n5546\n986\n370\n4697\n8712\n4804\n4881\n1182\n6650\n7290\n3487\n2814\n5668\n7567\n5333\n3724\n4164\n3084\n8896\n3888\n6537\n17\n6882\n3531\n704\n1037\n8866\n5263\n6758\n3762\n1393\n3824\n5575\n5112\n214\n1439\n5700\n8932\n1306\n5011\n6928\n5173\n4098\n1132\n7352\n4778\n7723\n1368\n2390\n670\n2685\n5855\n1772\n6380\n3853\n940\n5424\n6091\n1748\n6193\n5297\n6572\n8877\n6874\n430\n5041\n5267\n1145\n7448\n620\n9112\n4294\n1432\n72\n130\n2393\n7920\n4597\n6614\n8889\n3697\n1895\n3462\n2616\n3978\n4791\n7846\n7780\n8372\n428\n6559\n8326\n9211\n2363\n1525\n5980\n7888\n3331
\n8118\n7899\n615\n7377\n791\n5930\n6627\n8322\n1138\n770\n8460\n5100\n8274\n8350\n6316\n2893\n7594\n9236\n5082\n8150\n1986\n1909\n8902\n2145\n3617\n3501\n7\n2426\n5056\n8016\n2702\n5360\n8135\n8385\n8378\n8018\n8574\n720\n8893\n3021\n1978\n4782\n1816\n2083\n4051\n1446\n5870\n971\n9097\n8006\n4222\n8287\n686\n1377\n611\n8153\n4920\n4808\n1536\n679\n4096\n3891\n4884\n432\n4615\n8988\n5560\n3451\n5589\n3514\n6169\n1414\n3244\n1490\n7100\n3588\n690\n7317\n4171\n2266\n6800\n108\n2793\n5151\n6977\n2587\n8188\n8752\n6318\n5815\n5116\n263\n3311\n5191\n5689\n289\n3392\n5755\n1022\n5548\n9319\n8937\n6011\n7632\n5328\n4993\n4141\n5407\n1865\n520\n7305\n7208\n526\n3645\n1859\n2520\n3523\n8629\n7304\n8881\n3076\n4005\n8329\n2205\n2214\n6925\n8691\n4136\n8883\n974\n7873\n7952\n3965\n5887\n7964\n7189\n2406\n2783\n8086\n405\n6568\n5147\n2021\n4727\n4826\n7674\n1600\n5078\n2949\n6624\n6541\n8986\n5740\n4679\n8500\n3591\n4434\n398\n983\n7544\n1478\n4570\n6012\n465\n9330\n7206\n808\n8737\n2356\n4959\n8812\n6955\n3599\n2168\n1420\n1721\n1794\n5897\n8422\n2\n4023\n2739\n3619\n8797\n5496\n8951\n8181\n6893\n9254\n1809\n5682\n4309\n6929\n2742\n5988\n3363\n4493\n8434\n4210\n1503\n1876\n5094\n4600\n4936\n4798\n3933\n5216\n646\n7660\n3098\n8773\n4076\n1576\n5335\n3746\n3327\n47\n4602\n8636\n4129\n363\n6417\n7416\n9025\n4377\n4766\n2779\n4151\n9046\n7860\n3154\n3476\n7620\n966\n2052\n8344\n1752\n7199\n4412\n8895\n8882\n2463\n339\n56\n5390\n4821\n7555\n6558\n1905\n5258\n8880\n4205\n3580\n6735\n1023\n4511\n3850\n161\n7395\n2532\n3349\n7055\n7387\n758\n1907\n872\n3006\n659\n815\n1961\n6902\n7668\n4708\n1904\n4433\n5159\n6816\n8664\n6918\n1016\n6513\n7314\n5364\n7480\n9313\n716\n3395\n6843\n2292\n918\n4329\n1035\n6344\n8593\n3404\n5212\n837\n480\n8524\n1342\n3690\n6797\n7414\n288\n8863\n3352\n1628\n24\n135\n3314\n2181\n8650\n5915\n8078\n6812\n1375\n6040\n906\n5635\n7126\n1387\n7458\n6119\n5591\n3795\n1531\n95\n1960\n7522\n3033\n898\n4607\n4921\n3913\n2623\n4430\n6268\n7063\n1326\n9075\n2505\n7400\n1284\n2951\n747\n6466\n1357\n6493\n7320\n5892\n576\n5107\n5559\n97\n2583\n6361\n8843\n3509\n7892\n6086\n1476\n4612\n7427\n4267\n9094\n7050\n6048\n8455\n8382\n2227\n284\n2898\n3221\n2353\n2157\n5990\n5810\n3581\n7279\n6188\n7859\n3549\n5539\n7918\n2022\n9066\n630\n2500\n5111\n6561\n5127\n8095\n5569\n6123\n1338\n8605\n3491\n4187\n8220\n7334\n9213\n3067\n6997\n2853\n4735\n4372\n1489\n5954\n6662\n2207\n973\n3361\n960\n6350\n4170\n7431\n8076\n1129\n750\n7559\n7194\n2261\n2300\n6590\n5893\n6889\n3125\n8788\n334\n7286\n3472\n8164\n7693\n1469\n1181\n669\n7515\n5563\n4773\n3210\n6324\n3113\n9070\n3638\n7551\n2541\n3506\n5138\n4069\n7198\n7560\n3306\n6100\n2932\n4473\n1741\n14\n4672\n7564\n8748\n8874\n3804\n3678\n2240\n2610\n2862\n1358\n5716\n42\n5176\n9326\n8464\n1038\n2993\n3017\n9072\n32\n4809\n4364\n2808\n4125\n448\n152\n7299\n5431\n6178\n793\n3444\n9120\n8410\n4963\n772\n5457\n6954\n3014\n6881\n286\n553\n1948\n6398\n6255\n3057\n8646\n6176\n2700\n7106\n5663\n6683\n1281\n6013\n8799\n7635\n9289\n1885\n442\n2225\n6294\n5054\n2674\n7884\n8730\n8216\n4203\n1488\n7111\n4013\n3623\n7950\n1971\n1966\n3248\n2900\n1553\n472\n3865\n7796\n6937\n4591\n8098\n5208\n294\n5627\n5691\n5687\n7149\n4879\n3624\n7005\n2773\n3112\n9185\n1633\n7830\n5101\n8707\n8469\n4678\n4860\n700\n5527\n9194\n2794\n5068\n2639\n1177\n4282\n6492\n8128\n5859\n5029\n5123\n2877\n522\n5048\n7230\n2104\n6642\n6731\n2717\n5149\n2043\n9059\n5277\n844\n1394\n3262\n5515\n6706\n3651\n9105\n7671\n2880\n3607\n6410\n2508\n8463\n2394\n1916\n1125\n5343\n3322\n5307\n4547\n1589\n8478\n8899
\n2955\n8028\n7293\n4619\n4058\n2781\n8715\n1272\n5734\n4474\n4863\n4367\n49\n8844\n5605\n8671\n6743\n4281\n7077\n1874\n2626\n2516\n258\n5249\n6186\n7958\n5432\n3801\n6288\n4732\n9121\n7558\n2527\n4661\n6819\n3835\n7508\n584\n215\n5036\n4261\n8978\n5228\n647\n4657\n2591\n5931\n5088\n9204\n929\n4381\n5421\n2965\n5050\n6495\n5033\n4799\n959\n6115\n3520\n1232\n5811\n317\n8976\n7705\n3842\n2178\n7187\n1373\n7112\n2694\n8627\n8493\n3991\n7441\n6308\n2589\n6462\n3406\n7673\n8660\n2902\n752\n1025\n849\n7682\n6982\n6652\n3612\n298\n5148\n4873\n3414\n1693\n1458\n327\n2016\n5002\n6768\n7016\n5583\n3270\n857\n8232\n7158\n7981\n4676\n4675\n2164\n8360\n6709\n8143\n365\n4062\n4527\n7928\n9009\n6228\n5818\n2533\n9305\n8887\n55\n2507\n8870\n6649\n5158\n76\n5595\n6693\n5306\n8666\n3020\n7527\n3082\n6304\n1591\n6145\n6868\n7205\n9107\n1165\n6773\n172\n1993\n4176\n8400\n4611\n7589\n8702\n5386\n6095\n6335\n1561\n8805\n5963\n7393\n3681\n2037\n4968\n7451\n3360\n7466\n8361\n4455\n4064\n5422\n1689\n3977\n7269\n362\n4178\n4145\n6127\n5162\n2399\n9225\n7068\n1650\n794\n3007\n1348\n7736\n444\n6081\n5298\n2026\n2543\n9087\n3593\n7425\n3730\n8468\n2641\n7529\n1720\n6377\n8732\n5851\n7956\n3150\n3785\n6485\n3611\n2869\n8510\n4775\n4463\n1251\n9124\n6873\n3391\n6505\n4118\n1617\n8837\n7051\n3213\n3668\n5347\n8452\n6289\n5840\n478\n3522\n453\n3376\n6190\n3342\n2237\n2870\n5178\n5567\n5952\n6919\n3005\n134\n3397\n7443\n8539\n6822\n5264\n3288\n5962\n8421\n6744\n8608\n4656\n1802\n2073\n4271\n1043\n2922\n8211\n2196\n5260\n3789\n7211\n7571\n7834\n5680\n2047\n5502\n3369\n3437\n3286\n5517\n3912\n8386\n1442\n6961\n2191\n2417\n9088\n5155\n6813\n4520\n7375\n1224\n811\n1891\n3748\n4123\n2789\n5305\n8419\n7248\n9237\n992\n4038\n4499\n2060\n5538\n850\n2669\n7612\n104\n9290\n2526\n1287\n4160\n4633\n7125\n742\n744\n4534\n2407\n7714\n4555\n8764\n7661\n4722\n7721\n3205\n6657\n1214\n3754\n6080\n4593\n3018\n8792\n2294\n4450\n7701\n9301\n127\n7069\n4513\n6243\n8025\n4010\n8632\n4715\n5284\n4574\n726\n4252\n4561\n7354\n299\n6088\n1090\n5012\n5684\n3489\n5639\n4888\n1584\n1969\n4846\n2915\n6804\n2775\n7306\n6506\n9306\n5231\n7740\n4283\n953\n6725\n458\n8290\n1504\n1539\n8885\n138\n3764\n1256\n257\n335\n1011\n7060\n5986\n9323\n4740\n8994\n4140\n6807\n8254\n3963\n9297\n2102\n2964\n9207\n4910\n8709\n4411\n1672\n457\n5852\n8037\n4932\n3679\n8794\n2362\n8592\n495\n8432\n1608\n2155\n7411\n2881\n9244\n37\n6535\n8219\n4505\n8635\n1928\n8384\n2570\n8996\n7610\n2128\n8728\n6656\n8935\n6681\n2070\n176\n9062\n972\n514\n1796\n4039\n6838\n2462\n230\n569\n5521\n4637\n4939\n4420\n2863\n672\n4995\n3807\n447\n1656\n2005\n5113\n3297\n8858\n2118\n6309\n1926\n481\n1156\n1509\n1228\n1787\n5978\n8678\n3951\n2929\n4980\n5039\n4713\n7002\n151\n5536\n8148\n3823\n4709\n2299\n142\n7067\n2372\n3761\n9\n2265\n5747\n2764\n724\n2913\n3151\n4525\n6370\n4247\n9329\n5494\n3721\n629\n3621\n7371\n59\n1999\n6704\n3734\n2698\n4691\n6938\n9117\n8415\n6353\n6750\n9077\n2679\n7623\n2478\n7321\n6611\n4007\n2076\n5772\n6416\n2264\n8348\n2672\n6546\n754\n6934\n7908\n8546\n4404\n592\n4748\n6625\n2129\n7944\n2377\n6\n8929\n8275\n3515\n4524\n3660\n8710\n419\n6878\n170\n8313\n7460\n8753\n2917\n6891\n6663\n4918\n7129\n396\n7256\n3500\n631\n5585\n8343\n2695\n6168\n6292\n3176\n5092\n5160\n3701\n9021\n7221\n7825\n1216\n1438\n3471\n2318\n8923\n6223\n2182\n7621\n8514\n9010\n8987\n1252\n1972\n1872\n1715\n8205\n6463\n8138\n8989\n5661\n2890\n565\n2427\n8946\n1303\n3718\n6000\n3620\n1560\n5276\n8089\n9260\n1467\n6173\n7641\n7520\n5061\n4677\n5757\n4400\n2620\n2719\n8995\n2079\n6644\n1683\n8141\n7754\n
5744\n2952\n7568\n654\n7457\n5368\n3310\n1510\n4440\n1513\n3072\n8034\n1456\n9164\n3163\n3035\n6111\n5042\n7161\n1401\n1084\n8000\n6672\n8531\n5404\n6550\n8379\n9141\n8681\n7752\n6394\n7011\n3739\n8253\n978\n4771\n6024\n4828\n7959\n1649\n1727\n7073\n8349\n6952\n661\n7283\n3159\n2590\n3496\n8741\n3969\n2956\n4565\n920\n1830\n8558\n1930\n6677\n6825\n8256\n7454\n7521\n4710\n1768\n3753\n6459\n5606\n5292\n1397\n240\n2733\n946\n6711\n3242\n2627\n4929\n5006\n3202\n132\n2295\n2746\n1293\n2124\n5405\n4065\n818\n7464\n1820\n4398\n1312\n6994\n6920\n261\n987\n6120\n3109\n331\n2986\n4338\n7774\n5122\n8396\n1364\n8969\n6712\n8161\n7083\n7595\n5940\n1566\n6419\n8634\n4432\n6047\n4749\n6076\n1161\n8217\n674\n8494\n3688\n2447\n4704\n969\n7477\n1160\n3243\n3173\n4979\n9288\n6860\n1662\n6171\n225\n5143\n313\n8327\n3275\n3385\n7626\n3103\n4401\n6794\n5600\n5043\n7664\n933\n6830\n4452\n3980\n1604\n5875\n6633\n4635\n5756\n3329\n1751\n8108\n4817\n1989\n1237\n1893\n2848\n9334\n51\n8875\n4981\n5417\n4134\n877\n6688\n3545\n4943\n5615\n2476\n1684\n3652\n7396\n1769\n1171\n6563\n3415\n3644\n340\n6630\n8284\n3256\n7240\n5371\n3405\n2108\n6360\n1734\n5612\n8638\n2343\n1103\n7803\n6809\n3055\n188\n8031\n3124\n3683\n4537\n988\n2297\n4893\n6499\n3396\n839\n4467\n5195\n4041\n6457\n4441\n6378\n6472\n6195\n4912\n6884\n5922\n7014\n1660\n38\n1595\n6752\n4554\n1292\n2709\n3800\n6057\n1980\n8775\n6587\n6392\n6263\n7214\n5219\n282\n309\n6685\n2253\n6311\n4092\n18\n7570\n5543\n4081\n2515\n6278\n8690\n5294\n6184\n5215\n9130\n6720\n250\n7250\n4983\n639\n3567\n7841\n2636\n4067\n8446\n5703\n8609\n2586\n7695\n1253\n6701\n7930\n6317\n5921\n7719\n8501\n7312\n4110\n6219\n4552\n5059\n4088\n7975\n9132\n6054\n692\n3412\n4079\n6754\n6950\n5281\n3028\n8321\n3877\n7614\n8939\n4188\n2223\n239\n4745\n6875\n7096\n5571\n4403\n2640\n5556\n1845\n6690\n1825\n4157\n314\n4682\n8825\n1003\n6206\n8093\n7215\n6465\n99\n8077\n6631\n4206\n2523\n366\n1208\n6043\n4640\n1457\n5475\n4985\n1351\n3090\n5625\n7307\n8466\n2003\n8854\n218\n1500\n4476\n2293\n1847\n5032\n2147\n866\n3710\n2552\n1749\n6692\n3926\n4112\n6458\n735\n9171\n60\n9304\n6726\n2630\n2882\n1178\n1151\n4922\n4662\n173\n7233\n1776\n6533\n4113\n2423\n2425\n4343\n5800\n970\n6372\n1009\n6607\n3068\n8435\n6423\n3126\n4813\n1709\n1201\n7104\n5620\n3932\n5701\n5724\n3366\n8050\n4984\n5023\n9203\n5079\n627\n290\n779\n5572\n5233\n1392\n4975\n8534\n8210\n2269\n1143\n2475\n2562\n905\n4546\n267\n3536\n8538\n449\n101\n7367\n2722\n4605\n7356\n6781\n8537\n8697\n6820\n8340\n8926\n3821\n2349\n2259\n6545\n8100\n8395\n2258\n2911\n5108\n3946\n1406\n8683\n8296\n5579\n2177\n8264\n1425\n3940\n957\n3647\n515\n5342\n8363\n2449\n3108\n1001\n2937\n3452\n5574\n4319\n9184\n8381\n945\n6876\n600\n5714\n4871\n8532\n1852\n8856\n392\n2018\n8878\n369\n5711\n9230\n5304\n7266\n1681\n7829\n2309\n4683\n8938\n2255\n6159\n3207\n4651\n2029\n4341\n5106\n5794\n9024\n4712\n2434\n7151\n7359\n6431\n1290\n5918\n8705\n3438\n5554\n8876\n7415\n6290\n5373\n3805\n2950\n2331\n6772\n8997\n6576\n2307\n8515\n4033\n3428\n6487\n6595\n45\n5792\n333\n762\n2383\n3388\n666\n2166\n460\n943\n364\n6980\n8223\n8221\n637\n6218\n4108\n5381\n4649\n5096\n1614\n8768\n5095\n3809\n5030\n984\n3538\n5120\n2498\n5222\n5613\n5486\n5119\n241\n5707\n9227\n544\n4109\n7771\n728\n3671\n9327\n1230\n9270\n1070\n8565\n4769\n7056\n5654\n7965\n1793\n5956\n7883\n1362\n5479\n8769\n8821\n8320\n1901\n1994\n2461\n5552\n389\n2839\n6467\n2762\n4763\n3499\n1487\n7599\n4488\n3241\n8272\n1131\n4496\n7006\n7265\n4897\n2747\n6618\n5291\n4563\n5146\n1939\n6369\n8548\n6163\n5526\n4068\n9030\n5349\n84
33\n748\n1477\n4265\n9200\n3878\n462\n6846\n9040\n4806\n3519\n6798\n5464\n5179\n546\n6044\n8114\n7216\n6276\n1495\n494\n8146\n5434\n856\n8403\n8071\n3972\n5544\n3337\n6855\n1546\n2824\n1718\n6009\n2042\n251\n9076\n3330\n5004\n192\n4717\n3797\n1146\n394\n7814\n7699\n4659\n4689\n4156\n7903\n9054\n7332\n7811\n1119\n5531\n6782\n5210\n8412\n2633\n7924\n4624\n8314\n5666\n3240\n2310\n4262\n8160\n4553\n8196\n2661\n7213\n7455\n7399\n870\n6126\n1227\n1226\n781\n937\n6343\n2578\n2892\n4124\n2792\n5696\n6865\n6455\n8312\n5193\n6026\n5251\n3787\n4460\n4687\n7923\n1140\n9106\n796\n2482\n9170\n8695\n2749\n6734\n4825\n114\n8319\n827\n4175\n390\n7611\n7484\n1249\n7727\n955\n579\n3629\n8915\n2958\n885\n7227\n1424\n4810\n4604\n1535\n774\n7518\n5428\n1955\n8233\n2645\n2167\n6484\n3855\n1502\n4861\n2333\n2973\n4829\n1906\n3966\n476\n9023\n6960\n3483\n2748\n5891\n8174\n7702\n8948\n5324\n4396\n1605\n2823\n7348\n7347\n5933\n310\n9082\n916\n4255\n203\n4239\n5976\n6200\n6435\n4425\n787\n1121\n6034\n13\n39\n3104\n5961\n5507\n5785\n1463\n7339\n1575\n7801\n5445\n8283\n5951\n6995\n999\n5163\n6023\n3786\n6536\n5850\n3524\n3528\n4508\n6674\n2939\n8227\n4598\n7550\n8495\n8622\n1152\n4538\n4003\n1318\n739\n3296\n8202\n1552\n6204\n5236\n3576\n4699\n9238\n1879\n488\n2274\n433\n5587\n1678\n9282\n7914\n8552\n6445\n7971\n8331\n6880\n7476\n7282\n1570\n7271\n3827\n6489\n8091\n9287\n7351\n1765\n5286\n6921\n542\n1762\n8553\n4987\n894\n3622\n7855\n92\n3131\n4811\n3590\n6517\n4510\n733\n4954\n1360\n5669\n2842\n8107\n5646\n5968\n1618\n1827\n7709\n8521\n5807\n5321\n9239\n5501\n3745\n4437\n1586\n7273\n5265\n6605\n7917\n1607\n6074\n4668\n7061\n1580\n8694\n8461\n4573\n618\n9173\n5243\n435\n8770\n2421\n7450\n3870\n8308\n2605\n2934\n9240\n6887\n4512\n1198\n7585\n7691\n7738\n2843\n8423\n7929\n6971\n7854\n86\n9128\n4298\n622\n790\n9155\n6579\n2203\n7716\n1265\n8645\n3834\n1174\n7380\n623\n8936\n4306\n8082\n4312\n8661\n5753\n7243\n2768\n8155\n85\n4143\n3047\n8479\n7809\n2833\n5555\n7578\n1637\n1936\n8130\n5549\n8062\n7143\n5522\n8966\n5614\n8105\n8719\n7655\n7502\n8268\n5760\n6695\n5565\n7615\n9226\n4870\n4507\n3160\n4835\n1598\n2465\n4422\n5248\n7867\n1078\n5015\n6660\n1676\n5354\n6391\n5351\n7184\n6280\n5936\n6124\n1327\n2906\n269\n8292\n2466\n8809\n5167\n8142\n8204\n2713\n1910\n2930\n2494\n5592\n7384\n7726\n5727\n625\n1735\n5710\n5518\n2491\n1410\n4989\n5183\n8777\n6562\n4947\n3692\n6129\n384\n1097\n2084\n5209\n3723\n7272\n6895\n2459\n543\n8621\n5394\n6211\n2074\n1511\n2524\n7776\n5055\n7191\n6207\n7922\n281\n8436\n2918\n3141\n4800\n6323\n7631\n8903\n2716\n3735\n3012\n5301\n3975\n2800\n7963\n105\n1920\n7391\n4909\n1754\n4816\n5488\n5145\n5098\n5139\n5268\n9317\n8631\n4346\n7318\n136\n3993\n1220\n2151\n308\n7483\n7582\n3071\n1339\n3777\n8191\n5378\n7087\n1056\n7465\n5608\n6564\n512\n2754\n2687\n1596\n5376\n1512\n566\n6382\n7360\n1757\n8035\n2296\n4264\n3551\n1053\n4716\n1537\n8518\n254\n6253\n7132\n8557\n3490\n9267\n5473\n2412\n7539\n7136\n6670\n3974\n891\n1323\n5958\n1217\n2879\n9118\n1259\n2317\n7033\n2467\n6665\n6244\n2180\n2140\n7098\n5126\n6395\n4150\n547\n4120\n4307\n1725\n2737\n8549\n8195\n1245\n6286\n935\n1756\n1701\n1626\n7379\n3492\n3717\n5802\n2817\n1234\n1005\n4101\n21\n2576\n4650\n3381\n1030\n2844\n1641\n936\n2729\n6469\n8913\n8369\n5994\n341\n81\n4083\n1685\n5152\n3380\n8739\n6615\n3829\n164\n7927\n4779\n829\n4216\n8528\n3641\n4606\n2769\n6970\n1545\n8850\n4971\n5489\n2008\n4564\n8682\n7784\n5768\n9252\n901\n438\n3577\n2765\n5904\n664\n3348\n6298\n3602\n2502\n8617\n7684\n4293\n5166\n5805\n4126\n2451\n6906\n7234\n9243\n3778\n2940
\n1087\n9053\n5026\n2504\n5283\n2820\n4242\n797\n3925\n1383\n8750\n7861\n1403\n6973\n7617\n968\n3065\n5395\n4347\n8144\n2688\n6527\n8597\n8673\n7327\n6331\n1422\n7115\n244\n7013\n2092\n54\n7970\n5742\n3464\n4823\n8588\n2938\n3060\n6406\n4149\n2375\n6616\n8803\n1555\n4369\n1380\n3011\n6144\n3367\n4990\n7370\n7131\n1995\n2602\n985\n8785\n8480\n9125\n1927\n3269\n3771\n1032\n7378\n6900\n5726\n2731\n2020\n4503\n3313\n6727\n8793\n2304\n523\n6036\n58\n7993\n5512\n5049\n2721\n8482\n673\n7937\n1168\n4472\n8247\n7287\n9017\n6421\n9190\n3584\n1819\n1792\n2810\n6033\n638\n6749\n7677\n981\n7160\n4726\n1886\n7845\n7911\n6975\n568\n7422\n4613\n4501\n2569\n4263\n3206\n4133\n2420\n3706\n8894\n2263\n5774\n4925\n9180\n8888\n2945\n2091\n1873\n6303\n729\n6728\n2156\n3267\n1860\n6597\n1374\n4930\n5253\n938\n580\n5825\n4839\n166\n8198\n6892\n8701\n74\n7094\n7284\n8954\n3156\n6140\n4279\n5594\n2229\n7535\n5466\n8413\n7105\n8192\n2632\n7638\n9308\n8530\n832\n4643\n2201\n3268\n4322\n6510\n2967\n262\n403\n7973\n1258\n8828\n4036\n5838\n9263\n8529\n2788\n4202\n237\n3838\n1291\n2305\n4056\n5628\n7281\n1430\n6476\n7935\n2850\n6041\n2013\n4016\n4576\n5312\n6827\n6321\n8669\n8439\n830\n1942\n1519\n2750\n6106\n6993\n6235\n5899\n7313\n5331\n4371\n7086\n4399\n8600\n2660\n5409\n3465\n5499\n6231\n5745\n1801\n5337\n4468\n1451\n4192\n1275\n8230\n2302\n1114\n4960\n8860\n3900\n6468\n5058\n1505\n8868\n5588\n3858\n1947\n2565\n1472\n8499\n243\n8442\n6583\n7085\n5374\n2250\n4291\n4426\n492\n2311\n8305\n3662\n5338\n8780\n7488\n3890\n5005\n2442\n4680\n7358\n9116\n4397\n5999\n587\n7902\n83\n3566\n2134\n8942\n4767\n6601\n2456\n1745\n5736\n5254\n8017\n4015\n7690\n3798\n8947\n1067\n2116\n7945\n590\n2547\n2535\n64\n2053\n5359\n2493\n6669\n4351\n6412\n7473\n6147\n7175\n6983\n5196\n745\n2657\n3497\n697\n3161\n7528\n2239\n5991\n3201\n7681\n2440\n5189\n2959\n2044\n8917\n2046\n6313\n6333\n5318\n2763\n4301\n2555\n2213\n2933\n4121\n1340\n3903\n4392\n7889\n5323\n1055\n707\n3857\n518\n6078\n5134\n6645\n9138\n1592\n680\n4446\n7943\n3461\n3887\n5601\n2321\n6621\n558\n4914\n913\n5637\n6453\n8511\n4531\n1218\n5508\n2603\n6802\n8426\n8297\n2947\n5971\n6552\n5262\n5935\n782\n7435\n8357\n6139\n1136\n1473\n5008\n3585\n3627\n2914\n5356\n2997\n2347\n881\n5652\n4849\n8808\n8351\n4017\n2010\n6836\n7616\n4391\n3630\n3712\n6099\n2969\n5238\n4333\n2301\n4406\n1236\n1050\n1864\n1104\n8408\n8251\n8795\n5879\n3365\n7481\n8206\n2452\n1767\n8859\n124\n3948\n4444\n8962\n4438\n5003\n1740\n8428\n3105\n5117\n1095\n1480\n8755\n7881\n3097\n4877\n155\n1917\n2455\n6042\n337\n6724\n6045\n8483\n7135\n2242\n4566\n1679\n834\n1746\n795\n3548\n2314\n2036\n4046\n9129\n6979\n7084\n5091\n2413\n8170\n5775\n1817\n529\n7220\n813\n2916\n5130\n8972\n126\n1243\n2370\n4831\n9122\n3010\n5104\n2613\n6761\n7482\n909\n2146\n4595\n5340\n3512\n6283\n2346\n653\n6121\n2615\n7421\n1869\n1002\n8834\n2991\n8992\n632\n1093\n4543\n645\n2352\n4115\n373\n1483\n6966\n8598\n3896\n3434\n5987\n8318\n1815\n1223\n1548\n6885\n5073\n6330\n2573\n1369\n4095\n1431\n2185\n5766\n1301\n7258\n8048\n7598\n2847\n1996\n2378\n8561\n743\n6381\n271\n1956\n7439\n7596\n7134\n6636\n5804\n1858\n6214\n4730\n8536\n1203\n3118\n9202\n1875\n5885\n8975\n168\n5898\n4014\n4186\n3346\n3041\n5558\n9296\n8157\n4339\n3234\n1738\n2604\n6803\n5387\n5590\n125\n2173\n8012\n8005\n4858\n3069\n651\n372\n378\n8366\n6299\n1449\n7793\n8541\n3235\n8043\n3086\n3983\n6949\n4690\n2176\n6494\n7637\n8406\n3856\n7408\n350\n7021\n8224\n7044\n7662\n6697\n7679\n169\n528\n7029\n2790\n7138\n7432\n7602\n8333\n1582\n1378\n519\n482\n9279\n8015\n6592\n4514\n3542\n2612\n6
28\n5053\n6699\n6227\n2094\n1621\n847\n3598\n2728\n8490\n7276\n6620\n8345\n9216\n4278\n4059\n9058\n5063\n5816\n4173\n8134\n1997\n3182\n3224\n8129\n5109\n4494\n189\n7640\n8243\n180\n2963\n1123\n5593\n3263\n4185\n7140\n8990\n6320\n9275\n4601\n4854\n5907\n1135\n8083\n5964\n7788\n1992\n8069\n9174\n6160\n35\n8572\n2865\n46\n3952\n6418\n2510\n5783\n20\n3816\n2715\n3930\n2548\n5204\n4122\n4103\n708\n7756\n3825\n777\n3550\n8502\n3929\n5440\n6751\n7764\n4070\n7331\n3743\n9131\n9206\n3828\n23\n41\n4197\n234\n5723\n7622\n8832\n4626\n2169\n5599\n2976\n5266\n1967\n1150\n5334\n90\n822\n2538\n3169\n6771\n7442\n498\n4967\n5580\n7581\n7680\n4728\n1115\n4040\n1064\n3106\n6266\n4415\n9294\n5597\n7059\n197\n7218\n6948\n5690\n4234\n1653\n4485\n4019\n3370\n919\n1330\n6085\n2078\n3768\n5427\n4545\n2435\n8862\n3633\n8145\n5221\n1388\n5913\n8140\n7471\n7156\n6989\n1190\n6832\n2830\n4387\n3454\n7469\n2910\n4526\n5187\n2410\n9223\n6247\n6912\n4681\n1300\n7407\n8612\n6523\n3616\n6894\n7253\n4515\n5874\n5448\n7137\n7957\n1130\n3092\n7054\n3516\n5797\n1000\n2727\n4336\n9090\n6403\n7255\n8919\n6522\n6760\n8898\n4803\n1938\n374\n8686\n9150\n3985\n7045\n3475\n6065\n7991\n1409\n7851\n6671\n6090\n5826\n7857\n1155\n8964\n1117\n7072\n6064\n2497\n4899\n2397\n3189\n2369\n15\n5027\n5754\n8950\n5617\n8391\n914\n6264\n279\n6174\n5184\n3733\n7392\n5278\n2924\n567\n7994\n352\n8084\n2148\n2723\n3359\n70\n1870\n7708\n220\n3994\n9013\n3191\n9220\n4155\n5717\n1110\n2198\n9179\n785\n5325\n4770\n4250\n52\n4634\n5072\n9037\n601\n8036\n7996\n2483\n7232\n8675\n8836\n1279\n5346\n7676\n6104\n1515\n4603\n5607\n7894\n5144\n2628\n68\n440\n3586\n3083\n4830\n4378\n7762\n1134\n4542\n7850\n6296\n2866\n4011\n8751\n4776\n7954\n7102\n5697\n2032\n5729\n5017\n6962\n2051\n1092\n764\n9019\n2759\n8581\n1484\n8618\n912\n2382\n4892\n8447\n8176\n5491\n5695\n5504\n1060\n7064\n709\n578\n4320\n2379\n7649\n8416\n1613\n5344\n7512\n7865\n3037\n6689\n6557\n1569\n5955\n3707\n9168\n8566\n1775\n5950\n6943\n7804\n434\n6179\n9300\n1142\n7947\n6456\n6291\n5789\n6538\n9134\n3049\n5075\n5399\n5161\n1623\n948\n6302\n6063\n7516\n117\n506\n3302\n7146\n355\n3854\n1081\n2827\n1496\n2574\n6167\n3183\n4287\n5482\n1722\n7319\n7277\n3860\n3443\n3298\n8364\n3826\n7254\n2360\n5093\n7039\n6325\n4230\n2567\n6241\n4443\n559\n2625\n4228\n8967\n6405\n1674\n3936\n4475\n8556\n8585\n896\n3713\n6259\n4297\n6718\n2392\n2279\n4927\n1283\n2374\n2860\n7665\n663\n596\n6293\n6805\n2811\n7383\n8306\n8330\n3153\n2153\n2618\n2441\n3615\n8092\n552\n5285\n5255\n8124\n9247\n5530\n8175\n6242\n5660\n3433\n1610\n1832\n3892\n3862\n640\n2127\n2474\n4196\n3495\n7217\n5206\n4836\n7759\n4376\n800\n4227\n3699\n9055\n5665\n6826\n7463\n9065\n4720\n5069\n3245\n3453\n3358\n6532\n5970\n7921\n4087\n1547\n3424\n8040\n7995\n6787\n9069\n8716\n2561\n8199\n1479\n2767\n7818\n7145\n604\n7597\n4896\n9281\n4666\n185\n8171\n7978\n3059\n9196\n9221\n2135\n1800\n2974\n1529\n5948\n446\n4436\n8672\n3508\n6208\n5673\n6998\n5203\n278\n7041\n9110\n5853\n8121\n1764\n3046\n2400\n6575\n4738\n2228\n7761\n9322\n7019\n6931\n6383\n6762\n283\n3935\n2534\n7717\n6785\n471\n8214\n231\n4241\n5310\n3844\n5746\n2011\n7209\n336\n6433\n756\n9167\n6741\n3345\n7685\n4018\n6682\n9147\n4790\n5836\n5906\n8747\n676\n3964\n6362\n3510\n7510\n2308\n1806\n5917\n1189\n4012\n3387\n1331\n5319\n5423\n8900\n147\n3780\n1696\n9111\n6783\n6497\n4104\n1898\n3987\n260\n4616\n2121\n9283\n1400\n2437\n4670\n2735\n1163\n2096\n6521\n1423\n4523\n2243\n6667\n6990\n3944\n6915\n6763\n5611\n404\n2691\n1015\n7092\n7562\n8624\n2291\n4193\n5934\n5503\n2326\n4408\n2960\n842\n1963\n3354\n55
68\n9050\n3806\n439\n9154\n6055\n6451\n2190\n7633\n688\n4354\n8890\n2813\n2872\n8102\n8317\n6609\n1497\n8389\n6449\n1682\n3594\n5103\n5812\n863\n268\n3054\n8079\n2260\n2027\n3091\n7687\n6703\n3557\n2019\n8427\n2799\n8182\n6641\n3168\n2284\n1934\n4865\n1077\n6507\n1658\n3811\n1774\n7897\n2238\n2943\n191\n3869\n3246\n4057\n3188\n414\n8072\n7838\n1382\n4962\n6010\n5363\n4042\n1983\n4077\n7429\n1833\n3583\n4044\n1109\n1295\n386\n5481\n3927\n311\n1349\n5651\n5878\n562\n2202\n8904\n765\n1501\n8654\n2975\n2689\n3680\n5180\n1900\n7707\n4723\n8912\n4029\n3579\n869\n2888\n8657\n6599\n741\n4288\n2244\n7357\n5704\n8791\n208\n8587\n7969\n4805\n8526\n4887\n8871\n7468\n3343\n886\n7794\n5764\n2646\n6454\n6101\n7885\n7744\n1297\n4119\n4856\n122\n2286\n2925\n5131\n3570\n5843\n3027\n5320\n5626\n540\n1862\n5401\n7335\n699\n7760\n9198\n3259\n7345\n8698\n1280\n6479\n3100\n3988\n1322\n5737\n1268\n3257\n6791\n3326\n4815\n7644\n1082\n2826\n6821\n8984\n2553\n5290\n5909\n4762\n9242\n8096\n8066\n4325\n6666\n7193\n7114\n8060\n2376\n7872\n6788\n3544\n5460\n3507\n2509\n6626\n3429\n5542\n4220\n2968\n5271\n4249\n3863\n1868\n5581\n2012\n6270\n8038\n4050\n121\n2845\n1565\n1998\n2275\n5524\n6068\n7624\n4913\n9277\n1506\n803\n8848\n5925\n2450\n2072\n8190\n4753\n9162\n1923\n825\n7303\n9028\n2088\n8516\n1556\n5937\n7847\n2367\n7549\n1049\n1521\n4739\n3931\n8958\n4130\n7877\n7876\n897\n5985\n7346\n7537\n111\n3700\n1126\n7896\n1288\n3419\n4673\n1051\n5720\n1068\n3458\n146\n291\n6256\n5514\n2857\n4580\n6239\n6525\n8717\n391\n4841\n6676\n4360\n1453\n4211\n73\n1675\n1987\n4025\n1321\n662\n8265\n6424\n2758\n7765\n7656\n3209\n7497\n7600\n9039\n7697\n5177\n2983\n5622\n9295\n1200\n3284\n964\n2024\n1269\n4551\n8088\n5659\n2212\n5199\n5551\n8607\n5573\n2247\n5200\n6341\n7951\n8429\n7720\n5919\n1273\n3529\n6707\n9176\n7552\n3255\n5649\n6110\n9235\n1137\n9272\n775\n788\n5786\n5186\n6746\n2667\n9145\n7630\n3953\n1828\n8827\n6471\n4702\n7815\n467\n6387\n3195\n6238\n6508\n2373\n5983\n4931\n2948\n921\n2438\n517\n3949\n2137\n3216\n5683\n3695\n1719\n4837\n9159\n6981\n860\n7410\n5497\n1770\n5557\n8810\n5194\n4857\n9100\n6329\n2609\n1925\n3686\n9041\n4924\n349\n9187\n3393\n3661\n7120\n6858\n4587\n3831\n3130\n5396\n5060\n6486\n3937\n8023\n824\n5398\n1354\n8861\n5534\n7292\n4389\n6029\n6226\n3505\n4326\n7445\n581\n6089\n3450\n7324\n6516\n6775\n1207\n4575\n5135\n9265\n3918\n9020\n3473\n3898\n7812\n6571\n6757\n6639\n2557\n1206\n6148\n7325\n8790\n4938\n7026\n4383\n8041\n1250\n7267\n1952\n7561\n8811\n4941\n8373\n4848\n6602\n8355\n8104\n5214\n6654\n4330\n995\n3181\n3422\n456\n1782\n3408\n6530\n719\n7587\n5910\n3058\n740\n2009\n4207\n5336\n2798\n9229\n8668\n2473\n4221\n1493\n3281\n171\n9157\n9139\n7766\n6220\n9127\n3324\n5308\n3708\n2431\n8080\n2093\n2585\n406\n7040\n5064\n5247\n4758\n6512\n2953\n4257\n4935\n2705\n2572\n3436\n8513\n5884\n1385\n4852\n2637\n7091\n2761\n6007\n8332\n6694\n2422\n4917\n2186\n6898\n1390\n6965\n3132\n7698\n475\n2002\n2692\n5024\n7365\n7373\n4091\n1731\n947\n3962\n8692\n1788\n8734\n8656\n6862\n6856\n1950\n1914\n5658\n3635\n1620\n4780\n2580\n1454\n2786\n687\n7238\n3648\n6452\n1197\n3190\n5900\n9043\n4958\n1935\n1821\n1187\n1153\n7737\n7223\n3820\n7169\n7350\n5674\n6254\n3025\n6680\n1690\n2899\n3893\n1577\n5728\n9189\n5077\n34\n3560\n2179\n5462\n1402\n3654\n1376\n7936\n4246\n5506\n1179\n5647\n4686\n8644\n1352\n2855\n6079\n2254\n2668\n2287\n2457\n3418\n7264\n677\n3074\n2655\n1042\n2210\n4504\n7089\n8309\n4209\n4280\n3258\n2977\n84\n4705\n1244\n3511\n6355\n8813\n3228\n9266\n1122\n613\n732\n5202\n8425\n2638\n6470\n2886\n3541\n8132\n2063\
n8201\n5129\n2818\n7949\n6936\n8090\n4465\n7295\n5239\n7009\n9271\n8563\n2832\n952\n8136\n6776\n3565\n5188\n7288\n6999\n285\n5487\n7763\n7608\n8584\n2071\n7868\n2804\n3655\n7048\n6847\n3276\n4082\n4272\n3910\n3709\n1574\n4559\n7580\n7081\n5014\n7769\n8183\n6386\n7574\n356\n4937\n2487\n9315\n7572\n3040\n671\n2682\n8626\n3868\n8623\n387\n8679\n4074\n1481\n3527\n3595\n4754\n2453\n1579\n4638\n9123\n1829\n316\n3009\n3691\n763\n4875\n3572\n4642\n3128\n4273\n2777\n6032\n4793\n233\n7147\n996\n3199\n8835\n3517\n7210\n6125\n6037\n3684\n8589\n3915\n3095\n8310\n3180\n7043\n4458\n2889\n57\n4483\n7667\n8375\n1434\n7493\n6986\n4733\n8471\n5827\n2111\n1313\n7986\n3075\n2614\n7547\n4977\n8527\n3212\n7300\n5842\n5244\n3291\n597\n1007\n2030\n227\n3830\n5540\n247\n5643\n9333\n1958\n3096\n1371\n5220\n7926\n2927\n1516\n7130\n193\n1522\n6165\n6923\n3794\n4223\n5535\n2472\n8630\n3971\n9101\n2946\n222\n4609\n7291\n8542\n6501\n7548\n4557\n6274\n1010\n5226\n7309\n1317\n9056\n6275\n1624\n1099\n4191\n4030\n7270\n5392\n2316\n3819\n1670\n8154\n8045\n4807\n8864\n2391\n5908\n8338\n8218\n6400\n9193\n3165\n843\n6613\n6941\n4380\n9332\n5629\n7557\n4321\n3702\n681\n734\n1159\n4665\n5959\n1697\n5509\n8774\n7389\n3832\n3751\n8637\n3079\n1680\n6841\n703\n684\n8293\n3682\n5733\n4818\n3231\n3078\n5562\n9001\n3889\n7024\n2519\n1713\n3287\n219\n6021\n8776\n2289\n7212\n4832\n4684\n4617\n4237\n2649\n8185\n6326\n3568\n551\n1426\n4181\n8869\n312\n2905\n4165\n8248\n2558\n900\n1044\n8613\n7743\n5437\n7604\n3122\n5708\n8649\n2878\n4695\n4491\n1929\n7533\n5223\n7711\n915\n1844\n5751\n3008\n8055\n961\n6142\n4636\n61\n198\n2271\n5698\n4596\n4500\n5709\n5819\n7972\n2992\n1643\n1048\n6281\n8886\n360\n4198\n1841\n6814\n3960\n2606\n7001\n5888\n450\n7133\n7015\n7034\n5153\n8920\n5066\n469\n1302\n8816\n463\n8651\n5869\n8193\n6582\n5578\n1231\n9274\n7260\n7751\n8052\n6799\n2089\n2342\n8451\n3260\n5550\n7795\n2288\n1205\n40\n496\n8367\n7836\n5973\n3908\n5242\n5062\n2706\n997\n6514\n5419\n9201\n1965\n6062\n3050\n5302\n8735\n358\n2398\n7470\n1644\n8179\n7047\n1549\n5414\n2539\n7381\n589\n8166\n8505\n6035\n3956\n4540\n6721\n8074\n1062\n2384\n2531\n7159\n3502\n3902\n4584\n2554\n264\n8720\n2849\n4916\n5218\n7202\n883\n4560\n1677\n4317\n7863\n4509\n6577\n2903\n1452\n1416\n5369\n473\n6233\n6359\n5992\n4934\n8059\n6834\n4907\n3320\n8267\n8280\n2066\n2402\n1485\n3772\n3732\n4764\n9126\n3575\n5564\n4768\n5641\n1884\n2330\n1804\n344\n698\n3089\n1532\n4454\n761\n7289\n8094\n3432\n1747\n6811\n8722\n8826\n4646\n3222\n8614\n2901\n7003\n652\n8663\n4266\n413\n810\n75\n3334\n4905\n6438\n4756\n5137\n6528\n6534\n6988\n6177\n8533\n889\n5384\n7201\n5132\n7802\n6864\n3973\n873\n4840\n1482\n8376\n3769\n5858\n6675\n4286\n2593\n5863\n4353\n7817\n7540\n4999\n4838\n2303\n6002\n7913\n1508\n5317\n7755\n2784\n4964\n3431\n6209\n3755\n6022\n6399\n6232\n3954\n455\n5416\n6448\n1558\n7591\n245\n140\n9210\n6585\n4084\n967\n7798\n6795\n7095\n6733\n3861\n9264\n361\n1045\n755\n8042\n7074\n7778\n6415\n4724\n6450\n2049\n1563\n1307\n3485\n1790\n7869\n3282\n6907\n3920\n2868\n5801\n5632\n1079\n5009\n3955\n7517\n5128\n3417\n3019\n2725\n1784\n2312\n2753\n6976\n342\n8266\n1849\n2273\n5037\n7880\n3793\n7401\n5412\n8279\n1257\n3670\n9049\n3266\n8955\n6519\n8916\n2858\n694\n5650\n1019\n4669\n1785\n3533\n5877\n2704\n8603\n3726\n6668\n497\n1085\n6815\n6157\n6646\n6964\n186\n8097\n5645\n8481\n8215\n3775\n2542\n7514\n5699\n4072\n3518\n5767\n3239\n3740\n1404\n8981\n4086\n6397\n6984\n4204\n6899\n682\n6589\n3317\n2944\n3456\n4340\n7424\n9208\n6504\n4409\n1\n145\n1882\n4620\n2634\n4992\n5453\n4481\n3377\n266\
n7875\n530\n1235\n7605\n504\n1771\n8489\n345\n7353\n7797\n7174\n5914\n2871\n5721\n6067\n3582\n7653\n5467\n6234\n691\n8758\n2122\n1213\n2908\n1492\n1437\n2187\n1266\n2395\n7278\n8491\n5256\n1554\n8163\n5966\n7128\n7904\n1691\n6272\n1264\n3996\n1706\n1334\n1316\n6478\n6935\n1518\n6700\n8703\n8744\n8152\n8778\n5367\n4218\n9007\n6312\n606\n7565\n5293\n2891\n675\n2125\n2120\n826\n7008\n5705\n7748\n8010\n1498\n5330\n5472\n2215\n7627\n3016\n6588\n1850\n4128\n8569\n6987\n7566\n148\n8151\n8789\n7907\n8596\n715\n6018\n9060\n3872\n1750\n5889\n4047\n5960\n3120\n3449\n1421\n1102\n3333\n9197\n8796\n8123\n8007\n2028\n8404\n1945\n1985\n8109\n5380\n8438\n3504\n6739\n4180\n5835\n4243\n25\n4002\n1976\n3482\n8392\n158\n5181\n4885\n8985\n11\n6872\n6425\n5926\n7062\n5083\n8394\n4259\n5844\n1990\n3942\n5532\n2220\n28\n5957\n149\n6748\n1663\n3559\n7647\n2566\n1359\n8787\n5259\n7010\n554\n8231\n4229\n6005\n8172\n8125\n1350\n3571\n9051\n1973\n1386\n1781\n5788\n159\n7007\n3220\n1846\n3093\n4445\n2056\n8370\n3211\n1113\n4384\n2231\n273\n4276\n642\n7663\n5311\n265\n226\n9012\n7879\n118\n7109\n7251\n1760\n8667\n2876\n7162\n3552\n6901\n6779\n5021\n6524\n4957\n3114\n4544\n441\n1848\n2136\n2458\n8662\n1127\n5541\n3026\n1080\n6780\n2224\n8259\n1073\n9000\n7244\n7977\n500\n4435\n7376\n7979\n1435\n9291\n7704\n3791\n3521\n210\n7388\n1039\n6269\n4052\n8570\n3285\n564\n8039\n3546\n6203\n1183\n6107\n4147\n6216\n2234\n7185\n3192\n7155\n2001\n7777\n876\n944\n908\n7791\n5465\n6784\n65\n9172\n5675\n7075\n3886\n7891\n2978\n1008\n5630\n591\n5067\n1139\n577\n9015\n574\n8137\n7786\n5765\n4900\n4090\n7842\n5741\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/data_specs/got10k_train_split.txt",
    "content": "3784\n8998\n3906\n1631\n8277\n8358\n2338\n7938\n2988\n8302\n2662\n2663\n2825\n7447\n4781\n2218\n6348\n5860\n4517\n2819\n8075\n5391\n116\n3606\n7976\n7941\n1024\n4519\n1970\n557\n8579\n6908\n993\n7204\n1991\n3674\n8781\n6840\n5\n3225\n3763\n8688\n6778\n5777\n4794\n2744\n8126\n3864\n1733\n2923\n6829\n701\n683\n2081\n1831\n2404\n1459\n2741\n5972\n3618\n7462\n2654\n103\n2174\n6224\n2989\n2506\n2766\n5912\n2699\n3295\n3986\n609\n4895\n6673\n801\n1098\n1602\n2490\n3129\n8476\n3186\n7355\n4784\n4270\n1812\n4226\n2267\n8873\n6544\n6112\n2381\n4752\n753\n3776\n6511\n6016\n731\n2559\n7369\n5866\n563\n7731\n1105\n5603\n50\n4238\n2208\n8725\n4994\n4719\n1444\n8807\n7298\n8139\n8760\n8173\n2332\n4131\n5207\n1065\n8562\n3992\n4024\n2188\n9095\n6765\n1707\n6105\n6922\n5362\n1486\n7898\n4135\n6574\n1551\n998\n6565\n8127\n8927\n2544\n4365\n510\n768\n3535\n3875\n6808\n2931\n487\n1088\n4451\n368\n2470\n8111\n3493\n7338\n8281\n6390\n1271\n4373\n3667\n3494\n3757\n2966\n3756\n7840\n6315\n7827\n3300\n6261\n4163\n2217\n6549\n94\n7236\n9136\n1857\n6691\n3470\n6271\n807\n516\n9311\n6098\n3144\n8420\n5425\n5694\n2643\n6696\n6072\n7285\n3781\n903\n8522\n6092\n5979\n2622\n2529\n855\n3420\n3261\n8953\n7866\n2492\n3157\n359\n1520\n2642\n7452\n759\n36\n8931\n1744\n4350\n1089\n9199\n4295\n1889\n1908\n4868\n4498\n1968\n9103\n3273\n8723\n7413\n4114\n5584\n4874\n1427\n5211\n7618\n1542\n1353\n8158\n4168\n3200\n6345\n8560\n5619\n5953\n3158\n8849\n5831\n1411\n7294\n8103\n6539\n7397\n1006\n5450\n3119\n4274\n5352\n4571\n2319\n4217\n4976\n902\n1814\n2651\n3299\n3398\n982\n2428\n5793\n1346\n7057\n3737\n7329\n4449\n2110\n7405\n1773\n958\n3901\n4127\n8234\n2994\n7066\n1289\n2995\n5871\n3556\n9085\n846\n2366\n585\n7032\n5516\n5230\n3481\n2732\n6658\n7423\n1855\n6384\n3554\n5823\n4948\n7058\n4667\n5377\n2503\n7694\n9191\n9144\n655\n3409\n62\n8019\n8970\n5523\n7403\n3379\n2323\n4833\n5750\n3178\n6548\n8891\n7501\n3280\n7404\n343\n2171\n8397\n1367\n8611\n6118\n6603\n3729\n7182\n9048\n7733\n5642\n7141\n3335\n4845\n5449\n3467\n6250\n163\n5168\n2040\n5339\n3609\n8352\n3426\n8567\n769\n187\n6151\n6437\n7028\n8507\n3970\n9146\n2068\n5028\n7492\n1661\n2815\n2469\n2563\n3814\n8430\n4305\n3479\n5678\n9115\n4132\n1211\n5459\n4814\n545\n4556\n238\n4296\n2724\n1260\n2581\n6087\n4632\n4313\n380\n1209\n5447\n3032\n7942\n8943\n806\n2432\n6130\n4314\n2131\n9045\n6531\n5706\n6747\n7724\n2017\n3292\n5469\n2743\n424\n4233\n7643\n8619\n5192\n4516\n9324\n3537\n9152\n8058\n7526\n8711\n1949\n5982\n1732\n6702\n7027\n6388\n7012\n328\n2130\n452\n306\n7669\n3134\n5761\n3703\n44\n4189\n695\n7672\n5224\n9215\n5644\n3143\n3704\n5443\n2348\n7177\n2328\n4725\n354\n1418\n7810\n7746\n9002\n5759\n7226\n4535\n9160\n4385\n5397\n7249\n2936\n3204\n6287\n385\n2371\n2738\n3636\n9033\n2246\n2680\n6940\n4310\n2054\n9250\n9080\n4568\n5586\n4469\n2038\n3410\n7900\n4332\n6108\n678\n3319\n9079\n1054\n4048\n4751\n1320\n6890\n7931\n1398\n4349\n5299\n5025\n7932\n5738\n7787\n4590\n4020\n1274\n2488\n8497\n3372\n8965\n3219\n799\n3664\n6500\n7093\n4362\n6205\n4244\n4652\n1964\n5945\n6434\n2031\n2684\n6632\n4588\n8271\n3232\n5782\n2904\n6789\n5636\n7200\n3632\n5435\n8203\n3480\n4786\n7579\n3351\n1921\n798\n3646\n3094\n4359\n1654\n5975\n376\n5965\n780\n7821\n9224\n6738\n3185\n2133\n6248\n5996\n2834\n531\n5688\n2448\n7925\n7974\n5924\n6401\n5778\n6594\n5442\n8336\n4522\n3770\n6340\n6328\n4946\n4161\n2954\n2588\n8465\n2885\n1606\n5787\n3407\n3121\n7310\n1413\n1932\n4787\n2579\n3325\n508\n5610\n6480\n4290\n479\n3792\n6628\n2545\n6717\n6972\n2665\n6730\n3547\n6845\n5929\n3540\n435
6\n8993\n1052\n2235\n8356\n3403\n8818\n8260\n572\n4159\n1180\n5348\n941\n7948\n2676\n3539\n4866\n6422\n8365\n3217\n1310\n2059\n9177\n1419\n2283\n8892\n8162\n1212\n6277\n3725\n7806\n6149\n7874\n718\n6888\n7118\n277\n656\n8763\n8289\n4759\n5854\n8659\n7710\n3145\n5981\n1881\n5799\n6947\n1609\n6396\n2631\n2887\n318\n2550\n6132\n1736\n2907\n7816\n48\n4304\n8133\n6698\n2760\n7779\n7732\n7642\n1154\n7242\n711\n9262\n539\n8033\n7440\n1913\n5480\n5570\n8594\n8772\n4654\n8974\n6128\n6183\n1071\n8449\n2142\n2298\n524\n1695\n820\n4053\n8241\n1856\n8641\n3981\n217\n1063\n9286\n3152\n221\n5461\n1270\n2006\n7164\n1199\n6951\n5604\n5400\n5309\n3498\n6407\n6661\n7097\n8165\n5169\n3852\n7070\n5702\n4344\n6648\n6904\n3272\n7119\n5795\n2365\n2659\n353\n5444\n6968\n2755\n1924\n2098\n2972\n6006\n5865\n8740\n2418\n3401\n7856\n5841\n598\n836\n1147\n931\n8897\n0\n6049\n1837\n865\n1871\n6116\n6831\n5773\n3587\n303\n1883\n2163\n3070\n1308\n7953\n6300\n6909\n853\n7301\n3279\n123\n7186\n3194\n5553\n5133\n1931\n4622\n6075\n4891\n5722\n5693\n8\n2339\n6596\n71\n379\n4506\n4370\n1238\n2707\n3344\n4254\n8767\n1726\n325\n4148\n5438\n5357\n548\n1332\n6824\n2290\n2335\n3146\n2594\n2315\n3389\n3885\n2621\n4116\n5389\n7412\n7222\n4894\n8595\n2000\n4978\n4721\n6444\n3796\n9321\n2236\n6409\n1523\n1468\n9249\n8270\n2341\n2874\n174\n4757\n4502\n4703\n9034\n9108\n5451\n2619\n5022\n9158\n490\n6540\n1466\n2962\n8771\n3036\n2712\n4539\n1581\n5638\n9246\n4308\n4363\n4647\n4470\n1636\n2511\n1311\n6560\n7519\n8027\n9217\n6464\n6364\n3779\n4822\n3563\n3982\n5896\n5510\n6655\n1524\n2846\n3137\n621\n141\n1887\n6567\n8921\n4671\n6052\n8445\n8699\n7349\n3553\n2117\n7651\n5034\n5383\n649\n3818\n9022\n8414\n1012\n8159\n5081\n8571\n4765\n9135\n4361\n4073\n9142\n727\n2835\n8229\n3989\n4490\n4923\n5477\n1638\n3643\n712\n9044\n2230\n499\n7166\n96\n3172\n8431\n8401\n1470\n6356\n8817\n927\n4212\n2152\n1795\n3812\n4949\n1219\n1538\n3029\n6481\n9042\n7775\n7742\n423\n2085\n7715\n4541\n9061\n5916\n3950\n7420\n4878\n7406\n7046\n7808\n4911\n8804\n6927\n8820\n3264\n300\n8670\n2979\n252\n4407\n3383\n4688\n8504\n6723\n26\n3837\n2489\n4137\n8209\n229\n6490\n2364\n9016\n1763\n1728\n338\n8335\n9063\n5280\n2791\n641\n5454\n4581\n5420\n4548\n2840\n8508\n3463\n7231\n7619\n2560\n1755\n6201\n165\n1471\n6279\n5806\n6867\n5890\n2396\n3416\n1981\n6073\n5872\n3045\n4182\n7607\n3318\n4414\n2998\n6553\n7139\n5624\n2123\n3666\n723\n5110\n6932\n8200\n2222\n8399\n1041\n4138\n1594\n3569\n9253\n393\n7940\n8004\n1475\n6759\n5393\n1107\n2597\n878\n9309\n7576\n5250\n1759\n3142\n2015\n571\n3921\n1255\n7080\n893\n2160\n1355\n82\n1562\n9153\n8583\n4085\n4644\n7196\n9165\n3558\n4550\n6374\n7826\n8602\n4146\n9257\n6083\n874\n8383\n3731\n3374\n3653\n8222\n7344\n470\n1813\n4478\n6871\n7245\n6866\n3998\n7433\n276\n1915\n1988\n8168\n2518\n2686\n831\n6143\n5205\n8718\n1703\n7729\n2077\n7983\n8450\n1195\n9232\n507\n7989\n6974\n4054\n5828\n8655\n6679\n5245\n7783\n5886\n9098\n6491\n8782\n3525\n6542\n131\n8110\n9186\n9074\n4933\n9035\n2607\n4\n2057\n6273\n2711\n5829\n3382\n2696\n3043\n2048\n619\n2499\n5295\n1162\n7807\n3694\n2194\n3149\n1940\n7934\n840\n3592\n8237\n4731\n1324\n8486\n8726\n8573\n2928\n9078\n2272\n2564\n1370\n5911\n7434\n8026\n407\n7546\n2004\n5849\n3034\n7887\n3425\n1118\n926\n3430\n1544\n5902\n2282\n1124\n2334\n129\n1372\n4842\n6473\n4382\n1028\n415\n8269\n8073\n6910\n2796\n3038\n5735\n5080\n2852\n6306\n8842\n9188\n3637\n1066\n532\n928\n5485\n2838\n6753\n9008\n7984\n2816\n8819\n7103\n5977\n5044\n2064\n2599\n4973\n382\n3249\n6446\n6638\n852\n1724\n3368\n892\n3250\n8258\n7962\n43
00\n1616\n167\n8855\n2090\n4424\n879\n5136\n5350\n2635\n7828\n8506\n63\n3004\n3847\n3676\n1184\n1705\n6745\n1263\n5020\n746\n1888\n7036\n1033\n3914\n5433\n3905\n4641\n8909\n228\n4801\n3766\n8085\n643\n6914\n9280\n3013\n5657\n3696\n1590\n2920\n8282\n2403\n416\n911\n3849\n4215\n1120\n5490\n296\n2306\n3140\n3742\n4819\n6153\n6414\n760\n3000\n7498\n7108\n6429\n3031\n5314\n751\n3357\n5808\n7505\n98\n7652\n4027\n6257\n3943\n1799\n8577\n5577\n4969\n9163\n2025\n6061\n4026\n5732\n588\n7017\n1415\n4961\n4940\n7152\n538\n706\n2802\n8983\n3375\n1246\n6593\n5837\n1789\n7939\n4997\n5939\n2411\n6133\n199\n7593\n1702\n5406\n6082\n2359\n2912\n6109\n100\n8149\n5470\n2807\n3384\n6413\n3362\n5621\n6019\n9241\n9268\n7703\n4111\n7967\n5458\n7181\n5492\n1112\n6729\n4577\n106\n8853\n3774\n979\n7082\n4610\n1853\n9003\n9292\n2867\n6262\n2245\n3460\n1557\n767\n4796\n8147\n2658\n5769\n6985\n7065\n421\n7990\n3289\n1540\n9316\n2251\n6896\n5947\n4965\n2652\n4480\n963\n9047\n7168\n7824\n3976\n6210\n7018\n7179\n5016\n7789\n6102\n6828\n7659\n9109\n9071\n8115\n7628\n7110\n16\n7513\n835\n939\n4078\n2351\n2322\n3881\n4945\n560\n6837\n6094\n6475\n7901\n3\n771\n8029\n3135\n8044\n7127\n3741\n5156\n7030\n4906\n113\n3747\n7042\n5232\n5225\n3002\n4747\n6879\n5379\n4886\n7192\n4184\n1896\n1834\n8689\n3665\n2957\n6913\n8009\n4851\n6420\n7987\n828\n3003\n8884\n8815\n3198\n8008\n194\n6251\n3303\n3934\n395\n1285\n4169\n1648\n1347\n3600\n4631\n509\n211\n6230\n7241\n8250\n2219\n2582\n8353\n7790\n7583\n4462\n3904\n9004\n6942\n1704\n5686\n8051\n2981\n5511\n6182\n7088\n1699\n1222\n3455\n6189\n1528\n5197\n6221\n7893\n3283\n2837\n7773\n8766\n2942\n8021\n614\n4102\n7362\n1786\n400\n133\n556\n3127\n5237\n3727\n1440\n3873\n6322\n8448\n6285\n8696\n8800\n4009\n3386\n454\n4847\n5685\n9093\n246\n1314\n5895\n6863\n4302\n4260\n8405\n8417\n7116\n255\n3223\n4737\n7852\n6337\n814\n710\n1094\n6103\n5809\n5882\n6336\n4974\n1499\n2806\n3744\n2664\n2436\n4482\n8665\n8918\n1076\n8676\n5725\n9248\n4755\n1447\n9328\n5500\n78\n2653\n792\n6854\n6093\n6172\n3378\n4492\n5529\n5476\n3846\n1391\n383\n4289\n3883\n2648\n3265\n2525\n5402\n4599\n6870\n6877\n4413\n2464\n8519\n2521\n1839\n5822\n5664\n7257\n5375\n6852\n6764\n5182\n8914\n3015\n8509\n3080\n4562\n8979\n6215\n6643\n8601\n6096\n4812\n5246\n7862\n527\n7849\n6737\n12\n2468\n7961\n275\n27\n5932\n3840\n7341\n4996\n8564\n2154\n3788\n6138\n7831\n4442\n757\n4464\n1170\n2568\n19\n323\n6584\n7675\n3441\n2067\n9027\n2486\n4379\n4744\n1737\n7563\n301\n3907\n4742\n6857\n1221\n9284\n8458\n8236\n2897\n4004\n1526\n5345\n4423\n6246\n8578\n1057\n3711\n4986\n4785\n3997\n7311\n4788\n107\n8387\n2041\n2608\n8628\n5830\n6031\n783\n6817\n3293\n541\n773\n8473\n2501\n7247\n5667\n804\n483\n1639\n696\n6060\n5429\n5762\n1527\n7342\n1329\n6225\n7895\n381\n8030\n8520\n8362\n4734\n3526\n9273\n2039\n4142\n5084\n875\n6905\n8968\n5275\n3052\n650\n7509\n232\n2595\n3631\n1810\n4355\n8315\n8908\n1777\n4834\n3164\n2336\n1543\n6212\n8346\n3024\n3719\n1242\n6265\n8101\n3133\n6150\n6358\n3316\n4089\n1647\n4629\n7117\n2596\n5366\n1225\n6371\n624\n2209\n1428\n1158\n7648\n466\n8765\n802\n153\n4639\n3657\n6482\n9320\n2693\n6591\n3294\n2617\n5052\n6305\n3227\n8784\n7170\n93\n5868\n6716\n1671\n178\n2703\n954\n3254\n2262\n5046\n5743\n8647\n6393\n7706\n6604\n3728\n6978\n7489\n7474\n8754\n2740\n2233\n6038\n1491\n8814\n2080\n2358\n5944\n5653\n1164\n9259\n4518\n7343\n5748\n3897\n923\n5967\n2677\n3503\n1202\n4966\n1836\n1863\n6634\n1962\n9096\n9064\n977\n4049\n1464\n658\n536\n3402\n8064\n1309\n259\n7999\n8122\n910\n224\n6152\n7142\n6070\n7523\n8411\n2408\n6766\n9214
\n9312\n8325\n6192\n626\n6025\n6240\n8708\n4630\n6777\n1075\n8906\n408\n9269\n6236\n9067\n2514\n8568\n2324\n156\n3136\n3530\n7878\n7308\n4335\n2065\n3845\n4453\n3356\n1450\n371\n7219\n5171\n201\n8642\n2099\n477\n1603\n8339\n7430\n3061\n235\n8291\n1133\n8474\n7035\n8653\n989\n4569\n9092\n8347\n3102\n1743\n9086\n5140\n7438\n1530\n4342\n2460\n7646\n5047\n5071\n5430\n6944\n610\n2803\n1448\n4696\n6156\n4386\n4248\n4256\n994\n2112\n805\n8011\n8276\n8999\n4956\n1712\n2795\n7553\n6436\n2158\n9083\n3184\n5784\n4428\n612\n5288\n6222\n1365\n5074\n6848\n575\n5213\n2175\n4240\n351\n2086\n2656\n5150\n9255\n8189\n7735\n1261\n1344\n4097\n8674\n2984\n4235\n5998\n6488\n537\n1267\n7486\n7124\n6245\n7955\n7337\n5436\n1194\n8226\n209\n1710\n7906\n4357\n4139\n5679\n2584\n2854\n1004\n8246\n8586\n5087\n1878\n4926\n6637\n3197\n7757\n8249\n4055\n6502\n1248\n990\n3928\n2770\n2751\n1020\n6426\n4190\n6839\n2671\n884\n3871\n9212\n4179\n3394\n10\n5861\n5316\n6869\n2985\n8905\n8559\n4457\n2480\n2313\n4100\n4395\n6835\n7799\n7890\n2785\n5468\n7302\n5862\n1803\n6376\n3171\n8591\n717\n7053\n1655\n4489\n2522\n2921\n8555\n1984\n895\n8949\n1305\n738\n7606\n112\n3042\n1325\n437\n3167\n3340\n511\n3689\n5813\n8982\n69\n4421\n7150\n550\n8829\n8685\n3147\n8956\n3166\n7023\n8633\n3308\n2014\n3573\n3880\n4045\n2069\n6051\n4950\n702\n6664\n8418\n2454\n6181\n4853\n4166\n7022\n7418\n3605\n9181\n7172\n5031\n4589\n7858\n6586\n6351\n8334\n7504\n634\n3759\n1890\n890\n6959\n5085\n4919\n2161\n1191\n256\n3610\n7079\n3427\n4071\n7323\n2982\n7263\n7444\n4251\n5846\n4864\n3649\n4311\n7461\n8120\n4582\n6373\n2805\n4872\n4869\n5493\n5867\n2670\n7099\n30\n8933\n930\n7919\n501\n7261\n5289\n7449\n7772\n3613\n7848\n3196\n474\n205\n841\n2611\n6185\n3088\n409\n7239\n5938\n7871\n1343\n6705\n1027\n5596\n2199\n9113\n5471\n6134\n838\n2345\n8359\n4061\n1474\n3229\n270\n4245\n1979\n5995\n1517\n8652\n4006\n4880\n6137\n4693\n2528\n6996\n2926\n5798\n2477\n2549\n1128\n3341\n6014\n4479\n2861\n4208\n5175\n5174\n5118\n3736\n5463\n1588\n2327\n8380\n7982\n1514\n1058\n4586\n6608\n7985\n3044\n1822\n3628\n6851\n549\n1811\n2184\n2601\n4608\n8922\n2540\n6659\n3859\n307\n3650\n3767\n8167\n505\n4366\n4824\n5520\n461\n1933\n2401\n8106\n2055\n7844\n8544\n8838\n4797\n7419\n6686\n7670\n6039\n5672\n5141\n6543\n206\n5252\n4718\n888\n1601\n3218\n5114\n713\n4022\n4419\n6708\n397\n425\n6612\n5057\n1729\n6573\n4729\n4080\n1034\n2961\n534\n8194\n5598\n9218\n2424\n329\n4154\n1597\n922\n109\n8823\n3578\n9038\n8437\n3307\n128\n8032\n1412\n7333\n8762\n8851\n8865\n3056\n468\n3808\n3064\n8798\n7052\n7767\n9231\n1086\n2162\n6566\n2109\n3439\n6122\n3642\n7696\n8610\n5279\n1808\n8687\n8377\n817\n8714\n6066\n4008\n3640\n6015\n1021\n7601\n4855\n6017\n87\n7071\n2730\n7268\n3614\n6084\n6117\n6924\n9102\n2829\n375\n8724\n2095\n22\n1541\n2970\n633\n139\n451\n4521\n179\n1396\n3876\n5824\n8020\n426\n4982\n4172\n1157\n190\n4859\n1455\n3110\n3323\n9104\n858\n6719\n6428\n4495\n8551\n2141\n3984\n3066\n67\n4299\n5821\n8444\n6581\n6097\n7090\n7781\n8944\n3085\n8606\n2114\n5355\n8901\n1461\n3301\n422\n7000\n4820\n5790\n1379\n7536\n4199\n8736\n8991\n5241\n1698\n1294\n1753\n196\n2987\n8680\n4658\n4144\n8639\n6441\n8255\n8156\n3677\n6385\n6520\n7700\n3760\n6001\n1144\n5478\n7394\n8057\n5018\n4232\n5235\n6844\n3111\n8802\n867\n949\n7843\n573\n2278\n6801\n7629\n2714\n5105\n6946\n2697\n5315\n1571\n8677\n2537\n4374\n3833\n7820\n3750\n2033\n6526\n3884\n8706\n7195\n417\n3603\n3001\n6284\n5873\n5718\n8576\n8457\n3589\n5839\n459\n3626\n6342\n8729\n6933\n607\n6053\n8228\n3773\n1805\n6365\n5142\n6069\n1389\n9026\n570\n4614\
n5712\n5533\n9222\n2821\n1897\n819\n766\n4060\n4902\n5905\n6842\n5446\n1277\n4303\n2836\n934\n1014\n7822\n7494\n3466\n665\n1047\n5881\n3328\n4664\n315\n1315\n1462\n8616\n7725\n2756\n5749\n1730\n8184\n4567\n5065\n7499\n8867\n1304\n3669\n9192\n410\n8177\n6710\n1210\n2329\n8443\n3911\n1899\n7686\n3315\n7190\n6180\n3116\n5341\n4394\n8337\n9182\n6969\n5715\n2172\n1742\n2782\n3715\n9195\n7960\n2517\n4890\n8294\n2337\n8014\n3353\n7475\n2193\n4843\n8831\n4200\n4653\n6196\n6957\n3063\n2996\n8959\n8973\n6529\n3457\n5274\n8002\n6823\n6154\n5561\n1780\n9318\n7657\n1758\n6503\n7678\n3274\n1625\n4327\n3236\n8575\n3155\n4707\n4331\n1494\n8756\n3174\n1074\n8116\n8295\n8311\n3048\n3752\n6050\n6483\n8003\n9175\n4674\n1642\n2556\n6166\n7165\n8441\n5413\n3990\n1640\n1778\n7500\n8304\n1395\n4315\n5949\n3364\n242\n5763\n1036\n249\n2430\n7426\n8131\n411\n6267\n2045\n6606\n899\n8065\n9052\n7507\n5779\n5616\n2107\n5408\n2980\n6310\n5776\n4328\n821\n3251\n2354\n7076\n1700\n5313\n6736\n79\n8212\n3959\n5677\n7545\n160\n6790\n6859\n3659\n6770\n1106\n8846\n956\n7472\n2050\n8099\n4795\n8053\n9293\n7037\n1646\n9307\n1069\n5322\n5332\n2708\n8977\n917\n2419\n184\n2105\n1578\n3923\n5780\n1903\n2512\n429\n5582\n493\n4972\n445\n8286\n555\n320\n8300\n322\n617\n3413\n4459\n525\n5631\n6314\n5157\n5300\n8545\n182\n1031\n4429\n2495\n7586\n1534\n3099\n3916\n3738\n1919\n535\n2119\n1299\n177\n1838\n2159\n4099\n8285\n5172\n8540\n6020\n7683\n3073\n3115\n1673\n3087\n3488\n2416\n1894\n5942\n3597\n5834\n2007\n43\n1779\n4174\n2023\n2546\n2429\n9006\n436\n4214\n4536\n3693\n5426\n6767\n5903\n4368\n2170\n5051\n7490\n7882\n2859\n5035\n7835\n5372\n7122\n925\n3253\n6338\n8393\n4093\n5848\n7588\n2683\n8049\n5403\n5894\n8745\n8550\n2941\n3484\n9029\n4461\n8022\n725\n2355\n1619\n3030\n1975\n5623\n2415\n1957\n6141\n9278\n3226\n3062\n5670\n7326\n8759\n8496\n6619\n8187\n8262\n6199\n951\n7183\n668\n2388\n4698\n5681\n8240\n2851\n871\n4988\n9084\n9089\n3162\n1167\n8244\n5227\n6461\n2831\n776\n5010\n5770\n5282\n3574\n5102\n1278\n2281\n5455\n305\n4628\n4663\n9119\n7487\n8746\n4889\n6569\n1175\n102\n2386\n8940\n2479\n5566\n53\n8833\n1918\n8001\n321\n6786\n6861\n4358\n2771\n7467\n975\n4777\n605\n3543\n2600\n7584\n9299\n4530\n6477\n7364\n7328\n183\n4761\n7543\n304\n1196\n4623\n7839\n2139\n5519\n1953\n533\n5989\n7590\n7428\n6346\n6162\n1091\n1946\n6260\n4405\n5676\n8924\n7171\n8409\n1866\n6379\n3411\n2387\n3051\n7398\n154\n1185\n6442\n6004\n1611\n2165\n9018\n8323\n616\n3995\n8952\n1533\n7853\n4194\n213\n789\n4991\n3675\n7456\n5752\n175\n7556\n4195\n907\n2248\n9057\n8467\n4594\n1017\n7968\n880\n7446\n3304\n1666\n4942\n3867\n4802\n9156\n6357\n4621\n887\n6213\n5261\n1336\n521\n8928\n1818\n7864\n4792\n6742\n157\n1593\n823\n7235\n5303\n5633\n1100\n1692\n8047\n5993\n1460\n6714\n1630\n6440\n6307\n3608\n292\n212\n401\n5974\n7107\n8301\n8342\n2720\n4583\n2757\n7315\n833\n4466\n4236\n1282\n5273\n2149\n287\n8484\n2380\n8119\n7167\n737\n5076\n6598\n3596\n5382\n2650\n8980\n3421\n1356\n1954\n7823\n1172\n2226\n1941\n6136\n7274\n2256\n4928\n324\n1407\n4410\n4579\n1061\n7113\n486\n862\n3435\n6956\n2873\n1465\n6113\n8225\n8512\n6806\n272\n6008\n1241\n88\n5662\n3555\n689\n8733\n2812\n7453\n6282\n420\n2471\n4477\n7495\n1445\n594\n6939\n1564\n8704\n8590\n7992\n7374\n5796\n9298\n4213\n5713\n5864\n326\n5513\n402\n464\n608\n1951\n8640\n8180\n3347\n3459\n4162\n2690\n7478\n5856\n5240\n2389\n3022\n602\n5547\n1798\n1345\n9276\n599\n3673\n3277\n1635\n8625\n1567\n5928\n636\n5671\n2896\n3477\n412\n7575\n4201\n685\n4760\n1229\n4275\n8960\n3123\n4471\n5941\n3355\n3999\n7157\n6354\n7741\n6850\n
8783\n1943\n6769\n7330\n8721\n8477\n1381\n848\n778\n6408\n2644\n5817\n1441\n1723\n2144\n2776\n2368\n120\n367\n8839\n8749\n5353\n4158\n3148\n9114\n1233\n9228\n8857\n2895\n1286\n200\n6755\n5125\n5857\n1657\n7658\n5097\n5000\n942\n7020\n586\n784\n7078\n6194\n8658\n8957\n9325\n1851\n8911\n4862\n7004\n1186\n8824\n1651\n2999\n561\n7639\n4316\n5086\n3187\n7912\n2624\n9183\n8487\n5089\n8475\n7554\n4031\n6297\n6059\n5329\n115\n2058\n7650\n7634\n7121\n2485\n7805\n2241\n7713\n4352\n2409\n1026\n2745\n4549\n6474\n5124\n5201\n6556\n6617\n9091\n3945\n8402\n5648\n5257\n2192\n4901\n7750\n6131\n6027\n6352\n4625\n1254\n5498\n3720\n8261\n3939\n5576\n3685\n6713\n8472\n991\n8354\n8068\n5655\n5997\n1029\n7506\n6740\n2575\n2990\n4898\n583\n7402\n3290\n5388\n6715\n8235\n5361\n4970\n1363\n3338\n5731\n9014\n5358\n2216\n2856\n635\n1193\n3705\n6334\n7666\n5270\n1384\n6368\n8604\n3564\n1937\n2481\n1341\n721\n2100\n3958\n6551\n3813\n2592\n7980\n5385\n319\n2357\n8761\n8910\n8693\n1204\n489\n4827\n8024\n7832\n6427\n3895\n89\n9068\n8067\n1708\n1111\n8963\n1902\n9251\n5719\n9143\n5537\n9169\n77\n5365\n1840\n485\n4456\n2841\n1169\n3271\n7144\n6886\n9140\n7173\n6003\n1659\n1807\n8371\n2439\n274\n4660\n3448\n6623\n347\n2103\n3400\n2106\n9073\n8169\n3687\n3305\n4416\n8454\n6635\n332\n2433\n2909\n3839\n4063\n1944\n6509\n1296\n7770\n1880\n6610\n4075\n9331\n4484\n302\n418\n4219\n1333\n2350\n6498\n8424\n4694\n4883\n5269\n6580\n5007\n6722\n1669\n8470\n2571\n513\n3810\n7049\n6332\n7363\n3532\n8456\n2097\n297\n8841\n7180\n714\n1587\n5234\n4268\n2320\n7372\n660\n8503\n1668\n8847\n1101\n7275\n3336\n6460\n722\n7782\n3947\n502\n4258\n2132\n1835\n181\n3841\n427\n3446\n2551\n8324\n6963\n4284\n7297\n7577\n3399\n9148\n8213\n5656\n8440\n851\n657\n2446\n4292\n6992\n976\n1108\n2681\n3237\n8582\n377\n5969\n5287\n9209\n8523\n7178\n7833\n6175\n2126\n3023\n5090\n7491\n6640\n6077\n2221\n2780\n1694\n4094\n144\n6161\n3203\n7123\n749\n3625\n3848\n980\n2270\n7819\n3672\n7689\n7203\n2718\n1714\n2884\n3474\n3802\n3851\n4224\n7237\n5415\n7998\n7207\n4106\n9036\n1046\n8731\n5070\n6818\n4592\n6056\n693\n1328\n3309\n5791\n2629\n2736\n202\n388\n7886\n4417\n8786\n8822\n4035\n7718\n8492\n5505\n1192\n4388\n8941\n5019\n7538\n6732\n7296\n6389\n5923\n1405\n3278\n3917\n1688\n8374\n443\n4037\n9099\n5190\n6402\n4177\n9310\n7747\n4348\n7197\n4844\n4998\n5609\n4345\n29\n3332\n8648\n4107\n346\n2577\n3941\n1215\n3782\n8252\n4706\n2675\n3790\n7459\n6164\n7316\n1149\n6687\n582\n3139\n5040\n7645\n3882\n7322\n4034\n1861\n4701\n8757\n3208\n8801\n6349\n8907\n1823\n4528\n4789\n143\n4746\n9234\n3866\n9245\n1911\n1366\n4393\n2061\n859\n1959\n6967\n3138\n7382\n9031\n6237\n845\n80\n6911\n7163\n5229\n4736\n8738\n33\n8543\n357\n3193\n7262\n4448\n6796\n6793\n3321\n7569\n6411\n7692\n7340\n1417\n5847\n3836\n2678\n1188\n8727\n223\n8615\n7417\n5771\n3170\n8061\n2935\n8263\n8257\n6883\n1276\n1239\n812\n6258\n3922\n7525\n8117\n3039\n603\n8554\n7573\n2787\n3445\n5115\n3478\n962\n3961\n6570\n7722\n216\n2797\n5154\n2530\n4904\n2405\n7542\n4021\n3252\n5370\n9302\n236\n4532\n1361\n3373\n1716\n2183\n1583\n3783\n868\n1687\n8925\n1433\n6198\n8208\n6367\n7603\n882\n3469\n1645\n7654\n1176\n4231\n150\n7997\n5456\n7031\n4375\n8840\n5634\n6945\n705\n3442\n4774\n3822\n7148\n1922\n8459\n6249\n8713\n6197\n8599\n6071\n6756\n1634\n950\n5640\n7749\n5920\n6622\n4783\n7837\n7479\n7229\n3919\n1797\n5272\n8945\n4908\n5439\n6903\n5833\n6930\n8197\n9261\n1711\n5483\n6046\n4285\n8852\n7409\n8971\n8278\n7534\n7792\n2444\n7496\n8063\n1665\n248\n3894\n4585\n1982\n66\n6651\n4850\n1240\n7511\n7524\n9258\n2075\n3979\n4714\n75
92\n965\n2919\n8239\n1842\n8013\n4750\n2344\n6155\n3468\n31\n2087\n1599\n1573\n5883\n7613\n195\n3749\n644\n2189\n8779\n8743\n9005\n8081\n1040\n7785\n5820\n8830\n5495\n4867\n2710\n3843\n491\n7153\n6217\n1148\n4741\n1761\n5484\n3423\n5474\n6916\n5876\n7252\n1739\n8930\n6647\n5198\n4903\n8488\n7366\n2774\n2726\n2385\n7625\n3179\n2211\n8845\n6600\n399\n6810\n3447\n6684\n4915\n8368\n1867\n2325\n2101\n1335\n7734\n3722\n7437\n3716\n7025\n4000\n6897\n1408\n7154\n5013\n2204\n9233\n4225\n3817\n1877\n9161\n2197\n6991\n3390\n280\n1892\n1612\n7753\n2801\n7246\n7909\n6229\n9314\n8407\n1436\n3879\n6432\n6849\n5326\n5327\n8535\n7910\n7745\n5545\n7916\n207\n1783\n6158\n8517\n7361\n8070\n6430\n119\n6146\n4183\n1083\n7385\n4497\n9133\n1686\n3765\n5099\n595\n8046\n4418\n4043\n2361\n7915\n9149\n1717\n1141\n6375\n1018\n5602\n1262\n7485\n9178\n6629\n3339\n8934\n4648\n7988\n6252\n3440\n864\n5418\n3874\n7280\n6191\n8388\n4323\n6792\n4324\n2232\n7228\n8684\n7813\n6187\n6678\n3177\n3534\n4953\n4402\n7739\n6319\n2414\n8700\n5946\n8238\n4533\n6917\n4167\n4618\n2115\n2268\n3081\n1247\n4001\n8580\n7636\n3101\n2195\n1559\n3714\n2484\n7188\n6028\n7530\n2828\n1977\n3238\n6496\n2340\n110\n3247\n7532\n7541\n924\n1632\n484\n4487\n4439\n6447\n1319\n4944\n6347\n1791\n2285\n8087\n5452\n91\n1166\n162\n5185\n7933\n4743\n1627\n7259\n8620\n8525\n8207\n5845\n9011\n5525\n4269\n4700\n1824\n8186\n8872\n8299\n3957\n8242\n4558\n6439\n2666\n5943\n6958\n8112\n5121\n8806\n6170\n7688\n3486\n2082\n7436\n2778\n1096\n786\n2206\n5170\n1443\n6030\n3312\n9151\n8485\n6404\n8498\n2883\n8961\n2280\n8341\n9137\n4337\n2809\n2445\n809\n8298\n8643\n8316\n4951\n6853\n1572\n3215\n3938\n2249\n6515\n1337\n8328\n7712\n1429\n4117\n5441\n3230\n4152\n7225\n3513\n6953\n1507\n348\n3639\n5739\n2673\n1550\n6301\n1652\n8453\n204\n6833\n8056\n2200\n5217\n1854\n4711\n7368\n4572\n4032\n7531\n1013\n3634\n2875\n6058\n8307\n7609\n1766\n904\n667\n5410\n6578\n3601\n1664\n3233\n7390\n8178\n4486\n4952\n4427\n4876\n9166\n3107\n2772\n6295\n5001\n5296\n3371\n6518\n6327\n854\n1615\n8288\n1912\n5927\n6202\n5814\n9032\n1059\n3214\n6547\n7038\n5781\n6926\n4390\n6114\n1622\n4318\n5803\n5984\n736\n3561\n6554\n5045\n4277\n7386\n9081\n8462\n2034\n4955\n2701\n932\n1298\n7758\n7176\n9205\n2276\n3077\n3803\n3562\n8054\n7946\n295\n1843\n7728\n1629\n7768\n3663\n6363\n2971\n431\n9285\n2513\n1116\n3656\n4529\n6366\n5758\n6339\n8398\n816\n4153\n648\n2536\n1826\n7870\n8113\n7730\n7101\n6555\n9256\n6774\n1072\n4578\n2598\n3604\n5880\n861\n8273\n3350\n3117\n4685\n9219\n4334\n5165\n2035\n7224\n4066\n4253\n4447\n3815\n5038\n253\n3658\n2252\n330\n3967\n6443\n2143\n7336\n6135\n593\n2734\n8390\n4655\n7800\n1399\n1173\n5618\n2822\n7905\n7503\n4431\n2443\n1568\n3909\n1974\n2496\n4772\n5164\n4105\n2138\n2864\n3799\n3924\n4882\n8245\n1585\n5528\n5692\n5730\n5832\n137\n3175\n2894\n2062\n3899\n2752\n4028\n2113\n5411\n293\n2647\n730\n3758\n1667\n8879\n9303\n6653\n3698\n3968\n3053\n503\n2150\n4645\n2257\n4627\n8303\n7966\n8742\n4692\n5901\n8547\n2277\n5546\n986\n370\n4697\n8712\n4804\n4881\n1182\n6650\n7290\n3487\n2814\n5668\n7567\n5333\n3724\n4164\n3084\n8896\n3888\n6537\n17\n6882\n3531\n704\n1037\n8866\n5263\n6758\n3762\n1393\n3824\n5575\n5112\n214\n1439\n5700\n8932\n1306\n5011\n6928\n5173\n4098\n1132\n7352\n4778\n7723\n1368\n2390\n670\n2685\n5855\n1772\n6380\n3853\n940\n5424\n6091\n1748\n6193\n5297\n6572\n8877\n6874\n430\n5041\n5267\n1145\n7448\n620\n9112\n4294\n1432\n72\n130\n2393\n7920\n4597\n6614\n8889\n3697\n1895\n3462\n2616\n3978\n4791\n7846\n7780\n8372\n428\n6559\n8326\n9211\n2363\n1525\n5980\n7888\n3331
\n8118\n7899\n615\n7377\n791\n5930\n6627\n8322\n1138\n770\n8460\n5100\n8274\n8350\n6316\n2893\n7594\n9236\n5082\n8150\n1986\n1909\n8902\n2145\n3617\n3501\n7\n2426\n5056\n8016\n2702\n5360\n8135\n8385\n8378\n8018\n8574\n720\n8893\n3021\n1978\n4782\n1816\n2083\n4051\n1446\n5870\n971\n9097\n8006\n4222\n8287\n686\n1377\n611\n8153\n4920\n4808\n1536\n679\n4096\n3891\n4884\n432\n4615\n8988\n5560\n3451\n5589\n3514\n6169\n1414\n3244\n1490\n7100\n3588\n690\n7317\n4171\n2266\n6800\n108\n2793\n5151\n6977\n2587\n8188\n8752\n6318\n5815\n5116\n263\n3311\n5191\n5689\n289\n3392\n5755\n1022\n5548\n9319\n8937\n6011\n7632\n5328\n4993\n4141\n5407\n1865\n520\n7305\n7208\n526\n3645\n1859\n2520\n3523\n8629\n7304\n8881\n3076\n4005\n8329\n2205\n2214\n6925\n8691\n4136\n8883\n974\n7873\n7952\n3965\n5887\n7964\n7189\n2406\n2783\n8086\n405\n6568\n5147\n2021\n4727\n4826\n7674\n1600\n5078\n2949\n6624\n6541\n8986\n5740\n4679\n8500\n3591\n4434\n398\n983\n7544\n1478\n4570\n6012\n465\n9330\n7206\n808\n8737\n2356\n4959\n8812\n6955\n3599\n2168\n1420\n1721\n1794\n5897\n8422\n2\n4023\n2739\n3619\n8797\n5496\n8951\n8181\n6893\n9254\n1809\n5682\n4309\n6929\n2742\n5988\n3363\n4493\n8434\n4210\n1503\n1876\n5094\n4600\n4936\n4798\n3933\n5216\n646\n7660\n3098\n8773\n4076\n1576\n5335\n3746\n3327\n47\n4602\n8636\n4129\n363\n6417\n7416\n9025\n4377\n4766\n2779\n4151\n9046\n7860\n3154\n3476\n7620\n966\n2052\n8344\n1752\n7199\n4412\n8895\n8882\n2463\n339\n56\n5390\n4821\n7555\n6558\n1905\n5258\n8880\n4205\n3580\n6735\n1023\n4511\n3850\n161\n7395\n2532\n3349\n7055\n7387\n758\n1907\n872\n3006\n659\n815\n1961\n6902\n7668\n4708\n1904\n4433\n5159\n6816\n8664\n6918\n1016\n6513\n7314\n5364\n7480\n9313\n716\n3395\n6843\n2292\n918\n4329\n1035\n6344\n8593\n3404\n5212\n837\n480\n8524\n1342\n3690\n6797\n7414\n288\n8863\n3352\n1628\n24\n135\n3314\n2181\n8650\n5915\n8078\n6812\n1375\n6040\n906\n5635\n7126\n1387\n7458\n6119\n5591\n3795\n1531\n95\n1960\n7522\n3033\n898\n4607\n4921\n3913\n2623\n4430\n6268\n7063\n1326\n9075\n2505\n7400\n1284\n2951\n747\n6466\n1357\n6493\n7320\n5892\n576\n5107\n5559\n97\n2583\n6361\n8843\n3509\n7892\n6086\n1476\n4612\n7427\n4267\n9094\n7050\n6048\n8455\n8382\n2227\n284\n2898\n3221\n2353\n2157\n5990\n5810\n3581\n7279\n6188\n7859\n3549\n5539\n7918\n2022\n9066\n630\n2500\n5111\n6561\n5127\n8095\n5569\n6123\n1338\n8605\n3491\n4187\n8220\n7334\n9213\n3067\n6997\n2853\n4735\n4372\n1489\n5954\n6662\n2207\n973\n3361\n960\n6350\n4170\n7431\n8076\n1129\n750\n7559\n7194\n2261\n2300\n6590\n5893\n6889\n3125\n8788\n334\n7286\n3472\n8164\n7693\n1469\n1181\n669\n7515\n5563\n4773\n3210\n6324\n3113\n9070\n3638\n7551\n2541\n3506\n5138\n4069\n7198\n7560\n3306\n6100\n2932\n4473\n1741\n14\n4672\n7564\n8748\n8874\n3804\n3678\n2240\n2610\n2862\n1358\n5716\n42\n5176\n9326\n8464\n1038\n2993\n3017\n9072\n32\n4809\n4364\n2808\n4125\n448\n152\n7299\n5431\n6178\n793\n3444\n9120\n8410\n4963\n772\n5457\n6954\n3014\n6881\n286\n553\n1948\n6398\n6255\n3057\n8646\n6176\n2700\n7106\n5663\n6683\n1281\n6013\n8799\n7635\n9289\n1885\n442\n2225\n6294\n5054\n2674\n7884\n8730\n8216\n4203\n1488\n7111\n4013\n3623\n7950\n1971\n1966\n3248\n2900\n1553\n472\n3865\n7796\n6937\n4591\n8098\n5208\n294\n5627\n5691\n5687\n7149\n4879\n3624\n7005\n2773\n3112\n9185\n1633\n7830\n5101\n8707\n8469\n4678\n4860\n700\n5527\n9194\n2794\n5068\n2639\n1177\n4282\n6492\n8128\n5859\n5029\n5123\n2877\n522\n5048\n7230\n2104\n6642\n6731\n2717\n5149\n2043\n9059\n5277\n844\n1394\n3262\n5515\n6706\n3651\n9105\n7671\n2880\n3607\n6410\n2508\n8463\n2394\n1916\n1125\n5343\n3322\n5307\n4547\n1589\n8478\n8899
\n2955\n8028\n7293\n4619\n4058\n2781\n8715\n1272\n5734\n4474\n4863\n4367\n49\n8844\n5605\n8671\n6743\n4281\n7077\n1874\n2626\n2516\n258\n5249\n6186\n7958\n5432\n3801\n6288\n4732\n9121\n7558\n2527\n4661\n6819\n3835\n7508\n584\n215\n5036\n4261\n8978\n5228\n647\n4657\n2591\n5931\n5088\n9204\n929\n4381\n5421\n2965\n5050\n6495\n5033\n4799\n959\n6115\n3520\n1232\n5811\n317\n8976\n7705\n3842\n2178\n7187\n1373\n7112\n2694\n8627\n8493\n3991\n7441\n6308\n2589\n6462\n3406\n7673\n8660\n2902\n752\n1025\n849\n7682\n6982\n6652\n3612\n298\n5148\n4873\n3414\n1693\n1458\n327\n2016\n5002\n6768\n7016\n5583\n3270\n857\n8232\n7158\n7981\n4676\n4675\n2164\n8360\n6709\n8143\n365\n4062\n4527\n7928\n9009\n6228\n5818\n2533\n9305\n8887\n55\n2507\n8870\n6649\n5158\n76\n5595\n6693\n5306\n8666\n3020\n7527\n3082\n6304\n1591\n6145\n6868\n7205\n9107\n1165\n6773\n172\n1993\n4176\n8400\n4611\n7589\n8702\n5386\n6095\n6335\n1561\n8805\n5963\n7393\n3681\n2037\n4968\n7451\n3360\n7466\n8361\n4455\n4064\n5422\n1689\n3977\n7269\n362\n4178\n4145\n6127\n5162\n2399\n9225\n7068\n1650\n794\n3007\n1348\n7736\n444\n6081\n5298\n2026\n2543\n9087\n3593\n7425\n3730\n8468\n2641\n7529\n1720\n6377\n8732\n5851\n7956\n3150\n3785\n6485\n3611\n2869\n8510\n4775\n4463\n1251\n9124\n6873\n3391\n6505\n4118\n1617\n8837\n7051\n3213\n3668\n5347\n8452\n6289\n5840\n478\n3522\n453\n3376\n6190\n3342\n2237\n2870\n5178\n5567\n5952\n6919\n3005\n134\n3397\n7443\n8539\n6822\n5264\n3288\n5962\n8421\n6744\n8608\n4656\n1802\n2073\n4271\n1043\n2922\n8211\n2196\n5260\n3789\n7211\n7571\n7834\n5680\n2047\n5502\n3369\n3437\n3286\n5517\n3912\n8386\n1442\n6961\n2191\n2417\n9088\n5155\n6813\n4520\n7375\n1224\n811\n1891\n3748\n4123\n2789\n5305\n8419\n7248\n9237\n992\n4038\n4499\n2060\n5538\n850\n2669\n7612\n104\n9290\n2526\n1287\n4160\n4633\n7125\n742\n744\n4534\n2407\n7714\n4555\n8764\n7661\n4722\n7721\n3205\n6657\n1214\n3754\n6080\n4593\n3018\n8792\n2294\n4450\n7701\n9301\n127\n7069\n4513\n6243\n8025\n4010\n8632\n4715\n5284\n4574\n726\n4252\n4561\n7354\n299\n6088\n1090\n5012\n5684\n3489\n5639\n4888\n1584\n1969\n4846\n2915\n6804\n2775\n7306\n6506\n9306\n5231\n7740\n4283\n953\n6725\n458\n8290\n1504\n1539\n8885\n138\n3764\n1256\n257\n335\n1011\n7060\n5986\n9323\n4740\n8994\n4140\n6807\n8254\n3963\n9297\n2102\n2964\n9207\n4910\n8709\n4411\n1672\n457\n5852\n8037\n4932\n3679\n8794\n2362\n8592\n495\n8432\n1608\n2155\n7411\n2881\n9244\n37\n6535\n8219\n4505\n8635\n1928\n8384\n2570\n8996\n7610\n2128\n8728\n6656\n8935\n6681\n2070\n176\n9062\n972\n514\n1796\n4039\n6838\n2462\n230\n569\n5521\n4637\n4939\n4420\n2863\n672\n4995\n3807\n447\n1656\n2005\n5113\n3297\n8858\n2118\n6309\n1926\n481\n1156\n1509\n1228\n1787\n5978\n8678\n3951\n2929\n4980\n5039\n4713\n7002\n151\n5536\n8148\n3823\n4709\n2299\n142\n7067\n2372\n3761\n9\n2265\n5747\n2764\n724\n2913\n3151\n4525\n6370\n4247\n9329\n5494\n3721\n629\n3621\n7371\n59\n1999\n6704\n3734\n2698\n4691\n6938\n9117\n8415\n6353\n6750\n9077\n2679\n7623\n2478\n7321\n6611\n4007\n2076\n5772\n6416\n2264\n8348\n2672\n6546\n754\n6934\n7908\n8546\n4404\n592\n4748\n6625\n2129\n7944\n2377\n6\n8929\n8275\n3515\n4524\n3660\n8710\n419\n6878\n170\n8313\n7460\n8753\n2917\n6891\n6663\n4918\n7129\n396\n7256\n3500\n631\n5585\n8343\n2695\n6168\n6292\n3176\n5092\n5160\n3701\n9021\n7221\n7825\n1216\n1438\n3471\n2318\n8923\n6223\n2182\n7621\n8514\n9010\n8987\n1252\n1972\n1872\n1715\n8205\n6463\n8138\n8989\n5661\n2890\n565\n2427\n8946\n1303\n3718\n6000\n3620\n1560\n5276\n8089\n9260\n1467\n6173\n7641\n7520\n5061\n4677\n5757\n4400\n2620\n2719\n8995\n2079\n6644\n1683\n8141\n7754\n
5744\n2952\n7568\n654\n7457\n5368\n3310\n1510\n4440\n1513\n3072\n8034\n1456\n9164\n3163\n3035\n6111\n5042\n7161\n1401\n1084\n8000\n6672\n8531\n5404\n6550\n8379\n9141\n8681\n7752\n6394\n7011\n3739\n8253\n978\n4771\n6024\n4828\n7959\n1649\n1727\n7073\n8349\n6952\n661\n7283\n3159\n2590\n3496\n8741\n3969\n2956\n4565\n920\n1830\n8558\n1930\n6677\n6825\n8256\n7454\n7521\n4710\n1768\n3753\n6459\n5606\n5292\n1397\n240\n2733\n946\n6711\n3242\n2627\n4929\n5006\n3202\n132\n2295\n2746\n1293\n2124\n5405\n4065\n818\n7464\n1820\n4398\n1312\n6994\n6920\n261\n987\n6120\n3109\n331\n2986\n4338\n7774\n5122\n8396\n1364\n8969\n6712\n8161\n7083\n7595\n5940\n1566\n6419\n8634\n4432\n6047\n4749\n6076\n1161\n8217\n674\n8494\n3688\n2447\n4704\n969\n7477\n1160\n3243\n3173\n4979\n9288\n6860\n1662\n6171\n225\n5143\n313\n8327\n3275\n3385\n7626\n3103\n4401\n6794\n5600\n5043\n7664\n933\n6830\n4452\n3980\n1604\n5875\n6633\n4635\n5756\n3329\n1751\n8108\n4817\n1989\n1237\n1893\n2848\n9334\n51\n8875\n4981\n5417\n4134\n877\n6688\n3545\n4943\n5615\n2476\n1684\n3652\n7396\n1769\n1171\n6563\n3415\n3644\n340\n6630\n8284\n3256\n7240\n5371\n3405\n2108\n6360\n1734\n5612\n8638\n2343\n1103\n7803\n6809\n3055\n188\n8031\n3124\n3683\n4537\n988\n2297\n4893\n6499\n3396\n839\n4467\n5195\n4041\n6457\n4441\n6378\n6472\n6195\n4912\n6884\n5922\n7014\n1660\n38\n1595\n6752\n4554\n1292\n2709\n3800\n6057\n1980\n8775\n6587\n6392\n6263\n7214\n5219\n282\n309\n6685\n2253\n6311\n4092\n18\n7570\n5543\n4081\n2515\n6278\n8690\n5294\n6184\n5215\n9130\n6720\n250\n7250\n4983\n639\n3567\n7841\n2636\n4067\n8446\n5703\n8609\n2586\n7695\n1253\n6701\n7930\n6317\n5921\n7719\n8501\n7312\n4110\n6219\n4552\n5059\n4088\n7975\n9132\n6054\n692\n3412\n4079\n6754\n6950\n5281\n3028\n8321\n3877\n7614\n8939\n4188\n2223\n239\n4745\n6875\n7096\n5571\n4403\n2640\n5556\n1845\n6690\n1825\n4157\n314\n4682\n8825\n1003\n6206\n8093\n7215\n6465\n99\n8077\n6631\n4206\n2523\n366\n1208\n6043\n4640\n1457\n5475\n4985\n1351\n3090\n5625\n7307\n8466\n2003\n8854\n218\n1500\n4476\n2293\n1847\n5032\n2147\n866\n3710\n2552\n1749\n6692\n3926\n4112\n6458\n735\n9171\n60\n9304\n6726\n2630\n2882\n1178\n1151\n4922\n4662\n173\n7233\n1776\n6533\n4113\n2423\n2425\n4343\n5800\n970\n6372\n1009\n6607\n3068\n8435\n6423\n3126\n4813\n1709\n1201\n7104\n5620\n3932\n5701\n5724\n3366\n8050\n4984\n5023\n9203\n5079\n627\n290\n779\n5572\n5233\n1392\n4975\n8534\n8210\n2269\n1143\n2475\n2562\n905\n4546\n267\n3536\n8538\n449\n101\n7367\n2722\n4605\n7356\n6781\n8537\n8697\n6820\n8340\n8926\n3821\n2349\n2259\n6545\n8100\n8395\n2258\n2911\n5108\n3946\n1406\n8683\n8296\n5579\n2177\n8264\n1425\n3940\n957\n3647\n515\n5342\n8363\n2449\n3108\n1001\n2937\n3452\n5574\n4319\n9184\n8381\n945\n6876\n600\n5714\n4871\n8532\n1852\n8856\n392\n2018\n8878\n369\n5711\n9230\n5304\n7266\n1681\n7829\n2309\n4683\n8938\n2255\n6159\n3207\n4651\n2029\n4341\n5106\n5794\n9024\n4712\n2434\n7151\n7359\n6431\n1290\n5918\n8705\n3438\n5554\n8876\n7415\n6290\n5373\n3805\n2950\n2331\n6772\n8997\n6576\n2307\n8515\n4033\n3428\n6487\n6595\n45\n5792\n333\n762\n2383\n3388\n666\n2166\n460\n943\n364\n6980\n8223\n8221\n637\n6218\n4108\n5381\n4649\n5096\n1614\n8768\n5095\n3809\n5030\n984\n3538\n5120\n2498\n5222\n5613\n5486\n5119\n241\n5707\n9227\n544\n4109\n7771\n728\n3671\n9327\n1230\n9270\n1070\n8565\n4769\n7056\n5654\n7965\n1793\n5956\n7883\n1362\n5479\n8769\n8821\n8320\n1901\n1994\n2461\n5552\n389\n2839\n6467\n2762\n4763\n3499\n1487\n7599\n4488\n3241\n8272\n1131\n4496\n7006\n7265\n4897\n2747\n6618\n5291\n4563\n5146\n1939\n6369\n8548\n6163\n5526\n4068\n9030\n5349\n84
33\n748\n1477\n4265\n9200\n3878\n462\n6846\n9040\n4806\n3519\n6798\n5464\n5179\n546\n6044\n8114\n7216\n6276\n1495\n494\n8146\n5434\n856\n8403\n8071\n3972\n5544\n3337\n6855\n1546\n2824\n1718\n6009\n2042\n251\n9076\n3330\n5004\n192\n4717\n3797\n1146\n394\n7814\n7699\n4659\n4689\n4156\n7903\n9054\n7332\n7811\n1119\n5531\n6782\n5210\n8412\n2633\n7924\n4624\n8314\n5666\n3240\n2310\n4262\n8160\n4553\n8196\n2661\n7213\n7455\n7399\n870\n6126\n1227\n1226\n781\n937\n6343\n2578\n2892\n4124\n2792\n5696\n6865\n6455\n8312\n5193\n6026\n5251\n3787\n4460\n4687\n7923\n1140\n9106\n796\n2482\n9170\n8695\n2749\n6734\n4825\n114\n8319\n827\n4175\n390\n7611\n7484\n1249\n7727\n955\n579\n3629\n8915\n2958\n885\n7227\n1424\n4810\n4604\n1535\n774\n7518\n5428\n1955\n8233\n2645\n2167\n6484\n3855\n1502\n4861\n2333\n2973\n4829\n1906\n3966\n476\n9023\n6960\n3483\n2748\n5891\n8174\n7702\n8948\n5324\n4396\n1605\n2823\n7348\n7347\n5933\n310\n9082\n916\n4255\n203\n4239\n5976\n6200\n6435\n4425\n787\n1121\n6034\n13\n39\n3104\n5961\n5507\n5785\n1463\n7339\n1575\n7801\n5445\n8283\n5951\n6995\n999\n5163\n6023\n3786\n6536\n5850\n3524\n3528\n4508\n6674\n2939\n8227\n4598\n7550\n8495\n8622\n1152\n4538\n4003\n1318\n739\n3296\n8202\n1552\n6204\n5236\n3576\n4699\n9238\n1879\n488\n2274\n433\n5587\n1678\n9282\n7914\n8552\n6445\n7971\n8331\n6880\n7476\n7282\n1570\n7271\n3827\n6489\n8091\n9287\n7351\n1765\n5286\n6921\n542\n1762\n8553\n4987\n894\n3622\n7855\n92\n3131\n4811\n3590\n6517\n4510\n733\n4954\n1360\n5669\n2842\n8107\n5646\n5968\n1618\n1827\n7709\n8521\n5807\n5321\n9239\n5501\n3745\n4437\n1586\n7273\n5265\n6605\n7917\n1607\n6074\n4668\n7061\n1580\n8694\n8461\n4573\n618\n9173\n5243\n435\n8770\n2421\n7450\n3870\n8308\n2605\n2934\n9240\n6887\n4512\n1198\n7585\n7691\n7738\n2843\n8423\n7929\n6971\n7854\n86\n9128\n4298\n622\n790\n9155\n6579\n2203\n7716\n1265\n8645\n3834\n1174\n7380\n623\n8936\n4306\n8082\n4312\n8661\n5753\n7243\n2768\n8155\n85\n4143\n3047\n8479\n7809\n2833\n5555\n7578\n1637\n1936\n8130\n5549\n8062\n7143\n5522\n8966\n5614\n8105\n8719\n7655\n7502\n8268\n5760\n6695\n5565\n7615\n9226\n4870\n4507\n3160\n4835\n1598\n2465\n4422\n5248\n7867\n1078\n5015\n6660\n1676\n5354\n6391\n5351\n7184\n6280\n5936\n6124\n1327\n2906\n269\n8292\n2466\n8809\n5167\n8142\n8204\n2713\n1910\n2930\n2494\n5592\n7384\n7726\n5727\n625\n1735\n5710\n5518\n2491\n1410\n4989\n5183\n8777\n6562\n4947\n3692\n6129\n384\n1097\n2084\n5209\n3723\n7272\n6895\n2459\n543\n8621\n5394\n6211\n2074\n1511\n2524\n7776\n5055\n7191\n6207\n7922\n281\n8436\n2918\n3141\n4800\n6323\n7631\n8903\n2716\n3735\n3012\n5301\n3975\n2800\n7963\n105\n1920\n7391\n4909\n1754\n4816\n5488\n5145\n5098\n5139\n5268\n9317\n8631\n4346\n7318\n136\n3993\n1220\n2151\n308\n7483\n7582\n3071\n1339\n3777\n8191\n5378\n7087\n1056\n7465\n5608\n6564\n512\n2754\n2687\n1596\n5376\n1512\n566\n6382\n7360\n1757\n8035\n2296\n4264\n3551\n1053\n4716\n1537\n8518\n254\n6253\n7132\n8557\n3490\n9267\n5473\n2412\n7539\n7136\n6670\n3974\n891\n1323\n5958\n1217\n2879\n9118\n1259\n2317\n7033\n2467\n6665\n6244\n2180\n2140\n7098\n5126\n6395\n4150\n547\n4120\n4307\n1725\n2737\n8549\n8195\n1245\n6286\n935\n1756\n1701\n1626\n7379\n3492\n3717\n5802\n2817\n1234\n1005\n4101\n21\n2576\n4650\n3381\n1030\n2844\n1641\n936\n2729\n6469\n8913\n8369\n5994\n341\n81\n4083\n1685\n5152\n3380\n8739\n6615\n3829\n164\n7927\n4779\n829\n4216\n8528\n3641\n4606\n2769\n6970\n1545\n8850\n4971\n5489\n2008\n4564\n8682\n7784\n5768\n9252\n901\n438\n3577\n2765\n5904\n664\n3348\n6298\n3602\n2502\n8617\n7684\n4293\n5166\n5805\n4126\n2451\n6906\n7234\n9243\n3778\n2940
\n1087\n9053\n5026\n2504\n5283\n2820\n4242\n797\n3925\n1383\n8750\n7861\n1403\n6973\n7617\n968\n3065\n5395\n4347\n8144\n2688\n6527\n8597\n8673\n7327\n6331\n1422\n7115\n244\n7013\n2092\n54\n7970\n5742\n3464\n4823\n8588\n2938\n3060\n6406\n4149\n2375\n6616\n8803\n1555\n4369\n1380\n3011\n6144\n3367\n4990\n7370\n7131\n1995\n2602\n985\n8785\n8480\n9125\n1927\n3269\n3771\n1032\n7378\n6900\n5726\n2731\n2020\n4503\n3313\n6727\n8793\n2304\n523\n6036\n58\n7993\n5512\n5049\n2721\n8482\n673\n7937\n1168\n4472\n8247\n7287\n9017\n6421\n9190\n3584\n1819\n1792\n2810\n6033\n638\n6749\n7677\n981\n7160\n4726\n1886\n7845\n7911\n6975\n568\n7422\n4613\n4501\n2569\n4263\n3206\n4133\n2420\n3706\n8894\n2263\n5774\n4925\n9180\n8888\n2945\n2091\n1873\n6303\n729\n6728\n2156\n3267\n1860\n6597\n1374\n4930\n5253\n938\n580\n5825\n4839\n166\n8198\n6892\n8701\n74\n7094\n7284\n8954\n3156\n6140\n4279\n5594\n2229\n7535\n5466\n8413\n7105\n8192\n2632\n7638\n9308\n8530\n832\n4643\n2201\n3268\n4322\n6510\n2967\n262\n403\n7973\n1258\n8828\n4036\n5838\n9263\n8529\n2788\n4202\n237\n3838\n1291\n2305\n4056\n5628\n7281\n1430\n6476\n7935\n2850\n6041\n2013\n4016\n4576\n5312\n6827\n6321\n8669\n8439\n830\n1942\n1519\n2750\n6106\n6993\n6235\n5899\n7313\n5331\n4371\n7086\n4399\n8600\n2660\n5409\n3465\n5499\n6231\n5745\n1801\n5337\n4468\n1451\n4192\n1275\n8230\n2302\n1114\n4960\n8860\n3900\n6468\n5058\n1505\n8868\n5588\n3858\n1947\n2565\n1472\n8499\n243\n8442\n6583\n7085\n5374\n2250\n4291\n4426\n492\n2311\n8305\n3662\n5338\n8780\n7488\n3890\n5005\n2442\n4680\n7358\n9116\n4397\n5999\n587\n7902\n83\n3566\n2134\n8942\n4767\n6601\n2456\n1745\n5736\n5254\n8017\n4015\n7690\n3798\n8947\n1067\n2116\n7945\n590\n2547\n2535\n64\n2053\n5359\n2493\n6669\n4351\n6412\n7473\n6147\n7175\n6983\n5196\n745\n2657\n3497\n697\n3161\n7528\n2239\n5991\n3201\n7681\n2440\n5189\n2959\n2044\n8917\n2046\n6313\n6333\n5318\n2763\n4301\n2555\n2213\n2933\n4121\n1340\n3903\n4392\n7889\n5323\n1055\n707\n3857\n518\n6078\n5134\n6645\n9138\n1592\n680\n4446\n7943\n3461\n3887\n5601\n2321\n6621\n558\n4914\n913\n5637\n6453\n8511\n4531\n1218\n5508\n2603\n6802\n8426\n8297\n2947\n5971\n6552\n5262\n5935\n782\n7435\n8357\n6139\n1136\n1473\n5008\n3585\n3627\n2914\n5356\n2997\n2347\n881\n5652\n4849\n8808\n8351\n4017\n2010\n6836\n7616\n4391\n3630\n3712\n6099\n2969\n5238\n4333\n2301\n4406\n1236\n1050\n1864\n1104\n8408\n8251\n8795\n5879\n3365\n7481\n8206\n2452\n1767\n8859\n124\n3948\n4444\n8962\n4438\n5003\n1740\n8428\n3105\n5117\n1095\n1480\n8755\n7881\n3097\n4877\n155\n1917\n2455\n6042\n337\n6724\n6045\n8483\n7135\n2242\n4566\n1679\n834\n1746\n795\n3548\n2314\n2036\n4046\n9129\n6979\n7084\n5091\n2413\n8170\n5775\n1817\n529\n7220\n813\n2916\n5130\n8972\n126\n1243\n2370\n4831\n9122\n3010\n5104\n2613\n6761\n7482\n909\n2146\n4595\n5340\n3512\n6283\n2346\n653\n6121\n2615\n7421\n1869\n1002\n8834\n2991\n8992\n632\n1093\n4543\n645\n2352\n4115\n373\n1483\n6966\n8598\n3896\n3434\n5987\n8318\n1815\n1223\n1548\n6885\n5073\n6330\n2573\n1369\n4095\n1431\n2185\n5766\n1301\n7258\n8048\n7598\n2847\n1996\n2378\n8561\n743\n6381\n271\n1956\n7439\n7596\n7134\n6636\n5804\n1858\n6214\n4730\n8536\n1203\n3118\n9202\n1875\n5885\n8975\n168\n5898\n4014\n4186\n3346\n3041\n5558\n9296\n8157\n4339\n3234\n1738\n2604\n6803\n5387\n5590\n125\n2173\n8012\n8005\n4858\n3069\n651\n372\n378\n8366\n6299\n1449\n7793\n8541\n3235\n8043\n3086\n3983\n6949\n4690\n2176\n6494\n7637\n8406\n3856\n7408\n350\n7021\n8224\n7044\n7662\n6697\n7679\n169\n528\n7029\n2790\n7138\n7432\n7602\n8333\n1582\n1378\n519\n482\n9279\n8015\n6592\n4514\n3542\n2612\n6
28\n5053\n6699\n6227\n2094\n1621\n847\n3598\n2728\n8490\n7276\n6620\n8345\n9216\n4278\n4059\n9058\n5063\n5816\n4173\n8134\n1997\n3182\n3224\n8129\n5109\n4494\n189\n7640\n8243\n180\n2963\n1123\n5593\n3263\n4185\n7140\n8990\n6320\n9275\n4601\n4854\n5907\n1135\n8083\n5964\n7788\n1992\n8069\n9174\n6160\n35\n8572\n2865\n46\n3952\n6418\n2510\n5783\n20\n3816\n2715\n3930\n2548\n5204\n4122\n4103\n708\n7756\n3825\n777\n3550\n8502\n3929\n5440\n6751\n7764\n4070\n7331\n3743\n9131\n9206\n3828\n23\n41\n4197\n234\n5723\n7622\n8832\n4626\n2169\n5599\n2976\n5266\n1967\n1150\n5334\n90\n822\n2538\n3169\n6771\n7442\n498\n4967\n5580\n7581\n7680\n4728\n1115\n4040\n1064\n3106\n6266\n4415\n9294\n5597\n7059\n197\n7218\n6948\n5690\n4234\n1653\n4485\n4019\n3370\n919\n1330\n6085\n2078\n3768\n5427\n4545\n2435\n8862\n3633\n8145\n5221\n1388\n5913\n8140\n7471\n7156\n6989\n1190\n6832\n2830\n4387\n3454\n7469\n2910\n4526\n5187\n2410\n9223\n6247\n6912\n4681\n1300\n7407\n8612\n6523\n3616\n6894\n7253\n4515\n5874\n5448\n7137\n7957\n1130\n3092\n7054\n3516\n5797\n1000\n2727\n4336\n9090\n6403\n7255\n8919\n6522\n6760\n8898\n4803\n1938\n374\n8686\n9150\n3985\n7045\n3475\n6065\n7991\n1409\n7851\n6671\n6090\n5826\n7857\n1155\n8964\n1117\n7072\n6064\n2497\n4899\n2397\n3189\n2369\n15\n5027\n5754\n8950\n5617\n8391\n914\n6264\n279\n6174\n5184\n3733\n7392\n5278\n2924\n567\n7994\n352\n8084\n2148\n2723\n3359\n70\n1870\n7708\n220\n3994\n9013\n3191\n9220\n4155\n5717\n1110\n2198\n9179\n785\n5325\n4770\n4250\n52\n4634\n5072\n9037\n601\n8036\n7996\n2483\n7232\n8675\n8836\n1279\n5346\n7676\n6104\n1515\n4603\n5607\n7894\n5144\n2628\n68\n440\n3586\n3083\n4830\n4378\n7762\n1134\n4542\n7850\n6296\n2866\n4011\n8751\n4776\n7954\n7102\n5697\n2032\n5729\n5017\n6962\n2051\n1092\n764\n9019\n2759\n8581\n1484\n8618\n912\n2382\n4892\n8447\n8176\n5491\n5695\n5504\n1060\n7064\n709\n578\n4320\n2379\n7649\n8416\n1613\n5344\n7512\n7865\n3037\n6689\n6557\n1569\n5955\n3707\n9168\n8566\n1775\n5950\n6943\n7804\n434\n6179\n9300\n1142\n7947\n6456\n6291\n5789\n6538\n9134\n3049\n5075\n5399\n5161\n1623\n948\n6302\n6063\n7516\n117\n506\n3302\n7146\n355\n3854\n1081\n2827\n1496\n2574\n6167\n3183\n4287\n5482\n1722\n7319\n7277\n3860\n3443\n3298\n8364\n3826\n7254\n2360\n5093\n7039\n6325\n4230\n2567\n6241\n4443\n559\n2625\n4228\n8967\n6405\n1674\n3936\n4475\n8556\n8585\n896\n3713\n6259\n4297\n6718\n2392\n2279\n4927\n1283\n2374\n2860\n7665\n663\n596\n6293\n6805\n2811\n7383\n8306\n8330\n3153\n2153\n2618\n2441\n3615\n8092\n552\n5285\n5255\n8124\n9247\n5530\n8175\n6242\n5660\n3433\n1610\n1832\n3892\n3862\n640\n2127\n2474\n4196\n3495\n7217\n5206\n4836\n7759\n4376\n800\n4227\n3699\n9055\n5665\n6826\n7463\n9065\n4720\n5069\n3245\n3453\n3358\n6532\n5970\n7921\n4087\n1547\n3424\n8040\n7995\n6787\n9069\n8716\n2561\n8199\n1479\n2767\n7818\n7145\n604\n7597\n4896\n9281\n4666\n185\n8171\n7978\n3059\n9196\n9221\n2135\n1800\n2974\n1529\n5948\n446\n4436\n8672\n3508\n6208\n5673\n6998\n5203\n278\n7041\n9110\n5853\n8121\n1764\n3046\n2400\n6575\n4738\n2228\n7761\n9322\n7019\n6931\n6383\n6762\n283\n3935\n2534\n7717\n6785\n471\n8214\n231\n4241\n5310\n3844\n5746\n2011\n7209\n336\n6433\n756\n9167\n6741\n3345\n7685\n4018\n6682\n9147\n4790\n5836\n5906\n8747\n676\n3964\n6362\n3510\n7510\n2308\n1806\n5917\n1189\n4012\n3387\n1331\n5319\n5423\n8900\n147\n3780\n1696\n9111\n6783\n6497\n4104\n1898\n3987\n260\n4616\n2121\n9283\n1400\n2437\n4670\n2735\n1163\n2096\n6521\n1423\n4523\n2243\n6667\n6990\n3944\n6915\n6763\n5611\n404\n2691\n1015\n7092\n7562\n8624\n2291\n4193\n5934\n5503\n2326\n4408\n2960\n842\n1963\n3354\n55
68\n9050\n3806\n439\n9154\n6055\n6451\n2190\n7633\n688\n4354\n8890\n2813\n2872\n8102\n8317\n6609\n1497\n8389\n6449\n1682\n3594\n5103\n5812\n863\n268\n3054\n8079\n2260\n2027\n3091\n7687\n6703\n3557\n2019\n8427\n2799\n8182\n6641\n3168\n2284\n1934\n4865\n1077\n6507\n1658\n3811\n1774\n7897\n2238\n2943\n191\n3869\n3246\n4057\n3188\n414\n8072\n7838\n1382\n4962\n6010\n5363\n4042\n1983\n4077\n7429\n1833\n3583\n4044\n1109\n1295\n386\n5481\n3927\n311\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/data_specs/got10k_val_split.txt",
    "content": "1349\n5651\n5878\n562\n2202\n8904\n765\n1501\n8654\n2975\n2689\n3680\n5180\n1900\n7707\n4723\n8912\n4029\n3579\n869\n2888\n8657\n6599\n741\n4288\n2244\n7357\n5704\n8791\n208\n8587\n7969\n4805\n8526\n4887\n8871\n7468\n3343\n886\n7794\n5764\n2646\n6454\n6101\n7885\n7744\n1297\n4119\n4856\n122\n2286\n2925\n5131\n3570\n5843\n3027\n5320\n5626\n540\n1862\n5401\n7335\n699\n7760\n9198\n3259\n7345\n8698\n1280\n6479\n3100\n3988\n1322\n5737\n1268\n3257\n6791\n3326\n4815\n7644\n1082\n2826\n6821\n8984\n2553\n5290\n5909\n4762\n9242\n8096\n8066\n4325\n6666\n7193\n7114\n8060\n2376\n7872\n6788\n3544\n5460\n3507\n2509\n6626\n3429\n5542\n4220\n2968\n5271\n4249\n3863\n1868\n5581\n2012\n6270\n8038\n4050\n121\n2845\n1565\n1998\n2275\n5524\n6068\n7624\n4913\n9277\n1506\n803\n8848\n5925\n2450\n2072\n8190\n4753\n9162\n1923\n825\n7303\n9028\n2088\n8516\n1556\n5937\n7847\n2367\n7549\n1049\n1521\n4739\n3931\n8958\n4130\n7877\n7876\n897\n5985\n7346\n7537\n111\n3700\n1126\n7896\n1288\n3419\n4673\n1051\n5720\n1068\n3458\n146\n291\n6256\n5514\n2857\n4580\n6239\n6525\n8717\n391\n4841\n6676\n4360\n1453\n4211\n73\n1675\n1987\n4025\n1321\n662\n8265\n6424\n2758\n7765\n7656\n3209\n7497\n7600\n9039\n7697\n5177\n2983\n5622\n9295\n1200\n3284\n964\n2024\n1269\n4551\n8088\n5659\n2212\n5199\n5551\n8607\n5573\n2247\n5200\n6341\n7951\n8429\n7720\n5919\n1273\n3529\n6707\n9176\n7552\n3255\n5649\n6110\n9235\n1137\n9272\n775\n788\n5786\n5186\n6746\n2667\n9145\n7630\n3953\n1828\n8827\n6471\n4702\n7815\n467\n6387\n3195\n6238\n6508\n2373\n5983\n4931\n2948\n921\n2438\n517\n3949\n2137\n3216\n5683\n3695\n1719\n4837\n9159\n6981\n860\n7410\n5497\n1770\n5557\n8810\n5194\n4857\n9100\n6329\n2609\n1925\n3686\n9041\n4924\n349\n9187\n3393\n3661\n7120\n6858\n4587\n3831\n3130\n5396\n5060\n6486\n3937\n8023\n824\n5398\n1354\n8861\n5534\n7292\n4389\n6029\n6226\n3505\n4326\n7445\n581\n6089\n3450\n7324\n6516\n6775\n1207\n4575\n5135\n9265\n3918\n9020\n3473\n3898\n7812\n6571\n6757\n6639\n2557\n1206\n6148\n7325\n8790\n4938\n7026\n4383\n8041\n1250\n7267\n1952\n7561\n8811\n4941\n8373\n4848\n6602\n8355\n8104\n5214\n6654\n4330\n995\n3181\n3422\n456\n1782\n3408\n6530\n719\n7587\n5910\n3058\n740\n2009\n4207\n5336\n2798\n9229\n8668\n2473\n4221\n1493\n3281\n171\n9157\n9139\n7766\n6220\n9127\n3324\n5308\n3708\n2431\n8080\n2093\n2585\n406\n7040\n5064\n5247\n4758\n6512\n2953\n4257\n4935\n2705\n2572\n3436\n8513\n5884\n1385\n4852\n2637\n7091\n2761\n6007\n8332\n6694\n2422\n4917\n2186\n6898\n1390\n6965\n3132\n7698\n475\n2002\n2692\n5024\n7365\n7373\n4091\n1731\n947\n3962\n8692\n1788\n8734\n8656\n6862\n6856\n1950\n1914\n5658\n3635\n1620\n4780\n2580\n1454\n2786\n687\n7238\n3648\n6452\n1197\n3190\n5900\n9043\n4958\n1935\n1821\n1187\n1153\n7737\n7223\n3820\n7169\n7350\n5674\n6254\n3025\n6680\n1690\n2899\n3893\n1577\n5728\n9189\n5077\n34\n3560\n2179\n5462\n1402\n3654\n1376\n7936\n4246\n5506\n1179\n5647\n4686\n8644\n1352\n2855\n6079\n2254\n2668\n2287\n2457\n3418\n7264\n677\n3074\n2655\n1042\n2210\n4504\n7089\n8309\n4209\n4280\n3258\n2977\n84\n4705\n1244\n3511\n6355\n8813\n3228\n9266\n1122\n613\n732\n5202\n8425\n2638\n6470\n2886\n3541\n8132\n2063\n8201\n5129\n2818\n7949\n6936\n8090\n4465\n7295\n5239\n7009\n9271\n8563\n2832\n952\n8136\n6776\n3565\n5188\n7288\n6999\n285\n5487\n7763\n7608\n8584\n2071\n7868\n2804\n3655\n7048\n6847\n3276\n4082\n4272\n3910\n3709\n1574\n4559\n7580\n7081\n5014\n7769\n8183\n6386\n7574\n356\n4937\n2487\n9315\n7572\n3040\n671\n2682\n8626\n3868\n8623\n387\n8679\n4074\n1481\n3527\n3595\n4754\n2453\n1579\n4638\n9123\n1829\n316\n3009\n3691\n763\n4875
\n3572\n4642\n3128\n4273\n2777\n6032\n4793\n233\n7147\n996\n3199\n8835\n3517\n7210\n6125\n6037\n3684\n8589\n3915\n3095\n8310\n3180\n7043\n4458\n2889\n57\n4483\n7667\n8375\n1434\n7493\n6986\n4733\n8471\n5827\n2111\n1313\n7986\n3075\n2614\n7547\n4977\n8527\n3212\n7300\n5842\n5244\n3291\n597\n1007\n2030\n227\n3830\n5540\n247\n5643\n9333\n1958\n3096\n1371\n5220\n7926\n2927\n1516\n7130\n193\n1522\n6165\n6923\n3794\n4223\n5535\n2472\n8630\n3971\n9101\n2946\n222\n4609\n7291\n8542\n6501\n7548\n4557\n6274\n1010\n5226\n7309\n1317\n9056\n6275\n1624\n1099\n4191\n4030\n7270\n5392\n2316\n3819\n1670\n8154\n8045\n4807\n8864\n2391\n5908\n8338\n8218\n6400\n9193\n3165\n843\n6613\n6941\n4380\n9332\n5629\n7557\n4321\n3702\n681\n734\n1159\n4665\n5959\n1697\n5509\n8774\n7389\n3832\n3751\n8637\n3079\n1680\n6841\n703\n684\n8293\n3682\n5733\n4818\n3231\n3078\n5562\n9001\n3889\n7024\n2519\n1713\n3287\n219\n6021\n8776\n2289\n7212\n4832\n4684\n4617\n4237\n2649\n8185\n6326\n3568\n551\n1426\n4181\n8869\n312\n2905\n4165\n8248\n2558\n900\n1044\n8613\n7743\n5437\n7604\n3122\n5708\n8649\n2878\n4695\n4491\n1929\n7533\n5223\n7711\n915\n1844\n5751\n3008\n8055\n961\n6142\n4636\n61\n198\n2271\n5698\n4596\n4500\n5709\n5819\n7972\n2992\n1643\n1048\n6281\n8886\n360\n4198\n1841\n6814\n3960\n2606\n7001\n5888\n450\n7133\n7015\n7034\n5153\n8920\n5066\n469\n1302\n8816\n463\n8651\n5869\n8193\n6582\n5578\n1231\n9274\n7260\n7751\n8052\n6799\n2089\n2342\n8451\n3260\n5550\n7795\n2288\n1205\n40\n496\n8367\n7836\n5973\n3908\n5242\n5062\n2706\n997\n6514\n5419\n9201\n1965\n6062\n3050\n5302\n8735\n358\n2398\n7470\n1644\n8179\n7047\n1549\n5414\n2539\n7381\n589\n8166\n8505\n6035\n3956\n4540\n6721\n8074\n1062\n2384\n2531\n7159\n3502\n3902\n4584\n2554\n264\n8720\n2849\n4916\n5218\n7202\n883\n4560\n1677\n4317\n7863\n4509\n6577\n2903\n1452\n1416\n5369\n473\n6233\n6359\n5992\n4934\n8059\n6834\n4907\n3320\n8267\n8280\n2066\n2402\n1485\n3772\n3732\n4764\n9126\n3575\n5564\n4768\n5641\n1884\n2330\n1804\n344\n698\n3089\n1532\n4454\n761\n7289\n8094\n3432\n1747\n6811\n8722\n8826\n4646\n3222\n8614\n2901\n7003\n652\n8663\n4266\n413\n810\n75\n3334\n4905\n6438\n4756\n5137\n6528\n6534\n6988\n6177\n8533\n889\n5384\n7201\n5132\n7802\n6864\n3973\n873\n4840\n1482\n8376\n3769\n5858\n6675\n4286\n2593\n5863\n4353\n7817\n7540\n4999\n4838\n2303\n6002\n7913\n1508\n5317\n7755\n2784\n4964\n3431\n6209\n3755\n6022\n6399\n6232\n3954\n455\n5416\n6448\n1558\n7591\n245\n140\n9210\n6585\n4084\n967\n7798\n6795\n7095\n6733\n3861\n9264\n361\n1045\n755\n8042\n7074\n7778\n6415\n4724\n6450\n2049\n1563\n1307\n3485\n1790\n7869\n3282\n6907\n3920\n2868\n5801\n5632\n1079\n5009\n3955\n7517\n5128\n3417\n3019\n2725\n1784\n2312\n2753\n6976\n342\n8266\n1849\n2273\n5037\n7880\n3793\n7401\n5412\n8279\n1257\n3670\n9049\n3266\n8955\n6519\n8916\n2858\n694\n5650\n1019\n4669\n1785\n3533\n5877\n2704\n8603\n3726\n6668\n497\n1085\n6815\n6157\n6646\n6964\n186\n8097\n5645\n8481\n8215\n3775\n2542\n7514\n5699\n4072\n3518\n5767\n3239\n3740\n1404\n8981\n4086\n6397\n6984\n4204\n6899\n682\n6589\n3317\n2944\n3456\n4340\n7424\n9208\n6504\n4409\n1\n145\n1882\n4620\n2634\n4992\n5453\n4481\n3377\n266\n7875\n530\n1235\n7605\n504\n1771\n8489\n345\n7353\n7797\n7174\n5914\n2871\n5721\n6067\n3582\n7653\n5467\n6234\n691\n8758\n2122\n1213\n2908\n1492\n1437\n2187\n1266\n2395\n7278\n8491\n5256\n1554\n8163\n5966\n7128\n7904\n1691\n6272\n1264\n3996\n1706\n1334\n1316\n6478\n6935\n1518\n6700\n8703\n8744\n8152\n8778\n5367\n4218\n9007\n6312\n606\n7565\n5293\n2891\n675\n2125\n2120\n826\n7008\n5705\n7748\n8010\n1498\n5330\n5472\n2215\n7627
\n3016\n6588\n1850\n4128\n8569\n6987\n7566\n148\n8151\n8789\n7907\n8596\n715\n6018\n9060\n3872\n1750\n5889\n4047\n5960\n3120\n3449\n1421\n1102\n3333\n9197\n8796\n8123\n8007\n2028\n8404\n1945\n1985\n8109\n5380\n8438\n3504\n6739\n4180\n5835\n4243\n25\n4002\n1976\n3482\n8392\n158\n5181\n4885\n8985\n11\n6872\n6425\n5926\n7062\n5083\n8394\n4259\n5844\n1990\n3942\n5532\n2220\n28\n5957\n149\n6748\n1663\n3559\n7647\n2566\n1359\n8787\n5259\n7010\n554\n8231\n4229\n6005\n8172\n8125\n1350\n3571\n9051\n1973\n1386\n1781\n5788\n159\n7007\n3220\n1846\n3093\n4445\n2056\n8370\n3211\n1113\n4384\n2231\n273\n4276\n642\n7663\n5311\n265\n226\n9012\n7879\n118\n7109\n7251\n1760\n8667\n2876\n7162\n3552\n6901\n6779\n5021\n6524\n4957\n3114\n4544\n441\n1848\n2136\n2458\n8662\n1127\n5541\n3026\n1080\n6780\n2224\n8259\n1073\n9000\n7244\n7977\n500\n4435\n7376\n7979\n1435\n9291\n7704\n3791\n3521\n210\n7388\n1039\n6269\n4052\n8570\n3285\n564\n8039\n3546\n6203\n1183\n6107\n4147\n6216\n2234\n7185\n3192\n7155\n2001\n7777\n876\n944\n908\n7791\n5465\n6784\n65\n9172\n5675\n7075\n3886\n7891\n2978\n1008\n5630\n591\n5067\n1139\n577\n9015\n574\n8137\n7786\n5765\n4900\n4090\n7842\n5741\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/data_specs/got10k_vot_exclude.txt",
    "content": "GOT-10k_Train_000004\nGOT-10k_Train_000013\nGOT-10k_Train_000015\nGOT-10k_Train_000020\nGOT-10k_Train_000024\nGOT-10k_Train_000034\nGOT-10k_Train_000038\nGOT-10k_Train_000048\nGOT-10k_Train_000051\nGOT-10k_Train_000059\nGOT-10k_Train_000077\nGOT-10k_Train_000081\nGOT-10k_Train_000089\nGOT-10k_Train_000093\nGOT-10k_Train_000094\nGOT-10k_Train_000096\nGOT-10k_Train_000104\nGOT-10k_Train_000107\nGOT-10k_Train_000108\nGOT-10k_Train_000120\nGOT-10k_Train_000132\nGOT-10k_Train_000170\nGOT-10k_Train_000186\nGOT-10k_Train_000212\nGOT-10k_Train_000213\nGOT-10k_Train_000222\nGOT-10k_Train_000223\nGOT-10k_Train_000240\nGOT-10k_Train_000246\nGOT-10k_Train_000249\nGOT-10k_Train_000266\nGOT-10k_Train_000268\nGOT-10k_Train_000287\nGOT-10k_Train_000293\nGOT-10k_Train_000305\nGOT-10k_Train_000316\nGOT-10k_Train_000319\nGOT-10k_Train_000322\nGOT-10k_Train_000331\nGOT-10k_Train_000334\nGOT-10k_Train_000354\nGOT-10k_Train_000361\nGOT-10k_Train_000368\nGOT-10k_Train_000382\nGOT-10k_Train_000401\nGOT-10k_Train_000417\nGOT-10k_Train_000448\nGOT-10k_Train_000454\nGOT-10k_Train_000458\nGOT-10k_Train_000466\nGOT-10k_Train_000475\nGOT-10k_Train_000484\nGOT-10k_Train_000488\nGOT-10k_Train_000501\nGOT-10k_Train_000510\nGOT-10k_Train_000512\nGOT-10k_Train_000519\nGOT-10k_Train_000539\nGOT-10k_Train_000544\nGOT-10k_Train_000555\nGOT-10k_Train_000564\nGOT-10k_Train_000568\nGOT-10k_Train_000583\nGOT-10k_Train_000587\nGOT-10k_Train_000593\nGOT-10k_Train_000621\nGOT-10k_Train_000624\nGOT-10k_Train_000625\nGOT-10k_Train_000638\nGOT-10k_Train_000648\nGOT-10k_Train_000654\nGOT-10k_Train_000669\nGOT-10k_Train_000701\nGOT-10k_Train_000709\nGOT-10k_Train_000712\nGOT-10k_Train_000731\nGOT-10k_Train_000734\nGOT-10k_Train_000737\nGOT-10k_Train_000744\nGOT-10k_Train_000746\nGOT-10k_Train_000748\nGOT-10k_Train_000762\nGOT-10k_Train_000764\nGOT-10k_Train_000765\nGOT-10k_Train_000766\nGOT-10k_Train_000767\nGOT-10k_Train_000775\nGOT-10k_Train_000783\nGOT-10k_Train_000790\nGOT-10k_Train_000829\nGOT-10k_Train_000857\nGOT-10k_Train_000859\nGOT-10k_Train_000867\nGOT-10k_Train_000872\nGOT-10k_Train_000880\nGOT-10k_Train_000884\nGOT-10k_Train_000909\nGOT-10k_Train_000915\nGOT-10k_Train_000922\nGOT-10k_Train_000928\nGOT-10k_Train_000933\nGOT-10k_Train_000941\nGOT-10k_Train_000961\nGOT-10k_Train_000966\nGOT-10k_Train_000968\nGOT-10k_Train_000971\nGOT-10k_Train_000972\nGOT-10k_Train_000995\nGOT-10k_Train_001003\nGOT-10k_Train_001010\nGOT-10k_Train_001011\nGOT-10k_Train_001019\nGOT-10k_Train_001021\nGOT-10k_Train_001035\nGOT-10k_Train_001039\nGOT-10k_Train_001047\nGOT-10k_Train_001057\nGOT-10k_Train_001069\nGOT-10k_Train_001077\nGOT-10k_Train_001079\nGOT-10k_Train_001085\nGOT-10k_Train_001088\nGOT-10k_Train_001091\nGOT-10k_Train_001104\nGOT-10k_Train_001112\nGOT-10k_Train_001113\nGOT-10k_Train_001124\nGOT-10k_Train_001128\nGOT-10k_Train_001143\nGOT-10k_Train_001145\nGOT-10k_Train_001146\nGOT-10k_Train_001148\nGOT-10k_Train_001150\nGOT-10k_Train_001154\nGOT-10k_Train_001156\nGOT-10k_Train_001157\nGOT-10k_Train_001163\nGOT-10k_Train_001181\nGOT-10k_Train_001184\nGOT-10k_Train_001189\nGOT-10k_Train_001200\nGOT-10k_Train_001225\nGOT-10k_Train_001264\nGOT-10k_Train_001288\nGOT-10k_Train_001296\nGOT-10k_Train_001298\nGOT-10k_Train_001299\nGOT-10k_Train_001314\nGOT-10k_Train_001319\nGOT-10k_Train_001329\nGOT-10k_Train_001331\nGOT-10k_Train_001340\nGOT-10k_Train_001374\nGOT-10k_Train_001384\nGOT-10k_Train_001394\nGOT-10k_Train_001407\nGOT-10k_Train_001415\nGOT-10k_Train_001430\nGOT-10k_Train_001433\nGOT-10k_Train_001453\nGOT-10k_Train_00145
7\nGOT-10k_Train_001471\nGOT-10k_Train_001473\nGOT-10k_Train_001480\nGOT-10k_Train_001484\nGOT-10k_Train_001489\nGOT-10k_Train_001514\nGOT-10k_Train_001537\nGOT-10k_Train_001544\nGOT-10k_Train_001545\nGOT-10k_Train_001551\nGOT-10k_Train_001558\nGOT-10k_Train_001560\nGOT-10k_Train_001562\nGOT-10k_Train_001563\nGOT-10k_Train_001570\nGOT-10k_Train_001576\nGOT-10k_Train_001604\nGOT-10k_Train_001615\nGOT-10k_Train_001617\nGOT-10k_Train_001618\nGOT-10k_Train_001619\nGOT-10k_Train_001624\nGOT-10k_Train_001650\nGOT-10k_Train_001651\nGOT-10k_Train_001663\nGOT-10k_Train_001673\nGOT-10k_Train_001685\nGOT-10k_Train_001692\nGOT-10k_Train_001700\nGOT-10k_Train_001722\nGOT-10k_Train_001731\nGOT-10k_Train_001732\nGOT-10k_Train_001738\nGOT-10k_Train_001740\nGOT-10k_Train_001742\nGOT-10k_Train_001747\nGOT-10k_Train_001759\nGOT-10k_Train_001769\nGOT-10k_Train_001781\nGOT-10k_Train_001791\nGOT-10k_Train_001794\nGOT-10k_Train_001795\nGOT-10k_Train_001818\nGOT-10k_Train_001833\nGOT-10k_Train_001836\nGOT-10k_Train_001841\nGOT-10k_Train_001852\nGOT-10k_Train_001863\nGOT-10k_Train_001865\nGOT-10k_Train_001878\nGOT-10k_Train_001898\nGOT-10k_Train_001919\nGOT-10k_Train_001923\nGOT-10k_Train_001929\nGOT-10k_Train_001935\nGOT-10k_Train_001938\nGOT-10k_Train_001942\nGOT-10k_Train_001955\nGOT-10k_Train_001964\nGOT-10k_Train_001966\nGOT-10k_Train_001982\nGOT-10k_Train_002005\nGOT-10k_Train_002009\nGOT-10k_Train_002035\nGOT-10k_Train_002068\nGOT-10k_Train_002073\nGOT-10k_Train_002076\nGOT-10k_Train_002084\nGOT-10k_Train_002112\nGOT-10k_Train_002115\nGOT-10k_Train_002116\nGOT-10k_Train_002123\nGOT-10k_Train_002125\nGOT-10k_Train_002129\nGOT-10k_Train_002139\nGOT-10k_Train_002146\nGOT-10k_Train_002166\nGOT-10k_Train_002168\nGOT-10k_Train_002176\nGOT-10k_Train_002184\nGOT-10k_Train_002190\nGOT-10k_Train_002192\nGOT-10k_Train_002211\nGOT-10k_Train_002216\nGOT-10k_Train_002233\nGOT-10k_Train_002240\nGOT-10k_Train_002247\nGOT-10k_Train_002250\nGOT-10k_Train_002252\nGOT-10k_Train_002253\nGOT-10k_Train_002261\nGOT-10k_Train_002274\nGOT-10k_Train_002276\nGOT-10k_Train_002292\nGOT-10k_Train_002302\nGOT-10k_Train_002304\nGOT-10k_Train_002305\nGOT-10k_Train_002320\nGOT-10k_Train_002345\nGOT-10k_Train_002355\nGOT-10k_Train_002359\nGOT-10k_Train_002363\nGOT-10k_Train_002374\nGOT-10k_Train_002376\nGOT-10k_Train_002389\nGOT-10k_Train_002393\nGOT-10k_Train_002400\nGOT-10k_Train_002408\nGOT-10k_Train_002418\nGOT-10k_Train_002437\nGOT-10k_Train_002440\nGOT-10k_Train_002442\nGOT-10k_Train_002454\nGOT-10k_Train_002456\nGOT-10k_Train_002465\nGOT-10k_Train_002466\nGOT-10k_Train_002474\nGOT-10k_Train_002479\nGOT-10k_Train_002484\nGOT-10k_Train_002511\nGOT-10k_Train_002514\nGOT-10k_Train_002517\nGOT-10k_Train_002523\nGOT-10k_Train_002527\nGOT-10k_Train_002534\nGOT-10k_Train_002555\nGOT-10k_Train_002587\nGOT-10k_Train_002589\nGOT-10k_Train_002612\nGOT-10k_Train_002627\nGOT-10k_Train_002639\nGOT-10k_Train_002652\nGOT-10k_Train_002693\nGOT-10k_Train_002699\nGOT-10k_Train_002716\nGOT-10k_Train_002725\nGOT-10k_Train_002727\nGOT-10k_Train_002730\nGOT-10k_Train_002755\nGOT-10k_Train_002756\nGOT-10k_Train_002760\nGOT-10k_Train_002763\nGOT-10k_Train_002837\nGOT-10k_Train_002841\nGOT-10k_Train_002856\nGOT-10k_Train_002862\nGOT-10k_Train_002863\nGOT-10k_Train_002866\nGOT-10k_Train_002877\nGOT-10k_Train_002884\nGOT-10k_Train_002886\nGOT-10k_Train_002887\nGOT-10k_Train_002907\nGOT-10k_Train_002908\nGOT-10k_Train_002909\nGOT-10k_Train_002914\nGOT-10k_Train_002920\nGOT-10k_Train_002922\nGOT-10k_Train_002936\nGOT-10k_Train_002940\nGOT-10k_Train_002944\nGOT-10k_Tr
ain_002953\nGOT-10k_Train_002961\nGOT-10k_Train_002964\nGOT-10k_Train_002996\nGOT-10k_Train_003003\nGOT-10k_Train_003004\nGOT-10k_Train_003007\nGOT-10k_Train_003012\nGOT-10k_Train_003027\nGOT-10k_Train_003028\nGOT-10k_Train_003033\nGOT-10k_Train_003034\nGOT-10k_Train_003036\nGOT-10k_Train_003044\nGOT-10k_Train_003056\nGOT-10k_Train_003069\nGOT-10k_Train_003078\nGOT-10k_Train_003079\nGOT-10k_Train_003095\nGOT-10k_Train_003096\nGOT-10k_Train_003107\nGOT-10k_Train_003108\nGOT-10k_Train_003127\nGOT-10k_Train_003128\nGOT-10k_Train_003129\nGOT-10k_Train_003132\nGOT-10k_Train_003146\nGOT-10k_Train_003155\nGOT-10k_Train_003173\nGOT-10k_Train_003208\nGOT-10k_Train_003239\nGOT-10k_Train_003245\nGOT-10k_Train_003246\nGOT-10k_Train_003262\nGOT-10k_Train_003275\nGOT-10k_Train_003283\nGOT-10k_Train_003296\nGOT-10k_Train_003308\nGOT-10k_Train_003310\nGOT-10k_Train_003313\nGOT-10k_Train_003317\nGOT-10k_Train_003318\nGOT-10k_Train_003354\nGOT-10k_Train_003379\nGOT-10k_Train_003384\nGOT-10k_Train_003396\nGOT-10k_Train_003401\nGOT-10k_Train_003423\nGOT-10k_Train_003435\nGOT-10k_Train_003438\nGOT-10k_Train_003442\nGOT-10k_Train_003444\nGOT-10k_Train_003455\nGOT-10k_Train_003456\nGOT-10k_Train_003464\nGOT-10k_Train_003466\nGOT-10k_Train_003474\nGOT-10k_Train_003482\nGOT-10k_Train_003488\nGOT-10k_Train_003502\nGOT-10k_Train_003515\nGOT-10k_Train_003520\nGOT-10k_Train_003530\nGOT-10k_Train_003551\nGOT-10k_Train_003570\nGOT-10k_Train_003571\nGOT-10k_Train_003578\nGOT-10k_Train_003583\nGOT-10k_Train_003590\nGOT-10k_Train_003593\nGOT-10k_Train_003618\nGOT-10k_Train_003626\nGOT-10k_Train_003650\nGOT-10k_Train_003652\nGOT-10k_Train_003663\nGOT-10k_Train_003690\nGOT-10k_Train_003704\nGOT-10k_Train_003709\nGOT-10k_Train_003716\nGOT-10k_Train_003721\nGOT-10k_Train_003722\nGOT-10k_Train_003724\nGOT-10k_Train_003729\nGOT-10k_Train_003756\nGOT-10k_Train_003768\nGOT-10k_Train_003782\nGOT-10k_Train_003786\nGOT-10k_Train_003788\nGOT-10k_Train_003791\nGOT-10k_Train_003820\nGOT-10k_Train_003821\nGOT-10k_Train_003827\nGOT-10k_Train_003834\nGOT-10k_Train_003835\nGOT-10k_Train_003839\nGOT-10k_Train_003843\nGOT-10k_Train_003854\nGOT-10k_Train_003856\nGOT-10k_Train_003881\nGOT-10k_Train_003899\nGOT-10k_Train_003904\nGOT-10k_Train_003906\nGOT-10k_Train_003913\nGOT-10k_Train_003937\nGOT-10k_Train_003940\nGOT-10k_Train_003943\nGOT-10k_Train_003950\nGOT-10k_Train_003972\nGOT-10k_Train_003974\nGOT-10k_Train_003978\nGOT-10k_Train_003981\nGOT-10k_Train_003982\nGOT-10k_Train_004003\nGOT-10k_Train_004004\nGOT-10k_Train_004008\nGOT-10k_Train_004012\nGOT-10k_Train_004013\nGOT-10k_Train_004030\nGOT-10k_Train_004036\nGOT-10k_Train_004040\nGOT-10k_Train_004052\nGOT-10k_Train_004054\nGOT-10k_Train_004055\nGOT-10k_Train_004057\nGOT-10k_Train_004063\nGOT-10k_Train_004068\nGOT-10k_Train_004072\nGOT-10k_Train_004075\nGOT-10k_Train_004078\nGOT-10k_Train_004082\nGOT-10k_Train_004102\nGOT-10k_Train_004103\nGOT-10k_Train_004105\nGOT-10k_Train_004111\nGOT-10k_Train_004120\nGOT-10k_Train_004122\nGOT-10k_Train_004124\nGOT-10k_Train_004142\nGOT-10k_Train_004158\nGOT-10k_Train_004170\nGOT-10k_Train_004175\nGOT-10k_Train_004181\nGOT-10k_Train_004190\nGOT-10k_Train_004193\nGOT-10k_Train_004194\nGOT-10k_Train_004199\nGOT-10k_Train_004202\nGOT-10k_Train_004217\nGOT-10k_Train_004225\nGOT-10k_Train_004229\nGOT-10k_Train_004230\nGOT-10k_Train_004234\nGOT-10k_Train_004241\nGOT-10k_Train_004246\nGOT-10k_Train_004249\nGOT-10k_Train_004255\nGOT-10k_Train_004268\nGOT-10k_Train_004276\nGOT-10k_Train_004292\nGOT-10k_Train_004293\nGOT-10k_Train_004295\nGOT-10k_Train_004296\nG
OT-10k_Train_004302\nGOT-10k_Train_004324\nGOT-10k_Train_004337\nGOT-10k_Train_004342\nGOT-10k_Train_004351\nGOT-10k_Train_004356\nGOT-10k_Train_004376\nGOT-10k_Train_004380\nGOT-10k_Train_004395\nGOT-10k_Train_004398\nGOT-10k_Train_004399\nGOT-10k_Train_004408\nGOT-10k_Train_004430\nGOT-10k_Train_004439\nGOT-10k_Train_004440\nGOT-10k_Train_004462\nGOT-10k_Train_004473\nGOT-10k_Train_004476\nGOT-10k_Train_004478\nGOT-10k_Train_004481\nGOT-10k_Train_004483\nGOT-10k_Train_004484\nGOT-10k_Train_004503\nGOT-10k_Train_004513\nGOT-10k_Train_004517\nGOT-10k_Train_004533\nGOT-10k_Train_004536\nGOT-10k_Train_004594\nGOT-10k_Train_004595\nGOT-10k_Train_004607\nGOT-10k_Train_004619\nGOT-10k_Train_004626\nGOT-10k_Train_004642\nGOT-10k_Train_004646\nGOT-10k_Train_004652\nGOT-10k_Train_004658\nGOT-10k_Train_004660\nGOT-10k_Train_004661\nGOT-10k_Train_004668\nGOT-10k_Train_004673\nGOT-10k_Train_004679\nGOT-10k_Train_004694\nGOT-10k_Train_004702\nGOT-10k_Train_004709\nGOT-10k_Train_004717\nGOT-10k_Train_004757\nGOT-10k_Train_004768\nGOT-10k_Train_004824\nGOT-10k_Train_004826\nGOT-10k_Train_004833\nGOT-10k_Train_004839\nGOT-10k_Train_004843\nGOT-10k_Train_004852\nGOT-10k_Train_004862\nGOT-10k_Train_004865\nGOT-10k_Train_004878\nGOT-10k_Train_004880\nGOT-10k_Train_004881\nGOT-10k_Train_004902\nGOT-10k_Train_004906\nGOT-10k_Train_004920\nGOT-10k_Train_004950\nGOT-10k_Train_004951\nGOT-10k_Train_004952\nGOT-10k_Train_004973\nGOT-10k_Train_004983\nGOT-10k_Train_004984\nGOT-10k_Train_004990\nGOT-10k_Train_004993\nGOT-10k_Train_004995\nGOT-10k_Train_005004\nGOT-10k_Train_005007\nGOT-10k_Train_005022\nGOT-10k_Train_005024\nGOT-10k_Train_005040\nGOT-10k_Train_005046\nGOT-10k_Train_005047\nGOT-10k_Train_005058\nGOT-10k_Train_005063\nGOT-10k_Train_005072\nGOT-10k_Train_005097\nGOT-10k_Train_005098\nGOT-10k_Train_005099\nGOT-10k_Train_005108\nGOT-10k_Train_005113\nGOT-10k_Train_005119\nGOT-10k_Train_005126\nGOT-10k_Train_005146\nGOT-10k_Train_005166\nGOT-10k_Train_005191\nGOT-10k_Train_005207\nGOT-10k_Train_005255\nGOT-10k_Train_005269\nGOT-10k_Train_005280\nGOT-10k_Train_005310\nGOT-10k_Train_005317\nGOT-10k_Train_005319\nGOT-10k_Train_005334\nGOT-10k_Train_005338\nGOT-10k_Train_005339\nGOT-10k_Train_005354\nGOT-10k_Train_005364\nGOT-10k_Train_005382\nGOT-10k_Train_005385\nGOT-10k_Train_005389\nGOT-10k_Train_005390\nGOT-10k_Train_005396\nGOT-10k_Train_005398\nGOT-10k_Train_005399\nGOT-10k_Train_005401\nGOT-10k_Train_005413\nGOT-10k_Train_005415\nGOT-10k_Train_005420\nGOT-10k_Train_005457\nGOT-10k_Train_005465\nGOT-10k_Train_005488\nGOT-10k_Train_005493\nGOT-10k_Train_005510\nGOT-10k_Train_005523\nGOT-10k_Train_005538\nGOT-10k_Train_005553\nGOT-10k_Train_005556\nGOT-10k_Train_005575\nGOT-10k_Train_005577\nGOT-10k_Train_005582\nGOT-10k_Train_005594\nGOT-10k_Train_005606\nGOT-10k_Train_005611\nGOT-10k_Train_005636\nGOT-10k_Train_005639\nGOT-10k_Train_005642\nGOT-10k_Train_005651\nGOT-10k_Train_005652\nGOT-10k_Train_005653\nGOT-10k_Train_005681\nGOT-10k_Train_005686\nGOT-10k_Train_005689\nGOT-10k_Train_005701\nGOT-10k_Train_005712\nGOT-10k_Train_005716\nGOT-10k_Train_005724\nGOT-10k_Train_005731\nGOT-10k_Train_005732\nGOT-10k_Train_005734\nGOT-10k_Train_005741\nGOT-10k_Train_005764\nGOT-10k_Train_005767\nGOT-10k_Train_005788\nGOT-10k_Train_005791\nGOT-10k_Train_005800\nGOT-10k_Train_005813\nGOT-10k_Train_005816\nGOT-10k_Train_005830\nGOT-10k_Train_005852\nGOT-10k_Train_005876\nGOT-10k_Train_005877\nGOT-10k_Train_005884\nGOT-10k_Train_005910\nGOT-10k_Train_005929\nGOT-10k_Train_005943\nGOT-10k_Train_005958\nGOT-10k_Train_
005995\nGOT-10k_Train_006002\nGOT-10k_Train_006010\nGOT-10k_Train_006018\nGOT-10k_Train_006021\nGOT-10k_Train_006022\nGOT-10k_Train_006040\nGOT-10k_Train_006046\nGOT-10k_Train_006057\nGOT-10k_Train_006075\nGOT-10k_Train_006087\nGOT-10k_Train_006099\nGOT-10k_Train_006115\nGOT-10k_Train_006126\nGOT-10k_Train_006129\nGOT-10k_Train_006142\nGOT-10k_Train_006161\nGOT-10k_Train_006163\nGOT-10k_Train_006193\nGOT-10k_Train_006195\nGOT-10k_Train_006204\nGOT-10k_Train_006206\nGOT-10k_Train_006215\nGOT-10k_Train_006216\nGOT-10k_Train_006220\nGOT-10k_Train_006224\nGOT-10k_Train_006232\nGOT-10k_Train_006241\nGOT-10k_Train_006247\nGOT-10k_Train_006287\nGOT-10k_Train_006300\nGOT-10k_Train_006315\nGOT-10k_Train_006318\nGOT-10k_Train_006322\nGOT-10k_Train_006337\nGOT-10k_Train_006341\nGOT-10k_Train_006344\nGOT-10k_Train_006348\nGOT-10k_Train_006349\nGOT-10k_Train_006363\nGOT-10k_Train_006366\nGOT-10k_Train_006376\nGOT-10k_Train_006378\nGOT-10k_Train_006395\nGOT-10k_Train_006402\nGOT-10k_Train_006406\nGOT-10k_Train_006412\nGOT-10k_Train_006413\nGOT-10k_Train_006427\nGOT-10k_Train_006448\nGOT-10k_Train_006459\nGOT-10k_Train_006464\nGOT-10k_Train_006474\nGOT-10k_Train_006477\nGOT-10k_Train_006482\nGOT-10k_Train_006483\nGOT-10k_Train_006496\nGOT-10k_Train_006498\nGOT-10k_Train_006499\nGOT-10k_Train_006505\nGOT-10k_Train_006506\nGOT-10k_Train_006514\nGOT-10k_Train_006533\nGOT-10k_Train_006563\nGOT-10k_Train_006569\nGOT-10k_Train_006573\nGOT-10k_Train_006584\nGOT-10k_Train_006585\nGOT-10k_Train_006587\nGOT-10k_Train_006591\nGOT-10k_Train_006592\nGOT-10k_Train_006598\nGOT-10k_Train_006605\nGOT-10k_Train_006631\nGOT-10k_Train_006633\nGOT-10k_Train_006644\nGOT-10k_Train_006651\nGOT-10k_Train_006654\nGOT-10k_Train_006672\nGOT-10k_Train_006717\nGOT-10k_Train_006728\nGOT-10k_Train_006736\nGOT-10k_Train_006740\nGOT-10k_Train_006746\nGOT-10k_Train_006754\nGOT-10k_Train_006759\nGOT-10k_Train_006766\nGOT-10k_Train_006789\nGOT-10k_Train_006796\nGOT-10k_Train_006797\nGOT-10k_Train_006817\nGOT-10k_Train_006818\nGOT-10k_Train_006849\nGOT-10k_Train_006851\nGOT-10k_Train_006855\nGOT-10k_Train_006872\nGOT-10k_Train_006879\nGOT-10k_Train_006900\nGOT-10k_Train_006912\nGOT-10k_Train_006926\nGOT-10k_Train_006936\nGOT-10k_Train_006955\nGOT-10k_Train_006968\nGOT-10k_Train_006969\nGOT-10k_Train_006979\nGOT-10k_Train_006980\nGOT-10k_Train_006984\nGOT-10k_Train_006986\nGOT-10k_Train_006991\nGOT-10k_Train_007017\nGOT-10k_Train_007032\nGOT-10k_Train_007035\nGOT-10k_Train_007048\nGOT-10k_Train_007064\nGOT-10k_Train_007065\nGOT-10k_Train_007075\nGOT-10k_Train_007077\nGOT-10k_Train_007081\nGOT-10k_Train_007083\nGOT-10k_Train_007089\nGOT-10k_Train_007106\nGOT-10k_Train_007107\nGOT-10k_Train_007131\nGOT-10k_Train_007138\nGOT-10k_Train_007144\nGOT-10k_Train_007150\nGOT-10k_Train_007168\nGOT-10k_Train_007170\nGOT-10k_Train_007177\nGOT-10k_Train_007181\nGOT-10k_Train_007183\nGOT-10k_Train_007190\nGOT-10k_Train_007208\nGOT-10k_Train_007220\nGOT-10k_Train_007223\nGOT-10k_Train_007247\nGOT-10k_Train_007273\nGOT-10k_Train_007284\nGOT-10k_Train_007289\nGOT-10k_Train_007293\nGOT-10k_Train_007294\nGOT-10k_Train_007296\nGOT-10k_Train_007316\nGOT-10k_Train_007322\nGOT-10k_Train_007355\nGOT-10k_Train_007360\nGOT-10k_Train_007362\nGOT-10k_Train_007364\nGOT-10k_Train_007388\nGOT-10k_Train_007392\nGOT-10k_Train_007403\nGOT-10k_Train_007404\nGOT-10k_Train_007426\nGOT-10k_Train_007427\nGOT-10k_Train_007443\nGOT-10k_Train_007446\nGOT-10k_Train_007461\nGOT-10k_Train_007482\nGOT-10k_Train_007489\nGOT-10k_Train_007499\nGOT-10k_Train_007503\nGOT-10k_Train_007507\nGOT-1
0k_Train_007515\nGOT-10k_Train_007521\nGOT-10k_Train_007523\nGOT-10k_Train_007525\nGOT-10k_Train_007535\nGOT-10k_Train_007559\nGOT-10k_Train_007566\nGOT-10k_Train_007582\nGOT-10k_Train_007586\nGOT-10k_Train_007596\nGOT-10k_Train_007616\nGOT-10k_Train_007623\nGOT-10k_Train_007634\nGOT-10k_Train_007637\nGOT-10k_Train_007643\nGOT-10k_Train_007645\nGOT-10k_Train_007653\nGOT-10k_Train_007660\nGOT-10k_Train_007661\nGOT-10k_Train_007663\nGOT-10k_Train_007672\nGOT-10k_Train_007700\nGOT-10k_Train_007710\nGOT-10k_Train_007714\nGOT-10k_Train_007717\nGOT-10k_Train_007718\nGOT-10k_Train_007737\nGOT-10k_Train_007741\nGOT-10k_Train_007746\nGOT-10k_Train_007763\nGOT-10k_Train_007769\nGOT-10k_Train_007780\nGOT-10k_Train_007803\nGOT-10k_Train_007821\nGOT-10k_Train_007825\nGOT-10k_Train_007839\nGOT-10k_Train_007848\nGOT-10k_Train_007873\nGOT-10k_Train_007877\nGOT-10k_Train_007882\nGOT-10k_Train_007894\nGOT-10k_Train_007905\nGOT-10k_Train_007908\nGOT-10k_Train_007911\nGOT-10k_Train_007914\nGOT-10k_Train_007918\nGOT-10k_Train_007929\nGOT-10k_Train_007936\nGOT-10k_Train_007938\nGOT-10k_Train_007965\nGOT-10k_Train_007969\nGOT-10k_Train_007973\nGOT-10k_Train_007987\nGOT-10k_Train_007999\nGOT-10k_Train_008001\nGOT-10k_Train_008034\nGOT-10k_Train_008050\nGOT-10k_Train_008056\nGOT-10k_Train_008068\nGOT-10k_Train_008073\nGOT-10k_Train_008089\nGOT-10k_Train_008095\nGOT-10k_Train_008101\nGOT-10k_Train_008128\nGOT-10k_Train_008139\nGOT-10k_Train_008147\nGOT-10k_Train_008154\nGOT-10k_Train_008171\nGOT-10k_Train_008180\nGOT-10k_Train_008193\nGOT-10k_Train_008194\nGOT-10k_Train_008201\nGOT-10k_Train_008212\nGOT-10k_Train_008226\nGOT-10k_Train_008230\nGOT-10k_Train_008231\nGOT-10k_Train_008236\nGOT-10k_Train_008239\nGOT-10k_Train_008241\nGOT-10k_Train_008243\nGOT-10k_Train_008249\nGOT-10k_Train_008250\nGOT-10k_Train_008273\nGOT-10k_Train_008278\nGOT-10k_Train_008291\nGOT-10k_Train_008310\nGOT-10k_Train_008311\nGOT-10k_Train_008317\nGOT-10k_Train_008319\nGOT-10k_Train_008331\nGOT-10k_Train_008332\nGOT-10k_Train_008344\nGOT-10k_Train_008369\nGOT-10k_Train_008377\nGOT-10k_Train_008386\nGOT-10k_Train_008392\nGOT-10k_Train_008396\nGOT-10k_Train_008432\nGOT-10k_Train_008438\nGOT-10k_Train_008439\nGOT-10k_Train_008440\nGOT-10k_Train_008442\nGOT-10k_Train_008443\nGOT-10k_Train_008455\nGOT-10k_Train_008471\nGOT-10k_Train_008484\nGOT-10k_Train_008490\nGOT-10k_Train_008492\nGOT-10k_Train_008499\nGOT-10k_Train_008502\nGOT-10k_Train_008507\nGOT-10k_Train_008520\nGOT-10k_Train_008525\nGOT-10k_Train_008568\nGOT-10k_Train_008587\nGOT-10k_Train_008589\nGOT-10k_Train_008591\nGOT-10k_Train_008606\nGOT-10k_Train_008612\nGOT-10k_Train_008623\nGOT-10k_Train_008628\nGOT-10k_Train_008633\nGOT-10k_Train_008634\nGOT-10k_Train_008645\nGOT-10k_Train_008656\nGOT-10k_Train_008668\nGOT-10k_Train_008670\nGOT-10k_Train_008702\nGOT-10k_Train_008714\nGOT-10k_Train_008723\nGOT-10k_Train_008731\nGOT-10k_Train_008732\nGOT-10k_Train_008734\nGOT-10k_Train_008747\nGOT-10k_Train_008787\nGOT-10k_Train_008794\nGOT-10k_Train_008805\nGOT-10k_Train_008829\nGOT-10k_Train_008837\nGOT-10k_Train_008838\nGOT-10k_Train_008853\nGOT-10k_Train_008878\nGOT-10k_Train_008879\nGOT-10k_Train_008880\nGOT-10k_Train_008891\nGOT-10k_Train_008895\nGOT-10k_Train_008907\nGOT-10k_Train_008909\nGOT-10k_Train_008922\nGOT-10k_Train_008935\nGOT-10k_Train_008939\nGOT-10k_Train_008972\nGOT-10k_Train_008975\nGOT-10k_Train_008976\nGOT-10k_Train_009002\nGOT-10k_Train_009031\nGOT-10k_Train_009040\nGOT-10k_Train_009052\nGOT-10k_Train_009056\nGOT-10k_Train_009057\nGOT-10k_Train_009066\nGOT-10k_Train_0090
76\nGOT-10k_Train_009103\nGOT-10k_Train_009115\nGOT-10k_Train_009117\nGOT-10k_Train_009127\nGOT-10k_Train_009137\nGOT-10k_Train_009145\nGOT-10k_Train_009150\nGOT-10k_Train_009155\nGOT-10k_Train_009156\nGOT-10k_Train_009160\nGOT-10k_Train_009179\nGOT-10k_Train_009181\nGOT-10k_Train_009196\nGOT-10k_Train_009203\nGOT-10k_Train_009216\nGOT-10k_Train_009219\nGOT-10k_Train_009222\nGOT-10k_Train_009224\nGOT-10k_Train_009229\nGOT-10k_Train_009231\nGOT-10k_Train_009235\nGOT-10k_Train_009242\nGOT-10k_Train_009263\nGOT-10k_Train_009265\nGOT-10k_Train_009280\nGOT-10k_Train_009282\nGOT-10k_Train_009300\nGOT-10k_Train_009301\nGOT-10k_Train_009329\nGOT-10k_Train_009332\nGOT-10k_Train_009334"
  },
  {
    "path": "artrackv2_mindspore/lib/train/data_specs/got10k_vot_train_split.txt",
    "content": "3784\n8998\n1631\n8277\n8358\n2338\n2988\n8302\n2662\n2663\n2825\n7447\n4781\n2218\n5860\n2819\n8075\n5391\n116\n3606\n7976\n7941\n1024\n4519\n1970\n557\n8579\n6908\n993\n7204\n1991\n3674\n8781\n6840\n5\n3225\n3763\n8688\n6778\n5777\n4794\n2744\n8126\n3864\n1733\n2923\n6829\n683\n2081\n1831\n2404\n1459\n2741\n5972\n7462\n2654\n103\n2174\n2989\n2506\n2766\n5912\n3295\n3986\n609\n4895\n6673\n801\n1098\n1602\n2490\n8476\n3186\n4784\n4270\n1812\n4226\n2267\n8873\n6544\n6112\n2381\n4752\n753\n3776\n6511\n6016\n2559\n7369\n5866\n563\n7731\n1105\n5603\n50\n4238\n2208\n8725\n4994\n4719\n1444\n8807\n7298\n8760\n8173\n2332\n4131\n1065\n8562\n3992\n4024\n2188\n9095\n6765\n1707\n6105\n6922\n5362\n1486\n7898\n4135\n6574\n998\n6565\n8127\n8927\n2544\n4365\n768\n3535\n3875\n6808\n2931\n487\n4451\n2470\n8111\n3493\n7338\n8281\n6390\n1271\n4373\n3667\n3494\n3757\n2966\n7840\n7827\n3300\n6261\n4163\n2217\n6549\n7236\n9136\n1857\n6691\n3470\n6271\n807\n516\n9311\n6098\n3144\n8420\n5425\n5694\n2643\n6696\n6072\n7285\n3781\n903\n8522\n6092\n5979\n2622\n2529\n855\n3420\n3261\n8953\n7866\n2492\n3157\n359\n1520\n2642\n7452\n759\n36\n8931\n1744\n4350\n1089\n9199\n1889\n1908\n4868\n4498\n1968\n3273\n7413\n4114\n5584\n4874\n1427\n5211\n7618\n1542\n1353\n8158\n4168\n3200\n6345\n8560\n5619\n5953\n3158\n8849\n5831\n1411\n8103\n6539\n7397\n1006\n5450\n3119\n4274\n5352\n4571\n2319\n4976\n902\n1814\n2651\n3299\n3398\n982\n2428\n5793\n1346\n7057\n3737\n7329\n4449\n2110\n7405\n1773\n958\n3901\n4127\n8234\n2994\n7066\n1289\n2995\n5871\n3556\n9085\n846\n2366\n585\n5516\n5230\n3481\n2732\n6658\n7423\n1855\n6384\n3554\n5823\n4948\n7058\n4667\n5377\n2503\n7694\n9191\n9144\n655\n3409\n62\n8019\n8970\n2323\n5750\n3178\n6548\n7501\n3280\n343\n2171\n8397\n1367\n8611\n6118\n6603\n7182\n9048\n7733\n7141\n3335\n4845\n5449\n3467\n6250\n163\n5168\n2040\n3609\n8352\n3426\n8567\n769\n187\n6151\n6437\n7028\n3970\n9146\n5028\n7492\n1661\n2815\n2469\n2563\n3814\n8430\n4305\n3479\n5678\n4132\n1211\n5459\n4814\n545\n4556\n238\n2724\n1260\n2581\n4632\n4313\n380\n1209\n5447\n3032\n7942\n8943\n806\n2432\n6130\n4314\n2131\n9045\n6531\n5706\n6747\n7724\n2017\n3292\n5469\n2743\n424\n4233\n8619\n5192\n4516\n9324\n3537\n9152\n8058\n7526\n8711\n1949\n5982\n6702\n7027\n6388\n7012\n328\n2130\n452\n306\n7669\n3134\n5761\n3703\n44\n4189\n695\n5224\n9215\n5644\n3143\n5443\n2348\n2328\n4725\n1418\n7810\n5759\n7226\n4535\n4385\n5397\n7249\n3204\n385\n2371\n2738\n3636\n9033\n2246\n2680\n6940\n4310\n2054\n9250\n9080\n4568\n5586\n4469\n2038\n3410\n7900\n4332\n6108\n678\n3319\n9079\n1054\n4048\n4751\n1320\n6890\n7931\n1398\n4349\n5299\n5025\n7932\n5738\n7787\n4590\n4020\n1274\n2488\n8497\n3372\n8965\n3219\n799\n3664\n6500\n7093\n4362\n6205\n4244\n5945\n6434\n2031\n2684\n6632\n4588\n8271\n3232\n5782\n2904\n7200\n3632\n5435\n8203\n3480\n4786\n7579\n3351\n1921\n798\n3646\n3094\n4359\n1654\n5975\n376\n5965\n780\n6738\n3185\n2133\n6248\n5996\n2834\n531\n5688\n2448\n7925\n7974\n5924\n6401\n5778\n6594\n5442\n8336\n4522\n3770\n6340\n6328\n4946\n4161\n2954\n2588\n8465\n2885\n1606\n5787\n3407\n3121\n7310\n1413\n1932\n4787\n2579\n3325\n508\n5610\n6480\n4290\n479\n3792\n6628\n2545\n6972\n2665\n6730\n3547\n6845\n3540\n8993\n1052\n2235\n8356\n3403\n8818\n8260\n572\n4159\n1180\n5348\n7948\n2676\n3539\n4866\n6422\n8365\n3217\n1310\n2059\n9177\n1419\n2283\n8892\n8162\n1212\n6277\n3725\n7806\n6149\n7874\n718\n6888\n7118\n277\n656\n8763\n8289\n4759\n5854\n8659\n3145\n5981\n1881\n5799\n6947\n1609\n6396\n2631\n318\n2550\n6132\n1736\n7816\n4304\n8133\n6698\n7779\n7732\n7
642\n7242\n711\n9262\n8033\n7440\n1913\n5480\n5570\n8594\n8772\n4654\n8974\n6128\n6183\n1071\n8449\n2142\n2298\n524\n1695\n820\n4053\n1856\n8641\n217\n1063\n9286\n3152\n221\n5461\n1270\n2006\n7164\n1199\n6951\n5604\n5400\n5309\n3498\n6407\n6661\n7097\n8165\n5169\n3852\n7070\n5702\n4344\n6648\n6904\n3272\n7119\n5795\n2365\n2659\n353\n5444\n1924\n2098\n2972\n6006\n5865\n8740\n7856\n5841\n598\n836\n1147\n931\n8897\n0\n6049\n1837\n865\n1871\n6116\n6831\n5773\n3587\n303\n1883\n2163\n3070\n1308\n7953\n6909\n853\n7301\n3279\n123\n7186\n3194\n5133\n1931\n4622\n4891\n5722\n5693\n8\n2339\n6596\n71\n379\n4506\n4370\n1238\n2707\n3344\n4254\n8767\n1726\n325\n4148\n5438\n5357\n548\n1332\n6824\n2290\n2335\n2594\n2315\n3389\n3885\n2621\n4116\n7412\n7222\n4894\n8595\n2000\n4978\n4721\n6444\n3796\n9321\n2236\n6409\n1523\n1468\n9249\n8270\n2341\n2874\n174\n4502\n4703\n9034\n9108\n5451\n2619\n9158\n490\n6540\n1466\n2962\n8771\n2712\n4539\n1581\n5638\n9246\n4308\n4363\n4647\n4470\n1636\n1311\n6560\n7519\n8027\n9217\n6364\n3779\n4822\n3563\n5896\n6655\n1524\n2846\n3137\n141\n1887\n6567\n8921\n4671\n6052\n8445\n8699\n7349\n3553\n2117\n7651\n5034\n5383\n649\n3818\n9022\n8414\n1012\n8159\n5081\n8571\n4765\n9135\n4361\n4073\n9142\n727\n2835\n8229\n3989\n4490\n4923\n5477\n1638\n3643\n9044\n2230\n499\n7166\n3172\n8431\n8401\n1470\n6356\n8817\n927\n4212\n2152\n3812\n4949\n1219\n1538\n3029\n6481\n9042\n7775\n7742\n423\n2085\n7715\n4541\n9061\n5916\n7420\n7406\n7046\n7808\n4911\n8804\n6927\n8820\n3264\n300\n2979\n252\n4407\n3383\n4688\n8504\n6723\n26\n3837\n2489\n4137\n8209\n229\n6490\n2364\n9016\n1763\n1728\n338\n8335\n9063\n2791\n641\n5454\n4581\n4548\n2840\n8508\n3463\n7231\n7619\n2560\n1755\n6201\n165\n6279\n5806\n6867\n5890\n2396\n3416\n1981\n6073\n5872\n3045\n4182\n7607\n4414\n2998\n6553\n7139\n5624\n3666\n723\n5110\n6932\n8200\n2222\n8399\n1041\n4138\n1594\n3569\n9253\n393\n7940\n8004\n1475\n5393\n1107\n2597\n878\n9309\n7576\n5250\n3142\n2015\n571\n3921\n1255\n7080\n893\n2160\n1355\n82\n9153\n8583\n4085\n4644\n7196\n9165\n3558\n4550\n6374\n7826\n8602\n4146\n9257\n6083\n874\n8383\n3731\n3374\n3653\n8222\n7344\n470\n1813\n6871\n7245\n6866\n3998\n7433\n276\n1915\n1988\n8168\n2518\n2686\n831\n6143\n5205\n8718\n1703\n7729\n2077\n7983\n8450\n1195\n9232\n507\n7989\n6974\n5828\n8655\n6679\n5245\n7783\n5886\n9098\n6491\n8782\n3525\n6542\n131\n8110\n9186\n9074\n4933\n9035\n2607\n2057\n6273\n2711\n5829\n3382\n2696\n3043\n2048\n619\n2499\n5295\n1162\n7807\n3694\n2194\n3149\n1940\n7934\n840\n3592\n8237\n4731\n1324\n8486\n8726\n8573\n2928\n9078\n2272\n2564\n1370\n5911\n7434\n8026\n407\n7546\n2004\n5849\n7887\n3425\n1118\n926\n3430\n5902\n2282\n2334\n129\n1372\n4842\n6473\n4382\n1028\n415\n8269\n6910\n2796\n3038\n5735\n5080\n2852\n6306\n8842\n9188\n3637\n1066\n532\n5485\n2838\n6753\n9008\n7984\n2816\n8819\n7103\n5977\n5044\n2064\n2599\n3249\n6446\n6638\n852\n1724\n3368\n892\n3250\n8258\n7962\n4300\n1616\n167\n8855\n2090\n4424\n879\n5136\n5350\n2635\n7828\n8506\n63\n3847\n3676\n1705\n6745\n1263\n5020\n1888\n7036\n1033\n3914\n5433\n3905\n4641\n228\n4801\n3766\n8085\n643\n6914\n3013\n5657\n3696\n1590\n8282\n2403\n416\n911\n3849\n4215\n1120\n5490\n296\n2306\n3140\n3742\n4819\n6153\n6414\n760\n3000\n7498\n7108\n6429\n3031\n5314\n751\n3357\n5808\n7505\n98\n7652\n4027\n6257\n1799\n8577\n4969\n9163\n2025\n6061\n4026\n588\n4961\n4940\n7152\n538\n706\n2802\n8983\n3375\n1246\n6593\n5837\n1789\n7939\n4997\n5939\n2411\n6133\n199\n7593\n1702\n5406\n6082\n2912\n6109\n100\n8149\n5470\n2807\n3362\n5621\n6019\n9241\n9268\n7703\n7967\n5458\n5492\n
6729\n4577\n106\n3774\n979\n7082\n4610\n1853\n9003\n9292\n2867\n6262\n2245\n3460\n1557\n4796\n2658\n5769\n6985\n421\n7990\n3289\n1540\n9316\n2251\n6896\n5947\n4965\n4480\n963\n9047\n7824\n3976\n6210\n7018\n7179\n5016\n7789\n6102\n6828\n7659\n9109\n9071\n8115\n7628\n7110\n16\n7513\n835\n939\n2351\n2322\n4945\n560\n6837\n6094\n6475\n7901\n3\n771\n8029\n3135\n8044\n7127\n3741\n5156\n7030\n113\n3747\n7042\n5232\n5225\n3002\n4747\n5379\n4886\n7192\n4184\n1896\n1834\n8689\n3665\n2957\n6913\n8009\n4851\n6420\n828\n8884\n8815\n3198\n8008\n194\n6251\n3303\n3934\n395\n1285\n4169\n1648\n1347\n3600\n4631\n509\n211\n6230\n7241\n2219\n2582\n8353\n7790\n7583\n9004\n6942\n1704\n8051\n2981\n5511\n6182\n7088\n1699\n1222\n6189\n1528\n5197\n6221\n7893\n7773\n8766\n2942\n8021\n614\n1786\n400\n133\n556\n5237\n3727\n1440\n3873\n8448\n6285\n8696\n8800\n4009\n3386\n4847\n5685\n9093\n5895\n6863\n4260\n8405\n8417\n7116\n255\n3223\n4737\n7852\n814\n710\n1094\n6103\n5809\n5882\n6336\n4974\n1499\n2806\n3744\n2664\n2436\n4482\n8665\n8918\n1076\n8676\n5725\n9248\n4755\n1447\n9328\n5500\n78\n2653\n792\n6854\n6093\n6172\n3378\n4492\n5529\n5476\n3846\n1391\n383\n4289\n3883\n2648\n3265\n2525\n5402\n4599\n6870\n6877\n4413\n2464\n8519\n2521\n1839\n5822\n5664\n7257\n5375\n6852\n6764\n5182\n8914\n3015\n8509\n3080\n4562\n8979\n6643\n8601\n6096\n4812\n5246\n7862\n527\n7849\n6737\n12\n2468\n7961\n275\n27\n5932\n3840\n7341\n4996\n8564\n2154\n6138\n7831\n4442\n757\n4464\n1170\n2568\n19\n323\n7675\n3441\n2067\n9027\n2486\n4379\n4744\n1737\n7563\n301\n3907\n4742\n6857\n1221\n9284\n8458\n2897\n1526\n5345\n4423\n6246\n8578\n3711\n4986\n4785\n3997\n7311\n4788\n8387\n2041\n2608\n6031\n3293\n541\n773\n8473\n2501\n5667\n804\n483\n1639\n696\n6060\n5429\n5762\n1527\n7342\n6225\n7895\n381\n8030\n8362\n4734\n3526\n9273\n2039\n5084\n875\n6905\n8968\n5275\n3052\n650\n7509\n232\n2595\n3631\n1810\n4355\n8315\n8908\n1777\n4834\n3164\n2336\n1543\n6212\n8346\n3024\n3719\n1242\n6265\n3133\n6150\n6358\n3316\n4089\n1647\n4629\n7117\n2596\n5366\n6371\n2209\n1428\n1158\n7648\n8765\n802\n153\n4639\n3657\n9320\n3294\n2617\n5052\n6305\n3227\n8784\n5868\n6716\n1671\n178\n2703\n954\n3254\n2262\n5743\n8647\n6393\n7706\n6604\n3728\n6978\n7474\n8754\n2740\n6038\n1491\n8814\n2080\n2358\n5944\n1164\n9259\n4518\n7343\n5748\n3897\n923\n5967\n2677\n3503\n1202\n4966\n6634\n1962\n9096\n9064\n977\n4049\n1464\n658\n536\n3402\n8064\n1309\n259\n8122\n910\n224\n6152\n7142\n6070\n8411\n9214\n9312\n8325\n6192\n626\n6025\n6240\n8708\n4630\n6777\n1075\n8906\n408\n9269\n6236\n9067\n2324\n156\n3136\n7878\n7308\n4335\n2065\n3845\n4453\n3356\n1450\n371\n7219\n5171\n201\n8642\n2099\n477\n1603\n8339\n7430\n3061\n235\n1133\n8474\n8653\n989\n4569\n9092\n8347\n3102\n1743\n9086\n5140\n7438\n1530\n2460\n7646\n5071\n5430\n6944\n610\n2803\n1448\n4696\n6156\n4386\n4248\n4256\n994\n805\n8011\n8276\n8999\n4956\n1712\n2795\n7553\n6436\n2158\n9083\n3184\n5784\n4428\n612\n5288\n6222\n1365\n5074\n6848\n575\n5213\n2175\n4240\n351\n2086\n2656\n5150\n9255\n8189\n7735\n1261\n1344\n4097\n8674\n2984\n4235\n5998\n6488\n537\n1267\n7486\n7124\n6245\n7955\n7337\n5436\n1194\n209\n1710\n7906\n4357\n4139\n5679\n2584\n2854\n1004\n8246\n8586\n5087\n4926\n6637\n3197\n7757\n6502\n1248\n990\n3928\n2770\n2751\n1020\n6426\n6839\n2671\n3871\n9212\n4179\n3394\n10\n5861\n5316\n6869\n2985\n8905\n8559\n4457\n2480\n2313\n4100\n6835\n7799\n7890\n2785\n5468\n7302\n5862\n1803\n3171\n717\n7053\n1655\n4489\n2522\n2921\n8555\n1984\n895\n8949\n1305\n738\n7606\n112\n3042\n1325\n437\n3167\n3340\n511\n3689\n8982\n69\n4421\n550\n8685\n31
47\n8956\n3166\n7023\n2014\n3573\n3880\n4045\n2069\n6051\n702\n6664\n8418\n6181\n4853\n4166\n7022\n7418\n3605\n7172\n5031\n4589\n7858\n6586\n6351\n8334\n7504\n634\n3759\n1890\n890\n6959\n5085\n4919\n2161\n1191\n256\n3610\n7079\n3427\n4071\n7323\n2982\n7263\n7444\n4251\n5846\n4864\n3649\n4311\n8120\n4582\n6373\n2805\n4872\n4869\n5867\n2670\n7099\n30\n8933\n930\n7919\n7261\n5289\n7449\n7772\n3613\n3196\n474\n205\n841\n2611\n6185\n3088\n409\n7239\n5938\n7871\n1343\n6705\n1027\n5596\n2199\n9113\n5471\n6134\n838\n8359\n4061\n1474\n3229\n270\n4245\n1979\n1517\n8652\n4006\n6137\n4693\n2528\n6996\n2926\n5798\n2477\n2549\n3341\n6014\n4479\n2861\n4208\n5175\n5174\n5118\n3736\n5463\n1588\n2327\n8380\n7982\n1058\n4586\n6608\n7985\n1822\n3628\n549\n1811\n2601\n4608\n2540\n6659\n3859\n307\n3767\n8167\n505\n4366\n5520\n461\n1933\n2401\n8106\n2055\n7844\n8544\n4797\n7419\n6686\n7670\n6039\n5672\n5141\n6543\n206\n5252\n4718\n888\n1601\n3218\n5114\n713\n4022\n4419\n6708\n397\n425\n6612\n5057\n1729\n4729\n4080\n1034\n534\n5598\n9218\n2424\n329\n4154\n1597\n109\n8823\n9038\n8437\n3307\n128\n8032\n1412\n7333\n8762\n8851\n8865\n468\n3808\n3064\n8798\n7052\n7767\n1086\n2162\n6566\n2109\n3439\n6122\n3642\n7696\n8610\n5279\n1808\n8687\n817\n6066\n3640\n6015\n7601\n4855\n6017\n87\n7071\n7268\n3614\n6084\n6117\n6924\n9102\n2829\n375\n8724\n2095\n22\n1541\n2970\n633\n139\n451\n4521\n179\n1396\n3876\n5824\n8020\n426\n4982\n4172\n190\n4859\n1455\n3110\n3323\n9104\n858\n6719\n6428\n4495\n8551\n2141\n3984\n3066\n67\n4299\n5821\n8444\n6581\n6097\n7090\n7781\n8944\n3085\n2114\n5355\n8901\n1461\n3301\n422\n7000\n4820\n5790\n1379\n7536\n8736\n8991\n5241\n1698\n1294\n1753\n196\n2987\n8680\n4144\n8639\n6441\n8255\n8156\n3677\n6385\n6520\n3760\n6001\n1144\n5478\n7394\n8057\n5018\n4232\n5235\n6844\n3111\n8802\n949\n7843\n573\n2278\n6801\n7629\n2714\n5105\n6946\n2697\n5315\n1571\n8677\n2537\n4374\n3833\n7820\n3750\n2033\n6526\n3884\n8706\n7195\n3603\n3001\n6284\n5873\n5718\n8576\n8457\n3589\n5839\n459\n6342\n8729\n6933\n607\n6053\n8228\n3773\n1805\n6365\n5142\n6069\n1389\n9026\n570\n4614\n5533\n2821\n1897\n819\n4060\n5905\n6842\n5446\n1277\n4303\n2836\n934\n1014\n7822\n7494\n665\n5881\n3328\n4664\n315\n1315\n1462\n8616\n7725\n5749\n1730\n8184\n4567\n5065\n8867\n1304\n3669\n9192\n410\n8177\n6710\n1210\n2329\n3911\n1899\n7686\n3315\n6180\n3116\n5341\n4394\n8337\n9182\n5715\n2172\n2782\n3715\n9195\n7960\n4890\n8294\n2337\n8014\n3353\n7475\n2193\n8831\n4200\n4653\n6196\n6957\n3063\n8959\n8973\n6529\n3457\n5274\n8002\n6823\n6154\n5561\n1780\n9318\n7657\n1758\n6503\n7678\n3274\n1625\n4327\n3236\n8575\n4707\n4331\n1494\n8756\n3174\n1074\n8116\n8295\n3048\n3752\n6050\n8003\n9175\n4674\n1642\n2556\n6166\n7165\n8441\n3990\n1640\n1778\n7500\n8304\n1395\n4315\n5949\n3364\n242\n5763\n1036\n2430\n8131\n411\n6267\n2045\n6606\n899\n8065\n5779\n5616\n2107\n5408\n2980\n6310\n5776\n4328\n821\n3251\n2354\n7076\n5313\n79\n3959\n5677\n7545\n160\n6790\n6859\n3659\n6770\n1106\n8846\n956\n7472\n2050\n8099\n4795\n8053\n9293\n7037\n1646\n9307\n5322\n5332\n2708\n8977\n917\n2419\n184\n2105\n1578\n3923\n5780\n1903\n2512\n429\n493\n4972\n445\n8286\n320\n8300\n617\n3413\n4459\n525\n5631\n6314\n5157\n5300\n8545\n182\n1031\n4429\n2495\n1534\n3099\n3916\n3738\n535\n2119\n177\n1838\n2159\n4099\n8285\n5172\n8540\n6020\n7683\n3073\n3115\n3087\n2416\n1894\n5942\n3597\n5834\n2007\n43\n1779\n4174\n2023\n2546\n2429\n9006\n436\n4214\n3693\n5426\n6767\n5903\n4368\n2170\n5051\n7490\n2859\n5035\n7835\n5372\n7122\n925\n3253\n6338\n8393\n4093\n5848\n7588\n2683\n8049\n5403\n5894
\n8745\n8550\n2941\n3484\n9029\n4461\n8022\n725\n3030\n1975\n5623\n2415\n1957\n6141\n9278\n3226\n3062\n5670\n7326\n8759\n8496\n6619\n8187\n8262\n6199\n951\n668\n2388\n4698\n8240\n2851\n871\n4988\n9084\n9089\n3162\n1167\n8244\n5227\n6461\n2831\n776\n5010\n5770\n5282\n3574\n5102\n1278\n2281\n5455\n4628\n4663\n9119\n7487\n8746\n4889\n1175\n102\n2386\n8940\n5566\n53\n8833\n1918\n321\n6786\n6861\n4358\n2771\n7467\n975\n4777\n605\n3543\n2600\n7584\n9299\n4530\n7328\n183\n4761\n7543\n304\n1196\n4623\n5519\n1953\n533\n5989\n7590\n7428\n6346\n6162\n1946\n6260\n4405\n5676\n8924\n7171\n8409\n1866\n6379\n3411\n2387\n3051\n7398\n154\n1185\n6442\n6004\n1611\n2165\n9018\n8323\n616\n3995\n8952\n1533\n7853\n789\n4991\n3675\n7456\n5752\n175\n7556\n4195\n907\n2248\n8467\n1017\n7968\n3304\n1666\n4942\n3867\n4802\n6357\n4621\n887\n6213\n5261\n1336\n521\n8928\n7864\n4792\n6742\n157\n1593\n823\n7235\n5303\n5633\n1100\n8047\n5993\n1460\n6714\n1630\n6440\n6307\n3608\n292\n5974\n8301\n8342\n2720\n4583\n2757\n7315\n833\n4466\n4236\n1282\n5273\n2149\n2380\n8119\n7167\n5076\n3596\n2650\n8980\n3421\n1356\n1954\n7823\n1172\n2226\n1941\n6136\n7274\n2256\n4928\n324\n4410\n4579\n1061\n7113\n486\n862\n6956\n2873\n1465\n6113\n8225\n8512\n6806\n272\n6008\n1241\n88\n5662\n3555\n689\n8733\n2812\n7453\n6282\n420\n2471\n4477\n7495\n1445\n594\n6939\n1564\n8704\n8590\n7992\n7374\n5796\n9298\n4213\n5713\n5864\n326\n5513\n402\n464\n608\n1951\n8640\n3347\n3459\n4162\n2690\n7478\n5856\n5240\n3022\n602\n5547\n1798\n1345\n9276\n599\n3673\n3277\n1635\n8625\n1567\n5928\n636\n5671\n2896\n3477\n412\n7575\n4201\n685\n4760\n1229\n4275\n8960\n3123\n4471\n5941\n3355\n3999\n7157\n6354\n6850\n8783\n1943\n6769\n7330\n8721\n8477\n1381\n848\n778\n6408\n2644\n5817\n1441\n1723\n2144\n2776\n2368\n367\n8839\n8749\n5353\n3148\n9114\n1233\n9228\n8857\n2895\n1286\n200\n6755\n5125\n5857\n1657\n7658\n5000\n942\n7020\n586\n784\n7078\n6194\n8658\n8957\n9325\n1851\n8911\n7004\n1186\n8824\n2999\n561\n7639\n4316\n5086\n3187\n7912\n2624\n9183\n8487\n5089\n8475\n7554\n4031\n6297\n6059\n5329\n115\n2058\n7650\n7121\n2485\n7805\n2241\n7713\n4352\n2409\n1026\n2745\n4549\n5124\n5201\n6556\n6617\n9091\n3945\n8402\n5648\n5257\n4901\n7750\n6131\n6027\n6352\n4625\n1254\n5498\n3720\n8261\n3939\n5576\n3685\n6713\n8472\n991\n8354\n5655\n5997\n1029\n7506\n2575\n2990\n4898\n7402\n3290\n5388\n6715\n8235\n5361\n4970\n1363\n3338\n9014\n5358\n635\n1193\n3705\n6334\n7666\n5270\n6368\n8604\n3564\n1937\n2481\n1341\n721\n2100\n3958\n6551\n3813\n2592\n7980\n2357\n8761\n8910\n8693\n1204\n489\n4827\n8024\n7832\n3895\n9068\n8067\n1708\n1111\n8963\n1902\n9251\n5719\n9143\n5537\n9169\n5365\n1840\n485\n4456\n1169\n3271\n6886\n9140\n7173\n6003\n1659\n1807\n8371\n2439\n274\n3448\n6623\n347\n2103\n3400\n2106\n9073\n8169\n3687\n3305\n4416\n8454\n6635\n332\n2433\n1944\n6509\n7770\n1880\n6610\n9331\n302\n418\n4219\n1333\n2350\n8424\n4883\n6580\n6722\n1669\n8470\n2571\n513\n3810\n7049\n6332\n7363\n3532\n8456\n2097\n297\n8841\n7180\n714\n1587\n5234\n7372\n660\n8503\n1668\n8847\n1101\n7275\n3336\n6460\n722\n7782\n3947\n502\n4258\n2132\n1835\n181\n3841\n427\n3446\n2551\n8324\n6963\n4284\n7297\n7577\n3399\n9148\n8213\n5656\n851\n657\n2446\n6992\n976\n1108\n2681\n3237\n8582\n377\n5969\n5287\n9209\n8523\n7178\n7833\n6175\n2126\n3023\n5090\n7491\n6640\n6077\n2221\n2780\n1694\n4094\n144\n3203\n7123\n749\n3625\n3848\n980\n2270\n7819\n3672\n7689\n7203\n2718\n1714\n3802\n3851\n4224\n7237\n7998\n7207\n4106\n9036\n1046\n5070\n4592\n6056\n693\n1328\n3309\n2629\n2736\n202\n388\n7886\n4417\n8786\n8822\n4035\n5505\n119
2\n4388\n8941\n5019\n7538\n6732\n6389\n5923\n1405\n3278\n3917\n1688\n8374\n443\n4037\n9099\n5190\n4177\n9310\n7747\n4348\n7197\n4844\n4998\n5609\n4345\n29\n3332\n8648\n4107\n346\n2577\n3941\n1215\n8252\n4706\n2675\n3790\n7459\n6164\n1149\n6687\n582\n3139\n3882\n4034\n1861\n4701\n8757\n8801\n1823\n4528\n4789\n143\n4746\n9234\n3866\n9245\n1911\n1366\n4393\n2061\n1959\n6967\n3138\n7382\n6237\n845\n80\n6911\n7163\n5229\n4736\n8738\n33\n8543\n357\n3193\n7262\n4448\n6793\n3321\n7569\n6411\n7692\n7340\n1417\n5847\n3836\n2678\n1188\n8727\n8615\n7417\n5771\n3170\n8061\n2935\n8263\n8257\n6883\n1276\n1239\n812\n6258\n3922\n8117\n3039\n603\n8554\n7573\n2787\n3445\n5115\n3478\n962\n3961\n6570\n7722\n216\n2797\n5154\n2530\n4904\n2405\n7542\n4021\n3252\n5370\n9302\n236\n4532\n1361\n3373\n1716\n2183\n1583\n3783\n868\n1687\n8925\n6198\n8208\n6367\n7603\n882\n3469\n1645\n7654\n1176\n4231\n150\n7997\n5456\n7031\n4375\n8840\n5634\n6945\n705\n4774\n3822\n7148\n1922\n8459\n6249\n8713\n6197\n8599\n6071\n6756\n1634\n950\n5640\n7749\n5920\n6622\n4783\n7837\n7479\n7229\n3919\n1797\n5272\n8945\n4908\n5439\n6903\n5833\n6930\n8197\n9261\n1711\n5483\n4285\n8852\n7409\n8971\n7534\n7792\n2444\n7496\n8063\n1665\n248\n3894\n4585\n66\n4850\n1240\n7511\n7524\n9258\n2075\n3979\n4714\n7592\n965\n2919\n1842\n8013\n4750\n2344\n6155\n3468\n31\n2087\n1599\n1573\n5883\n7613\n195\n3749\n644\n2189\n8779\n8743\n9005\n8081\n1040\n7785\n5820\n8830\n5495\n4867\n2710\n491\n7153\n6217\n4741\n1761\n5484\n5474\n6916\n7252\n1739\n8930\n6647\n5198\n4903\n8488\n7366\n2774\n2726\n2385\n7625\n3179\n8845\n6600\n399\n6810\n3447\n6684\n4915\n8368\n1867\n2325\n2101\n1335\n7734\n7437\n7025\n4000\n6897\n1408\n7154\n5013\n2204\n9233\n3817\n1877\n9161\n2197\n3390\n280\n1892\n1612\n7753\n2801\n7246\n7909\n6229\n9314\n8407\n1436\n3879\n6432\n5326\n5327\n8535\n7910\n7745\n5545\n7916\n207\n1783\n6158\n8517\n7361\n8070\n6430\n119\n6146\n4183\n1083\n7385\n4497\n9133\n1686\n3765\n595\n8046\n4418\n4043\n2361\n7915\n9149\n1717\n1141\n6375\n1018\n5602\n1262\n7485\n9178\n6629\n3339\n8934\n4648\n7988\n6252\n3440\n864\n5418\n3874\n7280\n6191\n8388\n4323\n6792\n2232\n7228\n8684\n7813\n6187\n6678\n3177\n3534\n4953\n4402\n7739\n6319\n2414\n8700\n5946\n8238\n6917\n4167\n4618\n2268\n3081\n1247\n4001\n8580\n7636\n3101\n2195\n1559\n3714\n7188\n6028\n7530\n2828\n1977\n3238\n2340\n110\n3247\n7532\n7541\n924\n1632\n4487\n6447\n4944\n6347\n2285\n8087\n5452\n91\n1166\n162\n5185\n7933\n4743\n1627\n7259\n8620\n8207\n5845\n9011\n5525\n4269\n4700\n1824\n8186\n8872\n8299\n3957\n8242\n4558\n6439\n2666\n6958\n8112\n5121\n8806\n6170\n7688\n3486\n2082\n7436\n2778\n1096\n786\n2206\n5170\n1443\n6030\n3312\n9151\n8485\n6404\n8498\n2883\n8961\n2280\n8341\n2809\n2445\n809\n8298\n8643\n8316\n6853\n1572\n3215\n3938\n2249\n6515\n1337\n8328\n7712\n1429\n4117\n5441\n3230\n4152\n7225\n3513\n6953\n1507\n348\n3639\n5739\n2673\n1550\n6301\n1652\n8453\n204\n6833\n2200\n5217\n1854\n4711\n7368\n4572\n4032\n7531\n1013\n3634\n2875\n6058\n8307\n7609\n1766\n904\n667\n5410\n6578\n3601\n1664\n3233\n7390\n8178\n4486\n4427\n4876\n9166\n2772\n6295\n5001\n5296\n3371\n6518\n6327\n854\n8288\n1912\n5927\n6202\n5814\n9032\n1059\n3214\n6547\n7038\n5781\n4390\n6114\n1622\n4318\n5803\n5984\n736\n3561\n6554\n5045\n4277\n7386\n9081\n8462\n2034\n4955\n2701\n932\n7758\n7176\n9205\n3077\n3803\n3562\n8054\n7946\n295\n1843\n7728\n1629\n7768\n2971\n431\n9285\n2513\n1116\n3656\n4529\n5758\n6339\n8398\n816\n4153\n2536\n1826\n7870\n8113\n7730\n7101\n6555\n9256\n6774\n1072\n4578\n2598\n3604\n5880\n861\n3350\n3117\n4685\n4334\n5165\n7
224\n4066\n4253\n4447\n3815\n5038\n253\n3658\n330\n3967\n6443\n2143\n7336\n6135\n2734\n8390\n4655\n7800\n1399\n1173\n5618\n2822\n4431\n2443\n1568\n3909\n1974\n2496\n4772\n5164\n2138\n2864\n3799\n3924\n4882\n8245\n1585\n5528\n5692\n5730\n5832\n137\n3175\n2894\n2062\n2752\n4028\n2113\n5411\n2647\n730\n3758\n1667\n9303\n6653\n3698\n3968\n3053\n503\n2150\n4645\n2257\n4627\n8303\n7966\n8742\n4692\n5901\n8547\n2277\n5546\n986\n370\n4697\n8712\n4804\n1182\n6650\n7290\n3487\n2814\n5668\n7567\n5333\n4164\n3084\n8896\n3888\n6537\n17\n6882\n3531\n704\n1037\n8866\n5263\n6758\n3762\n1393\n3824\n5112\n214\n1439\n5700\n8932\n1306\n5011\n6928\n5173\n4098\n1132\n7352\n4778\n7723\n1368\n2390\n670\n2685\n5855\n1772\n6380\n3853\n940\n5424\n6091\n1748\n5297\n6572\n8877\n6874\n430\n5041\n5267\n7448\n620\n9112\n4294\n1432\n72\n130\n7920\n4597\n6614\n8889\n3697\n1895\n3462\n2616\n4791\n7846\n8372\n428\n6559\n8326\n9211\n1525\n5980\n7888\n3331\n8118\n7899\n615\n7377\n791\n5930\n6627\n8322\n1138\n770\n8460\n5100\n8274\n8350\n6316\n2893\n7594\n9236\n5082\n8150\n1986\n1909\n8902\n2145\n3617\n3501\n7\n2426\n5056\n8016\n2702\n5360\n8135\n8385\n8378\n8018\n8574\n720\n8893\n3021\n1978\n4782\n1816\n2083\n4051\n1446\n5870\n9097\n8006\n4222\n8287\n686\n1377\n611\n8153\n4808\n1536\n679\n4096\n3891\n4884\n432\n4615\n8988\n5560\n3451\n5589\n3514\n6169\n1414\n3244\n1490\n7100\n3588\n690\n7317\n4171\n2266\n6800\n2793\n5151\n6977\n8188\n8752\n5815\n5116\n263\n3311\n289\n3392\n5755\n1022\n5548\n9319\n8937\n6011\n7632\n5328\n4141\n5407\n520\n7305\n526\n3645\n1859\n2520\n3523\n8629\n7304\n8881\n3076\n4005\n8329\n2205\n2214\n6925\n8691\n4136\n8883\n974\n7952\n3965\n5887\n7964\n7189\n2406\n2783\n8086\n405\n6568\n5147\n2021\n4727\n7674\n1600\n5078\n2949\n6624\n6541\n8986\n5740\n8500\n3591\n4434\n398\n983\n7544\n1478\n4570\n6012\n465\n9330\n7206\n808\n8737\n2356\n4959\n8812\n3599\n1420\n1721\n5897\n8422\n2\n4023\n2739\n3619\n8797\n5496\n8951\n8181\n6893\n9254\n1809\n5682\n4309\n6929\n2742\n5988\n3363\n4493\n8434\n4210\n1503\n1876\n5094\n4600\n4936\n4798\n3933\n5216\n646\n3098\n8773\n4076\n5335\n3746\n3327\n47\n4602\n8636\n4129\n363\n6417\n7416\n9025\n4377\n4766\n2779\n4151\n9046\n7860\n3154\n3476\n7620\n2052\n1752\n7199\n4412\n8882\n2463\n339\n56\n4821\n7555\n6558\n1905\n5258\n4205\n3580\n6735\n1023\n4511\n3850\n161\n7395\n2532\n3349\n7055\n7387\n758\n1907\n3006\n659\n815\n1961\n6902\n7668\n4708\n1904\n4433\n5159\n6816\n8664\n6918\n1016\n6513\n7314\n7480\n9313\n716\n3395\n6843\n918\n4329\n8593\n3404\n5212\n837\n480\n8524\n1342\n7414\n288\n8863\n3352\n1628\n135\n3314\n2181\n8650\n5915\n8078\n6812\n1375\n906\n5635\n7126\n1387\n7458\n6119\n5591\n3795\n1531\n95\n1960\n7522\n898\n4921\n2623\n6268\n7063\n1326\n9075\n2505\n7400\n1284\n2951\n747\n6466\n1357\n6493\n7320\n5892\n576\n5107\n5559\n97\n2583\n6361\n8843\n3509\n7892\n6086\n1476\n4612\n4267\n9094\n7050\n6048\n8382\n2227\n284\n2898\n3221\n2353\n2157\n5990\n5810\n3581\n7279\n6188\n7859\n3549\n5539\n2022\n630\n2500\n5111\n6561\n5127\n5569\n6123\n1338\n8605\n3491\n4187\n8220\n7334\n9213\n3067\n6997\n2853\n4735\n4372\n5954\n6662\n2207\n973\n3361\n960\n6350\n7431\n8076\n1129\n750\n7194\n2300\n6590\n5893\n6889\n3125\n8788\n7286\n3472\n8164\n7693\n1469\n5563\n4773\n3210\n6324\n3113\n9070\n3638\n7551\n2541\n3506\n5138\n4069\n7198\n7560\n3306\n6100\n2932\n1741\n14\n4672\n7564\n8748\n8874\n3804\n3678\n2610\n1358\n42\n5176\n9326\n8464\n1038\n2993\n3017\n9072\n32\n4809\n4364\n2808\n4125\n152\n7299\n5431\n6178\n793\n9120\n8410\n4963\n772\n6954\n3014\n6881\n286\n553\n1948\n6398\n6255\n3057\n8646\n6176\
n2700\n5663\n6683\n1281\n6013\n8799\n7635\n9289\n1885\n442\n2225\n6294\n5054\n2674\n7884\n8730\n8216\n4203\n1488\n7111\n3623\n7950\n1971\n3248\n2900\n1553\n472\n3865\n7796\n6937\n4591\n8098\n5208\n294\n5627\n5691\n5687\n7149\n4879\n3624\n7005\n2773\n3112\n9185\n1633\n7830\n5101\n8707\n8469\n4678\n4860\n700\n5527\n9194\n2794\n5068\n1177\n4282\n6492\n5859\n5029\n5123\n522\n5048\n7230\n2104\n6642\n6731\n2717\n5149\n2043\n9059\n5277\n844\n5515\n6706\n3651\n9105\n7671\n2880\n3607\n6410\n2508\n8463\n2394\n1916\n1125\n5343\n3322\n5307\n4547\n1589\n8478\n8899\n2955\n8028\n4058\n2781\n8715\n1272\n4474\n4863\n4367\n49\n8844\n5605\n8671\n6743\n4281\n1874\n2626\n2516\n258\n5249\n6186\n7958\n5432\n3801\n6288\n4732\n9121\n7558\n6819\n7508\n584\n215\n5036\n4261\n8978\n5228\n647\n4657\n2591\n5931\n5088\n9204\n929\n4381\n5421\n2965\n5050\n6495\n5033\n4799\n959\n1232\n5811\n317\n7705\n3842\n2178\n7187\n1373\n7112\n2694\n8627\n8493\n3991\n7441\n6308\n6462\n3406\n7673\n8660\n2902\n752\n1025\n849\n7682\n6982\n6652\n3612\n298\n5148\n4873\n3414\n1693\n1458\n327\n2016\n5002\n6768\n7016\n5583\n3270\n8232\n7158\n7981\n4676\n4675\n2164\n8360\n6709\n8143\n365\n4062\n4527\n7928\n9009\n6228\n5818\n2533\n9305\n8887\n55\n2507\n8870\n6649\n5158\n76\n5595\n6693\n5306\n8666\n3020\n7527\n3082\n6304\n1591\n6145\n6868\n7205\n9107\n1165\n6773\n172\n1993\n4176\n8400\n4611\n7589\n5386\n6095\n6335\n1561\n5963\n7393\n3681\n2037\n4968\n7451\n3360\n7466\n8361\n4455\n4064\n5422\n1689\n3977\n7269\n362\n4178\n4145\n6127\n5162\n2399\n9225\n7068\n794\n1348\n7736\n444\n6081\n5298\n2026\n2543\n9087\n7425\n3730\n8468\n2641\n7529\n1720\n6377\n5851\n7956\n3150\n3785\n6485\n3611\n2869\n8510\n4775\n4463\n1251\n9124\n6873\n3391\n4118\n7051\n3213\n3668\n5347\n8452\n6289\n5840\n478\n3522\n453\n3376\n6190\n3342\n2237\n2870\n5178\n5567\n5952\n6919\n3005\n134\n3397\n8539\n6822\n5264\n3288\n5962\n8421\n6744\n8608\n4656\n1802\n4271\n1043\n8211\n2196\n5260\n3789\n7211\n7571\n7834\n5680\n2047\n5502\n3369\n3437\n3286\n5517\n3912\n1442\n6961\n2191\n2417\n9088\n5155\n6813\n4520\n7375\n1224\n811\n1891\n3748\n4123\n2789\n5305\n8419\n7248\n9237\n992\n4038\n4499\n2060\n850\n2669\n7612\n9290\n2526\n1287\n4160\n4633\n7125\n742\n4534\n2407\n4555\n8764\n4722\n7721\n3205\n6657\n1214\n3754\n6080\n4593\n3018\n8792\n2294\n4450\n7701\n127\n7069\n6243\n8025\n4010\n8632\n4715\n5284\n4574\n726\n4252\n4561\n7354\n299\n6088\n1090\n5012\n5684\n3489\n4888\n1584\n1969\n4846\n2915\n6804\n2775\n7306\n9306\n5231\n7740\n4283\n953\n6725\n8290\n1504\n1539\n8885\n138\n3764\n1256\n257\n335\n7060\n5986\n9323\n4740\n8994\n4140\n6807\n8254\n3963\n9297\n2102\n9207\n4910\n8709\n4411\n1672\n457\n8037\n4932\n3679\n2362\n8592\n495\n1608\n2155\n7411\n2881\n9244\n37\n6535\n8219\n4505\n8635\n1928\n8384\n2570\n8996\n7610\n2128\n8728\n6656\n6681\n2070\n176\n9062\n514\n1796\n4039\n6838\n2462\n230\n569\n5521\n4637\n4939\n4420\n672\n3807\n447\n1656\n3297\n8858\n2118\n6309\n1926\n481\n1509\n1228\n1787\n5978\n8678\n3951\n2929\n4980\n5039\n4713\n7002\n151\n5536\n8148\n3823\n2299\n142\n7067\n2372\n3761\n9\n2265\n5747\n2764\n724\n2913\n3151\n4525\n6370\n4247\n5494\n629\n3621\n7371\n1999\n6704\n3734\n2698\n4691\n6938\n8415\n6353\n6750\n9077\n2679\n2478\n7321\n6611\n4007\n5772\n6416\n2264\n8348\n2672\n6546\n754\n6934\n8546\n4404\n592\n4748\n6625\n7944\n2377\n6\n8929\n8275\n4524\n3660\n8710\n419\n6878\n8313\n7460\n8753\n2917\n6891\n6663\n4918\n7129\n396\n7256\n3500\n631\n5585\n8343\n2695\n6168\n6292\n3176\n5092\n5160\n3701\n9021\n7221\n1216\n1438\n3471\n2318\n8923\n6223\n2182\n7621\n8514\n9010\n8987\n1252\n1972
\n1872\n1715\n8205\n6463\n8138\n8989\n5661\n2890\n565\n2427\n8946\n1303\n3718\n6000\n3620\n5276\n9260\n1467\n6173\n7641\n7520\n5061\n4677\n5757\n4400\n2620\n2719\n8995\n2079\n1683\n8141\n7754\n5744\n2952\n7568\n7457\n5368\n1510\n1513\n3072\n1456\n9164\n3163\n3035\n6111\n5042\n7161\n1401\n1084\n8000\n8531\n5404\n6550\n8379\n9141\n8681\n7752\n6394\n7011\n3739\n8253\n978\n4771\n6024\n4828\n7959\n1649\n1727\n7073\n8349\n6952\n661\n7283\n3159\n2590\n3496\n8741\n3969\n2956\n4565\n920\n1830\n8558\n1930\n6677\n6825\n8256\n7454\n4710\n1768\n3753\n5292\n1397\n2733\n946\n6711\n3242\n4929\n5006\n3202\n2295\n2746\n1293\n2124\n5405\n4065\n818\n7464\n1820\n1312\n6994\n6920\n261\n987\n6120\n3109\n2986\n4338\n7774\n5122\n1364\n8969\n6712\n8161\n7595\n5940\n1566\n6419\n4432\n6047\n4749\n6076\n1161\n8217\n674\n8494\n3688\n2447\n4704\n969\n7477\n1160\n3243\n4979\n9288\n6860\n1662\n6171\n225\n5143\n313\n8327\n3385\n7626\n3103\n4401\n6794\n5600\n5043\n7664\n6830\n4452\n3980\n5875\n4635\n5756\n3329\n1751\n8108\n4817\n1989\n1237\n1893\n2848\n8875\n4981\n5417\n4134\n877\n6688\n3545\n4943\n5615\n2476\n1684\n7396\n1171\n3415\n3644\n340\n6630\n8284\n3256\n7240\n5371\n3405\n2108\n6360\n1734\n5612\n8638\n2343\n1103\n6809\n3055\n188\n8031\n3124\n3683\n4537\n988\n2297\n4893\n839\n4467\n5195\n4041\n6457\n4441\n6472\n4912\n6884\n5922\n7014\n1660\n1595\n6752\n4554\n1292\n2709\n3800\n1980\n8775\n6392\n6263\n7214\n5219\n282\n309\n6685\n6311\n4092\n18\n7570\n5543\n4081\n2515\n6278\n8690\n5294\n6184\n5215\n9130\n6720\n250\n7250\n639\n3567\n7841\n2636\n4067\n8446\n5703\n8609\n2586\n7695\n1253\n6701\n7930\n6317\n5921\n7719\n8501\n7312\n4110\n6219\n4552\n5059\n4088\n7975\n9132\n6054\n692\n3412\n4079\n6950\n5281\n8321\n3877\n7614\n4188\n2223\n239\n4745\n6875\n7096\n5571\n4403\n2640\n1845\n6690\n1825\n4157\n314\n4682\n8825\n8093\n7215\n6465\n99\n8077\n4206\n366\n1208\n6043\n4640\n5475\n4985\n1351\n3090\n5625\n7307\n8466\n2003\n8854\n218\n1500\n2293\n1847\n5032\n2147\n866\n3710\n2552\n1749\n6692\n3926\n4112\n6458\n735\n9171\n60\n9304\n6726\n2630\n2882\n1178\n1151\n4922\n4662\n173\n7233\n1776\n4113\n2423\n2425\n4343\n970\n6372\n1009\n6607\n3068\n8435\n6423\n3126\n4813\n1709\n1201\n7104\n5620\n3932\n3366\n5023\n5079\n627\n290\n779\n5572\n5233\n1392\n4975\n8534\n8210\n2269\n2475\n2562\n905\n4546\n267\n3536\n8538\n449\n101\n7367\n2722\n4605\n7356\n6781\n8537\n8697\n6820\n8340\n8926\n2349\n2259\n6545\n8100\n8395\n2258\n2911\n3946\n1406\n8683\n8296\n5579\n2177\n8264\n1425\n957\n3647\n515\n5342\n8363\n2449\n1001\n2937\n3452\n5574\n4319\n9184\n8381\n945\n6876\n600\n5714\n4871\n8532\n8856\n392\n2018\n369\n5711\n9230\n5304\n7266\n1681\n7829\n2309\n4683\n8938\n2255\n6159\n3207\n4651\n2029\n4341\n5106\n5794\n9024\n4712\n2434\n7151\n7359\n6431\n1290\n5918\n8705\n5554\n8876\n7415\n6290\n5373\n3805\n2950\n2331\n6772\n8997\n6576\n2307\n8515\n4033\n3428\n6487\n6595\n45\n5792\n333\n2383\n3388\n666\n460\n943\n364\n8223\n8221\n637\n6218\n4108\n5381\n4649\n5096\n1614\n8768\n5095\n3809\n5030\n984\n3538\n5120\n2498\n5222\n5613\n5486\n241\n5707\n9227\n4109\n7771\n728\n3671\n9327\n1230\n9270\n1070\n8565\n4769\n7056\n5654\n1793\n5956\n7883\n1362\n5479\n8769\n8821\n8320\n1901\n1994\n2461\n5552\n389\n2839\n6467\n2762\n4763\n3499\n1487\n7599\n4488\n3241\n8272\n1131\n4496\n7006\n7265\n4897\n2747\n6618\n5291\n4563\n1939\n6369\n8548\n5526\n9030\n5349\n8433\n1477\n4265\n9200\n3878\n462\n6846\n4806\n3519\n6798\n5464\n5179\n546\n6044\n8114\n7216\n6276\n1495\n494\n8146\n5434\n856\n8403\n8071\n5544\n3337\n1546\n2824\n1718\n6009\n2042\n251\n3330\n192\n3797\n394\n7814\n769
9\n4659\n4689\n4156\n7903\n9054\n7332\n7811\n1119\n5531\n6782\n5210\n8412\n2633\n7924\n4624\n8314\n5666\n3240\n2310\n4262\n8160\n4553\n8196\n2661\n7213\n7455\n7399\n870\n1227\n1226\n781\n937\n6343\n2578\n2892\n2792\n5696\n6865\n6455\n8312\n5193\n6026\n5251\n3787\n4460\n4687\n7923\n1140\n9106\n796\n2482\n9170\n8695\n2749\n6734\n4825\n114\n827\n390\n7611\n7484\n1249\n7727\n955\n579\n3629\n8915\n2958\n885\n7227\n1424\n4810\n4604\n1535\n774\n7518\n5428\n8233\n2645\n2167\n6484\n3855\n1502\n4861\n2333\n2973\n4829\n1906\n3966\n476\n9023\n6960\n3483\n2748\n5891\n8174\n7702\n8948\n5324\n4396\n1605\n2823\n7348\n7347\n5933\n310\n9082\n916\n203\n4239\n5976\n6200\n6435\n4425\n787\n1121\n6034\n39\n3104\n5961\n5507\n5785\n1463\n7339\n1575\n7801\n5445\n8283\n5951\n6995\n999\n5163\n6023\n6536\n5850\n3524\n3528\n4508\n6674\n2939\n8227\n4598\n7550\n8495\n8622\n1152\n4538\n1318\n739\n8202\n1552\n5236\n3576\n4699\n9238\n1879\n433\n5587\n1678\n8552\n6445\n7971\n6880\n7476\n7282\n7271\n6489\n8091\n9287\n7351\n1765\n5286\n6921\n542\n1762\n8553\n4987\n894\n3622\n7855\n92\n3131\n4811\n6517\n4510\n733\n4954\n1360\n5669\n2842\n8107\n5646\n5968\n1827\n7709\n8521\n5807\n5321\n9239\n5501\n3745\n4437\n1586\n5265\n7917\n1607\n6074\n7061\n1580\n8694\n8461\n4573\n618\n9173\n5243\n435\n8770\n2421\n7450\n3870\n8308\n2605\n2934\n9240\n6887\n4512\n1198\n7585\n7691\n7738\n2843\n8423\n6971\n7854\n86\n9128\n4298\n622\n6579\n2203\n7716\n1265\n1174\n7380\n623\n8936\n4306\n8082\n4312\n8661\n5753\n7243\n2768\n8155\n85\n4143\n3047\n8479\n7809\n2833\n5555\n7578\n1637\n1936\n8130\n5549\n8062\n7143\n5522\n8966\n5614\n8105\n8719\n7655\n7502\n8268\n5760\n6695\n5565\n7615\n9226\n4870\n4507\n3160\n4835\n1598\n4422\n5248\n7867\n1078\n5015\n6660\n1676\n6391\n5351\n7184\n6280\n5936\n6124\n1327\n2906\n269\n8292\n8809\n5167\n8142\n8204\n2713\n1910\n2930\n2494\n5592\n7384\n7726\n5727\n1735\n5710\n5518\n2491\n1410\n4989\n5183\n8777\n6562\n4947\n3692\n384\n1097\n5209\n3723\n7272\n6895\n2459\n543\n8621\n5394\n6211\n2074\n1511\n2524\n7776\n5055\n7191\n6207\n7922\n281\n8436\n2918\n3141\n4800\n6323\n7631\n8903\n3735\n5301\n3975\n2800\n7963\n105\n1920\n7391\n4909\n1754\n4816\n5145\n5139\n5268\n9317\n8631\n4346\n7318\n136\n3993\n1220\n2151\n308\n7483\n3071\n1339\n3777\n8191\n5378\n7087\n1056\n7465\n5608\n6564\n2754\n2687\n1596\n5376\n1512\n566\n6382\n1757\n8035\n2296\n4264\n1053\n4716\n8518\n254\n6253\n7132\n8557\n3490\n9267\n5473\n2412\n7539\n7136\n6670\n891\n1323\n1217\n2879\n9118\n1259\n2317\n7033\n2467\n6665\n6244\n2180\n2140\n7098\n4150\n547\n4307\n1725\n2737\n8549\n8195\n1245\n6286\n935\n1756\n1701\n1626\n7379\n3492\n3717\n5802\n2817\n1234\n1005\n4101\n21\n2576\n4650\n3381\n1030\n2844\n1641\n936\n2729\n6469\n8913\n5994\n341\n4083\n5152\n3380\n8739\n6615\n3829\n164\n7927\n4779\n4216\n8528\n3641\n4606\n2769\n6970\n8850\n4971\n5489\n2008\n4564\n8682\n7784\n5768\n9252\n901\n438\n3577\n2765\n5904\n664\n3348\n6298\n3602\n2502\n8617\n7684\n5805\n4126\n2451\n6906\n7234\n9243\n3778\n1087\n9053\n5026\n2504\n5283\n2820\n4242\n797\n3925\n1383\n8750\n7861\n1403\n6973\n7617\n3065\n5395\n4347\n8144\n2688\n6527\n8597\n8673\n7327\n6331\n1422\n7115\n244\n7013\n2092\n54\n7970\n5742\n4823\n8588\n2938\n3060\n4149\n2375\n6616\n8803\n1555\n4369\n1380\n3011\n6144\n3367\n7370\n1995\n2602\n985\n8785\n8480\n9125\n1927\n3269\n3771\n1032\n7378\n5726\n2731\n2020\n6727\n8793\n523\n6036\n58\n7993\n5512\n5049\n2721\n8482\n673\n7937\n1168\n4472\n8247\n7287\n9017\n6421\n9190\n3584\n1819\n1792\n2810\n6033\n6749\n7677\n981\n7160\n4726\n1886\n7845\n6975\n7422\n4613\n4501\n2569\n4263\n3206
\n4133\n2420\n3706\n8894\n2263\n5774\n4925\n9180\n8888\n2945\n2091\n1873\n6303\n729\n2156\n3267\n1860\n6597\n4930\n5253\n938\n580\n5825\n166\n8198\n6892\n8701\n74\n7094\n8954\n3156\n6140\n4279\n2229\n5466\n8413\n7105\n8192\n2632\n7638\n9308\n8530\n832\n4643\n2201\n3268\n4322\n6510\n2967\n262\n403\n1258\n8828\n5838\n8529\n2788\n237\n3838\n1291\n4056\n5628\n7281\n6476\n7935\n2850\n6041\n2013\n4016\n4576\n5312\n6827\n6321\n8669\n830\n1519\n2750\n6106\n6993\n6235\n5899\n7313\n5331\n4371\n7086\n8600\n2660\n5409\n3465\n5499\n6231\n5745\n1801\n5337\n4468\n1451\n4192\n1275\n1114\n4960\n8860\n3900\n6468\n1505\n8868\n5588\n3858\n1947\n2565\n1472\n243\n6583\n7085\n5374\n4291\n4426\n492\n2311\n8305\n3662\n8780\n7488\n3890\n5005\n4680\n7358\n9116\n4397\n5999\n7902\n83\n3566\n2134\n8942\n4767\n6601\n1745\n5736\n5254\n8017\n4015\n7690\n3798\n8947\n1067\n7945\n590\n2547\n2535\n64\n2053\n5359\n2493\n6669\n7473\n6147\n7175\n6983\n5196\n745\n2657\n3497\n697\n3161\n7528\n2239\n5991\n3201\n7681\n5189\n2959\n2044\n8917\n2046\n6313\n6333\n5318\n4301\n2213\n2933\n4121\n3903\n4392\n7889\n5323\n1055\n707\n3857\n518\n6078\n5134\n6645\n9138\n1592\n680\n4446\n7943\n3461\n3887\n5601\n2321\n6621\n558\n4914\n913\n5637\n6453\n8511\n4531\n1218\n5508\n2603\n6802\n8426\n8297\n2947\n5971\n6552\n5262\n5935\n782\n7435\n8357\n6139\n1136\n5008\n3585\n3627\n5356\n2997\n2347\n881\n4849\n8808\n8351\n4017\n2010\n6836\n4391\n3630\n3712\n2969\n5238\n4333\n2301\n4406\n1236\n1050\n1864\n8408\n8251\n8795\n5879\n3365\n7481\n8206\n2452\n1767\n8859\n124\n3948\n4444\n8962\n4438\n5003\n8428\n3105\n5117\n1095\n8755\n7881\n3097\n4877\n155\n1917\n2455\n6042\n337\n6724\n6045\n8483\n7135\n2242\n4566\n1679\n834\n1746\n795\n3548\n2314\n2036\n4046\n9129\n7084\n5091\n2413\n8170\n5775\n1817\n529\n813\n2916\n5130\n126\n1243\n2370\n4831\n9122\n3010\n5104\n2613\n6761\n5340\n3512\n6283\n2346\n653\n6121\n2615\n7421\n1869\n1002\n8834\n2991\n8992\n632\n1093\n4543\n645\n2352\n4115\n373\n1483\n6966\n8598\n3896\n3434\n5987\n8318\n1815\n1223\n1548\n6885\n5073\n6330\n2573\n1369\n4095\n1431\n2185\n5766\n1301\n7258\n8048\n7598\n2847\n1996\n2378\n8561\n743\n6381\n271\n1956\n7439\n7134\n6636\n5804\n1858\n6214\n4730\n8536\n1203\n3118\n9202\n1875\n5885\n168\n5898\n4014\n4186\n3346\n3041\n5558\n9296\n8157\n4339\n3234\n2604\n6803\n5387\n5590\n125\n2173\n8012\n8005\n4858\n651\n372\n378\n8366\n6299\n1449\n7793\n8541\n3235\n8043\n3086\n3983\n6949\n4690\n6494\n8406\n7408\n350\n7021\n8224\n7044\n7662\n6697\n7679\n169\n528\n7029\n2790\n7432\n7602\n8333\n1582\n1378\n482\n9279\n8015\n4514\n3542\n628\n5053\n6699\n6227\n2094\n1621\n847\n3598\n2728\n7276\n6620\n8345\n4278\n4059\n9058\n4173\n8134\n1997\n3182\n3224\n8129\n5109\n4494\n189\n7640\n180\n2963\n1123\n5593\n3263\n4185\n7140\n8990\n6320\n9275\n4601\n4854\n5907\n1135\n8083\n5964\n7788\n1992\n8069\n9174\n6160\n35\n8572\n2865\n46\n3952\n6418\n2510\n5783\n3816\n2715\n3930\n2548\n5204\n708\n7756\n3825\n777\n3550\n3929\n5440\n6751\n7764\n4070\n7331\n3743\n9131\n9206\n3828\n23\n41\n4197\n234\n5723\n7622\n8832\n2169\n5599\n2976\n5266\n1967\n90\n822\n2538\n3169\n6771\n7442\n498\n4967\n5580\n7581\n7680\n4728\n1115\n1064\n3106\n6266\n4415\n9294\n5597\n7059\n197\n7218\n6948\n5690\n1653\n4485\n4019\n3370\n919\n1330\n6085\n2078\n5427\n4545\n2435\n8862\n3633\n8145\n5221\n1388\n5913\n8140\n7471\n7156\n6989\n1190\n6832\n2830\n4387\n3454\n7469\n2910\n4526\n5187\n2410\n9223\n4681\n1300\n7407\n6523\n3616\n6894\n7253\n4515\n5874\n5448\n7137\n7957\n1130\n3092\n7054\n3516\n5797\n1000\n4336\n9090\n6403\n7255\n8919\n6522\n6760\n8898\n4803\n374\n8686\n3
985\n7045\n3475\n6065\n7991\n1409\n7851\n6671\n6090\n5826\n7857\n1155\n8964\n1117\n7072\n6064\n2497\n4899\n2397\n3189\n2369\n5027\n5754\n8950\n5617\n8391\n914\n6264\n279\n6174\n5184\n3733\n5278\n2924\n567\n7994\n352\n8084\n2148\n2723\n3359\n70\n1870\n7708\n220\n3994\n9013\n3191\n9220\n4155\n5717\n1110\n2198\n785\n5325\n4770\n4250\n52\n4634\n9037\n601\n8036\n7996\n2483\n7232\n8675\n8836\n1279\n5346\n7676\n6104\n1515\n4603\n5607\n5144\n2628\n68\n440\n3586\n3083\n4830\n4378\n7762\n1134\n4542\n7850\n6296\n4011\n8751\n4776\n7954\n7102\n5697\n2032\n5729\n5017\n6962\n2051\n1092\n9019\n2759\n8581\n8618\n912\n2382\n4892\n8447\n8176\n5491\n5695\n5504\n1060\n578\n4320\n2379\n7649\n8416\n1613\n5344\n7512\n7865\n3037\n6689\n6557\n1569\n5955\n3707\n9168\n8566\n1775\n5950\n6943\n7804\n434\n6179\n1142\n7947\n6456\n6291\n5789\n6538\n9134\n3049\n5075\n5161\n1623\n948\n6302\n6063\n7516\n117\n506\n3302\n7146\n355\n1081\n2827\n1496\n2574\n6167\n3183\n4287\n5482\n7319\n7277\n3860\n3443\n3298\n8364\n3826\n7254\n2360\n5093\n7039\n6325\n2567\n4443\n559\n2625\n4228\n8967\n6405\n1674\n3936\n4475\n8556\n8585\n896\n3713\n6259\n4297\n6718\n2392\n2279\n4927\n1283\n2860\n7665\n663\n596\n6293\n6805\n2811\n7383\n8306\n8330\n3153\n2153\n2618\n2441\n3615\n8092\n552\n5285\n8124\n9247\n5530\n8175\n6242\n5660\n3433\n1610\n1832\n3892\n3862\n640\n2127\n4196\n3495\n7217\n5206\n4836\n7759\n800\n4227\n3699\n9055\n5665\n6826\n7463\n9065\n4720\n5069\n3453\n3358\n6532\n5970\n7921\n4087\n1547\n3424\n8040\n7995\n6787\n9069\n8716\n2561\n8199\n1479\n2767\n7818\n7145\n604\n7597\n4896\n9281\n4666\n185\n7978\n3059\n9221\n2135\n1800\n2974\n1529\n5948\n446\n4436\n8672\n3508\n6208\n5673\n6998\n5203\n278\n7041\n9110\n5853\n8121\n1764\n3046\n6575\n4738\n2228\n7761\n9322\n7019\n6931\n6383\n6762\n283\n3935\n6785\n471\n8214\n231\n3844\n5746\n2011\n7209\n336\n6433\n756\n9167\n6741\n3345\n7685\n4018\n6682\n9147\n4790\n5836\n5906\n676\n3964\n6362\n3510\n7510\n2308\n1806\n5917\n3387\n5423\n8900\n147\n3780\n1696\n9111\n6783\n6497\n4104\n3987\n260\n4616\n2121\n9283\n1400\n4670\n2735\n2096\n6521\n1423\n4523\n2243\n6667\n6990\n3944\n6915\n6763\n404\n2691\n1015\n7092\n7562\n8624\n2291\n5934\n5503\n2326\n2960\n842\n1963\n5568\n9050\n3806\n439\n9154\n6055\n6451\n7633\n688\n4354\n8890\n2813\n2872\n8102\n6609\n1497\n8389\n6449\n1682\n3594\n5103\n5812\n863\n3054\n8079\n2260\n2027\n3091\n7687\n6703\n3557\n2019\n8427\n2799\n8182\n6641\n3168\n2284\n1934\n6507\n1658\n3811\n1774\n7897\n2238\n2943\n191\n3869\n3188\n414\n8072\n7838\n1382\n4962\n5363\n4042\n1983\n4077\n7429\n4044\n1109\n1295\n386\n5481\n3927\n311\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/data_specs/got10k_vot_val_split.txt",
    "content": "1349\n5878\n562\n2202\n8904\n1501\n8654\n2975\n2689\n3680\n5180\n1900\n7707\n4723\n8912\n4029\n3579\n869\n2888\n8657\n6599\n741\n4288\n2244\n7357\n5704\n8791\n208\n4805\n8526\n4887\n8871\n7468\n3343\n886\n7794\n2646\n6454\n6101\n7885\n7744\n1297\n4119\n4856\n122\n2286\n2925\n5131\n5843\n5320\n5626\n540\n1862\n7335\n699\n7760\n9198\n3259\n7345\n8698\n1280\n6479\n3100\n3988\n1322\n5737\n1268\n3257\n6791\n3326\n4815\n7644\n1082\n2826\n6821\n8984\n2553\n5290\n5909\n4762\n8096\n8066\n4325\n6666\n7193\n7114\n8060\n7872\n6788\n3544\n5460\n3507\n2509\n6626\n3429\n5542\n4220\n2968\n5271\n3863\n1868\n5581\n2012\n6270\n8038\n4050\n121\n2845\n1565\n1998\n2275\n5524\n6068\n7624\n4913\n9277\n1506\n803\n8848\n5925\n2450\n2072\n8190\n4753\n9162\n825\n7303\n9028\n2088\n8516\n1556\n5937\n7847\n2367\n7549\n1049\n1521\n4739\n3931\n8958\n4130\n7876\n897\n5985\n7346\n7537\n111\n3700\n1126\n7896\n3419\n1051\n5720\n1068\n3458\n146\n291\n6256\n5514\n2857\n4580\n6239\n6525\n8717\n391\n4841\n6676\n4360\n4211\n73\n1675\n1987\n4025\n1321\n662\n8265\n6424\n2758\n7765\n7656\n3209\n7497\n7600\n9039\n7697\n5177\n2983\n5622\n9295\n3284\n964\n2024\n1269\n4551\n8088\n5659\n2212\n5199\n5551\n8607\n5573\n5200\n7951\n8429\n7720\n5919\n1273\n3529\n6707\n9176\n7552\n3255\n5649\n6110\n1137\n9272\n788\n5786\n5186\n2667\n7630\n3953\n1828\n8827\n6471\n7815\n467\n6387\n3195\n6238\n6508\n2373\n5983\n4931\n2948\n921\n2438\n517\n3949\n2137\n3216\n5683\n3695\n1719\n4837\n9159\n6981\n860\n7410\n5497\n1770\n5557\n8810\n5194\n4857\n9100\n6329\n2609\n1925\n3686\n9041\n4924\n349\n9187\n3393\n3661\n7120\n6858\n4587\n3831\n3130\n5060\n6486\n8023\n824\n1354\n8861\n5534\n7292\n4389\n6029\n6226\n3505\n4326\n7445\n581\n6089\n3450\n7324\n6516\n6775\n1207\n4575\n5135\n3918\n9020\n3473\n3898\n7812\n6571\n6757\n6639\n2557\n1206\n6148\n7325\n8790\n4938\n7026\n4383\n8041\n1250\n7267\n1952\n7561\n8811\n4941\n8373\n4848\n6602\n8355\n8104\n5214\n4330\n3181\n3422\n456\n1782\n3408\n6530\n719\n7587\n3058\n740\n4207\n5336\n2798\n2473\n4221\n1493\n3281\n171\n9157\n9139\n7766\n3324\n5308\n3708\n2431\n8080\n2093\n2585\n406\n7040\n5064\n5247\n4758\n6512\n4257\n4935\n2705\n2572\n3436\n8513\n1385\n2637\n7091\n2761\n6007\n6694\n2422\n4917\n2186\n6898\n1390\n6965\n7698\n2002\n2692\n7365\n7373\n4091\n947\n3962\n8692\n1788\n6862\n6856\n1950\n1914\n5658\n3635\n1620\n4780\n2580\n1454\n2786\n687\n7238\n3648\n6452\n1197\n3190\n5900\n9043\n4958\n1821\n1187\n1153\n7169\n7350\n5674\n6254\n3025\n6680\n1690\n2899\n3893\n1577\n5728\n9189\n5077\n3560\n2179\n5462\n1402\n3654\n1376\n5506\n1179\n5647\n4686\n8644\n1352\n2855\n6079\n2254\n2668\n2287\n2457\n3418\n7264\n677\n3074\n2655\n1042\n2210\n4504\n8309\n4209\n4280\n3258\n2977\n84\n4705\n1244\n3511\n6355\n8813\n3228\n9266\n1122\n613\n732\n5202\n8425\n2638\n6470\n3541\n8132\n2063\n5129\n2818\n7949\n8090\n4465\n7295\n5239\n7009\n9271\n8563\n2832\n952\n8136\n6776\n3565\n5188\n7288\n6999\n285\n5487\n7608\n8584\n2071\n7868\n2804\n3655\n6847\n3276\n4272\n3910\n1574\n4559\n7580\n5014\n8183\n6386\n7574\n356\n4937\n2487\n9315\n7572\n3040\n671\n2682\n8626\n3868\n387\n8679\n4074\n1481\n3527\n3595\n4754\n2453\n1579\n4638\n9123\n1829\n3009\n3691\n763\n4875\n3572\n4273\n2777\n6032\n4793\n233\n7147\n996\n3199\n8835\n3517\n7210\n6125\n6037\n3684\n3915\n3180\n7043\n4458\n2889\n57\n7667\n8375\n1434\n7493\n4733\n5827\n2111\n1313\n7986\n3075\n2614\n7547\n4977\n8527\n3212\n7300\n5842\n5244\n3291\n597\n1007\n2030\n227\n3830\n5540\n247\n5643\n9333\n1958\n1371\n5220\n7926\n2927\n1516\n7130\n193\n1522\n6165\n6923\n3794\n4223\n5535\n2472\n8630\
n3971\n9101\n2946\n4609\n7291\n8542\n6501\n7548\n4557\n6274\n5226\n7309\n1317\n6275\n1099\n4191\n7270\n5392\n2316\n3819\n1670\n8045\n4807\n8864\n2391\n5908\n8338\n8218\n6400\n9193\n3165\n843\n6613\n6941\n5629\n7557\n4321\n3702\n681\n1159\n4665\n5959\n1697\n5509\n8774\n7389\n3832\n3751\n8637\n1680\n6841\n703\n684\n8293\n3682\n5733\n4818\n3231\n5562\n9001\n3889\n7024\n2519\n1713\n3287\n219\n8776\n2289\n7212\n4832\n4684\n4617\n4237\n2649\n8185\n6326\n3568\n551\n1426\n8869\n312\n2905\n4165\n8248\n2558\n900\n1044\n8613\n7743\n5437\n7604\n3122\n5708\n8649\n2878\n4695\n4491\n7533\n5223\n7711\n1844\n5751\n3008\n8055\n4636\n61\n198\n2271\n5698\n4596\n4500\n5709\n5819\n7972\n2992\n1643\n1048\n6281\n8886\n360\n4198\n6814\n3960\n2606\n7001\n5888\n450\n7133\n7015\n7034\n5153\n8920\n5066\n469\n1302\n8816\n463\n8651\n5869\n6582\n5578\n1231\n9274\n7260\n7751\n8052\n6799\n2089\n2342\n8451\n3260\n5550\n7795\n2288\n1205\n40\n496\n8367\n7836\n5973\n3908\n5242\n5062\n2706\n997\n5419\n9201\n1965\n6062\n3050\n5302\n8735\n358\n2398\n7470\n1644\n8179\n7047\n1549\n5414\n2539\n7381\n589\n8166\n8505\n6035\n3956\n4540\n6721\n8074\n1062\n2384\n2531\n7159\n3902\n4584\n2554\n264\n8720\n2849\n4916\n5218\n7202\n883\n4560\n1677\n4317\n7863\n4509\n6577\n2903\n1452\n1416\n5369\n473\n6233\n6359\n5992\n4934\n8059\n6834\n4907\n3320\n8267\n8280\n2066\n2402\n1485\n3772\n3732\n4764\n9126\n3575\n5564\n5641\n1884\n2330\n1804\n344\n698\n3089\n1532\n4454\n761\n8094\n3432\n6811\n8722\n8826\n3222\n8614\n2901\n7003\n652\n8663\n4266\n413\n810\n75\n3334\n4905\n6438\n4756\n5137\n6528\n6534\n6988\n6177\n8533\n889\n5384\n7201\n5132\n7802\n6864\n3973\n873\n4840\n1482\n8376\n3769\n5858\n6675\n4286\n2593\n5863\n4353\n7817\n7540\n4999\n4838\n2303\n7913\n1508\n7755\n2784\n4964\n3431\n6209\n3755\n6399\n3954\n455\n5416\n7591\n245\n140\n9210\n4084\n967\n7798\n6795\n7095\n6733\n3861\n9264\n1045\n755\n8042\n7074\n7778\n6415\n4724\n6450\n2049\n1307\n3485\n1790\n7869\n3282\n6907\n3920\n2868\n5801\n5632\n5009\n3955\n7517\n5128\n3417\n3019\n1784\n2312\n2753\n6976\n342\n8266\n1849\n2273\n5037\n7880\n3793\n7401\n5412\n8279\n1257\n3670\n9049\n3266\n8955\n6519\n8916\n2858\n694\n5650\n4669\n1785\n3533\n2704\n8603\n3726\n6668\n497\n6815\n6157\n6646\n6964\n8097\n5645\n8481\n8215\n3775\n2542\n7514\n5699\n3518\n3740\n1404\n8981\n4086\n6397\n4204\n6899\n682\n6589\n4340\n7424\n9208\n6504\n4409\n1\n145\n1882\n4620\n2634\n4992\n5453\n3377\n7875\n530\n1235\n7605\n504\n1771\n8489\n345\n7353\n7797\n7174\n5914\n2871\n5721\n6067\n3582\n5467\n6234\n691\n8758\n2122\n1213\n1492\n1437\n2187\n1266\n2395\n7278\n8491\n5256\n1554\n8163\n5966\n7128\n7904\n1691\n6272\n3996\n1706\n1334\n1316\n6478\n6935\n1518\n6700\n8703\n8744\n8152\n8778\n5367\n4218\n9007\n6312\n606\n7565\n5293\n2891\n675\n2120\n826\n7008\n5705\n7748\n8010\n1498\n5330\n5472\n2215\n7627\n3016\n6588\n1850\n4128\n8569\n6987\n148\n8151\n8789\n7907\n8596\n715\n9060\n3872\n1750\n5889\n4047\n5960\n3120\n3449\n1421\n1102\n3333\n9197\n8796\n8123\n8007\n2028\n8404\n1945\n1985\n8109\n5380\n3504\n6739\n4180\n5835\n4243\n25\n4002\n1976\n158\n5181\n4885\n8985\n11\n6425\n5926\n7062\n5083\n8394\n4259\n5844\n1990\n3942\n5532\n2220\n28\n5957\n149\n6748\n3559\n7647\n2566\n1359\n5259\n7010\n554\n6005\n8172\n8125\n1350\n9051\n1973\n1386\n159\n7007\n3220\n1846\n3093\n4445\n2056\n8370\n3211\n4384\n2231\n273\n642\n5311\n265\n226\n9012\n7879\n118\n7109\n7251\n1760\n8667\n2876\n7162\n3552\n6901\n6779\n5021\n6524\n4957\n3114\n4544\n441\n1848\n2136\n2458\n8662\n1127\n5541\n3026\n1080\n6780\n2224\n8259\n1073\n9000\n7244\n7977\n500\n4435\n7376\n797
9\n1435\n9291\n7704\n3521\n210\n6269\n8570\n3285\n8039\n3546\n6203\n1183\n6107\n4147\n2234\n7185\n3192\n7155\n2001\n7777\n876\n944\n908\n7791\n6784\n65\n9172\n5675\n3886\n7891\n2978\n1008\n5630\n591\n5067\n1139\n577\n9015\n574\n8137\n7786\n5765\n4900\n4090\n7842\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/data_specs/lasot_train_split.txt",
    "content": "airplane-10\nairplane-11\nairplane-12\nairplane-14\nairplane-16\nairplane-17\nairplane-18\nairplane-19\nairplane-2\nairplane-20\nairplane-3\nairplane-4\nairplane-5\nairplane-6\nairplane-7\nairplane-8\nbasketball-10\nbasketball-12\nbasketball-13\nbasketball-14\nbasketball-15\nbasketball-16\nbasketball-17\nbasketball-18\nbasketball-19\nbasketball-2\nbasketball-20\nbasketball-3\nbasketball-4\nbasketball-5\nbasketball-8\nbasketball-9\nbear-1\nbear-10\nbear-11\nbear-12\nbear-13\nbear-14\nbear-15\nbear-16\nbear-18\nbear-19\nbear-20\nbear-3\nbear-5\nbear-7\nbear-8\nbear-9\nbicycle-1\nbicycle-10\nbicycle-11\nbicycle-12\nbicycle-13\nbicycle-14\nbicycle-15\nbicycle-16\nbicycle-17\nbicycle-19\nbicycle-20\nbicycle-3\nbicycle-4\nbicycle-5\nbicycle-6\nbicycle-8\nbird-1\nbird-10\nbird-11\nbird-12\nbird-13\nbird-14\nbird-16\nbird-18\nbird-19\nbird-20\nbird-4\nbird-5\nbird-6\nbird-7\nbird-8\nbird-9\nboat-1\nboat-10\nboat-11\nboat-13\nboat-14\nboat-15\nboat-16\nboat-18\nboat-19\nboat-2\nboat-20\nboat-5\nboat-6\nboat-7\nboat-8\nboat-9\nbook-1\nbook-12\nbook-13\nbook-14\nbook-15\nbook-16\nbook-17\nbook-18\nbook-2\nbook-20\nbook-4\nbook-5\nbook-6\nbook-7\nbook-8\nbook-9\nbottle-10\nbottle-11\nbottle-13\nbottle-15\nbottle-16\nbottle-17\nbottle-19\nbottle-2\nbottle-20\nbottle-3\nbottle-4\nbottle-5\nbottle-6\nbottle-7\nbottle-8\nbottle-9\nbus-1\nbus-10\nbus-11\nbus-12\nbus-13\nbus-14\nbus-15\nbus-16\nbus-18\nbus-20\nbus-3\nbus-4\nbus-6\nbus-7\nbus-8\nbus-9\ncar-1\ncar-10\ncar-11\ncar-12\ncar-13\ncar-14\ncar-15\ncar-16\ncar-18\ncar-19\ncar-20\ncar-3\ncar-4\ncar-5\ncar-7\ncar-8\ncat-10\ncat-11\ncat-12\ncat-13\ncat-14\ncat-15\ncat-16\ncat-17\ncat-19\ncat-2\ncat-4\ncat-5\ncat-6\ncat-7\ncat-8\ncat-9\ncattle-1\ncattle-10\ncattle-11\ncattle-14\ncattle-15\ncattle-16\ncattle-17\ncattle-18\ncattle-19\ncattle-20\ncattle-3\ncattle-4\ncattle-5\ncattle-6\ncattle-8\ncattle-9\nchameleon-1\nchameleon-10\nchameleon-12\nchameleon-13\nchameleon-14\nchameleon-15\nchameleon-16\nchameleon-17\nchameleon-18\nchameleon-19\nchameleon-2\nchameleon-4\nchameleon-5\nchameleon-7\nchameleon-8\nchameleon-9\ncoin-1\ncoin-10\ncoin-11\ncoin-12\ncoin-13\ncoin-14\ncoin-15\ncoin-16\ncoin-17\ncoin-19\ncoin-2\ncoin-20\ncoin-4\ncoin-5\ncoin-8\ncoin-9\ncrab-1\ncrab-10\ncrab-11\ncrab-13\ncrab-14\ncrab-15\ncrab-16\ncrab-17\ncrab-19\ncrab-2\ncrab-20\ncrab-4\ncrab-5\ncrab-7\ncrab-8\ncrab-9\ncrocodile-1\ncrocodile-11\ncrocodile-12\ncrocodile-13\ncrocodile-15\ncrocodile-16\ncrocodile-17\ncrocodile-18\ncrocodile-19\ncrocodile-2\ncrocodile-20\ncrocodile-5\ncrocodile-6\ncrocodile-7\ncrocodile-8\ncrocodile-9\ncup-10\ncup-11\ncup-12\ncup-13\ncup-14\ncup-15\ncup-16\ncup-18\ncup-19\ncup-2\ncup-20\ncup-3\ncup-5\ncup-6\ncup-8\ncup-9\ndeer-1\ndeer-11\ndeer-12\ndeer-13\ndeer-15\ndeer-16\ndeer-17\ndeer-18\ndeer-19\ndeer-2\ndeer-20\ndeer-3\ndeer-5\ndeer-6\ndeer-7\ndeer-9\ndog-10\ndog-11\ndog-12\ndog-13\ndog-14\ndog-16\ndog-17\ndog-18\ndog-2\ndog-20\ndog-3\ndog-4\ndog-5\ndog-6\ndog-8\ndog-9\ndrone-1\ndrone-10\ndrone-11\ndrone-12\ndrone-14\ndrone-16\ndrone-17\ndrone-18\ndrone-19\ndrone-20\ndrone-3\ndrone-4\ndrone-5\ndrone-6\ndrone-8\ndrone-9\nelectricfan-11\nelectricfan-12\nelectricfan-13\nelectricfan-14\nelectricfan-15\nelectricfan-16\nelectricfan-17\nelectricfan-19\nelectricfan-2\nelectricfan-3\nelectricfan-4\nelectricfan-5\nelectricfan-6\nelectricfan-7\nelectricfan-8\nelectricfan-9\nelephant-10\nelephant-11\nelephant-13\nelephant-14\nelephant-15\nelephant-17\nelephant-19\nelephant-2\nelephant-20\nelephant-3\nelephant-4\nelephant-5\nelephant-6\nelephant-7\nele
phant-8\nelephant-9\nflag-1\nflag-10\nflag-11\nflag-12\nflag-13\nflag-14\nflag-15\nflag-16\nflag-17\nflag-18\nflag-19\nflag-20\nflag-4\nflag-6\nflag-7\nflag-8\nfox-1\nfox-10\nfox-11\nfox-12\nfox-13\nfox-14\nfox-15\nfox-16\nfox-17\nfox-18\nfox-19\nfox-4\nfox-6\nfox-7\nfox-8\nfox-9\nfrog-1\nfrog-10\nfrog-11\nfrog-12\nfrog-13\nfrog-14\nfrog-15\nfrog-16\nfrog-17\nfrog-18\nfrog-19\nfrog-2\nfrog-5\nfrog-6\nfrog-7\nfrog-8\ngametarget-10\ngametarget-11\ngametarget-12\ngametarget-14\ngametarget-15\ngametarget-16\ngametarget-17\ngametarget-18\ngametarget-19\ngametarget-20\ngametarget-3\ngametarget-4\ngametarget-5\ngametarget-6\ngametarget-8\ngametarget-9\ngecko-10\ngecko-11\ngecko-12\ngecko-13\ngecko-14\ngecko-15\ngecko-17\ngecko-18\ngecko-2\ngecko-20\ngecko-3\ngecko-4\ngecko-6\ngecko-7\ngecko-8\ngecko-9\ngiraffe-1\ngiraffe-11\ngiraffe-12\ngiraffe-14\ngiraffe-16\ngiraffe-17\ngiraffe-18\ngiraffe-19\ngiraffe-20\ngiraffe-3\ngiraffe-4\ngiraffe-5\ngiraffe-6\ngiraffe-7\ngiraffe-8\ngiraffe-9\ngoldfish-1\ngoldfish-11\ngoldfish-12\ngoldfish-13\ngoldfish-14\ngoldfish-15\ngoldfish-16\ngoldfish-17\ngoldfish-18\ngoldfish-19\ngoldfish-2\ngoldfish-20\ngoldfish-4\ngoldfish-5\ngoldfish-6\ngoldfish-9\ngorilla-1\ngorilla-10\ngorilla-11\ngorilla-12\ngorilla-14\ngorilla-15\ngorilla-16\ngorilla-17\ngorilla-18\ngorilla-19\ngorilla-2\ngorilla-20\ngorilla-3\ngorilla-5\ngorilla-7\ngorilla-8\nguitar-1\nguitar-11\nguitar-12\nguitar-13\nguitar-14\nguitar-15\nguitar-17\nguitar-18\nguitar-19\nguitar-2\nguitar-20\nguitar-4\nguitar-5\nguitar-6\nguitar-7\nguitar-9\nhand-1\nhand-10\nhand-11\nhand-12\nhand-13\nhand-14\nhand-15\nhand-17\nhand-18\nhand-19\nhand-20\nhand-4\nhand-5\nhand-6\nhand-7\nhand-8\nhat-10\nhat-11\nhat-12\nhat-13\nhat-14\nhat-15\nhat-16\nhat-17\nhat-19\nhat-20\nhat-3\nhat-4\nhat-6\nhat-7\nhat-8\nhat-9\nhelmet-1\nhelmet-10\nhelmet-12\nhelmet-14\nhelmet-15\nhelmet-16\nhelmet-17\nhelmet-18\nhelmet-2\nhelmet-20\nhelmet-3\nhelmet-4\nhelmet-6\nhelmet-7\nhelmet-8\nhelmet-9\nhippo-10\nhippo-11\nhippo-12\nhippo-13\nhippo-14\nhippo-15\nhippo-16\nhippo-17\nhippo-18\nhippo-19\nhippo-2\nhippo-3\nhippo-4\nhippo-5\nhippo-6\nhippo-8\nhorse-10\nhorse-11\nhorse-13\nhorse-14\nhorse-16\nhorse-17\nhorse-18\nhorse-19\nhorse-2\nhorse-20\nhorse-3\nhorse-5\nhorse-6\nhorse-7\nhorse-8\nhorse-9\nkangaroo-1\nkangaroo-10\nkangaroo-12\nkangaroo-13\nkangaroo-15\nkangaroo-16\nkangaroo-17\nkangaroo-18\nkangaroo-19\nkangaroo-20\nkangaroo-3\nkangaroo-4\nkangaroo-6\nkangaroo-7\nkangaroo-8\nkangaroo-9\nkite-1\nkite-11\nkite-12\nkite-13\nkite-14\nkite-16\nkite-17\nkite-18\nkite-19\nkite-2\nkite-20\nkite-3\nkite-5\nkite-7\nkite-8\nkite-9\nleopard-10\nleopard-11\nleopard-12\nleopard-13\nleopard-14\nleopard-15\nleopard-17\nleopard-18\nleopard-19\nleopard-2\nleopard-3\nleopard-4\nleopard-5\nleopard-6\nleopard-8\nleopard-9\nlicenseplate-1\nlicenseplate-10\nlicenseplate-11\nlicenseplate-14\nlicenseplate-16\nlicenseplate-17\nlicenseplate-18\nlicenseplate-19\nlicenseplate-2\nlicenseplate-20\nlicenseplate-3\nlicenseplate-4\nlicenseplate-5\nlicenseplate-7\nlicenseplate-8\nlicenseplate-9\nlion-10\nlion-11\nlion-13\nlion-14\nlion-15\nlion-16\nlion-17\nlion-18\nlion-19\nlion-2\nlion-3\nlion-4\nlion-6\nlion-7\nlion-8\nlion-9\nlizard-10\nlizard-11\nlizard-12\nlizard-14\nlizard-15\nlizard-16\nlizard-17\nlizard-18\nlizard-19\nlizard-2\nlizard-20\nlizard-4\nlizard-5\nlizard-7\nlizard-8\nlizard-9\nmicrophone-1\nmicrophone-10\nmicrophone-11\nmicrophone-12\nmicrophone-13\nmicrophone-15\nmicrophone-17\nmicrophone-18\nmicrophone-19\nmicrophone-20\nmicrophone-3\nmicrophone-4\nm
icrophone-5\nmicrophone-7\nmicrophone-8\nmicrophone-9\nmonkey-1\nmonkey-10\nmonkey-11\nmonkey-12\nmonkey-13\nmonkey-14\nmonkey-15\nmonkey-16\nmonkey-18\nmonkey-19\nmonkey-2\nmonkey-20\nmonkey-5\nmonkey-6\nmonkey-7\nmonkey-8\nmotorcycle-10\nmotorcycle-11\nmotorcycle-12\nmotorcycle-13\nmotorcycle-14\nmotorcycle-15\nmotorcycle-16\nmotorcycle-17\nmotorcycle-19\nmotorcycle-2\nmotorcycle-20\nmotorcycle-4\nmotorcycle-5\nmotorcycle-6\nmotorcycle-7\nmotorcycle-8\nmouse-10\nmouse-11\nmouse-12\nmouse-13\nmouse-14\nmouse-15\nmouse-16\nmouse-18\nmouse-19\nmouse-2\nmouse-20\nmouse-3\nmouse-4\nmouse-5\nmouse-6\nmouse-7\nperson-11\nperson-13\nperson-14\nperson-15\nperson-16\nperson-17\nperson-18\nperson-19\nperson-2\nperson-20\nperson-3\nperson-4\nperson-6\nperson-7\nperson-8\nperson-9\npig-1\npig-11\npig-12\npig-14\npig-15\npig-16\npig-17\npig-19\npig-20\npig-3\npig-4\npig-5\npig-6\npig-7\npig-8\npig-9\npool-1\npool-10\npool-11\npool-13\npool-14\npool-16\npool-17\npool-18\npool-19\npool-2\npool-20\npool-4\npool-5\npool-6\npool-8\npool-9\nrabbit-1\nrabbit-11\nrabbit-12\nrabbit-14\nrabbit-15\nrabbit-16\nrabbit-18\nrabbit-2\nrabbit-20\nrabbit-3\nrabbit-4\nrabbit-5\nrabbit-6\nrabbit-7\nrabbit-8\nrabbit-9\nracing-1\nracing-11\nracing-12\nracing-13\nracing-14\nracing-17\nracing-18\nracing-19\nracing-2\nracing-3\nracing-4\nracing-5\nracing-6\nracing-7\nracing-8\nracing-9\nrobot-10\nrobot-11\nrobot-12\nrobot-13\nrobot-14\nrobot-15\nrobot-16\nrobot-17\nrobot-18\nrobot-2\nrobot-20\nrobot-3\nrobot-4\nrobot-6\nrobot-7\nrobot-9\nrubicCube-10\nrubicCube-11\nrubicCube-12\nrubicCube-13\nrubicCube-15\nrubicCube-16\nrubicCube-17\nrubicCube-18\nrubicCube-2\nrubicCube-20\nrubicCube-3\nrubicCube-4\nrubicCube-5\nrubicCube-7\nrubicCube-8\nrubicCube-9\nsepia-1\nsepia-10\nsepia-11\nsepia-12\nsepia-14\nsepia-15\nsepia-17\nsepia-18\nsepia-19\nsepia-2\nsepia-20\nsepia-3\nsepia-4\nsepia-5\nsepia-7\nsepia-9\nshark-1\nshark-10\nshark-11\nshark-12\nshark-13\nshark-14\nshark-15\nshark-16\nshark-17\nshark-18\nshark-19\nshark-20\nshark-4\nshark-7\nshark-8\nshark-9\nsheep-1\nsheep-10\nsheep-11\nsheep-12\nsheep-13\nsheep-14\nsheep-15\nsheep-16\nsheep-17\nsheep-18\nsheep-19\nsheep-2\nsheep-20\nsheep-4\nsheep-6\nsheep-8\nskateboard-1\nskateboard-10\nskateboard-11\nskateboard-12\nskateboard-13\nskateboard-14\nskateboard-15\nskateboard-17\nskateboard-18\nskateboard-2\nskateboard-20\nskateboard-4\nskateboard-5\nskateboard-6\nskateboard-7\nskateboard-9\nspider-1\nspider-10\nspider-11\nspider-12\nspider-13\nspider-15\nspider-17\nspider-19\nspider-2\nspider-3\nspider-4\nspider-5\nspider-6\nspider-7\nspider-8\nspider-9\nsquirrel-1\nsquirrel-10\nsquirrel-12\nsquirrel-14\nsquirrel-15\nsquirrel-16\nsquirrel-17\nsquirrel-18\nsquirrel-2\nsquirrel-20\nsquirrel-3\nsquirrel-4\nsquirrel-5\nsquirrel-6\nsquirrel-7\nsquirrel-9\nsurfboard-1\nsurfboard-10\nsurfboard-11\nsurfboard-13\nsurfboard-14\nsurfboard-15\nsurfboard-16\nsurfboard-17\nsurfboard-18\nsurfboard-19\nsurfboard-2\nsurfboard-20\nsurfboard-3\nsurfboard-6\nsurfboard-7\nsurfboard-9\nswing-1\nswing-11\nswing-12\nswing-13\nswing-15\nswing-16\nswing-18\nswing-19\nswing-2\nswing-3\nswing-4\nswing-5\nswing-6\nswing-7\nswing-8\nswing-9\ntank-1\ntank-10\ntank-11\ntank-12\ntank-13\ntank-15\ntank-17\ntank-18\ntank-19\ntank-2\ntank-20\ntank-3\ntank-4\ntank-5\ntank-7\ntank-8\ntiger-1\ntiger-10\ntiger-11\ntiger-13\ntiger-14\ntiger-15\ntiger-16\ntiger-17\ntiger-19\ntiger-2\ntiger-20\ntiger-3\ntiger-5\ntiger-7\ntiger-8\ntiger-9\ntrain-10\ntrain-12\ntrain-13\ntrain-14\ntrain-15\ntrain-16\ntrain-17\ntrain-18\ntrain-19\
ntrain-2\ntrain-3\ntrain-4\ntrain-5\ntrain-6\ntrain-8\ntrain-9\ntruck-1\ntruck-10\ntruck-11\ntruck-12\ntruck-13\ntruck-14\ntruck-15\ntruck-17\ntruck-18\ntruck-19\ntruck-2\ntruck-20\ntruck-4\ntruck-5\ntruck-8\ntruck-9\nturtle-1\nturtle-10\nturtle-11\nturtle-12\nturtle-13\nturtle-14\nturtle-15\nturtle-17\nturtle-18\nturtle-19\nturtle-2\nturtle-20\nturtle-3\nturtle-4\nturtle-6\nturtle-7\numbrella-1\numbrella-10\numbrella-11\numbrella-12\numbrella-13\numbrella-14\numbrella-15\numbrella-16\numbrella-18\numbrella-20\numbrella-3\numbrella-4\numbrella-5\numbrella-6\numbrella-7\numbrella-8\nvolleyball-10\nvolleyball-11\nvolleyball-12\nvolleyball-14\nvolleyball-15\nvolleyball-16\nvolleyball-17\nvolleyball-2\nvolleyball-20\nvolleyball-3\nvolleyball-4\nvolleyball-5\nvolleyball-6\nvolleyball-7\nvolleyball-8\nvolleyball-9\nyoyo-1\nyoyo-10\nyoyo-11\nyoyo-12\nyoyo-13\nyoyo-14\nyoyo-16\nyoyo-18\nyoyo-2\nyoyo-20\nyoyo-3\nyoyo-4\nyoyo-5\nyoyo-6\nyoyo-8\nyoyo-9\nzebra-1\nzebra-11\nzebra-12\nzebra-13\nzebra-15\nzebra-18\nzebra-19\nzebra-2\nzebra-20\nzebra-3\nzebra-4\nzebra-5\nzebra-6\nzebra-7\nzebra-8\nzebra-9\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/data_specs/trackingnet_classmap.txt",
    "content": "Nf1aqv5Fg5o_0\tairplane\nAAB6lO-XiKE_0\tperson\nAACM71csS-Q_0\tperson\nAACM71csS-Q_1\tperson\nAARNQeeGCeM_1\tperson\nAARldOxX9Qc_0\tbird\nAATSbTthMRo_1\tperson\nAAVQ--F7Bk8_7\tbird\nAAVQ--F7Bk8_2\tbird\nAAVQ--F7Bk8_8\tbird\nAAWK6esRYaE_0\tperson\nAAWK6esRYaE_1\tperson\nAAjY2Ci68z8_0\tperson\nAA19zjGEPvg_1\tbear\nAA28Bcp5cJ4_0\ttrain\nABBGULxaufw_0\tperson\nABF8Qzi1y6k_1\tbear\nABIlEiPfEC4_0\tbird\nABJ_agLToOw_0\tbird\nABZMoeeFyek_0\tbicycle\nABny-jw1_S0_0\telephant\nABrhnT3LRWs_2\tcat\nABxlnMGfo5c_0\tumbrella\nAByCCGnybVU_1\tperson\nAB2MjrpRiEQ_0\thorse\nAB-q-hxh9XQ_4\tbus\nAB-q-hxh9XQ_1\tbus\nAB-q-hxh9XQ_3\tbus\nACDuy9fWQCs_1\tumbrella\nACFxVnoXE2k_1\thorse\nACMvGMt8Neo_0\tperson\nACM6PJWHfcM_0\tperson\nACOGOPL4ZH0_1\tperson\nACOGOPL4ZH0_0\tperson\nACS5TtaAdG8_0\ttruck\nACarEC5tuT8_0\ttruck\nACiNZsAvVTE_0\tperson\nACkYaVC9f9M_1\tumbrella\nACnQKLobnGE_4\tairplane\nACnQKLobnGE_5\tairplane\nAC0Z4yw1hf0_0\tperson\nAC0Z4yw1hf0_1\tperson\nAC-10OYYnLM_1\tperson\nAC-10OYYnLM_0\tperson\nADHNPU5iB_4_0\tcat\nADWpC6kDWFU_0\tperson\nADiIG2D8pds_2\tmotorcycle\nADiIG2D8pds_0\tmotorcycle\nADi674XOuRY_0\tdog\nADn8ZdVYOcc_0\ttrain\nADn8ZdVYOcc_2\ttrain\nAD1cVG81mpA_0\tperson\nAD4EACfWAIM_0\thorse\nAD4EACfWAIM_1\thorse\nAD531xkux4k_0\tperson\nAD7A6_o0Las_0\thorse\nAEQT6XxEeT0_0\tperson\nAEQT6XxEeT0_1\tperson\nAESfphazWKA_0\tperson\nAESfphazWKA_1\tperson\nAEokTVMPd4A_0\tperson\nAEtwwIR9UkI_0\tdog\nAE2TrzJHr2s_1\tmotorcycle\nAE3t_VNk3eo_0\tperson\nAE6G6W2CL9M_1\tperson\nAE7tEK8S9pk_0\tbird\nAE7tEK8S9pk_3\tbird\nAE-k9jcdaJk_1\tgiraffe\nAFLrK88FzTI_0\tmotorcycle\nAFOjy-9Kf-8_0\tperson\nAFSTw_O6inE_0\tperson\nAFSTw_O6inE_1\tperson\nAFT64SYoPTo_1\tperson\nAFeRUltwvNE_0\tknife\nAFeRUltwvNE_2\tknife\nAFf9I30fB6U_0\tperson\nAFkSCsJ_jeg_0\tperson\nAFkSCsJ_jeg_1\tperson\nAFnPp9mvoJs_0\thorse\nAFpVfranYCA_1\tknife\nAFrLubifeb4_0\tairplane\nAFrLubifeb4_2\tairplane\nAFsmSsZBS6I_1\tperson\nAFsmSsZBS6I_0\tperson\nAF0FDnfdpro_0\ttrain\nAF0-2lDeBME_1\tbird\nAF2bYjH_Q8c_0\tperson\nAF4nO1MeUis_1\ttrain\nAGV9gZ6ePKk_0\tairplane\nAGXVFK896Os_0\tcow\nAGYehDNUqx0_1\tairplane\nAGYehDNUqx0_0\tairplane\nAGdqwMVGRoU_0\thorse\nAGfcGfMXHPM_3\telephant\nAGsg2IV8FME_1\tskateboard\nZBPURFcpqDM_0\tmotorcycle\nZBXAMWkamQk_2\tknife\nZBXAMWkamQk_1\tknife\nZBcCcSynS3Y_1\tcar\nZBcTSnaCcqE_1\tperson\nZBcTSnaCcqE_0\tperson\nZBcjhADZaUk_0\tbear\nZBdz7fg01uE_0\tumbrella\nZBp5ICCzoK8_0\tperson\nZBriZpPQR6Q_0\tcat\nZBvEIHeKcKg_2\tzebra\nZBvEIHeKcKg_9\tzebra\nZBvEIHeKcKg_0\tzebra\nZBvEIHeKcKg_1\tzebra\nZBvEIHeKcKg_3\tzebra\nZBvEIHeKcKg_4\tzebra\nZBvEIHeKcKg_5\tzebra\nZBvEIHeKcKg_6\tzebra\nZBvEIHeKcKg_7\tzebra\nZBvEIHeKcKg_8\tzebra\nZB0EfmbWfng_0\thorse\nZB0kV8Ni0e8_0\tperson\nZB_pe6v1lVI_0\tperson\nZB_pe6v1lVI_2\tperson\nZCAOpABRfTI_10\telephant\nZCAOpABRfTI_0\telephant\nZCAOpABRfTI_3\telephant\nZCAOpABRfTI_4\telephant\nZCAOpABRfTI_6\telephant\nZCAOpABRfTI_7\telephant\nZCAOpABRfTI_8\telephant\nZCFCltdIjeg_1\tperson\nZCFCltdIjeg_0\tperson\nZCGB4r_lWmY_0\thorse\nZCS_eyAufDo_0\tperson\nZCTwXcewINc_0\tcow\nZCfqT4CDOYA_1\tbird\nZCgDbEHLsIg_0\tperson\nZClABNZVqqw_1\tperson\nZCmoG6WgVO4_1\tperson\nZCmoG6WgVO4_0\tperson\nZCnJ6weWtz8_1\tperson\nZCnJ6weWtz8_0\tperson\nZCnJ6weWtz8_2\tperson\nZCzrSOZhkx8_1\tperson\nZCzrSOZhkx8_2\tperson\nZC3Y42jSG_0_0\tperson\nZC5Jtr93Fc0_0\tcat\nZDDtjYsFrzY_0\tmotorcycle\nZDMLHna_uZU_1\tskateboard\nZDMSLfnIpw0_0\tperson\nZDS-TQTDheA_0\tperson\nZDWUEeCoa0c_0\tperson\nZDfRsMjEWrU_0\tperson\nZDucdx9SldA_0\tbicycle\nZDwG7VWIZ2E_0\tmotorcycle\nZDw-tgE8yQw_0\tperson\nZEA5lDwY3hY_0\tperson\nZERPmLuCNr0_1\tskateboard\nZEYy
XBrvcIU_0\tperson\nZEbxfeAOLec_1\tmotorcycle\nZEdGptkowmk_2\tcow\nZEdsROg2ZAk_2\thorse\nZEgcTqeZxOk_1\tperson\nZEiW5hvCQyM_0\tbird\nZE16Mis16oE_0\tbus\nZE3Vro7d4pA_0\tcat\nZE415SbIjYI_7\tbird\nZE5h8vmL_Vw_0\tboat\nZE6oeN8ZzDA_1\tperson\nZE6oeN8ZzDA_0\tperson\nZFKQ9r76HHU_1\telephant\nZFKYTz9Jkhw_0\tumbrella\nZFSspVdQ_1M_0\tperson\nZFSspVdQ_1M_1\tperson\nZFe5vGzmYgY_0\tbear\nZFe5vGzmYgY_4\tbear\nZFfH8M8dMH8_5\tbird\nZFk9b7tQz1g_0\tperson\nZFn422HSENU_2\tairplane\nZFw7fJO3h3U_0\tmotorcycle\nZF2yE0Tm8D0_0\tcow\nZF5yV-qvHfg_0\tbicycle\nZF8rySXBivY_0\tperson\nZF_u1UFqAvg_0\tperson\nZGHtP6pLosk_0\tperson\nZGT9Ky1jJ0E_0\thorse\nZGWqLNy2PDM_2\tbird\nZGeWYNFOH7U_0\tperson\nZGhdqsb3kNA_0\tcar\nZGhdqsb3kNA_3\tcar\nZGhdqsb3kNA_1\tcar\nZGkmBkelEBU_0\tperson\nZGpMZT1HUiw_0\thorse\nZGsHiz0oPuw_0\tbus\nZGvfU-Fgk40_1\tperson\nZGyWFwMmdbs_0\tperson\nZG9dVnPGocw_0\tperson\nZHDkDNgRSz0_0\ttrain\nZHFPykjdFAY_1\tperson\nZHPeB20mRyI_0\tcow\nZHPeB20mRyI_1\tcow\nZHX1xXuU_Jw_0\tperson\nZHlb-NoDPiE_1\telephant\nZHlb-NoDPiE_2\telephant\nZHlb-NoDPiE_4\telephant\nZHl7b8RItn0_0\thorse\nZHnW6ge8wBc_0\tcat\nZHodaPFcFYU_0\tperson\nZHovXJVH8xk_0\ttruck\nZHpZ3CGHl44_0\tperson\nZHrrW673jzQ_1\tperson\nZHrrW673jzQ_0\tperson\nZHrsTuxP7aI_1\thorse\nZHu6CNOlw3g_0\tcow\nZHu6CNOlw3g_1\tcow\nZHxx4jT0QY8_0\tperson\nZH1tP4KBq4c_0\tgiraffe\nZH5HXdNA_Vg_0\tperson\nZH-X6nu5grI_33\thorse\nZH-X6nu5grI_2\thorse\nZH-X6nu5grI_3\thorse\nZH-X6nu5grI_6\thorse\nZH-X6nu5grI_7\thorse\nZH-X6nu5grI_8\thorse\nZH_6GNzE7AE_0\tperson\nZIAnd6kIMac_0\tbird\nZIAnd6kIMac_1\tbird\nZICz-o8kLz0_0\tskateboard\nAGx9YQ6C-6o_7\tcar\nAG1KXUn4YG0_0\tperson\nAG_bCNeWGbQ_0\telephant\nAHARpIfT490_0\tdog\nAHIF--VOeQs_0\tperson\nAHJcPNPqKmI_0\thorse\nAHKFqtjfRZA_2\tbear\nAHLL47_EdEA_1\tperson\nAHLL47_EdEA_0\tperson\nAHNC2jifaeA_1\tairplane\nAHQLEaBATbw_0\tperson\nAHQW1ru8IzY_0\tairplane\nAHQrFFp5yq4_0\tairplane\nAHiwgwMi8HU_0\tdog\nAHjEWaIP4Us_0\tcow\nAHkvSb7kMDQ_0\tperson\nAHn7KxEbpSw_0\tperson\nAHvhccaU6e0_0\tbus\nAHx-m9m2WSM_0\tperson\nAIAtwCnT8D0_1\tperson\nAIBVp_3pm4U_1\tperson\nAIBVp_3pm4U_0\tperson\nAIFwUvUUIAU_1\tperson\nAIPKb-NMVjk_0\tairplane\nAIPKb-NMVjk_3\tairplane\nAIVpT8BRXaQ_1\thorse\nAIYDjtWzamM_0\tbear\nAIYDjtWzamM_1\tbear\nAIZGolX95Do_0\tperson\nAIbvvs9Mppk_0\tperson\nAIduTWoo-tY_0\tskateboard\nAIeFzUH7L38_1\ttrain\nAIkHZuaZGZc_1\telephant\nAIkHZuaZGZc_2\telephant\nAIpwAHaTBsI_0\ttrain\nAI00Hva5A8g_0\tperson\nAI38cuNcfsE_0\tknife\nAI73dwp8OlI_1\ttrain\nAJAy74dPvNA_0\tperson\nAJCXZxF7mEU_1\tskateboard\nAJDMiWpRbdY_0\tperson\nAJILdTCo1mA_0\tdog\nAJKXpUsj3I0_0\tbird\nAJRdbCnFyVo_0\telephant\nAJTfeXepoNQ_0\tbus\nAJZ65x_ashE_0\tairplane\nAJaOK6nLWLU_0\tperson\nAJaOK6nLWLU_1\tperson\nAJaOK6nLWLU_2\tperson\nAJh6EhObuEU_0\tperson\nAJiQZJH_ZsU_0\tbird\nAJiYw7-oCvA_1\tknife\nAJiYw7-oCvA_2\tknife\nAJiYw7-oCvA_0\tknife\nAJkWw2b2Qjg_0\thorse\nAJor90pfjM8_0\tcow\nAJtuQLfNvSs_0\tcat\nAKBoEjrtQwE_1\ttrain\nAKDi2KVrR1Q_0\tskateboard\nAKIcyYzL9C0_0\tcat\nAKMl62ZFICw_3\tbus\nAKMl62ZFICw_1\tbus\nAKN6nvHB7P0_2\tairplane\nAKN6nvHB7P0_3\tairplane\nAKPDvaUNx94_1\thorse\nAKPDvaUNx94_2\thorse\nAKVUSpeg9Jk_0\tknife\nAKxpzCrmsi8_0\tbus\nAK4AJfDZfEo_0\tcat\nAK64udGI1BA_0\tumbrella\nAK8imx-InYk_1\thorse\nAK8imx-InYk_2\thorse\nAK_J57sNeeo_1\telephant\nAK_0-KHw9wc_1\thorse\nALCj6V-0pU8_0\tperson\nALKBlOms7sk_0\ttruck\nALLYkPepYRc_0\ttrain\nALRR_HHP500_0\tperson\nALRzJ2FzEoY_0\tperson\nALYKJChPG6k_0\tknife\nALjxXEqJFTg_0\ttrain\nALpnjTPWIN4_0\tbird\nAL73oE_aovA_2\tbicycle\nAL73oE_aovA_3\tbicycle\nAMDjY36EpsU_0\ttruck\nAMEZhZVe7hk_0\tperson\nAMEZhZVe7hk_1\tperson\nAMI4Xu1mmNw_0\telephant\n
AMZeyszxY78_0\tknife\nAMn7aithVV8_0\tcar\nAMz8PhUkmpM_0\thorse\nAMz8PhUkmpM_3\thorse\nAMz8PhUkmpM_7\thorse\nAMz8PhUkmpM_2\thorse\nAMz8PhUkmpM_5\thorse\nAM5_HQ705r4_1\tgiraffe\nAM6sweCILPU_0\tairplane\nANHdxFi36CM_1\tbird\nANNbcEcj8Do_0\tperson\nANQZ1MB6gI4_0\tskateboard\nANVkluf6XZA_0\tcat\nANWtZTJoYYc_0\tdog\nANZDRJnX_Os_0\tperson\nANlhuKqnObE_1\tperson\nANlhuKqnObE_0\tperson\nANmJ_3l01rw_2\thorse\nANmJ_3l01rw_3\thorse\nANmkxc2V7qQ_0\tperson\nANufFQ7Fqao_0\tcar\nANufFQ7Fqao_1\tcar\nANvWNG7bZj0_0\tperson\nANwXehjlmOU_0\tgiraffe\nANwXehjlmOU_2\tgiraffe\nANwXehjlmOU_6\tgiraffe\nANwXehjlmOU_7\tgiraffe\nAOFbvqQZz1M_0\tperson\nAOJiO3o1Pgw_0\tperson\nAONi1Rhl0VI_2\tperson\nAONi1Rhl0VI_1\tperson\nAOmvm3OOZZQ_0\tperson\nAOn9I3GEHoU_0\tperson\nAOo1qXfZWsc_0\tbus\nAOq0zSQhX1E_0\tperson\nAOq0zSQhX1E_1\tperson\nAO9zthhr-og_0\tperson\nAO9zthhr-og_1\tperson\nAPAgxsDsZqs_0\tperson\nAPCppiM1SL4_0\tperson\nAPEd6F66jXU_1\tairplane\nAPHhGoshqFo_0\tumbrella\nAPIrIPchQwg_1\tperson\nAPIrIPchQwg_0\tperson\nAPJ4_CEV8HQ_0\tbus\nAPLJsXaOe1c_0\tperson\nAPQ99QCF6pA_0\tperson\nAPRuUBgcBZc_1\tperson\nAPYAGnOjUQQ_0\tperson\nAPa_Xoa9qgg_1\tmotorcycle\nAPcliMIvBe4_2\tperson\nAPcliMIvBe4_0\tperson\nAPcliMIvBe4_1\tperson\nAPp-0CsKxpY_1\tperson\nAPp-0CsKxpY_0\tperson\nAPqdtMhtWlU_0\tmotorcycle\nAPtqUIS_Hyo_0\tperson\nAPwqoNNZyaA_0\tperson\nAPyVeEcEt1U_0\tairplane\nAPyxRCm1XlY_0\tperson\nAP5QrGcnGoU_0\tcow\nAP_vNEBzhqM_0\tperson\nAQALHMjkeh0_1\tgiraffe\nAQKHDJ9HKck_0\tdog\nAQNEkyvgbeA_1\tcow\nAQRKvHpsUk8_0\tperson\nAQTk87BXkxk_0\tperson\nAQVhyDD8GEk_0\tperson\nAQVthZjIETQ_0\ttruck\nAQcg3TVkW1s_0\tperson\nAQcg3TVkW1s_1\tperson\nAQi0YSJ74cw_0\tperson\nAQj3enGQQeE_0\tboat\nAQminPRA2W8_0\tperson\nAQtIgG8RHRY_0\tperson\nAQvltP0EarU_0\tperson\nAQy7gL42wfo_0\tairplane\nAQzJp7Qi_yA_2\telephant\nAQzJp7Qi_yA_13\telephant\nAQ2bfY90nuU_0\tperson\nAQ7YDkmwB4M_0\tdog\nARAX6-JmsNQ_0\tzebra\nARAX6-JmsNQ_2\tzebra\nARFd2qxDhpQ_0\tairplane\nARNkmINZamQ_0\tcow\nARNkmINZamQ_1\tcow\nAROrQJq2sWY_0\tperson\nARRADkl3-30_0\tperson\nARW5DipSrBo_0\tdog\nARmfFWE2ruc_0\tperson\nARmsnBnMyPc_0\tperson\nARnGZQm8zOM_0\ttruck\nARqQUEVhu24_0\tperson\nARrbFDLoy0Q_1\tperson\nARtGNhHj2NU_0\tcat\nARyGQdkbuyM_0\tperson\nARyGQdkbuyM_1\tperson\nASBgE1svBKQ_0\tperson\nASD516fNs3g_0\tperson\nASExrIzixaM_0\ttruck\nASc0m6oxXVI_0\tperson\nASc0m6oxXVI_1\tperson\nASm_mkHCybA_0\tcat\nAS1xCm7MYs8_0\tperson\nAS1xCm7MYs8_1\tperson\nAS2tsNB9LBI_1\tknife\nAS5hg_3pOXM_0\tperson\nAS9kBpj7qvE_0\tperson\nATKytgCulZM_0\tumbrella\nATakdxmz3qU_0\tcar\nATkJNKtd8yo_0\tperson\nATk9e0fbxBk_0\thorse\nATk9e0fbxBk_1\thorse\nATk9e0fbxBk_2\thorse\nAT1zSxV6stw_0\tcat\nAT5urL0Fr0c_0\tbird\nAUGQ4XFEkGY_3\tknife\nAUI-RsDtk4s_0\tperson\nAUMHV6JiwU0_0\tbird\nAUZevw68t_s_0\tbear\nAUcOQ1L4Nj0_0\ttrain\nAUfaVvy5QxU_0\ttrain\nAUguk_8JO_U_0\tskateboard\nAUgw-t2MrtU_0\tperson\nAUzge-cBHfM_0\tbear\nAU0RtWdAXcU_0\tperson\nAU114x-Qif4_0\tperson\nAU3mKa0Npq4_0\tperson\nAU8GXMxyP9U_0\tperson\nAVHVVt5Srow_0\tbear\nZIGThAlQuUU_1\ttruck\nZIGkCx4o3G0_0\tperson\nZIMLdoIIFbg_0\tperson\nZIWkcVTlaRU_1\tperson\nZIamYwe-hJ8_0\tcar\nZIawXDt6JH4_0\tcat\nZIlyoSrDQQ8_0\tperson\nZImLYekhFBQ_3\tbus\nZI6J2WSiZy0_0\tgiraffe\nZI7DX2OSzzQ_0\tairplane\nZJCSQFa1W3M_0\tperson\nZJDAzZZQ38k_1\tknife\nZJDAzZZQ38k_0\tknife\nZJEQHkA9NLw_1\ttruck\nZJHeFXEtwNE_0\tknife\nZJJoit687Tc_0\tperson\nZJJpIPciUts_2\tskateboard\nZJL9WONxDB8_0\tperson\nZJMJBrWq8-o_0\tperson\nZJOVhmSGVMM_0\tperson\nZJXuyIEaSc4_0\thorse\nZJYXcUOxNRc_1\tperson\nZJdKrkzHR94_0\tperson\nZJdKrkzHR94_1\tmotorcycle\nZJe2QoJwNa0_0\thorse\nZJimYyH6VUI_0\tcar\nZJoQRLyRs8o_0\tperson\
nZJpozi2Piqc_0\tmotorcycle\nZJwWllfPFjo_0\tperson\nZJyDrvmQwY8_0\telephant\nZJyDrvmQwY8_1\telephant\nZJ5n1Y-yXqM_0\tperson\nZKF4kfqyu6U_0\tperson\nZKIuqz6GDSA_0\thorse\nZKJuI7-4560_0\tcat\nZKKalWR8MBM_0\tboat\nZKSF-y6kC1I_0\telephant\nZKSF-y6kC1I_1\telephant\nZKTseP8JqIw_0\tperson\nZKk703iOFmY_0\thorse\nZKrJdHuvvR8_0\tperson\nZKy67yESvjM_0\tperson\nZK1zKp1iJY4_5\telephant\nZK1zKp1iJY4_2\telephant\nZK3-Em8w4HE_0\thorse\nZK6pkPtSd_4_0\tcow\nZK_BL_TGwo0_1\ttrain\nZLFXKnOp0LM_1\tknife\nZLH6HbQ5Miw_0\tperson\nZLSqYLLWQLc_1\tcow\nZLSqYLLWQLc_2\tcow\nZLcGyr4ZfJU_1\tairplane\nZLdb8-YkoiY_0\tperson\nZLm8Hen6OFM_1\tbicycle\nZLm8Hen6OFM_2\tbicycle\nZLnf4vSxfgo_1\tumbrella\nZLqSGXI7FdM_3\tknife\nZLuY9hS-wd4_0\tbus\nZLuY9hS-wd4_1\tbus\nZLuY9hS-wd4_2\tbus\nZLupIiWNPOY_0\tperson\nZL18xmfIKH4_1\tmotorcycle\nZL18xmfIKH4_3\tmotorcycle\nZL18xmfIKH4_2\tmotorcycle\nZL3DgidLXjw_0\tperson\nZL5SCZpZWtA_1\thorse\nZL-60We4drw_0\tdog\nZMDe7QMaLa8_0\tperson\nZMD2tP69gaU_1\tperson\nZMKFhrS_QnY_0\tcow\nZML6VoRZ_Tk_0\tperson\nZMMDA6nYXZs_0\tbird\nZMPdl-1FCMQ_0\tperson\nZMZU_V7d3-I_1\tumbrella\nZMa0bYeg_NE_0\tdog\nZMdAlm9Zx_A_1\tcar\nZMeQ1Vc3HZk_0\tperson\nZMuwZKOfK1s_0\tmotorcycle\nZMvdpTH-1Ug_9\tairplane\nZMxu4wRDuqU_1\tperson\nZMyEEXdgJeA_0\tperson\nZM1xadWQqKQ_0\tbus\nZM2SMTrxUr0_0\ttrain\nZM3QVkm1izg_0\tperson\nZM5-iyB8rFk_1\tdog\nZM_TO-0UDp4_0\tperson\nZNJ8aytwo1E_0\tperson\nZNP23sy27W0_0\tperson\nZNTqZ3wERJE_0\tperson\nZNUBh1ppeyo_0\tskateboard\nZNXCWGzmxK8_0\tperson\nZNZx7hTxCQE_0\tairplane\nZNaTV3nGl6M_0\tperson\nZNcUW5m7eRw_0\tgiraffe\nZNg9OZgsMqc_0\tbear\nZNoQrAOf3Ns_0\ttruck\nZNqpyPcacjY_0\tmotorcycle\nZNv_LrEIljc_0\tumbrella\nZNxw9kVCouU_0\tbus\nZNzeI_r7GT4_0\ttruck\nZN2bt7wkvH0_1\tbear\nZN5ukEMKLY4_0\tcow\nZN_gFe4IzxE_0\ttruck\nZODUj9lsCzk_0\thorse\nZOEa1JGwnwE_0\tperson\nZOEa1JGwnwE_1\tperson\nZOGP8-XsFYc_0\tperson\nZOIuTsiGyRY_0\tbird\nZOJSvR5KOsE_0\tdog\nZOMPRnYycak_2\tcow\nZOMnEZ4dWMk_0\telephant\nZOStUYUIEdA_0\tskateboard\nZOTSBcRwdRA_0\tperson\nZOX1xH7rOus_0\ttrain\nZOthVGHUcjo_3\tcow\nZOwhFlp5EiA_0\tperson\nZOxDsYnvl0M_0\tperson\nZOymkqw58fw_0\tperson\nZOzQfVh1LN8_1\tmotorcycle\nZO_5hZ2ex6Y_0\tperson\nZPKaBLqoKvQ_0\tperson\nZPNr3zZg6jk_1\tperson\nZPNr3zZg6jk_0\tperson\nZPNr3zZg6jk_2\tperson\nZPQ0lqiH9uw_0\ttrain\nZPQ0lqiH9uw_1\ttrain\nZPQ3tbJp33I_0\ttrain\nZPVOrRypdRM_0\thorse\nZPZjgecd6OQ_1\tboat\nZPaWYb_4S8Y_0\tperson\nZPeRU9CLLew_0\tperson\nZPgUlFmZyP4_0\tperson\nZPjN0Rp_1ZA_0\thorse\nZPkO4x8HPaI_1\tperson\nZPqs3xJ8sMY_0\tperson\nZPqs3xJ8sMY_1\tperson\nZPq9qgTZ4XI_0\ttruck\nZPyxQD17Fq4_2\tperson\nZPyxQD17Fq4_4\tperson\nZP7SN9kW5kg_0\tperson\nZP7sET2Y9dU_0\tperson\nZP8YaHDM_qE_0\thorse\nZQCFPzE41bg_0\tcow\nZQDoAEWZCQk_0\tperson\nZQG5CpZ3fLM_0\tperson\nZQRzkpfy378_0\tbus\nZQZRNVrE9hk_0\tperson\nZQarE1lLDl4_0\tperson\nZQdhjMVGJrk_0\tperson\nZQdhjMVGJrk_1\tperson\nZQmTc5C-h8w_0\tperson\nZQrMMWQidx0_0\tperson\nZQuVUoqiT_I_0\tgiraffe\nZQuVUoqiT_I_1\tgiraffe\nZQ3LAYCIDf8_3\tbear\nZQ8X2cqYANs_0\ttrain\nZQ9G0UkTR1c_1\tperson\nZQ_vGl5xbKY_0\tcat\nZRFMzM7kxuI_3\tcow\nZRFMzM7kxuI_0\tcow\nZRFMzM7kxuI_1\tcow\nZRFMzM7kxuI_2\tcow\nZRLkkoSR8o8_0\tknife\nZRMOgw0VYRI_0\tperson\nZRNQrzQlVwA_0\tperson\nZRNgdckx504_0\tperson\nZRQug2qT1tc_0\tperson\nZRSRBBpyBG8_0\tperson\nZRXjiNMKvis_0\tairplane\nZRc8GDK_9hc_1\tumbrella\nZRkHgC0EAz8_0\tperson\nZRmkeBogj-U_0\tperson\nZRoz_bGkPaE_0\tperson\nZRuQ3ipcK3o_0\tbus\nZRzOWgIAwe8_0\tbird\nZRzOWgIAwe8_3\tbird\nZR0Qj5P8snw_1\tbear\nZR4yO1ASDwo_2\tperson\nZR_VWPjxLTU_0\tdog\nZSDCxbSs-Hs_0\tperson\nZSFzv92w5z4_0\tmotorcycle\nZSGJwERlcvM_0\tperson\nZSXoUfKY7t8_0\tperson\nZSdzUC2BB8Q_0\
ttrain\nZSdzUC2BB8Q_1\ttrain\nZSkkNWgXm6E_0\tskateboard\nZSkkNWgXm6E_1\tskateboard\nZSn4gRAJToo_0\tcat\nZSoJT194AtI_1\tskateboard\nZSoJT194AtI_0\tskateboard\nZSruK26cGuI_0\tdog\nZSs6Knma-Q0_0\tcow\nZSs6Knma-Q0_1\tcow\nZSu3GocMJzI_0\tcar\nZS29l3t9vK8_0\tperson\nZS6NQXztroI_0\tperson\nZS_wuZnVzbw_0\tperson\nZTLDJDjvSuQ_0\ttruck\nZTPTnzEs_Lc_0\tperson\nZTcRmNM1n8M_0\tperson\nZTjOZ-dZDEg_1\tcar\nZTmHHCmX7aw_0\tskateboard\nZTnEKCqMNHs_0\tperson\nZTo33r_63Wg_0\tknife\nZTw6Dkp-LPU_7\telephant\nZTw6Dkp-LPU_0\telephant\nZTw6Dkp-LPU_4\telephant\nZTw6Dkp-LPU_5\telephant\nZTw6Dkp-LPU_6\telephant\nZT5iwG3vEhM_0\tumbrella\nZUCf2cVBY08_0\tperson\nZUWSpLaJj4M_0\tbird\nZUYtIKrcaKo_0\tperson\nZUaHjAaQqF0_0\tbus\nZUdCQl7WU_U_1\tperson\nZUdCQl7WU_U_0\tperson\nZUd0IAbilBA_0\telephant\nZUoFqGf_ijs_0\telephant\nZUoJFmQ6ro4_0\tperson\nZUwniKcHERQ_0\thorse\nZU0WSpOWSak_1\tbear\nZU0_sT3EbVY_0\tzebra\nZU9LGiLzKJg_0\tmotorcycle\nZU-ZhVyhBpA_1\tbicycle\nZVAHreexSa0_0\tperson\nZVBjo5HM0Do_0\tknife\nZVD-ea5SjMg_0\tperson\nZVJpmiue5IA_0\ttruck\nZVKyUsgomW4_0\tperson\nZVOMkt8TORM_0\ttrain\nZVQo_9tFZGY_0\tbus\nZVY_873YYQY_0\tskateboard\nZVZJRbJ2h1A_0\tcat\nZViLnbCdjZM_1\tperson\nZVlOetMc3m4_0\tperson\nZVl8So4V1Ss_0\tcat\nZVnaHf8vAhA_0\tzebra\nZVtPRAs8Za0_0\tperson\nZV8NIO3XuLQ_0\tperson\nZV9eJe2grq4_1\tbear\nZWIPlBvd1DI_0\tperson\nZWIPlBvd1DI_1\tperson\nZWJv_-wAdws_1\tskateboard\nZWKHlq-W7_8_9\ttrain\nZWKHlq-W7_8_14\ttrain\nZWKHlq-W7_8_0\ttrain\nZWKHlq-W7_8_1\ttrain\nZWKHlq-W7_8_4\ttrain\nZWKHlq-W7_8_7\ttrain\nZWKHlq-W7_8_10\ttrain\nZWKHlq-W7_8_11\ttrain\nZWKHlq-W7_8_12\ttrain\nZWKHlq-W7_8_13\ttrain\nZWNe-zcl-IY_0\tboat\nZWNjUm5Uzh0_1\tbicycle\nZWNjUm5Uzh0_5\tbicycle\nZWXE7IAaWrg_0\tperson\nZWXSnELtawA_1\tknife\nZWXSnELtawA_3\tknife\nZWX1cGhJG98_0\tbicycle\nZWlTD6EbOTo_0\tperson\nZWqzdCz6UvY_0\tbird\nZWr6RECjqV0_1\thorse\nZWr6ZU_-ir4_1\tperson\nZWthtO1iGtQ_0\tperson\nZWwlzozPAk8_0\tperson\nZWxn8yT0bXo_0\tcow\nZW0HC4IRa64_0\tperson\nZW3CWoXzrn4_0\tbicycle\nZW3CWoXzrn4_1\tbicycle\nZW5VkDNSfWA_0\tcat\nZXMqiFE6KOE_0\tairplane\nZXRcWIcok2I_0\tperson\nZXgYAh2AWyk_0\thorse\nZXp6jOe8DUE_0\tperson\nZXyJafbGcBM_0\thorse\nZXzno8CjUyM_0\telephant\nZYB9yzoJ6jc_0\tperson\nZYG83auB9Lk_0\ttrain\nZYIgTdUmOWk_0\telephant\nZYKlgXftesk_0\tcow\nZYM0_4YzeeQ_0\tperson\nZYRgw5rNhE4_0\tperson\nZYS7WVlJbuU_0\tperson\nZYX53PWsBdk_0\tperson\nZYY8vkvB1zU_0\tperson\nZYkIkq9kfLc_0\tdog\nZYlANECCXnI_0\tperson\nZYocOIOyuqs_0\tperson\nZYsifQxv94s_1\tmotorcycle\nZYs7rbZt8Zw_0\tairplane\nZYs7rbZt8Zw_1\tairplane\nZYs7rbZt8Zw_2\tairplane\nZYtk2iVNC90_2\tairplane\nZYtk2iVNC90_0\tairplane\nZYxn9wmzRI4_0\tbicycle\nZYxn9wmzRI4_1\tbicycle\nZYzeKMdP2SE_0\tperson\nZYz6B5dwXcE_0\tperson\nZY_urkqeQLM_0\tbicycle\nZZANjG2Z5Jk_0\tperson\nZZFzCaL48sE_0\tcow\nZZNRG-ux4fw_0\tperson\nZZQDFjbEcHQ_1\tbird\nZZQDFjbEcHQ_2\tbird\nZZQSDwoLZ00_4\tknife\nZZSFKq4WH78_0\tcat\nZZVPKuh-2v8_0\tperson\nZZVx_IT4voA_0\tperson\nZZlf3LtDpH8_1\tbear\nZZpLkBcXUgs_1\tperson\nZZpLkBcXUgs_2\tperson\nZZxMtMlV-MM_0\tcow\nZZyW-2jZcIo_0\thorse\nZZyW-2jZcIo_1\thorse\nZZ20JXRExdg_0\tperson\nZZ8OuI39UTM_1\tperson\nZZ85EAvnAGU_0\tperson\nZZ85EAvnAGU_1\tperson\nZaDVUoq6h5o_1\tperson\nZaD5V9_Vw2w_0\tperson\nZaJb3JTan7Q_0\tperson\nZaLqPrH_aVo_0\ttrain\nZaLqPrH_aVo_1\ttrain\nZaNZV-lM-3o_0\tperson\nZaNZV-lM-3o_1\tperson\nZaPC288yVBg_1\tbicycle\nZaPC288yVBg_5\tbicycle\nZaPC288yVBg_7\tbicycle\nZaPltFe0S_o_1\ttruck\nZabt7ElK3jM_0\tperson\nZacHdhX9F9M_2\tdog\nZadGgAG3PzE_0\tperson\nZaew_bHz-PQ_11\tumbrella\nZaflj5gSZEw_0\tperson\nZanT0hXyJhk_0\tbird\nZavCWamLatc_2\tperson\nZavCWamLatc_1\tperson\nZa4BYhhaFFQ_1\tzebra
\nZa6oX4aQR34_0\tairplane\nZbB-tdDvITQ_0\tmotorcycle\nZbDu8V7ppZE_0\tmotorcycle\nZbHt1sn7oTI_0\tperson\nZbJvtTVTTV8_0\tknife\nZbQXzueqj4Y_0\thorse\nZbgfg8usx-k_0\tperson\nZbgfg8usx-k_1\tperson\nZbm5_qB8fEs_0\tperson\nZbrJHC_mHlo_1\tperson\nZbrJHC_mHlo_2\tperson\nZbrJHC_mHlo_0\tperson\nZbrqZYGiMvE_1\tcow\nZb2Vz655gh4_2\thorse\nZb755JeGMpU_2\tperson\nZb-JKfQ5emU_1\tperson\nZb-JKfQ5emU_2\tperson\nZb-JKfQ5emU_0\tperson\nZcJPap_gVyo_0\tperson\nZcXA6CyQBi8_0\tcat\nZchU4DxP5A8_0\tperson\nZcw7wSfd2JM_0\tperson\nZcw7wSfd2JM_1\tperson\nZdElKzM-US0_0\tumbrella\nZdKO1sC4o60_0\tperson\nZdMbx0IXDzs_0\tperson\nZdMm6j__cQM_8\tbicycle\nZdTZrRX0dv4_0\ttruck\nZdXrQlOU7iw_1\tbicycle\nZdaFXJzLLUs_0\tperson\nZdaFXJzLLUs_1\tperson\nZdeTj7nyN-s_0\tboat\nZdevf1MbY8U_0\ttrain\nZdevf1MbY8U_1\ttrain\nZdevf1MbY8U_2\ttrain\nZdirtQF_sjE_0\tperson\nZdlnVpHrDcg_0\tgiraffe\nZdlnVpHrDcg_2\tgiraffe\nZdq2csZeJr8_2\tperson\nZdrk4yHmMXA_0\tperson\nZdtUPHscS-s_0\tperson\nZdxD4gqVioQ_0\tcat\nZdxHWwaivLc_0\tcow\nZdyBZtlMq-M_2\tbear\nZd3j0bQV6NI_0\tperson\nZeHLf0q4Z1Q_0\tperson\nZeZAZbMg1zY_0\tperson\nZeaoaXZDhPw_0\tperson\nZemOY1F1bVo_0\ttruck\nZemOY1F1bVo_3\ttruck\nZemOY1F1bVo_1\ttruck\nZerHfx3SLxU_0\tperson\nZerYXYTyhoc_0\tperson\nZetcbIDyydg_1\tcar\nZetcbIDyydg_0\tcar\nZeuqVhpsVu0_0\thorse\nZe6GIOUVxZU_0\tperson\nZe8W47hBrrE_2\tskateboard\nZfAFALQjUwI_2\tperson\nZfAFALQjUwI_1\tperson\nZfAM39o5Cbc_0\tbird\nZfDkxwMowSk_4\telephant\nZfF5Z0hrOQw_0\tperson\nZfHSyDaLaw0_0\tairplane\nZfHSyDaLaw0_2\tairplane\nZfHSyDaLaw0_1\tairplane\nZfJvZeaN7Ro_1\tperson\nZfTTW39iHJQ_0\tperson\nZflcz9EKz4g_4\telephant\nZflcz9EKz4g_1\telephant\nZflcz9EKz4g_2\telephant\nZfmwrq2aghI_0\tperson\nZf86HoPHmBs_1\tbird\nZf86HoPHmBs_0\tbird\nZf-rSx5ZNB8_0\tperson\nZgK0Y4PgWSM_0\tperson\nZgOr7facaIw_0\tskateboard\nZgP7q-rIhs0_1\tperson\nZgTDthFY-aI_0\tbird\nZgZ18HIfCGc_1\tmotorcycle\nZggirLBvHSw_0\tdog\nZgjspuwgTAc_0\tperson\nZgtG8Zy63UQ_0\tperson\nZg18GZ5OFWw_1\tperson\nZg2YrzGNuZs_0\tperson\nZg4f2iY8_zo_1\tcat\nZg4f2iY8_zo_0\tcat\nZg5MdsCXRWM_1\tcow\nZg5MdsCXRWM_0\tcow\nZhLB-laOg_g_9\tbicycle\nZhLB-laOg_g_3\tbicycle\nZhLB-laOg_g_5\tbicycle\nZhLB-laOg_g_6\tbicycle\nZhLB-laOg_g_10\tbicycle\nZhLB-laOg_g_12\tbicycle\nZhPafr5WTEs_0\tperson\nZhtgT8q5Gm4_0\tperson\nZhtr_XhO6_4_0\ttrain\nZhtr_XhO6_4_1\ttrain\nZh6QWGGQ9dU_0\tperson\nZiJFOBVGah4_0\thorse\nZiPO1UcM3IY_0\tdog\nZiP2ydBHuPs_2\tperson\nZiSl_Dy1ZB4_0\tperson\nZibk3bXvHCY_0\tcat\nZig1VrVbQc0_0\thorse\nZimvCFcji0A_0\tperson\nZisoM7y_CS4_0\tperson\nZitUYI22J54_1\tknife\nZitUYI22J54_0\tknife\nZi1etYbSUmQ_1\tperson\nZjCbmE2jLo4_0\tperson\nZjFb1VLHvyg_1\thorse\nZjPmZ4grIFA_0\tperson\nZjPmZ4grIFA_1\tperson\nZjQqfJ1Docg_0\tperson\nZjQ9lIlCehk_0\tskateboard\nZjSloqSrfWU_1\tairplane\nZjSloqSrfWU_3\tairplane\nZjWBw4tZUO4_0\ttrain\nZjWBw4tZUO4_1\ttrain\nZjWBw4tZUO4_2\ttrain\nZjWBw4tZUO4_3\ttrain\nZjWBw4tZUO4_4\ttrain\nZjWBw4tZUO4_5\ttrain\nZjWBw4tZUO4_6\ttrain\nZjbhM1ZiKW8_0\tperson\nZjbhM1ZiKW8_2\tperson\nZjcEfOHRyLQ_0\ttruck\nZjcevqmMJvY_0\tperson\nZjgTSjb7Vh4_1\tcar\nZjnaerD1MHM_0\telephant\nZjn6uD43ewg_4\tairplane\nZjn6uD43ewg_5\tairplane\nZjn6uD43ewg_1\tairplane\nZjn6uD43ewg_2\tairplane\nZjpmS5k09Ug_1\tperson\nZjpzw1n9Lvc_0\tskateboard\nZjsEX7nNYdQ_0\tperson\nZjxiHzcXOAs_0\tperson\nZjxiHzcXOAs_1\tperson\nZj2HBun9kBY_0\tperson\nAVW26zY72Ns_0\tperson\nAVXWb0s5LZw_0\tperson\nAVqCe7X9Pp4_0\tcow\nAVragVmWr8M_0\tmotorcycle\nAVvnZ-Ky-ew_0\tperson\nAV9y4LnUV84_0\tdog\nAWAQTemnBJc_0\tperson\nAWCUoghX20A_0\tcow\nAWD_KAfvb0U_0\tskateboard\nAWOhJ9RZReg_0\tperson\nAWOhJ9RZReg_1\tperson\nAWPNd7zPJzg_0\tperson\nAWPNd7zPJzg_1\tperso
n\nAWZt9EdU3BU_3\tzebra\nAWdKXFitdJI_0\tboat\nAWh2S4rI6kc_0\tperson\nAW1SjuoheU8_0\tcat\nAW2cvkaExG4_0\tcow\nAW8munaOGqw_0\tperson\nAW--f4fsLFY_0\ttrain\nAXB4hYQKqUw_0\tperson\nAXB4hYQKqUw_2\tperson\nAXQlwoC_K0g_1\ttruck\nAXX66Oq_RkU_0\tperson\nAXhx8hncZvA_0\tboat\nAXm0KvcIchQ_0\ttrain\nAXtXzxTXTqI_0\telephant\nAX2rS0bpAmM_0\thorse\nAX4Hsfdm-Fo_0\telephant\nAX8WoOXfJDA_0\tperson\nAX-xVtjP42Q_0\tperson\nAYLoR7L3CMs_3\tbird\nAYLoR7L3CMs_1\tbird\nAYUGoWokN_0_0\tperson\nAYYdBxTI_54_1\ttrain\nAYakvLR8aVM_0\tperson\nAYe6Wf0URgo_0\ttruck\nAYgbgSVClN4_0\tperson\nAYg1V2ol96s_0\tdog\nAYj70IRvvwI_2\tairplane\nAYj70IRvvwI_3\tairplane\nAYn-qtOy_nc_0\tperson\nAY7foLy1uok_0\telephant\nAY7foLy1uok_1\telephant\nAY-AbrJPyY0_0\ttrain\nAZHYXkv5rMk_0\tbird\nAZJsII37MPY_0\tbird\nAZMW1TyN6Z4_0\tperson\nAZQjsUm-CXk_1\tperson\nAZhH2ej_x_g_0\tperson\nAZjZ1ZSyCeE_0\tperson\nAZk4MAu-j90_0\tperson\nAZleWF5zAxc_1\tbear\nAZl3Emy9K3A_0\thorse\nAZouBTtQrtM_0\tperson\nAZpAuvQryZo_0\tperson\nAZpAuvQryZo_1\tperson\nAZ9SW8bxD3E_0\tbicycle\nAaGwVQ6UjOE_0\tperson\nAaRVwgGBmWU_0\tperson\nAaTW4oc5bBU_0\tperson\nAaZsdPwg9qg_3\tbus\nAac18k-eLZI_0\tperson\nAac18k-eLZI_1\tperson\nAac18k-eLZI_2\tperson\nAakpjcyvFSo_0\tperson\nAalaqaXsEbs_3\tumbrella\nAalaqaXsEbs_0\tumbrella\nAalaqaXsEbs_1\tumbrella\nAalaqaXsEbs_2\tumbrella\nAaoK6DPQKII_0\tbus\nAaotWWHg4eU_0\ttruck\nAaotWWHg4eU_1\ttruck\nAaotWWHg4eU_2\ttruck\nAasksRmCk1g_0\tperson\nAatNkWo2ryE_0\tperson\nAa0FU2EIMZ4_0\tbird\nAa-wzDtjCGc_0\tperson\nAa_biYfYp08_0\tperson\nAbEsU9EX9XQ_0\telephant\nAbEsU9EX9XQ_2\telephant\nAbO_VrlyQ8I_0\tumbrella\nAbTxhwSueZw_0\tperson\nAbd7Vn-Nyt8_1\ttruck\nAbeOAFhMXBY_1\tbird\nAbeOAFhMXBY_2\tbird\nAblKd4XIjqk_0\tperson\nAbmnNkzkXFg_0\telephant\nAbmnNkzkXFg_1\telephant\nAbuMVYzS0mw_0\tskateboard\nAbvoOuTpLtA_0\tdog\nAbwI4m0H9Hk_2\ttrain\nAbx126RTs10_1\telephant\nAb9zgKJnr9Y_1\tperson\nAb-vGS2mqFQ_0\tcow\nAb-vGS2mqFQ_1\tcow\nAcCU5YAWXlw_0\tdog\nAcReGpoHOZI_0\tperson\nAcSmnBYhEsg_0\tperson\nAcTgPRNars0_1\ttruck\nAcUEWZRPoGA_0\tumbrella\nAcZNiBe0Fgo_0\tperson\nAcZukbBG7tI_0\tboat\nAcc1yTFpH2c_0\tdog\nAcpBKywfL4o_1\tcat\nAcpOxyI_YPI_0\tperson\nAcprJcYvkbY_0\tperson\nAdDiiRHwZ2E_0\tcat\nAdEH-oHs1Qo_3\ttrain\nAdEiQT7Nm0o_1\tmotorcycle\nAdE2jnpk6AM_0\tboat\nAdbsyVjq_Xs_0\tcow\nAddL-M622TI_0\tknife\nAdgTVbi_kus_0\tperson\nAdsPsjswSGQ_0\tmotorcycle\nAd044xbRhE8_0\tperson\nAd2TSmaLvX8_0\tperson\nAeDfdgrccVw_0\tperson\nAeHbZ3U8S8U_2\ttrain\nAeWBkNuJmEA_0\ttruck\nAeWBkNuJmEA_3\ttruck\nAeWBkNuJmEA_4\ttruck\nAeWBkNuJmEA_5\ttruck\nAeakbNNwcW0_0\ttrain\nAec4uweTSes_2\tskateboard\nAeflYi3Sxss_0\tperson\nAegDGWXkWNw_0\tperson\nAenVUPH1ils_0\tbird\nAendE1XHSps_0\tbicycle\nAerUXP3Mmks_0\tperson\nAe5qWkNt6RU_2\tcar\nAe7ucKj40mw_0\tdog\nAe9Zd3lP7bg_0\tperson\nAfHkdkvxhNs_0\telephant\nAfNCSPijpao_0\tperson\nAfNGR5iEpvU_0\tcat\nAfNtKiB_rD8_1\tmotorcycle\nAfWHElsVCyM_0\tcow\nAfWfexnwsHg_0\tperson\nAfWfexnwsHg_1\tperson\nAfkKO6j4jWc_0\tperson\nAfmMpft13ZU_0\tperson\nAfnQoNimSjc_0\tperson\nAfynslRqwxI_0\tcar\nAfz2VDV4UHg_1\tperson\nAfz2VDV4UHg_0\tperson\nAf2MGhdZAn8_0\tperson\nAf2VyQEZtfk_0\tperson\nAf6Ve26JUOg_0\tperson\nAgBaUhTbzxA_0\tairplane\nAgBaUhTbzxA_4\tairplane\nAgBaUhTbzxA_5\tairplane\nAgBaUhTbzxA_3\tairplane\nAgBaZRmz8IY_0\tskateboard\nAgJCf77qxsY_0\tperson\nAgP2HoU83S4_4\tknife\nAgYhFemsFag_0\tperson\nAgZ2iflIKWc_1\tperson\nAgaetfTOzc8_0\tperson\nAgdrEW8jmw4_0\ttruck\nAgqmhFD0R94_2\telephant\nAgqmhFD0R94_3\telephant\nAgqmhFD0R94_1\telephant\nAgrKeQXSU2M_0\telephant\nAgrKeQXSU2M_1\telephant\nAgrKeQXSU2M_2\telephant\nAgtCW50wfig_0\tperson\nAgvxdVNj5Oc_0\tskateboard\nAgw5t7
YSQbE_0\tskateboard\nAhAW4UKPzz0_0\tgiraffe\nAhE2vDF6Gbc_0\thorse\nAhE2vDF6Gbc_1\thorse\nAhjsDq9fEzQ_0\tperson\nAhv2jhPqRPg_0\tperson\nAhwGPZWtf3E_0\tperson\nAhxq6Rtu3lc_0\tperson\nAhx3IZujXDw_0\tbus\nAh0AGjta1qg_5\tbird\nAh04VeRs2hg_0\ttruck\nAh4x4EfR3BY_0\tmotorcycle\nAh4x4EfR3BY_1\tmotorcycle\nAiIc8FW3q98_0\tcar\nAiL_iCJ8HZI_1\tperson\nAiNLvzwt3_w_1\tbird\nAiNLvzwt3_w_2\tbird\nAiP7EOvTpK4_0\tmotorcycle\nAiP7EOvTpK4_2\tmotorcycle\nAiU_T3DZI2w_1\tbus\nAiU_T3DZI2w_2\tbus\nAieRY99VkmE_0\tperson\nAieVzbENJv0_3\tbicycle\nAiieCerOKpc_0\tperson\nAik2hirrxEo_3\tairplane\nAik2hirrxEo_0\tairplane\nAik2hirrxEo_1\tairplane\nAim6_lZQi4g_0\tperson\nAiqqXxqnPPM_1\tcow\nAiqqXxqnPPM_0\tcow\nAittR1dd2SI_0\ttrain\nAittR1dd2SI_1\ttrain\nAiv3XHMuVq8_0\ttrain\nAiyfw0Zh38k_0\tperson\nAi29fDmklxM_1\tperson\nAi29fDmklxM_0\tperson\nAi3S7n1Aofs_0\telephant\nAi-487iZv0E_0\tperson\nAjFhyF1XZw4_0\tperson\nAjJHvamHoMU_0\thorse\nAjPBAy1xgrY_0\tperson\nAjVe8d0vc1E_0\tperson\nAjamPk2Geuw_1\tbus\nAjg7q9zxJUo_0\tperson\nAjroIzI2OW8_1\ttruck\nAjroIzI2OW8_2\ttruck\nAjsu2bGngDw_1\tperson\nAjs4qdBK7Jk_0\telephant\nZkD_WAxZB3o_0\tcow\nZkHPsjy-YUQ_1\tknife\nZkbav-Qoxds_0\thorse\nZkbav-Qoxds_2\thorse\nZkbav-Qoxds_1\thorse\nZkidaaVx2VU_1\tbus\nZknqgRL504A_4\tbear\nZkqA2kLudwE_4\ttrain\nZkqA2kLudwE_0\ttrain\nZkqA2kLudwE_3\ttrain\nZku9JAotBZ0_0\tboat\nZkzM2jvV2AY_0\tperson\nZlBfF2yK2vg_1\tperson\nZlBfF2yK2vg_2\tperson\nZlBfF2yK2vg_0\tperson\nZlDsSDEHEzY_1\tcow\nZlDsSDEHEzY_0\tcow\nZlDsSDEHEzY_2\tcow\nZlFElBglnHA_0\tcat\nZlP8tmFYeyY_5\tbird\nZlfyrRfHDoc_0\tcow\nZljx0icnRa8_0\tperson\nZljx0icnRa8_1\tperson\nZlmsqen0qZo_0\tperson\nZln667JkWo8_0\tperson\nZmHKBIsSjQA_0\thorse\nZmHKBIsSjQA_1\thorse\nZmVLw9-fLDo_0\tcar\nZmbXlevaX2U_1\tboat\nZmgJjFt3JU4_0\tskateboard\nZmhKe4_d5Ag_0\tperson\nZmiCqFxUJSw_1\tairplane\nZmkKOYN1dRw_0\tperson\nZmrCaB8p3IM_0\tbear\nZmuzvhzN6EI_0\tcow\nZm3AU4TEpEw_0\tperson\nZm5VvBaQUwU_0\tbird\nZnRgQ1VBIGE_1\tperson\nZnWAM5ju8NM_0\tperson\nZne4XpVG2YQ_1\tperson\nZne4XpVG2YQ_0\tperson\nZnr-Uiobo-k_0\tperson\nZntDSf8cCPI_0\tperson\nZnvLWU_PCZ0_0\tmotorcycle\nZn-r14oEJwM_0\tairplane\nZoC1knYO0Tg_0\tcow\nZoJIup20AGU_0\tperson\nZoKfc3OL0JY_0\tperson\nZoK4wKRoZjY_0\tperson\nZoN4k6UNw6I_1\thorse\nZoOvu218D6M_0\tperson\nZoR1yoQzsbM_0\tperson\nZouHgocvjDI_0\tbird\nZo-8G7N2DXU_0\tperson\nZpAlbL-YE0E_0\tbus\nZpCrRb_a9QI_0\tperson\nZpCuVDLXQSw_1\thorse\nZpCuVDLXQSw_0\thorse\nZpSzmFLEm0c_1\tcar\nZpURI0wRgws_0\tperson\nZpXJ-0dv6Us_1\tcat\nZppFK22HdIk_0\tperson\nZpqXtZfe-3w_0\tcat\nZp1nQXN7dyg_0\thorse\nZp2CuvTAZLw_1\tperson\nZp740cgCPPE_0\tperson\nZp8GHxi_5l0_0\tknife\nZp8GHxi_5l0_1\tknife\nZqM9VL5DJ28_1\tperson\nZqOcOhiAI6k_0\tcow\nZqS1PqS3iT0_0\ttruck\nZqW027iDkCI_0\tperson\nZqXFvdeNrYI_1\tperson\nZqa0-AUnl9s_0\tperson\nZqm8A3wpeJQ_0\tperson\nZqtVs5joekw_0\tcow\nZq018zZzx1c_0\tperson\nZq1u84GLCHI_0\tmotorcycle\nZq5nK49UZ_o_2\telephant\nZq5nK49UZ_o_3\telephant\nZq5r3BwLg_c_0\tskateboard\nZq-RNCVoZFs_0\tperson\nZrA0NE09ipc_0\tdog\nZrDoGqu-A5A_0\ttrain\nZrI4ruv6B3o_0\tbird\nZrKpKmp29_o_1\tbird\nZrKpKmp29_o_3\tbird\nZrKpKmp29_o_6\tbird\nZrK5JKg83qU_0\tperson\nZrUx83OGIOk_0\tperson\nZrW7Si0hJKI_0\tperson\nZrbVa__ne-0_0\tperson\nZrfPtqkS_MY_0\tairplane\nZrfPtqkS_MY_1\tairplane\nZrfPtqkS_MY_5\tairplane\nZrfPtqkS_MY_6\tairplane\nZrfPtqkS_MY_7\tairplane\nZrgMnk8f_TA_0\tperson\nZrgMnk8f_TA_1\tperson\nZruJ2hhn9z0_1\tperson\nZrvWeRZ_dyU_1\tcow\nZrvWeRZ_dyU_0\tcow\nZrwXUWAxjIM_0\tgiraffe\nZrzdqF_ePkM_0\thorse\nZrzdqF_ePkM_2\thorse\nZr5eAtkuxQ0_0\tbear\nZr_AAxouNfg_0\tcow\nZsCaDsfPNec_0\tcow\nZsDDOO-bpFA_0\tperson\nZsDDOO-bpFA_1\tperson\nZs
ESx0nIYqI_0\telephant\nZsESx0nIYqI_6\telephant\nZsESx0nIYqI_7\telephant\nZsJCwiPEvkI_0\tperson\nZsLDBiZ0o14_0\tskateboard\nZsPVRik6m_c_1\tbear\nZsSkZhL-HOM_2\tbicycle\nZsb2ucv_mAg_0\tperson\nZsdv_3EWODM_0\tperson\nZsyMk67bjIM_0\tdog\nZs0j_1tuTDo_0\tperson\nZs1ltKMvRec_0\tperson\nZs1ltKMvRec_1\tperson\nZs79wUXMpx8_0\tbear\nZtA8n6dsH-w_4\tcar\nZtA8n6dsH-w_1\tcar\nZtA8n6dsH-w_2\tcar\nZtA8n6dsH-w_3\tcar\nZtDUifuLGrM_2\tbird\nZtEDTuHcM9U_0\tperson\nZtM6JRtVtpU_0\tmotorcycle\nZtToUMIMdYE_0\tperson\nZtlDJ70ap8Q_1\tbear\nZtlJcLPPjsg_0\tperson\nZtsGzhfZg9g_0\tperson\nZttTri7sEK4_0\ttrain\nZtyep9o6CLE_4\tbus\nZtyep9o6CLE_6\tbus\nZtyep9o6CLE_7\tbus\nZt9qKAA_xyA_0\tperson\nZuC0Jr3Y3s8_0\tcar\nZuGpcHtPLLA_0\tperson\nZuWlzE4F84c_0\ttruck\nZuhmoYvtP40_1\tperson\nZuicm6_fX9I_1\tbicycle\nZunjyc7DIP4_2\ttrain\nZuoBIQ-Kq74_0\tperson\nZuqXxaMAufU_1\tperson\nZuuL_Yi4FZQ_1\tdog\nZuuL_Yi4FZQ_0\tdog\nZuy59kV2M-0_1\tperson\nZu-vh46IwiU_0\tcow\nZu_dXJvDHdo_0\tperson\nZu_f8xuOweg_3\telephant\nZu_f8xuOweg_1\telephant\nZu_f8xuOweg_2\telephant\nZvDo2WbWL4g_1\tperson\nZvDo2WbWL4g_0\tperson\nZvJItzBdO04_1\tperson\nZvJrqHsPVL0_0\tbus\nZvSN_Y6vK3c_0\tperson\nZvV5mqJgbcQ_0\tcow\nZvfCrJvE1Tg_0\thorse\nZvfIYK-AWCw_0\tperson\nZvlx8vSlAPs_0\tbicycle\nZvtGPgtfhE8_0\tperson\nZvtuffxB5EY_0\tperson\nZvyOzgxu-4Y_0\ttruck\nZvzVi9irgvw_0\tbear\nZv6DWiKAux4_1\tperson\nZv9e9Vm6Vis_0\tmotorcycle\nZwDqCxCFpF4_0\tbicycle\nZwDqCxCFpF4_3\tbicycle\nZwH5xnh6Thw_0\tperson\nZwW6ybIP8ys_0\tbus\nZwdSYMz9ioo_0\tperson\nZwmRodW5wgg_0\thorse\nZwrtmR7ewc4_0\tperson\nZw7a69yU7f0_0\tmotorcycle\nZxAlVbDwlCc_2\tbird\nZxAuwcxhXxc_0\tperson\nZxE5MjV6i4w_0\tskateboard\nZxOVw-Lc-NI_0\tperson\nZxStkYy-wgo_0\tmotorcycle\nZxUKijmOWJc_0\tperson\nZxitXAY6Xsc_1\tknife\nZxqbwwO81Xc_0\ttrain\nZxv2BRQIWm0_4\tairplane\nZxv2BRQIWm0_5\tairplane\nZxv2BRQIWm0_7\tairplane\nZxv2BRQIWm0_8\tairplane\nAj7HWiU0iQg_0\tskateboard\nAj_E-ObfzoE_1\tperson\nAkGYKkcRyPM_0\tdog\nAkHT5Oo22rQ_0\tperson\nAkMpnm9JrLU_0\tperson\nAkWcVIeIx34_0\tboat\nAkaR-XgClv0_0\tperson\nAkaR-XgClv0_1\tperson\nAkeAdeJpbpg_0\ttrain\nAkeAdeJpbpg_3\ttrain\nAkeAdeJpbpg_1\ttrain\nAkh0VNTS6G4_0\tperson\nAkh0VNTS6G4_1\tperson\nAkh0VNTS6G4_2\tperson\nAkh0VNTS6G4_3\tperson\nAkkNBGH82Ic_0\thorse\nAknHhsIpRqc_0\tairplane\nAkxKeaxEnvQ_0\tdog\nAk3XQg9z8XQ_0\tperson\nAk8ygMb5ykk_0\tperson\nAk8y7dALcJI_0\tperson\nAlAUJSBL-e4_0\tdog\nAlNCPdpo1gg_2\tbicycle\nAlNCPdpo1gg_5\tbicycle\nAlNCPdpo1gg_6\tbicycle\nAlNCPdpo1gg_0\tbicycle\nAlNCPdpo1gg_3\tbicycle\nAlNCPdpo1gg_4\tbicycle\nAlPZeADzCKc_0\tperson\nAlPZeADzCKc_1\tperson\nAlXlVnkucyU_3\ttrain\nAlXlVnkucyU_1\ttrain\nAldX05MqOs0_0\tperson\nAleuxLN7VcU_1\tbird\nAlfbdsgKBAc_1\tperson\nAlhjN5qz_WI_0\ttrain\nAlikgfDMckk_0\tperson\nAlnIWAFamHE_0\tbear\nAltA5vQ7Icw_0\tbus\nAlzB8mXDcYc_0\thorse\nAl2hm71ia6E_0\tperson\nAl9l6-4QDz0_0\thorse\nAl9wCTPpSWM_0\tskateboard\nAmPe5gTOCTo_2\tperson\nAmPe5gTOCTo_0\tperson\nAmPe5gTOCTo_1\tperson\nAmQ_UrwLf3g_0\tperson\nAmRyW4hmSjw_0\tperson\nAmcAzvpvDRg_0\tbear\nAmeaTbvmKvo_0\tcar\nAmt8BGudD0w_0\tskateboard\nAmt8BGudD0w_2\tskateboard\nAmuX-Lv7OeM_2\tcow\nAmwvLxALyCw_0\tperson\nAm2wElVETcw_0\tcat\nAnD6ijSktyM_0\tperson\nAnEC6v3fXrE_0\tcow\nAnOwuTW7DKk_0\tcow\nAnOwuTW7DKk_1\tcow\nAnQ2ZY1JxAY_2\tperson\nAnWClR8yyu8_0\tperson\nAnZKri0xn-c_1\tcow\nAnZKri0xn-c_2\tcow\nAnb2IyxcJbk_0\thorse\nAnevw4PbqTo_0\tperson\nAnkgvW70F5E_0\tperson\nAnkgvW70F5E_1\tperson\nAn342tYqi5g_0\tperson\nAoI1hSI0PSI_2\tcar\nAoKs5jwMuHc_0\tperson\nAoP-So0vjIc_0\tcat\nAoSwFyY0f_A_0\tperson\nAoXHZgatpco_1\thorse\nAoXHZgatpco_2\thorse\nAoXHZgatpco_3\thorse\nAoXHZgatpco_4\thorse\nAo
f87CGS8NQ_1\tskateboard\nAoiCmKM8xz0_1\ttruck\nAojgueRMVCY_0\tperson\nAolLjcEFv5o_0\tperson\nAopGnIjKuEk_0\tmotorcycle\nAo0EDmBMIQk_0\tperson\nAo0EDmBMIQk_1\tperson\nAo7Iys-_lZs_0\tskateboard\nAo_b43xexzA_0\tperson\nApJMiJjCxCY_1\tcar\nApJMiJjCxCY_4\tcar\nApJMiJjCxCY_5\tcar\nApP4eoyM72g_1\tskateboard\nApWIa9pt-vk_0\tperson\nApilCZCROGI_0\tmotorcycle\nApjCOCv29N8_0\tperson\nAppgdYQTII8_0\ttruck\nAp1gZJZynL4_0\tperson\nAp-iaHj5SLk_4\telephant\nAp-iaHj5SLk_5\telephant\nAp-3HonA5go_0\tperson\nAqBYSr4wmpQ_0\tperson\nAqKP0V3Xj7E_0\tcow\nAqOxDunFl08_0\tairplane\nAqOxDunFl08_1\tairplane\nAqSP11-eje8_0\tboat\nAqUxRBRS-n0_0\tskateboard\nAqZhKjLLG70_2\tboat\nAqdAnSsQLI8_1\tperson\nAqdAnSsQLI8_0\tperson\nAqlHHwyJypE_0\tbird\nAqmXAZYmPJc_0\tperson\nAqmXAZYmPJc_1\tperson\nAqo5yZkzz8I_4\ttruck\nAqpinwPH8gM_1\tperson\nAqpinwPH8gM_0\tperson\nAqqs8XxA8gM_1\thorse\nAqqs8XxA8gM_0\thorse\nAqqs8XxA8gM_2\thorse\nAqqvZzLy3IE_0\tmotorcycle\nAqsuBaW1L0Q_0\tperson\nAqxTv7XRAH0_0\tperson\nAq_n86sub5o_2\tbicycle\nAq_n86sub5o_3\tbicycle\nArJNEsuLzDc_0\tperson\nArJaHKwfOEo_0\tperson\nArM6GXi6YnI_1\tdog\nArbpF1NIm-s_0\tcar\nArbpF1NIm-s_1\tcar\nArfeHbvYvKY_0\tmotorcycle\nAriIdq0ZPfE_1\telephant\nAroxRXjr3po_3\tbear\nArrB-hbOgf8_1\telephant\nArvYqb1hJSk_0\tperson\nAryOE3od43M_0\tperson\nAr7WaiToztg_0\tperson\nAr8Wk3m0uZ0_1\tperson\nAr8Wk3m0uZ0_0\tperson\nAr-vOeN30bM_0\tcat\nAsJt3MHLGiM_0\tperson\nAsKUm364aHg_0\tperson\nAsNy8gmdVec_0\tperson\nAsWWfQtZSHA_0\tperson\nAsY1dt4QojM_0\tperson\nAsZa3il8cZQ_0\tperson\nAsfAcK_laZA_2\thorse\nAsix5lGmXlg_0\tairplane\nAskNHLhn1t0_0\tcow\nAs_a3CyN-kQ_0\tbicycle\nAs_a3CyN-kQ_2\tbicycle\nAs_a3CyN-kQ_7\tbicycle\nAs_a3CyN-kQ_8\tbicycle\nAs_a3CyN-kQ_10\tbicycle\nAtFOIFqxLKs_0\tperson\nAtG98YoPQyg_0\tbird\nAtKUkiMSzfs_2\telephant\nAtKieG766oI_0\tperson\nAtawrCflbrM_0\tperson\nAtfXsIpaSgQ_0\tperson\nAtmVV-8Pjsg_0\tperson\nAtmVV-8Pjsg_1\tperson\nAt0-VpJyfBY_0\tskateboard\nAt81P33v_z8_0\tperson\nAuA4_FjCMvo_0\tperson\nAuJLIGyAoj4_1\thorse\nAuJalbdpJP8_0\ttrain\nAuLw9iNhPvw_0\tbird\nAuQYS5w13co_0\tbus\nAucK5ZDM060_1\tairplane\nAuchGbKLdmk_0\tperson\nAucxkj3w3nc_0\tperson\nAugnPC3tdso_0\tmotorcycle\nAunfkfLwN1w_0\tbear\nAunfkfLwN1w_3\tbear\nAunfkfLwN1w_2\tbear\nAutsbWiMLoY_0\tperson\nAuuZLhOpxcI_1\telephant\nAuuZLhOpxcI_6\telephant\nAvGLANxpJ-Y_1\tperson\nAvJexx39uCE_0\tperson\nAvOpMSLKXTM_1\tperson\nAvOpMSLKXTM_0\tperson\nAvP_DY8SuU4_0\tperson\nAvQgdEmyoFA_0\tairplane\nAvVBLLWgeWo_0\thorse\nAvdUsPyX5lE_0\tperson\nAvdgweWTeeg_0\tcat\nAvgusAC7DUU_0\tbird\nAvlg_B60Z0E_0\tbear\nAvlg_B60Z0E_4\tbear\nAvp80BzoG9Y_1\tperson\nAvp80BzoG9Y_0\tperson\nAvr6FKguO2o_4\tskateboard\nAvr6FKguO2o_1\tskateboard\nAvvWfbj5x88_0\tperson\nAv78r-lWmCs_0\thorse\nAv8Hkyi1fdc_1\tknife\nAv8k98IyQhs_0\tperson\nAwAX85eLJH4_0\tcow\nAwDIxdZSWKQ_0\tperson\nAwECiro8_h4_1\telephant\nAwEtKHnfKJ8_1\tcow\nAwEtKHnfKJ8_2\tcow\nAwFA2LuUWN8_0\tperson\nAwM3QWX5Jsc_0\tperson\nAwOJkAFe8Xs_0\tbicycle\nAwZ6nHwMMuA_0\tdog\nAwqZ_9G0pWg_0\tperson\nAwsAA0Xk1J8_0\tperson\nAw-D6USSthk_0\tbear\nAxAIZDsViZw_0\tperson\nAxAIZDsViZw_1\tperson\nAxAkf4tRXbI_0\tperson\nAxLiwCy5umU_0\tperson\nAxUFYNgnIq4_0\tperson\nAxg0nab1SDc_0\tperson\nAxvrCidcYqM_1\tperson\nAx2iIXU4Gyc_0\tperson\nAx5dd2_2sFA_1\tcar\nAx5dd2_2sFA_0\tcar\nAyAAL3Rd_Rg_3\tbicycle\nAyAAL3Rd_Rg_5\tbicycle\nAyAA5q5B-84_0\tperson\nAyAA5q5B-84_1\tperson\nAyH0zvW0ndQ_1\tbird\nAyKf0Ufaa_o_0\tperson\nAyfmwf4oW_k_0\tperson\nAyhXfIgl4Kk_0\tknife\nAyo9w6aKSY0_0\tperson\nAyqiYJuONPs_0\tairplane\nAyqvDNKC1CQ_0\tperson\nAy2VXLYZW50_1\tperson\nAzFaa7gRy0k_0\tperson\nAzMHek-Oow0_0\tcat\nAzNf4dneWFU_1\tperson\n
AzVMbaXM_QM_1\tboat\nAzVoOWc-ueY_0\tperson\nAzaUz9OpHMI_0\ttruck\nAzeA4K-S0CI_1\tperson\nAzew3w3WZfI_5\tskateboard\nAzew3w3WZfI_1\tskateboard\nAzew3w3WZfI_3\tskateboard\nAze0ijK2t2M_0\tperson\nAze_lfqL6mw_0\tcow\nAzhTPVtwJVk_0\tperson\nAzh82KkzMVs_0\tbird\nAzh82KkzMVs_1\tbird\nAz0Hr5pa_Pw_0\tperson\nAz5vE5ssYxk_0\tperson\nAz5vE5ssYxk_1\tperson\nAz7glF28oOw_0\tperson\nAz_5XR0RSv0_1\tperson\nAz_5XR0RSv0_2\tperson\nA0JB0OdZ2NE_1\tknife\nA0L6M_8fDyM_0\tperson\nA0Nx4JbdXO0_0\tperson\nA0PQ6Si3nOU_0\tairplane\nA0XGvY-NO00_5\tairplane\nA0jhzA4HvrY_0\tumbrella\nA0n7dLEgCjo_0\tcow\nA02wb1V5W0A_1\tperson\nA02wb1V5W0A_0\tperson\nA08TTc4NLik_0\tperson\nA1Hvxm2NCpk_1\tairplane\nA1H8wrYSPlQ_0\tbicycle\nA1NBheOGWNE_0\tbird\nA1fdw6WBO_w_0\tcat\nA1oQZf9EXPg_0\tperson\nA1oQZf9EXPg_1\tperson\nA1oQZf9EXPg_2\tperson\nA1r3FpgoeP0_0\telephant\nA1unjHSiYuk_0\tskateboard\nA1w5Z9ryeJI_0\telephant\nA1w5Z9ryeJI_2\telephant\nA1w5Z9ryeJI_1\telephant\nA11L_7hymDI_0\ttrain\nA2ODL8T477o_0\tumbrella\nA2UiM17u3Ao_0\tbear\nA2Vhzr_2AAY_0\tperson\nA2WfZtUfAy4_1\tperson\nA2gisYdnTi0_0\tbird\nA2iD7VC-A9g_1\tcow\nA2p7Z_Ia9Ak_0\tperson\nA2p7Z_Ia9Ak_1\tperson\nA2rOJWkWoRo_0\tperson\nA23nZy9maYk_1\tperson\nA23nZy9maYk_0\tperson\nA29DgqMHeEQ_0\tperson\nA3EcM1p8r14_0\tperson\nA3FTEFw2Bo0_3\thorse\nA3JmvJSIxeU_0\tperson\nA3Lmb8E3Ovw_0\tperson\nA3L2pdrSYdE_0\tperson\nA3MpR785VH8_0\tperson\nA3MpR785VH8_1\tperson\nA3UoQh4P1_o_0\tperson\nA3ZIKfh-QPo_0\tperson\nA3b1bCXjWWE_1\tknife\nA3eocVVFaX8_0\tperson\nA3vXSLx3blY_0\tperson\nA4BVLpu2EQI_1\tcow\nA4CYcvyDGec_0\tperson\nA4P_7hjid7Q_0\tperson\nA4gw9TbmL54_0\ttrain\nA4ijVvmthCQ_0\tperson\nA4oNmb9PiYQ_0\tperson\nA4t4imYj0tA_1\tdog\nA4u61iOuzr0_0\tperson\nA4u61iOuzr0_1\tperson\nA4u61iOuzr0_2\tperson\nA4wLmZZODQU_1\tperson\nA4zzoIg6-W4_0\tskateboard\nA42uEePHr8c_0\tperson\nA438LRj4MN0_0\thorse\nA5Ho_qla_bQ_0\tskateboard\nA5Kii0lU4h4_0\tperson\nA5ZAKa7xw_I_0\tperson\nA5ciZloGW2o_2\thorse\nA5nuZ-mKcBE_4\tairplane\nA5nuZ-mKcBE_7\tairplane\nA5-RNkQ5yzU_0\tperson\nA5-yfb7-1NM_1\tperson\nA6DfgaqbLDM_0\tperson\nA6GND629_dg_0\tperson\nA6IIHamstQo_0\tperson\nA6KXKalaC7M_0\ttrain\nA6KXKalaC7M_1\ttrain\nA6LmIR6_mtk_1\ttruck\nA6L7XcS8oF4_0\tperson\nA6MkQdxLBSI_1\tbicycle\nA6MkQdxLBSI_6\tbicycle\nA6SipDli3dE_0\tperson\nA6Tx9smTdyo_0\tboat\nA6Zbpn5hd6Q_0\tperson\nA6jEv9bIawA_1\tbus\nA6rxrML8vyk_0\thorse\nA66pUkVBt_M_0\tperson\nA7GxuMCyr50_0\tcat\nA7KLi_xOQFc_0\tperson\nA7SDQoaalEY_0\tperson\nA7SIvy9srFU_0\tperson\nA7Zz2ESO-PM_2\tbear\nA7aEqy5QRJ4_0\tcat\nA7cjjAkLjfQ_1\tperson\nA7cjjAkLjfQ_0\tperson\nA7coVhNQrSs_0\tcow\nA7c_1Wcr5hM_0\tcow\nA7ltojA7WTk_0\tperson\nA729VkZvy_s_0\tperson\nA7_WDIFj23s_0\tcow\nA7_hPlvWyGc_0\tcow\nA8F5UnJOU5A_0\tboat\nA8MGPGEOAWk_0\ttrain\nA8PGaHrBO-g_0\tbus\nA8PlfHNTHVQ_0\tperson\nA8RztgyPvCE_1\thorse\nA8U5HWirVCk_0\tperson\nA8gL-e9dRa8_2\tbear\nA8oMFSrcteU_0\tbicycle\nA80V1BVUvf4_0\tairplane\nA89eQvkZ4go_1\tcar\nA89eQvkZ4go_0\tcar\nA89tFE_-szI_0\tperson\nA9ACfqLHRIM_0\tperson\nA9ACfqLHRIM_1\tperson\nA9LEZHrMOh8_0\tperson\nA9Mw5uHZ7WM_0\tdog\nA9UlOqoTO3A_0\tcar\nA9WAS-oLC8Q_1\ttrain\nA9WAS-oLC8Q_2\ttrain\nA9etwHCHkQM_0\tperson\nA9fblLjEn7E_1\tperson\nA9fblLjEn7E_0\tperson\nA9f0bktW-uM_0\ttrain\nA9sznaQipiM_1\tperson\nA9sznaQipiM_3\tperson\nA9tOXINxUeA_2\tperson\nA-BcgCHWiLE_1\tknife\nA-JRl34Jmok_0\telephant\nA-JRl34Jmok_1\telephant\nA-JRl34Jmok_2\telephant\nA-JRl34Jmok_3\telephant\nA-MMqq_FLXo_0\tperson\nA-R5A0HMT3w_0\tboat\nA-SdlQGGdZg_1\tperson\nA-Vo3GQZrd8_0\tskateboard\nA-gQnulNzVo_0\tperson\nA-gZpG3OWNM_0\tperson\nA-jGPkEGCdo_0\tperson\nA-qT3DcitzM_0\tskateboard\nA-0o6fFroLk_3\tb
ird\nA-1_sR8c39g_0\tskateboard\nA-1_sR8c39g_3\tskateboard\nA-37XpNHfQw_0\tcow\nA_AbA6K8Ouc_0\tperson\nA_AbA6K8Ouc_1\tperson\nA_B83i3dvWQ_0\tperson\nA_CDsn7za4c_1\tperson\nA_CDsn7za4c_0\tperson\nA_DqzmxTyPQ_0\tdog\nA_Eaoo5O71M_0\tskateboard\nA_Eaoo5O71M_3\tskateboard\nA_Nb1jSK7vY_0\tperson\nA_RHSgWC24U_0\telephant\nA_R7iK_MLgM_0\telephant\nA_Z7Cj10nKA_0\ttruck\nA_aN9LUuMY8_0\tperson\nA_g6G7vBr8I_1\tperson\nA_qnLTG_VBg_0\tperson\nA_uC3UuAVQE_0\tcow\nA_uxGLJDf9I_0\tperson\nA_xtvYH_7vg_0\tperson\nA__fHCZfwtM_0\tperson\nBACWpC6GdxY_5\tairplane\nBACWpC6GdxY_3\tairplane\nBANdhsMHpw0_0\tperson\nBANdhsMHpw0_1\tperson\nBANdhsMHpw0_2\tperson\nBAOR6YBIb8U_1\tskateboard\nBAO0Uce3vXA_0\tcat\nBARELTt_9Ko_0\telephant\nBAWN6Xpw7sg_0\tperson\nZx3x1-cBu7I_0\tperson\nZx3x1-cBu7I_1\tperson\nZx8LkdyJzG8_0\tperson\nZyDqefuyQfU_1\tcat\nZyDqefuyQfU_2\tcat\nZyNwfXl7s2w_0\tmotorcycle\nZyQL8Ugiq4Y_0\tperson\nZyQxolWsw2o_0\tcat\nZyQ_gFztNXU_0\ttrain\nZyQ_gFztNXU_2\ttrain\nZyqvHk5Ugjk_0\tbird\nZyrTKvb3Uq4_0\tperson\nZyuoNtTPexE_0\tperson\nZywGdneFaWs_0\tdog\nZyw6pIArS1g_0\ttrain\nZy04v73t_oU_0\tperson\nZy4s6kQgRAs_0\tperson\nZy7a1FYT_2I_0\tperson\nZy9BXzUqORk_0\thorse\nZzAgbPU4qoA_0\tperson\nZzBP5IPOX7Q_0\tperson\nZzBP5IPOX7Q_1\tperson\nZzFvfG2mfRU_0\tcow\nZzIeftZXBMw_0\tperson\nZzPUlKXnUgE_0\tperson\nZzRMRSyCzzU_0\tperson\nZzS_a0D4AhE_1\tskateboard\nZzWMnTc1LBY_0\tperson\nZzdl60FMu48_0\tperson\nZzeCPtqruzg_0\tperson\nZzgU7APbNfs_0\tperson\nZzgoobk2eIA_0\tperson\nZzgoobk2eIA_1\tmotorcycle\nZzhCWdZJAQY_0\tperson\nZzic21J3Ea8_0\tperson\nZznEoJsdkVI_0\tperson\nZzpccfyFyL0_0\tperson\nZzpccfyFyL0_1\tperson\nZzq_S3HujTo_0\tperson\nZztD-tmxwyc_0\tperson\nZzwlUbCfscM_1\tdog\nZzxRC2pLBVA_0\tperson\nZz2oIdSVB6Q_0\tperson\nZz5GwCMuMj0_0\tperson\nZ0D6uKz7v5Q_0\tperson\nZ0m37r4St5Q_3\ttruck\nZ0pLWU6Wg-o_0\tdog\nZ0stjlmfTpU_0\tcat\nZ0xYA5PwrjI_0\tperson\nZ02r-T2hINk_0\telephant\nZ04k6LBSuRk_1\tperson\nZ1G9pYdQwCY_0\tperson\nZ1HK6zDIJhg_0\tperson\nZ1MvNM4bmxs_0\tperson\nZ1SML4zVPik_0\tperson\nZ1U7Wnf_WiA_0\tcat\nZ1XafO8l8gs_0\tperson\nZ1aU1CigISE_0\tperson\nZ1a8Tqg-yjE_0\tperson\nZ1e-5FLWf6I_0\tcat\nZ1gxFkBk4EY_0\thorse\nZ1j81keSb9Q_0\tmotorcycle\nZ1j81keSb9Q_1\tmotorcycle\nZ1nr46t7EVk_0\tairplane\nZ1pv5a0as9c_0\ttrain\nZ1rB_fu2lKY_0\tdog\nZ1x8sEeQIuI_1\tmotorcycle\nZ13O2uGP1nE_0\tcar\nZ14p6heAJRc_2\tperson\nZ14p6heAJRc_0\tperson\nZ14p6heAJRc_1\tperson\nZ15QqHX1Z6M_1\ttrain\nZ2HF5_tyxR4_0\tbus\nZ2K03YbfcGg_0\telephant\nZ2QWOKCHkM8_0\tcow\nZ2QWOKCHkM8_2\tcow\nZ2QWOKCHkM8_1\tcow\nZ2SljfwK58g_0\tskateboard\nZ2SljfwK58g_1\tskateboard\nZ2VI7eM7BB0_0\tbear\nZ2acpS-e_cg_0\tperson\nZ2cvYI55Dps_0\tskateboard\nZ2dab1zmqv8_0\thorse\nZ2gvlPrX5HA_5\telephant\nZ2gvlPrX5HA_6\telephant\nZ2kcVxTMZtM_0\tperson\nZ2n2a39MxJQ_7\tbicycle\nZ2n2a39MxJQ_1\tbicycle\nZ2n2a39MxJQ_2\tbicycle\nZ2n2a39MxJQ_3\tbicycle\nZ2n2a39MxJQ_4\tbicycle\nZ2n2a39MxJQ_6\tbicycle\nZ21DONVXY1Q_2\tzebra\nZ23Gg06mNj8_0\tperson\nZ236ql8Tpvg_0\tperson\nZ23_3K28VSI_1\tgiraffe\nZ3AHrAB9qhw_0\tcat\nZ3AplkSO6kA_1\tcar\nZ3KMX_N6WSg_0\tperson\nZ3KMX_N6WSg_1\tperson\nZ3KMX_N6WSg_2\tperson\nZ3PzgfwbjLk_0\ttruck\nZ3i5sys0boU_0\tperson\nZ3i5sys0boU_1\tperson\nZ3sRLCOCxMY_0\tcat\nZ37dIpwPIqI_3\tbicycle\nZ4DQoYcs5mM_2\tperson\nZ4DQoYcs5mM_0\tperson\nZ4DQoYcs5mM_1\tperson\nZ4XLmQjbg7Y_0\tperson\nZ4XLmQjbg7Y_1\tperson\nZ4ZKg0KbSm4_0\tbicycle\nZ4ZPyzSGdRU_0\tdog\nZ4bO8cpjQZI_0\tperson\nZ4bO8cpjQZI_1\tperson\nZ4bW8HHeYP8_0\tcar\nZ4mYWGPFVkw_0\tperson\nZ4n5ieSA6cM_0\tcow\nZ4tOSluXWnE_1\tumbrella\nZ4u3PPkCYOs_0\tperson\nZ4u4zasFeAw_1\tbird\nZ4u4zasFeAw_0\tbird\nZ4vRtZE1WjQ_0\tdog\nZ4voZ3h_Dyk_1\tperson\
nZ4xVMaYAqJ4_1\tbicycle\nZ446P08C8vE_0\tperson\nZ5KGx49qaAE_3\tbird\nZ5KGx49qaAE_5\tbird\nZ5KGx49qaAE_6\tbird\nZ5Qo8xdb8os_0\telephant\nZ5RKMhlNHEE_0\tperson\nZ5ZBRI0sc4Q_0\tbicycle\nZ5iJRTvm-Kw_1\tperson\nZ5iV683VDk0_0\tperson\nZ5ls93B1bBk_0\tperson\nZ5mQ_0ttu74_1\telephant\nZ5mQ_0ttu74_2\telephant\nZ5yNMm-TIjI_0\tbus\nZ5zGHZ82r9A_0\tperson\nZ53B8-gR640_0\tperson\nZ6BVtmEMfkI_0\tperson\nZ6FikDWrKkA_0\tperson\nZ6MfvYa9hCs_2\tcar\nZ6MfvYa9hCs_3\tcar\nZ6PyYboRq5c_0\tdog\nZ6Q3LdMwgi4_0\tcat\nZ6WrlM4ZZKA_0\tperson\nZ6j-7La25S4_0\tperson\nZ6j-7La25S4_1\tperson\nZ6j-7La25S4_2\tperson\nZ6k1unwmsfA_1\tperson\nZ6sd800eFC4_0\tperson\nZ6tGpP8q53A_9\telephant\nZ6tGpP8q53A_2\telephant\nZ6tGpP8q53A_4\telephant\nZ6vCDHs6NrM_0\tperson\nZ6yNyxXPPOw_0\telephant\nZ60iXtKpGMQ_0\tbus\nZ61B0fShfbs_1\tcow\nZ7AqkWEBwV8_0\tperson\nZ7DGMMQP79U_0\tcat\nZ7I8r1AqMhU_0\tperson\nZ7JHCdt48hA_0\tairplane\nZ7KEzuE_7hQ_0\tperson\nZ7LfnFm4OHs_0\tperson\nZ7WaJYiX_1o_0\tperson\nZ7WaJYiX_1o_1\tperson\nZ7bMdjLGiAo_0\tperson\nZ7eGCBjkKrU_0\tdog\nZ7gxE6ZSQXI_0\tairplane\nZ7iq45DtCTM_4\thorse\nZ7iq45DtCTM_5\thorse\nZ7zeXJ5lJRY_1\tperson\nZ7zeXJ5lJRY_0\tperson\nZ72sIqrQAF4_0\tskateboard\nZ74EGXvFjFM_0\tperson\nZ76Y_PNOgK4_1\tperson\nZ76Y_PNOgK4_0\tperson\nZ78P87kjtu4_0\tperson\nZ8CXvEObu4c_0\tdog\nZ8NfZN7WDKw_0\tperson\nZ8Oi5HJEyS4_0\tskateboard\nZ8k0TTq5BC8_0\thorse\nZ8s-Kg1PuSg_0\thorse\nZ86E7eIS9t8_1\tairplane\nZ89mG68LE2k_0\tperson\nZ8942_IPiTo_0\tbicycle\nZ8942_IPiTo_2\tbicycle\nZ9SwanypLJM_0\tbear\nZ9SwanypLJM_1\tbear\nZ9XS4cvVVy4_2\tperson\nZ9awHnw5J4o_0\ttruck\nZ9bt3xT5dCc_0\tcat\nZ9f--QLEQqI_1\tmotorcycle\nZ9jDpr533Cg_0\tcat\nZ9o5BEm1UeI_0\tperson\nZ9pHCguAO5c_0\tperson\nZ9wO9tftNG0_0\tbus\nZ9x_cPvKErA_0\tperson\nZ98EscJ1IG8_0\tperson\nZ98GFnZo-LA_0\tperson\nZ-I0S45eRT0_0\tperson\nZ-J0UQfvb5M_0\tperson\nZ-MvTXpMdm4_0\ttruck\nZ-PMnTjqAS8_0\tperson\nZ-QO3lrbh7c_1\tskateboard\nZ-VVWO3Ovgs_0\tperson\nZ-djkrj-5Cs_0\thorse\nZ-glDeBd2xA_0\tboat\nZ-lrIzXr9ck_0\ttrain\nZ-mTl_ipVa4_0\tumbrella\nZ-mXYrvubn8_0\tdog\nZ-zy-BzjLT0_0\tmotorcycle\nZ-zy-BzjLT0_1\tmotorcycle\nZ-7W_lh96xg_1\tairplane\nZ_JXyC6v_-s_0\tperson\nZ_KItWz0mTI_0\telephant\nZ_PViIzihe8_0\tperson\nZ_QVuM8wEmQ_0\tperson\nZ_QVuM8wEmQ_1\tperson\nZ_kPrUEqYXE_0\tbird\nZ_p4gYNjwG0_0\tperson\nZ_85vV3FHUg_0\tperson\nZ_85vV3FHUg_1\tperson\naACqXYewohQ_0\tperson\naAI7SN5_3CY_4\tbus\nBAhHrnCKvcM_2\tboat\nBAhHrnCKvcM_3\tboat\nBAhHrnCKvcM_5\tboat\nBAmy5TQke7w_0\tperson\nBAnfbsB8rIY_0\tbear\nBAnn4L-iNLE_0\tperson\nBAq_fnyQ6z4_0\tperson\nBA4ZGv8flRA_0\tperson\nBBCBbdz3Qvs_0\tdog\nBBCBbdz3Qvs_1\tdog\nBBLAyHVLHh8_0\tperson\nBBOd-YBAUgw_0\tbicycle\nBBPlqTbAphY_1\tperson\nBBQ2xu9OehQ_1\tdog\nBBS5owVJaTU_1\tskateboard\nBBS5owVJaTU_0\tperson\nBBVPb5z0x7k_0\tcat\nBBXs1J4j2mA_0\tskateboard\nBBdA1qc9H-g_0\tskateboard\nBBk7ZnOEjMA_0\tperson\nBBopEl_n3Fc_0\tperson\nBBpFu8j2fBc_0\tbus\nBBpFu8j2fBc_1\tbus\nBBqTHwpYeEc_0\ttrain\nBBrfgTTduuI_0\tperson\nBB9l_znmPls_0\tumbrella\nBCBCK2k2Bdw_0\tperson\nBCBgjRWuOcA_0\tperson\nBCGB6zaBDpg_1\tperson\nBCGB6zaBDpg_0\tperson\nBCI91i3aEek_0\tmotorcycle\nBCJbf6um28s_1\tairplane\nBCKVauIBDFM_2\tbear\nBCin0MjzM8Y_0\tcow\nBCoTKGNhMVw_0\tdog\nBCoTKGNhMVw_1\tdog\nBCo8e6n2dYQ_1\tdog\nBCqYnyGIols_1\tbicycle\nBCsmPvRqaNk_0\tperson\nBCuzA73UTl4_0\tperson\nBCwAdqAouFU_0\tboat\nBCwyoTwckSE_0\ttruck\nBDFBV8JbIF8_0\tperson\nBDFVkc87amI_0\tperson\nBDHUAJn9nnc_0\tperson\nBDHsXkbkS-w_0\tskateboard\nBDOemJGz04I_1\tperson\nBDcTOMebCHs_0\tperson\nBDcTOMebCHs_2\tperson\nBDcTOMebCHs_1\tperson\nBDdIKtFwnjA_1\ttrain\nBDdbk3ZQrP0_0\tcat\nBDdhenNSY9o_0\tperson\nBDk-BklqSdI_0\tperson\nB
DroGke9Ogg_0\thorse\nBDroGke9Ogg_2\thorse\nBDtGFVFexaU_0\tperson\nBDzXi4ukhN0_1\tperson\nBDzXi4ukhN0_0\telephant\nBD30MTvTuYU_0\tperson\nBD7TQWBytfQ_0\tknife\nBEArUGKSB-Y_0\ttrain\nBEArUGKSB-Y_1\ttrain\nBEKMcritl6M_1\tperson\nBEMcwkY2beQ_0\tperson\nBERvmKL4Glc_0\tperson\nBESdHwoIDsA_0\tdog\nBEUB64a3AIY_0\telephant\nBEUB64a3AIY_1\telephant\nBEYy-ZRSWSk_0\tskateboard\nBEa_8wp0528_0\tcow\nBEqG56tHTEI_2\tbus\nBEqPniAgjaY_0\tcat\nBErty5GnulU_0\tperson\nBEuXjB1zLeE_1\tcar\nBExSp8l17GY_0\tperson\nBExlFv0scM0_0\tperson\nBE10HJUHUHw_1\tperson\nBE8KS4PZH54_0\telephant\nBE-crlUXSSE_0\tdog\nBFC3DWxOces_2\tairplane\nBFC3DWxOces_1\tairplane\nBFC3DWxOces_3\tairplane\nBFC3DWxOces_4\tairplane\nBFC3DWxOces_5\tairplane\nBFJ4v-XlKAg_0\tskateboard\nBFPQCoJqTRk_0\tperson\nBFeIwErwdS8_0\tperson\nBFeIwErwdS8_1\tperson\nBFggPKKt6wk_0\tperson\nBFggPKKt6wk_1\tperson\nBFhh8z0Fmk0_0\tperson\nBFponHgVsdA_0\tperson\nBFs239KuGa8_1\tperson\nBFxUyTrqZhU_2\thorse\nBFxUyTrqZhU_4\thorse\nBF4YTMGtDs8_1\tskateboard\nBGAQlsAiJ_0_0\tairplane\nBGAQlsAiJ_0_1\tairplane\nBGAQlsAiJ_0_2\tairplane\nBGAQlsAiJ_0_3\tairplane\nBGAQlsAiJ_0_4\tairplane\nBGAQlsAiJ_0_5\tairplane\nBGAQlsAiJ_0_6\tairplane\nBGLM4yl_Ka4_2\thorse\nBGO3DBbNozc_0\tskateboard\nBGR1gMrCTpA_0\tperson\nBGT-p0CgoFg_1\tperson\nBGW9SDHTWKY_1\tperson\nBGW9SDHTWKY_0\tperson\nBGee3Ar-Fbg_0\tairplane\nBGpx9Xow9Ew_0\tcat\nBGqNnzNtWkc_0\tperson\nBGq6TeZHkLU_0\telephant\nBGshZfVDb5w_0\tperson\nBG4QyYPKYvg_0\tperson\nBG4QyYPKYvg_1\tperson\nBG_x-4YUtFE_0\tdog\nBHA5UUg4lCw_2\ttrain\nBHH2sTfHwks_0\tperson\nBHH2sTfHwks_1\tperson\nBHPSyq8L5S8_1\tperson\nBHQkdwmXrtI_1\tskateboard\nBHQkdwmXrtI_2\tskateboard\nBHYrJ1yaM-w_0\tcar\nBHdbqcxv3Vw_0\ttruck\nBHfXgxJCcrw_0\tboat\nBH5fxWFpHvE_0\tairplane\nBH5npOcPlY0_0\tcar\nBH6nqU68dWo_0\tperson\nBH74QV_0vtc_0\tbird\nBH9Ob6Uiw1w_1\tperson\nBH_SlBCiQ_8_0\tperson\nBIETPRRGGgY_4\telephant\nBIETPRRGGgY_5\telephant\nBIIU36E15Vo_0\tperson\nBIMggdk7AHQ_0\tcat\nBIQeL2o_Ogg_0\tperson\nBIUQ935UkDo_0\tcow\nBIVLmUTNYbk_0\tperson\nBIV-1bNQ7pI_0\tskateboard\nBIfqcruNiic_0\tperson\nBIkDAHYmcFw_0\tperson\nBIkDAHYmcFw_1\tperson\nBInC--gFqHM_0\tperson\nBIvTK9qvP1w_0\tskateboard\nBIxCP9ck4-8_0\tcat\nBI5i3aDb_FQ_1\tperson\nBI-kr0tFSDg_0\tperson\nBJIZYdOZHzg_0\tumbrella\nBJK_SXpLtnI_0\tbird\nBJMP05du3Eg_0\tperson\nBJQstPOa8Wk_0\tperson\nBJS2YLbErJg_1\tperson\nBJfRrRcfmF4_0\tskateboard\nBJf9nFjqLvg_1\tbird\nBJlcWhfsg_g_0\tperson\nBJriJT6zJl8_1\tskateboard\nBJwoZcHbBK0_0\tumbrella\nBJ05o1_UKzw_0\tdog\nBJ44CIPaDf8_0\tperson\nBKAo6GZ_kNs_0\ttrain\nBKTCaKgjiag_2\tperson\nBKUKi0vTt0A_0\tperson\nBKdSO_PNJ4U_1\tperson\nBKdSO_PNJ4U_2\tperson\nBKdSO_PNJ4U_0\tperson\nBKl0wLRzoD8_0\tperson\nBKw9UQxZ3a8_1\thorse\nBK-rIrwen6U_1\tmotorcycle\nBLB0F-XD8IA_1\tperson\nBLB0F-XD8IA_0\tperson\nBLEdcnrUmEo_0\tcat\nBLE9cZ8L3a0_1\tskateboard\nBLFYe-dU9ZU_0\tairplane\nBLO7KJUu8t4_0\telephant\nBLSwwE9mtTQ_1\tknife\nBLcOGv-0-dc_1\tdog\nBLfmgLou27o_0\tcat\nBLvowRU6z7s_0\tbird\nBLxsg2_sjDM_1\tperson\nBLy6RcifNl0_0\tbus\nBLy6RcifNl0_1\tbus\nBLy6RcifNl0_3\tbus\nBL6tcorHrT4_0\tbicycle\nBMH2ReDeKuc_0\tperson\nBMUnKa8FUGQ_0\tperson\nBMUnKa8FUGQ_1\tperson\nBMavrQABR1Y_0\tperson\nBMa4xJ1U3Zk_0\tperson\nBMbZc-jxEfo_0\tperson\nBMbZc-jxEfo_1\tperson\nBMfsf9tDz8o_0\tcow\nBMfsf9tDz8o_1\tcow\nBMhy1f7EuXM_0\telephant\nBMptIGI1Il8_0\tcar\nBMuO2fjJoOw_0\tcar\nBMweJTmvCBg_0\tperson\nBMweJTmvCBg_1\tperson\nBMypDovEOEE_0\tperson\nBMypDovEOEE_1\tperson\nBM0QiiStqd8_1\tskateboard\nBM6XrBQQ7NE_0\tperson\nBM6609PpfO0_1\tperson\nBM6609PpfO0_0\tperson\nBNGDM8sFM8Y_0\tperson\nBNIVhG5pZh8_1\tdog\nBNJwAx3eUKc_0\tperson\nBNK68rC
7RdI_0\tumbrella\nBNTS3OPHAP4_0\thorse\nBNXKRPSr66c_0\tperson\nBNXKRPSr66c_3\tperson\nBNXKRPSr66c_1\tperson\nBNXKRPSr66c_2\tperson\nBNbPQGMLs2w_0\tperson\nBNbPQGMLs2w_1\tperson\nBNbSUPI8feg_0\tperson\nBNcj3161E9o_0\tperson\nBNeWUyqXAC0_1\tairplane\nBNmMB68b1PA_0\tperson\nBNnVfaIfBx0_0\tairplane\nBNnVfaIfBx0_1\tairplane\nBNyK_4tt2fg_0\tcar\nBNybc47kPjg_0\tperson\nBN1HT0FOOhI_0\tdog\nBN7YfmbYuVs_0\telephant\nBOE82LEqzWw_0\tcow\nBOF3tFvEu0o_0\tperson\nBOHE8JNUcQc_0\tboat\nBOMeyjZNH5k_0\tbicycle\nBOQiuL9QlIo_1\tperson\nBOUcPea33eY_2\tskateboard\nBOfgzvAgVQw_0\tbus\naAMhdGuR5DE_0\tcat\naARa5-CLhG8_0\tperson\naAVaqjgY1m8_1\tperson\naAZ2fVjhcIE_0\tperson\naAj0EN1Rnc0_0\tbird\naAj0EN1Rnc0_1\tbird\naAlTiBaLr8M_0\tperson\naAmVIu8X7p4_1\tperson\naAma36YlaAo_0\tzebra\naAsr-Rf6rEE_0\tperson\naAsr-Rf6rEE_1\tperson\naAuz7EfR_fU_0\tcow\naAyTLM_PmzA_0\tskateboard\naAzpA1iK_bE_0\tperson\naA0FrWtkjXk_0\tperson\naA3okCsYx6Y_0\tbird\naA5DYzky6o4_0\tcow\naA8Tz4nZ99g_0\tperson\naBBtHXQoEtM_2\tperson\naBBtHXQoEtM_1\tperson\naBQm5kN1TfY_0\tcat\naBexNnNkORk_0\tairplane\naBq4NF1upak_0\tperson\naBvvXrP1BJs_0\tperson\naB-tGXFmyFU_0\tperson\naCQAel27T4o_2\tperson\naCSzhpU1heQ_0\tcow\naCXfvvg8CF8_0\tairplane\naCiDDC9KFS8_0\tmotorcycle\naClye1Ctc9E_3\ttruck\naCl98J6O9Hk_1\tperson\naCuXZ3LmfSo_0\tperson\naDGpg2xtDk8_1\tperson\naDRE08tF2Wc_1\tbus\naDTQRnSeu_E_0\tskateboard\naDTTYd0Z5Vk_1\tperson\naDjhOS5Xa9Q_0\tboat\naDmLwCb_o30_0\tdog\naDtJSv7XR90_0\tcar\naDte-e70l7U_0\tcow\naDte-e70l7U_2\tcow\naDte-e70l7U_3\tcow\naDt4Puik-kU_0\thorse\naDwTy9yiOms_0\tumbrella\naDxRlCI40wo_0\tperson\naD2q00X0-eg_0\tperson\naD2q00X0-eg_1\tperson\naEJy28mvKPk_0\tperson\naEJy28mvKPk_1\tperson\naEMPa2NvIl4_0\thorse\naERed6pg_h8_0\tperson\naER-VrHLWwY_0\tperson\naER-VrHLWwY_1\tperson\naEZ9vBpXNKU_0\tperson\naEw_vtKlegE_0\telephant\naExRtJpfZEs_0\tknife\naE1veVneq04_0\tperson\naFC2Zy2-0dY_0\tperson\naFFKeUdtPcQ_4\tknife\naFL2V522q9A_0\tperson\naFZ03eEOZFE_0\tbird\naFbVlCimys8_0\tbird\naFdPuo5xB-c_0\tperson\naFhKp8gVZSE_0\tperson\naF86vrld8V4_0\tperson\naF-CmWo8ooM_0\tperson\naF-CmWo8ooM_1\tperson\naGAB6WQFklc_0\tperson\naGE8AphnkNU_0\tknife\naGGiVuwB1p8_0\tbear\naGY3LCiYRnQ_0\tmotorcycle\naGgnovv6T3U_0\tdog\naGgxdwCpAN0_1\thorse\naGhNzJSHCOU_1\tknife\naGmxZatPe60_0\tperson\naGmxZatPe60_1\tperson\naGuWVv6XS8Q_0\tperson\naGuWVv6XS8Q_1\tperson\naGwPRbsru-4_0\tcat\naGxOl5SXjtM_0\tperson\naG1c8x5Dl-w_3\tbicycle\naG1c8x5Dl-w_2\tbicycle\naG1c8x5Dl-w_4\tbicycle\naG20iwkTd_o_0\tperson\naG6D_te6V3s_0\tperson\naHEFx7Zz6E4_0\tperson\naHb4yEpCinw_0\ttruck\naHiGSUMMfBQ_0\tperson\naHnMWEvjLzI_0\tcar\naHrTcxckS-A_0\tperson\naHrTcxckS-A_1\tperson\naHsgQAyd8ss_0\tperson\naH2ZxImdwaU_1\tmotorcycle\naH2ZxImdwaU_2\tmotorcycle\naH5Cd20kdJw_0\telephant\naILjXrLJpHw_0\tumbrella\naIQf8LQ5QPU_0\tperson\naISEbZGZH68_1\tcar\naITryMUZ2b8_0\tperson\naIUYT8pblHs_0\ttruck\naIU5E5tHvdc_1\tperson\naIVWVNBI-n0_0\telephant\naIcFi8LMv0w_0\tairplane\naIjLf6T_K3o_1\tbear\naIoZO3mu_tQ_0\tperson\naI311E3BWwI_0\telephant\naI7axTZFW4A_0\ttruck\naI80ysvYFG4_0\tperson\naJChqX9Ki8A_6\tairplane\naJChqX9Ki8A_1\tairplane\naJChqX9Ki8A_2\tairplane\naJChqX9Ki8A_5\tairplane\naJN9lRsvUv8_0\tperson\naJQ9scZQmz8_0\tperson\naJTABCCQtK4_0\thorse\naJYmkpuijrk_0\tmotorcycle\naJYurtxV0Og_0\ttrain\naJYurtxV0Og_1\ttrain\naJcPyWppCcI_0\tmotorcycle\naJgpAyFnpeI_0\tcat\naJ0dUcEIE_U_0\tperson\naJ1SzcgNcxI_0\tcat\naJ8w4L7E368_0\tperson\naKLf2yC2diM_0\tcar\naKMqeCkIJSg_0\tperson\naKOMIxz2RsM_0\tperson\naKOMIxz2RsM_1\tperson\naKiwOUy71Lo_1\tperson\naKiwOUy71Lo_0\tperson\naKqrwq-Sigg_0\tskateboard\naKtBD-3wFMA_2\tbe
ar\naKtBD-3wFMA_1\tbear\naKu-1-TFl1g_0\tknife\naK-rgio7orw_2\tbus\naLDq7roX-SU_0\tcat\naLFDqtBMblI_0\tcat\naLFxGnCM1zs_0\tperson\naLIa7x90hQc_0\tperson\naLUSnANtUlE_0\tairplane\naLX9cIe12C8_0\tskateboard\naLZAMgiWcXk_0\tbird\naLZ0lbLzg8Y_0\tperson\naLZ0wCY2j2s_1\tperson\naLeeoZ1uVcc_0\tboat\naLjomcNk9fc_0\tperson\naLj4N9Tp6C0_0\tskateboard\naLj4N9Tp6C0_1\tskateboard\naLo-gekX9j0_0\tperson\naLo-gekX9j0_1\tperson\naLuNNRUC09A_1\tbus\naLuNNRUC09A_6\tbus\naLvCIWJQJbY_0\tcar\naLvg1CWrY0Q_0\ttruck\naLxJ8T4CFuM_0\tperson\naLzL_Gldhzk_1\tperson\naLzhO0EqNcc_3\thorse\naL6H2Jatw0k_0\tcat\naL70_drPJtA_0\ttrain\naL8hELYDnTc_0\tperson\naMAKznXul5M_2\tknife\naMAYLrcEnZY_0\tbus\naMAeSegIdJg_0\tperson\naMAeSegIdJg_1\tperson\naMHtvIvWTBU_0\tbear\naMNbQ1Cl5GY_0\tmotorcycle\naMRtQFBcLNM_0\tperson\naMX0jhSq6UY_0\tperson\naMb78Ixlbfw_0\tskateboard\naMqHsdXJ7UU_0\tperson\naMzZxN9uvMc_2\thorse\naNB5rIhRL7g_0\tairplane\naNEpBEnAUhw_0\tmotorcycle\naNF18KgxGHA_0\tskateboard\naNJuTWrnIfo_0\tperson\naNJuTWrnIfo_1\tperson\naNKleFpxS4M_0\tperson\naNKleFpxS4M_1\tperson\naNNWNDoOM_4_0\tperson\naNNWNDoOM_4_1\tperson\naNOXvvKZ3qU_0\tperson\naNZMe4tov6w_0\tcow\naNdJrRu4imo_0\tperson\naNjs-khPjiU_0\tperson\naNj1xwowXYU_0\tperson\naNqkQnGfWEc_2\tskateboard\naNqkQnGfWEc_0\tskateboard\naNwIHwPqFPc_0\tcar\naN4Na3OaY4I_0\tbicycle\naN4NmH-GafU_0\tperson\naN770kOQCD8_0\tperson\naN82X1hXgEE_0\tperson\naN82X1hXgEE_1\tperson\naN9XAd7-rzE_0\tperson\naN9XAd7-rzE_1\tperson\naN_3Pwk-7oY_0\tperson\naOHPVt_93RE_0\tbicycle\naON6RKmi-YQ_2\ttrain\naOPbvY62dMQ_0\tairplane\naOQ-8RoQYEU_0\tperson\naOQ-8RoQYEU_1\tperson\naOQ-8RoQYEU_2\tperson\naOW81s5KlyA_0\tperson\naOcGv3kcyhg_0\tbear\naOcGv3kcyhg_3\tbear\naOjjUIWuG6Q_1\telephant\naOp2NlwNeoY_0\tcat\naOz0l6mLHmA_1\tdog\nBOlBcGufEU8_0\tperson\nBOlBcGufEU8_1\tperson\nBOmgqlRxGlM_1\tperson\nBOmgqlRxGlM_0\tperson\nBOnvGIZd58M_0\tperson\nBOowRuwiNhU_0\tperson\nBOowRuwiNhU_1\tperson\nBOr7CffDWEU_0\tperson\nBOsNz8L3PXI_0\tperson\nBOtfIOm5kag_0\tdog\nBO1T_-iFGdM_5\tbird\nBO1T_-iFGdM_2\tbird\nBO1T_-iFGdM_3\tbird\nBO3UKxe7nyo_0\tperson\nBO5EdP_PO9M_0\tperson\nBO7sWBaaL7g_0\tperson\nBO7sWBaaL7g_1\tperson\nBO-3uvHhUdI_0\tperson\nBO-3uvHhUdI_1\tperson\nBPBBMIdFoiE_0\tperson\nBPEwUVhfaOk_1\tknife\nBPVpq7UrI-k_0\tperson\nBPX5EquoyCU_0\tmotorcycle\nBPX5EquoyCU_3\tmotorcycle\nBPX5EquoyCU_1\tmotorcycle\nBPX5EquoyCU_2\tmotorcycle\nBPiWTYUA7eI_0\tperson\nBPjkQ-lEqcw_0\tperson\nBPrrZpiDdo4_0\tcow\nBPsTDg4C4o0_1\tperson\nBPsTDg4C4o0_0\tperson\nBPxPfFzwlQA_0\ttruck\nBP-GGAbCOhE_1\tbus\nBQDxNNWRtas_0\tcar\nBQDxNNWRtas_1\tcar\nBQEzj9pP1SU_0\tperson\nBQIO94PF6RE_0\tperson\nBQIO94PF6RE_1\tperson\nBQVcvMWyWpU_1\tperson\nBQZGptzIdjE_0\tcow\nBQgPk0vRreM_0\tbird\nBQgPk0vRreM_1\tbird\nBQgPk0vRreM_3\tbird\nBQgPk0vRreM_6\tbird\nBQgPk0vRreM_9\tbird\nBQh5Ib9nynM_0\ttruck\nBQtDUi4BxRg_0\tperson\nBQwLGv7fgQg_0\tperson\nBQxCcefrjSk_0\tcat\nBQyowuIZqFQ_0\tperson\nBQzzKQ9ejzw_1\tknife\nBRCb183ELe0_0\tperson\nBRHPsi_0nTg_0\tmotorcycle\nBRQiSnowTss_0\thorse\nBRVNuDR5WzI_0\tcow\nBRcQS0dQqEU_0\tcar\nBRfegSv5VEk_0\tperson\nBRfegSv5VEk_1\tperson\nBRi_AMaK3kc_0\tdog\nBRjvUtQdukg_0\thorse\nBRlWBt4WHdU_1\thorse\nBRnsmPzoEsM_0\tskateboard\nBRtCCpXG_N8_1\telephant\nBRt1o8xqxFs_0\tperson\nBRt5hLASRMU_0\tbird\nBRxrw0-skYM_0\telephant\nBR0SGq2ioqU_2\ttrain\nBR0SGq2ioqU_7\ttrain\nBR1gOlJPEdk_2\telephant\nBR8cOV8KYX4_0\tperson\nBR-XwELzLV0_1\tdog\nBSDy_dzOSS4_0\tcow\nBSHg9I0V6Yc_2\tbus\nBSJgV2iO0jc_0\tperson\nBSOCno_3bfI_0\tperson\nBSSyaPq1EoM_0\ttrain\nBSWNCcyXeR4_1\thorse\nBSWpwtIPQ9U_0\telephant\nBSWpwtIPQ9U_1\telephant\nBSWpwtIPQ9U_2\telephant\nBSWp
wtIPQ9U_3\telephant\nBSqz3i60KPw_4\tbicycle\nBSqz3i60KPw_1\tbicycle\nBSqz3i60KPw_2\tbicycle\nBSutEBx3H4A_0\ttruck\nBSvCnoryvn4_0\telephant\nBSyxB7X9SH0_5\ttruck\nBSyxB7X9SH0_7\ttruck\nBS1lexD0ugY_1\tperson\nBS1lexD0ugY_0\tperson\nBS5mJ0Y7Rys_0\tperson\nBS-S0nYSwkQ_0\tperson\nBS-S0nYSwkQ_1\tperson\nBTBmlFGHK-8_2\tperson\nBTBmlFGHK-8_0\tperson\nBTBmlFGHK-8_1\tperson\nBTKLizyvgcA_0\tperson\nBTR83oP1vpo_0\tperson\nBTlwglCdzOk_0\telephant\nBTpBteZfK7Q_0\tcat\nBTxSuijXVPY_0\tperson\nBTywlpNCABw_0\tcow\nBTzWqg8vHQI_0\tcar\nBT9sKGDb0Qw_0\ttrain\nBT9sKGDb0Qw_1\ttrain\nBUF45g7KGB8_0\tmotorcycle\nBUX8raEGFZk_0\tdog\nBUX8raEGFZk_2\tdog\nBUX8raEGFZk_3\tdog\nBUY-_l8_v9s_0\tperson\nBUZ7x7JaQ1k_0\tperson\nBUrMlyUBryI_0\thorse\nBU4SnrK9UiY_0\thorse\nBU4SnrK9UiY_2\thorse\nBU4yiA6qKAQ_0\tbicycle\nBU5PaU-UTss_0\tperson\nBVAi_zqhIeg_1\tperson\nBVCe2emxuTQ_0\thorse\nBVFYmsvoNTA_0\tcow\nBVS5Q8eBmRs_0\tperson\nBVWEvs3lq0Y_0\tperson\nBVWEvs3lq0Y_1\tperson\nBVXMpcHTg80_2\tmotorcycle\nBVm9KRW0iu8_0\tmotorcycle\nBVo3XdFnAJM_0\thorse\nBVxr6TGFsMQ_1\tperson\nBV5tXmVwddI_1\tperson\nBV-UtDJNS2w_1\tmotorcycle\nBWA5eWlt6Lg_0\tcar\nBWFYpOE-8yo_0\tperson\nBWcaU8lR4rM_0\tperson\nBWdhK5cwgt0_0\tbus\nBWjRZ-aKRX4_1\tperson\nBWlnPrI8FLk_0\tperson\nBWnFU-Li_8E_0\tperson\nBWn3QGOyZJc_0\telephant\nBWn7EPWkJ2I_1\tbear\nBWp2oVJMG1A_0\tperson\nBWqYVuIKaNA_0\tperson\nBW5r0Kv6h2U_0\tboat\nBW56O_QhBmc_0\tperson\nBW7uP0jcst8_0\thorse\nBXA3uMFAA9M_0\tcow\nBXCd65rDsk4_0\tdog\nBXCrD4eGGWw_0\tperson\nBXHktSPnW24_0\tperson\nBXTGSkuESqU_0\tperson\nBXUL3aLVZM4_0\tperson\nBXWXLNGacmc_1\tmotorcycle\nBXWXLNGacmc_0\tmotorcycle\nBXdMv9s3Rtw_0\tperson\nBXiQhR0Zj70_0\tperson\nBXrwbMjK_ZU_0\ttrain\nBX8AJD8uL3U_2\tperson\nBX-SAZsC6yc_2\tknife\nBX-SAZsC6yc_4\tknife\nBYQfvvAP9rY_0\tperson\nBYRNeh3RRZs_0\tperson\nBYS-DmtMpWE_0\tcat\nBYVhHLCSZ_M_1\tdog\nBYYakMVK6Ko_0\tperson\nBYi8dYVDYak_0\tperson\nBYkytpBqzHQ_0\tairplane\nBYq45niURL8_1\ttruck\nBYq45niURL8_0\ttruck\nBYud6fy8t8A_1\tknife\nBYud6fy8t8A_0\tknife\nBYud6fy8t8A_2\tknife\nBYud6fy8t8A_3\tknife\nBYxg5sQjvQ4_0\tperson\nBYyATiWsxZs_2\tcar\nBYyATiWsxZs_0\tcar\nBYyATiWsxZs_1\tcar\nBYyrXwDFF5U_0\tperson\nBY0XhpATtuI_0\tumbrella\nBY2Fs4KDDbU_0\tmotorcycle\nBY7KYQ_Qf3Y_0\tcow\nBY8mmPl_K_A_0\tperson\nBY-5sA1BbFE_0\tdog\nBY-5sA1BbFE_2\tdog\nBZDa7e9EFvI_0\tknife\nBZERyxrpvg4_1\tperson\nBZIzw3XdAgI_1\tperson\nBZI3ovXxotQ_0\tknife\nBZeIe9Nkb1E_0\tcat\nBZgZ1H4t3hQ_0\tperson\nBZgxjWSM7Vc_0\tbicycle\nBZhfYzqKuu8_0\tperson\nBZkYWI_qxz4_1\tbird\nBZldivEoOo8_0\tperson\nBZli_iMMV8k_0\tbear\nBZli_iMMV8k_7\tbear\nBZ94WX4wHn0_0\tskateboard\nBaDQg_CCQpU_0\tperson\nBaDQg_CCQpU_2\tperson\nBaHS1WcgbbE_0\tbird\nBaHS1WcgbbE_1\tbird\nBaJTQLa-vuU_0\tperson\nBaOQYsYuC6A_1\telephant\nBaRsW_taGVY_0\tcat\nBaWQb_lSjYs_0\ttrain\nBaYLeM_yk_Q_1\tskateboard\nBafH7BetIyk_0\tperson\nBakCr5HeDNE_2\tboat\nBakCr5HeDNE_0\tboat\nBauKE-faLzM_1\tperson\nBavQVUFfmBU_1\tperson\nBavQVUFfmBU_0\tperson\nBavoG7kb0wo_0\tcar\nBaxc5TW06FU_1\tknife\nBa1sC-X1OF8_0\tperson\nBa1sC-X1OF8_1\tperson\nBa2T3joy6BQ_0\tperson\nBa3CWVKFpBE_0\tboat\nBa5BO-nvDnE_1\thorse\nBa-SiAqH09k_2\ttruck\nBbAdBjyFFEA_0\tbird\nBbAdBjyFFEA_1\tbird\nBbAdBjyFFEA_2\tbird\nBbEfZ9mUKOY_0\tcat\nBbOabnT5V-E_0\tperson\nBbQyfmZx-2Y_2\tbear\nBbRarKH6D_Q_0\thorse\nBbYZ7Ee3Ixs_0\tperson\nBbYqjT1OzLY_0\tperson\nBbYqjT1OzLY_1\tperson\nBbfOXQD21Ac_1\tmotorcycle\nBbnSU5sRdBs_0\tperson\nBbnxzNL5tMk_0\tperson\nBbq8h83cFE8_0\tperson\nBbu_YM_GBG4_3\tbird\nBbu_YM_GBG4_0\tbird\nBbv9Y9Goufk_5\telephant\nBbv9Y9Goufk_0\telephant\nBbv9Y9Goufk_1\telephant\nBbv9Y9Goufk_2\telephant\nBb4uwSjmtKk_2\tbird\nBcHl4OuJLT4
_0\tperson\nBcHl4OuJLT4_1\tperson\nBcSXX5O_YDw_0\tbicycle\nBcVn38vI_Zk_0\tperson\nBcV5QdDIrMg_0\tperson\nBcg-TsdpO-Q_0\tperson\nBcjVHV-6WWM_0\tperson\nBcjZaclf1m0_3\tbird\naO4uLNN4Gt0_0\tbear\naPCEyodWBU4_0\tperson\naPPUf7JUJRo_0\tperson\naPf5SoOgmhQ_0\tmotorcycle\naPheJtUTSps_1\tboat\naPm89i_7aKs_0\ttrain\naPm89i_7aKs_1\ttrain\naPswSvCaFDQ_0\telephant\naPvqWgeR03U_0\tperson\naQAieL0LKIo_0\thorse\naQB2gAnqQi0_1\tperson\naQGQKDLwRqM_0\tperson\naQVn7fJi_l4_0\tcat\naQaKnTZ4hDg_0\tperson\naQfQqr5W5uI_1\ttruck\naQfQqr5W5uI_2\ttruck\naQfQqr5W5uI_4\ttruck\naQlLjT95Hgs_3\thorse\naQub6VGWKzQ_0\tcar\naQzKS5Sn9u0_0\tperson\naQ1c75hfANo_0\tperson\naQ6larydXgI_4\telephant\naQ6larydXgI_0\telephant\naRBWB79BIIg_1\tumbrella\naRHGn50eToQ_0\tbear\naRQQ75s9Ni4_0\tboat\naRRUAfurxVU_0\tperson\naRcw_PTSf4o_0\tperson\naRdAN9jVvqQ_1\tdog\naRnJ4lIPIL4_0\tbus\naRueDRgWEOs_0\ttruck\naRzwrPXsTRI_0\ttruck\naR6P3PtMIZc_0\tperson\naSDuIU0pzYY_0\tperson\naSH88cb0kww_0\tperson\naSMzQpOjAc8_0\ttrain\naSUtY_pSN0k_0\tbird\naSWGbO-Nfcg_0\ttrain\naSWGbO-Nfcg_1\ttrain\naSb-LY3vBsg_0\tgiraffe\naSkBoJ55w2Y_0\tperson\naSqwAZJaQIk_0\tbus\naSqwAZJaQIk_2\tbus\naSsjyvISV94_0\ttrain\naSw1yhbXHuA_0\telephant\naS2Zw7-j7p4_0\tcar\naTBr31jkThQ_3\tbus\naTOn74Inw24_0\tbird\naTR3FylgTkA_1\tperson\naTR3FylgTkA_2\tperson\naTS8hur_yyo_0\tperson\naTcDiEXEhhk_1\thorse\naTdIOtWasSE_0\tperson\naTeFjqoG9fM_0\tperson\naTeFjqoG9fM_1\tperson\naTj38bNIsQo_0\tcow\naTvgsqSb5aA_0\tperson\naTvoRXrEvG4_0\tbicycle\naTvoRXrEvG4_2\tbicycle\naT3idINTybY_0\tumbrella\naUFHlj5AVrU_0\tperson\naUNlQPWMFHo_0\tcar\naUQh47P34C0_0\tperson\naUQh47P34C0_1\tperson\naUX-HZraWQs_3\tzebra\naUh41vv5vdE_3\ttrain\naUh41vv5vdE_0\ttrain\naUh41vv5vdE_2\ttrain\naUv4LjbJxLs_0\tbus\naU5AZMYHZ2o_0\tdog\naU5tePXE5qE_1\telephant\naVFbcdQrobU_0\tperson\naVGtibXVt40_0\ttrain\naVMpwmT7ojA_0\ttruck\naVPIHMyNEw8_0\ttruck\naVZJ8qaxG3s_0\tperson\naVif6Qc9Prw_0\tcow\naVknWcQimJA_0\tbus\naVm9jp_ttsk_0\telephant\naVm9jp_ttsk_1\telephant\naVm9jp_ttsk_4\telephant\naVm9jp_ttsk_5\telephant\naVm9jp_ttsk_6\telephant\naVm9jp_ttsk_7\telephant\naVm9jp_ttsk_8\telephant\naVo-jvGoUGs_1\tboat\naVo-jvGoUGs_0\tboat\naVq4ezzbcTc_0\tbird\naVvuGEexwy0_1\tperson\naVy9mhLlo5U_0\tumbrella\naV2_0JBmw8o_1\tperson\naV7mSkydynI_4\tbicycle\naV7mSkydynI_1\tbicycle\naV7mSkydynI_2\tbicycle\naWCNGGW4Qew_0\tperson\naWDtrDYqivs_0\tperson\naWQxqFyyzng_0\tcow\naWQxqFyyzng_1\tcow\naWWMT0webCY_0\tperson\naWWtWhgt_V0_0\tcow\naWYoUCAev64_2\tbicycle\naWYoUCAev64_0\tbicycle\naWcaF85RIM8_3\telephant\naWgSKxQO5Ps_0\tcat\naWi51gAEIkY_0\tperson\naWma4eTtHv0_0\tperson\naWqBSBc-XpU_2\tknife\naWt13fGkYuA_0\tcow\naW9D5rT3GCo_0\tbear\naXFFLOGR_yI_0\tperson\naXFgCWZLFj8_0\thorse\naXFgCWZLFj8_5\thorse\naXFgCWZLFj8_1\thorse\naXKbkyjRqkU_8\tbear\naXKbkyjRqkU_0\tbear\naXKbkyjRqkU_7\tbear\naXOPdDTpvxc_0\tperson\naXWkAKNw0Dg_0\tbird\naXXfrIsIqi0_0\tperson\naXhd5BhT4hs_0\tcow\naXhd5BhT4hs_1\tcow\naXml5kCJyDY_0\tskateboard\naXml5kCJyDY_2\tskateboard\naXn1cwN8vng_0\tairplane\naXn1cwN8vng_1\tairplane\naXxKLf5m61g_1\tperson\naXxKLf5m61g_0\tperson\naXxPxBeZjQI_0\tperson\naX0JOJY-BDc_0\tperson\naX0JOJY-BDc_2\tperson\naYCA7dz0nbI_0\tperson\naYJzxhE8-Rs_5\tknife\naYPCTMucy6A_0\tperson\naYgA8AxT0V4_0\tgiraffe\naY1i2TADX0c_0\tperson\naY1i2TADX0c_1\tperson\naY1i2TADX0c_2\tperson\naY4dOYabpbs_0\tcow\naY6lI7qO6kI_0\tperson\naZF83PK7HKU_0\tperson\naZF83PK7HKU_1\tperson\naZGZbrCAFl4_0\tperson\naZGZbrCAFl4_1\tperson\naZHznZSD2uE_0\tperson\naZJ_vArnOC0_0\tcow\naZL_n-gon0U_0\tboat\naZT_v5WnLio_0\tperson\naZVtxAF_Imw_0\tdog\naZZcXyRJwyI_0\tperson\naZ4tzgju18s_1\ttrain\naZ-3j
ypmJiY_0\tperson\naaAAXDB7ml4_0\telephant\naaAAXDB7ml4_1\telephant\naaA_qcyN3eM_1\tcow\naaBf3fxpR7E_1\tperson\naaQjh2_8aVw_1\tmotorcycle\naaQjh2_8aVw_0\tmotorcycle\naaUXN-xWi1c_0\tperson\naaWV0TEIbhM_0\tskateboard\naaWV0TEIbhM_2\tskateboard\naacFWGARp08_0\tperson\naacLCDo8Zus_0\tumbrella\naacZc8VUtxg_0\tbird\naaoYsiVAFDY_0\tairplane\naas39xgvbfg_0\tcat\naatdoixvb4w_0\tdog\naazC6OJV2GY_0\tperson\naa0jo00Yxz0_2\tboat\naa-J6xg9RH4_0\tperson\nabCu1bwDisA_0\tumbrella\nabHvXnWduQQ_0\tperson\nabQ7YCx3QQM_0\ttrain\nabbympAEM_k_0\tcow\nablCJGTLCow_1\telephant\nablCJGTLCow_3\telephant\nablCJGTLCow_4\telephant\nablCJGTLCow_0\telephant\nable--ZWvkg_1\tperson\nabnCzyC9R28_0\tperson\nabpyt2p-uMg_1\tbird\nabrKRGgLV0o_0\tdog\nabrKRGgLV0o_1\tdog\nabxcR1X4UIo_1\tbird\nabxuxX4aHFI_1\thorse\nabxuxX4aHFI_2\thorse\nab1RpuefUA0_3\tbicycle\nab2b2WA-fQs_1\tperson\nab2b2WA-fQs_0\tperson\nab2b2WA-fQs_2\tperson\nacDY2Ono9WA_0\tdog\nacL58vxHnnc_0\tperson\nacOdf26jldk_0\tperson\nacYxvpS0b7s_2\tairplane\nacZFDZif1ww_0\ttrain\naciCzrBQsM0_0\tperson\nacnOEnTXwJY_0\tcow\nacnOEnTXwJY_1\tcow\nac4feYMso4k_0\ttrain\nac6NdTBtc6U_1\tperson\nadAkRe99CDA_0\ttruck\nadE0Nk3CKyI_0\tcar\nadKIteGSOIM_1\tskateboard\nadY8EtfOO_w_0\ttrain\nadcv2A70AoA_0\tperson\nadiBUyRiBfo_1\tperson\nadiBUyRiBfo_0\tperson\nadskAqVAdFQ_1\telephant\nad2C17MGAEo_0\tbus\nad94BZD75ck_1\tcow\naeAjL4rCjIM_1\ttruck\naeAjL4rCjIM_0\ttruck\naeIzIOSHZek_0\tperson\naeJKW7m42xo_2\tairplane\naeJKW7m42xo_0\tairplane\naeKckIdL0io_0\tbird\naeUVIIEtwdw_1\tmotorcycle\naeUVIIEtwdw_2\tmotorcycle\naeUVIIEtwdw_3\tmotorcycle\naeUVIIEtwdw_4\tmotorcycle\naeboOU_vdjo_0\tperson\naeboOU_vdjo_1\tperson\nBc2pPI9s8bM_2\thorse\nBc26F0eEyBg_0\tperson\nBc5QvTVd-04_0\tperson\nBc64C5jdZDg_0\tperson\nBc7NXuSycR4_0\tskateboard\nBc-b4WhkWxw_0\tperson\nBdBZuvI8oak_0\ttruck\nBdBZuvI8oak_8\ttruck\nBdBZuvI8oak_1\ttruck\nBdBZuvI8oak_2\ttruck\nBdBZuvI8oak_3\ttruck\nBdBZuvI8oak_4\ttruck\nBdBZuvI8oak_7\ttruck\nBdB6NgtqioE_1\tbear\nBdCnusBWLuw_0\tbicycle\nBdC5wdGWMCw_0\tperson\nBdLMnBBX7rc_0\tperson\nBdQ8AC4jpkk_0\tperson\nBdR02myBXHY_0\tperson\nBdTRTQRbNqI_1\tskateboard\nBdT2u0kYx90_0\tbicycle\nBdT2u0kYx90_1\tbicycle\nBdT2u0kYx90_2\tbicycle\nBdT2u0kYx90_4\tbicycle\nBdZOawocL-c_0\tperson\nBddRmrmaI6M_0\tperson\nBd0JDJL6yXk_0\tairplane\nBd21KrWCyCg_0\tcat\nBd-WW1Hs9kk_1\ttrain\nBeAD9m4Yu_U_0\tperson\nBeCQkxXRRww_1\tperson\nBeCQkxXRRww_0\tperson\nBeCmkGB-RCw_0\thorse\nBeQWoctTF5I_0\tbear\nBeQWoctTF5I_2\tbear\nBeQupBkL2y8_0\ttrain\nBeTu3Ag6XIw_4\tbicycle\nBeTu3Ag6XIw_1\tbicycle\nBeVqWRYzPkY_0\tknife\nBebzr4dP1Ug_2\tperson\nBebzr4dP1Ug_0\tperson\nBedgXkpLAOs_0\tperson\nBefMC4f6Z3s_0\tperson\nBefMC4f6Z3s_1\tperson\nBefq3kL0E7o_0\tperson\nBegwn2Da_j8_0\tperson\nBepRWdKn0QA_0\tcat\nBetAKo6E3rw_0\tperson\nBezlbA5t77I_1\tperson\nBe4NCK9GwQU_0\tperson\nBe4V9lpSpJw_0\tknife\nBfIBlw1RkXc_1\ttruck\nBfJUkGEnxvE_0\tperson\nBfOXYUOsSf8_0\tairplane\nBfSxTA9yZak_0\tperson\nBfT3bVAeXLU_2\tboat\nBfWpLwfDFbc_0\tperson\nBffFognyZOA_1\tskateboard\nBffFognyZOA_0\tskateboard\nBfkXvdTkYF4_0\tperson\nBfkXvdTkYF4_1\tperson\nBfwHmAlZdKA_0\tperson\nBf1cF3BfY18_0\tperson\nBgBDqhuoTr0_0\tdog\nBgHvkS4H7w0_0\tperson\nBgamGCKlzTI_0\tperson\nBgbxYgCIde8_0\tcow\nBggPqcJz12g_1\telephant\nBgjdCfaJfsE_0\telephant\nBglxBESIjlE_0\tperson\nBgsTkbznAjI_0\tperson\nBgwZN0Ui-Q8_0\tperson\nBg0_DcQLOys_1\tknife\nBg3Zox43xGI_0\tskateboard\nBg4NtG5QkwM_0\tperson\nBg_cKljiGGE_2\tperson\nBg_cKljiGGE_0\tperson\nBhA7KMeJYAE_0\tskateboard\nBhL184lkUcw_0\tperson\nBhPyQcTHRmg_0\tboat\nBhXpOqm8Q5o_0\tbird\nBhZl6ZTtKDo_0\tperson\nBha-PhOr-bU_0\tbird\nBhdcIu_nQYs_
0\tbus\nBhqZrCcQpD4_0\telephant\nBh4QFujTqIo_0\ttrain\nBh5wIL7IE9A_0\tperson\nBh5wIL7IE9A_1\tperson\nBiGYFhnDhMI_0\tairplane\nBiQ4cYnaGPo_0\tperson\nBiYzQbOwhWY_1\ttrain\nBiYzQbOwhWY_2\ttrain\nBipPdxUV2PY_3\tboat\nBirMOPf7k0I_0\tknife\nBizSBnzOzy0_0\tperson\nBizSBnzOzy0_1\tperson\nBi1KsDpJT8w_0\tperson\nBi1KsDpJT8w_1\tperson\nBjGhd-Eq5ig_1\tcar\nBjGhd-Eq5ig_7\tcar\nBjJSECIrsd0_0\tdog\nBjLJqIPSyUM_0\tbicycle\nBjQO2ipch-w_1\tdog\nBjRyA1cPxA4_0\tcow\nBjZ9JRI_WkM_0\tperson\nBjbCdEHhCjI_0\tperson\nBjfwCDsBoeg_2\tbicycle\nBjhITTFavAk_0\tperson\nBjiJ7HAaOj8_0\tperson\nBjj4KdIbDBY_0\tperson\nBjk2IA4thIE_0\tbear\nBjogwheL3BI_0\thorse\nBjpX2nla914_1\tcar\nBjqdFABBqxA_0\tperson\nBjqdFABBqxA_1\tperson\nBjraW0bXW-0_0\tperson\nBj8lO8Jag3Y_0\tperson\nBj9wPwHXNQo_1\thorse\nBj9wPwHXNQo_2\thorse\nBj9wPwHXNQo_3\thorse\nBj_fS2abD9o_1\tbird\nBkFws1J8IM0_0\tbird\nBkMb48QM-zQ_0\tperson\nBkco3wJWvp0_0\tperson\nBkdBnU65i7Y_0\tperson\nBkdWJT3sWro_3\tairplane\nBkdWJT3sWro_4\tairplane\nBkfKa-zgphc_1\tairplane\nBklBU6Epydc_4\thorse\nBklBU6Epydc_1\thorse\nBkoQ8_W4drM_0\tumbrella\nBkteTGu81tQ_0\tbus\nBkwpJBHM_DM_0\tdog\nBk3VbRagAwg_0\tdog\nBlXhR1rRct8_0\tbicycle\nBlfVNiQZtko_1\tcow\nBlfVNiQZtko_0\tcow\nBlhT8WFfI54_0\tperson\nBlj4FY__L6Y_0\tperson\nBllnWV-BIDo_0\tbird\nBlqsGIq2hNg_0\tperson\nBlqsGIq2hNg_1\tperson\nBlzUBgB6BEc_0\tperson\nBl-1081HLyM_0\tmotorcycle\nBl--N1EQpuA_5\tairplane\nBmCAiO-WNmE_0\tskateboard\nBmG7dEBuS6s_0\tcow\nBmHShiZ1Xus_3\tairplane\nBmNwfiFBeRo_0\tperson\nBmNzw5vNQNI_0\tskateboard\nBmRZWeMzQLg_3\tbicycle\nBmRZWeMzQLg_0\tbicycle\nBmSBpZrrEt8_0\tcat\nBmXdIzhVZ0Q_2\tbear\nBmZN0ljGa84_2\tmotorcycle\nBmfHrAPEMrk_2\tperson\nBmfHrAPEMrk_0\tperson\nBmfHrAPEMrk_1\tperson\nBmjBM58PfZE_0\tcow\nBmjEEjKDJVI_0\tperson\nBmjLZgp38NI_0\tcat\nBm3l_RLjYpo_0\tmotorcycle\nBm3wZ63Ymvo_2\tmotorcycle\nBm7e-qOAcKQ_0\tperson\nBm8qAGd91Gg_0\ttrain\nBnADRMlWOsM_0\tairplane\nBnNJUP6xfG8_0\tbear\nBniJFr7IJRo_1\tperson\nBniJFr7IJRo_0\tperson\nBniJr-iCh9M_1\ttruck\nBnkIFwVPh8w_0\thorse\nBnkIFwVPh8w_2\thorse\nBnkIFwVPh8w_4\thorse\nBnkU89Dq2IQ_0\tperson\nBoA6CUl4t70_0\tcow\nBoGAxXRzHWs_0\tcow\nBoLSvTrm3d8_3\tcow\nBoNtUpvusGM_3\tmotorcycle\nBoNtUpvusGM_4\tmotorcycle\nBoNtUpvusGM_0\tmotorcycle\nBoNtUpvusGM_1\tmotorcycle\nBoNtUpvusGM_2\tmotorcycle\nBoOANS5_U9I_0\tmotorcycle\nBoPj2W_G2Qg_0\tairplane\nBoYvNfndu60_0\tskateboard\nBoZ3ZvdEZ4o_0\tcar\nBoZ3ZvdEZ4o_1\tcar\nBoiPpDeQ2mQ_0\tairplane\nBomNEWAGolQ_0\tperson\nBomVU8_LL_Y_2\tdog\nBowyw_fhWZ8_0\tperson\nBoy5toMvMwo_0\tgiraffe\nBo2qsQNYATk_3\tskateboard\nBo5bT8QP_Og_0\tperson\nBpDLFqS9EAE_0\tperson\nBpVyiSvjk4o_1\tdog\nBpdZmCkSHco_0\tgiraffe\nBpjdKB7AJ8U_0\tskateboard\nBpkMUQLoJUM_0\tperson\nBpoWgamMMro_0\tcow\nBp1zluIhHzc_0\tperson\nBp4vXfVIVxA_0\tskateboard\nBqBkvlijWKg_1\tperson\nBqDnDPIE18k_3\thorse\nBqPcqKW3uAM_0\tdog\nBqoRxXUz7q4_2\ttruck\nBqpA7iBOQ_s_0\tperson\nBqqPm3F1F_w_0\tperson\nBq4id5zA48c_2\tbear\nBq_emgXftMI_0\tperson\nBrDdbgxB7qI_1\tbird\nBrHDj1biLlA_0\tairplane\nBrHDj1biLlA_1\tairplane\nBrJiBbRF25U_0\tperson\nBrKgWUQnUWI_0\tcow\nBrQNhzCKfxs_0\tperson\naelph1Y8yPk_0\tskateboard\nae161Zq0QBg_0\tskateboard\nafCYMTTgbMw_1\tdog\nafD_y2ZEHn4_0\tskateboard\nafLO-CD48TI_0\tmotorcycle\nafLO-CD48TI_1\tmotorcycle\nafWl3lTglsw_0\tperson\nafbS6cTlE5Q_0\tperson\nafu5-raaJEc_1\telephant\naf9Z_LR-L7M_0\tperson\naf-MtTvmPic_0\tperson\nagFlIZmS0zU_0\tperson\nagF_eyIgF3g_0\tperson\nagGuxSx4UdI_0\tmotorcycle\nagIme93Q6WA_0\tperson\nagMdtESL5kE_2\tcow\nagSpfpV4EsQ_0\tperson\nagVHBb-qLAw_1\tbus\nagWS48KnYWk_0\tmotorcycle\nagXPzkjMl4c_0\tbird\nagYR35aJ1no_0\tperson\nag1ohTMq9Iw_0\tcar\nag
5Gy7ZNbfw_2\tknife\nag5Gy7ZNbfw_3\tknife\nag6NY6nrTvw_0\tbear\nahE37MgcoUs_0\tperson\nahMgOG4Bpcw_0\tcar\nahQD9PpYoqE_1\ttrain\nahYD0J4XzC0_0\tcat\naheVwPx1egw_0\ttruck\nahiO1CwoaY4_0\tperson\nahnbyNWfvpM_1\tcow\nahsHWgQGPNI_0\tperson\nahv6_xBxvmg_0\tperson\nah03BOnPUqs_0\tcow\nah-2yN1cKOg_0\tbus\naiINQVIMx5o_0\tperson\naiNcNIUbY3E_1\tdog\naiX8ymgR1g0_0\tboat\naiX8ymgR1g0_3\tboat\naierZPItkn8_0\tbicycle\naierZPItkn8_1\tbicycle\naiiN3X-f5Ss_0\tperson\naiklFoEJX1Q_0\tperson\nainWSZibSIM_1\tbicycle\naio5SboRXGU_0\tperson\naio5SboRXGU_1\tperson\naizJI68M2SY_2\ttruck\naizJI68M2SY_1\ttruck\nai1CTuarr50_0\tbus\nai3xYb_xvFA_0\tperson\nai7WTyMnl1g_2\thorse\nai7WTyMnl1g_3\tperson\nai7WTyMnl1g_0\thorse\nai7WTyMnl1g_1\thorse\nai9-_EMwk4U_0\tskateboard\nai_jmsLJTR0_0\tperson\najAuKSOFBKQ_2\tbus\najAuKSOFBKQ_3\tbus\najB-QUVDyXI_0\tcat\najO4xx5beuE_1\tbicycle\najPP5EY_nAo_0\tperson\najPP5EY_nAo_1\tperson\najPY1htweXM_0\tperson\najPY1htweXM_1\tperson\najtvjEY9TPA_0\tairplane\najxcj5ovYdw_0\tskateboard\naj0Ll84jtZs_0\tperson\naj0Ll84jtZs_1\tperson\naj3UwQNtZPo_0\ttrain\naj6sqeG0k54_0\tumbrella\nakH9ouIrOds_0\tskateboard\nakIlFKpZAtk_0\tperson\nakOLIpAsxqc_1\tperson\nakQU-s0RCWE_1\tbus\nakoVZ50spRM_0\tperson\nak6iAVUNU7c_0\tdog\nak6iAVUNU7c_2\tdog\nak6iAVUNU7c_1\tdog\nak89dpHVmHc_1\tperson\nalAFNWeSJts_0\tskateboard\nalDkqPNUFLU_0\tperson\nalDkqPNUFLU_1\tperson\nalKgZTVxcV4_0\tmotorcycle\nalX9MOY80Aw_0\tperson\naluZTs_Ys8I_0\tcar\nalvKKzlOBKM_0\tperson\nalzWhOivD0E_0\tperson\nal2Vh0In4HU_0\tbear\nal2Vh0In4HU_2\tbear\nal2Vh0In4HU_3\tbear\nal8Of2FWy80_0\tcat\nal8vzWgNDbs_2\tbicycle\nal8vzWgNDbs_7\tbicycle\nal8vzWgNDbs_8\tbicycle\namIvXQ6aZkE_0\tcow\namL9Dar_hp0_0\tperson\namTcWqrgBBg_3\tairplane\namjpcHzuYb4_0\tperson\nams9MCDF15I_1\tperson\nams9MCDF15I_0\tperson\namvLPTONS1U_0\tcow\nam-3XKJkCqg_0\ttrain\nanAXVexurxo_2\tdog\nanJbsuTwShw_0\tperson\nanLTttUpag0_0\tskateboard\nanR9cuXRv6Q_0\tperson\nanWxwjzPRBA_0\tperson\nanYy3XNTTGw_0\tperson\nanZ9lxr24eY_0\tperson\nangay7OmUwA_0\ttruck\naniCxSPm8Uc_0\tcar\nanlydfnmv7g_0\tperson\nannQpJsk6NI_0\tbus\nanpsTMr_HIo_0\tcat\nanrBShdHOz4_0\tperson\nanvk-OdKLBE_0\tperson\nanvngue8Qh8_0\tcat\nanzrRzyYAAc_0\tdog\nan-QcnhNhL4_0\tperson\nan-mFuTYuCk_0\tperson\nan_FRcZ669c_0\tperson\naoBqV2Guvso_0\tperson\naoDJu0KrrQs_0\tmotorcycle\naoOJR-0sPM0_0\tperson\naoSWWKtf8mU_0\tperson\naohLKKJxjIM_0\tperson\naoizdynEVYU_0\tdog\naoqMoScEfqE_1\thorse\naotBl0tvpFs_0\ttrain\naotBl0tvpFs_1\ttrain\nao9uUinn2WY_1\ttruck\napKAwFA4oP0_0\tbird\napQKmVEucLQ_0\tperson\napZAEWvk8XY_0\tperson\napcgot45Ql0_0\tperson\napdP6_tCdls_0\tperson\napfZjUpoTy0_0\tskateboard\napfZjUpoTy0_1\tskateboard\napprUmnQTcI_2\tcow\naqGKBg0azPA_0\tcow\naqGp6tCGLOU_0\tmotorcycle\naqKiwfY3Oqc_6\tbus\naqKiwfY3Oqc_5\tbus\naqKiwfY3Oqc_7\tbus\naqNz8TCica4_0\tzebra\naqUHuS5ALXE_0\tcow\naqWN-Q0wDHI_0\tperson\naqWN-Q0wDHI_1\tperson\naqZfqhHJPLo_0\tperson\naqdSuLpYlwQ_0\tperson\naqe_mdIg6k0_0\tperson\naqmie50AFwE_0\tdog\naq2UMxzwliQ_0\tperson\naq50xKvuSFg_0\tskateboard\naq59B_-6ilw_0\tperson\naq9Sfxn9vMg_5\tknife\naq-QzG14KJ4_0\tperson\narFKRc7lAo0_0\tperson\narFKRc7lAo0_1\tperson\narPGoY7uh4E_0\tperson\narS7aqpkAU0_0\tmotorcycle\narT4jZLX8pg_1\tknife\narW0ZUPkah8_0\tperson\narZ_mIhaJMo_0\tcat\nare5LvOB2nQ_1\tskateboard\nare9NykT9FM_0\ttruck\narn0j0l_IWI_0\tperson\nartWKQTC7CQ_0\tperson\nartcASpzYrU_0\tperson\narwZ6ZPJuN4_0\tcat\nar7TRjurXMY_0\tperson\nar-fzXT8Juc_0\ttruck\nasT-GJNeJok_0\tperson\naseOdDcbIRE_2\tperson\naseOdDcbIRE_0\tperson\naseOdDcbIRE_1\tperson\nashHnkqFz7g_0\tbicycle\nashHnkqFz7g_3\tbicycle\nasl-XTE0jsE_0\tp
erson\nasrDocOfGQE_0\tcar\nasrDocOfGQE_1\tcar\nasrDocOfGQE_3\tcar\nasrDocOfGQE_4\tcar\nasrDocOfGQE_5\tcar\nasrDocOfGQE_6\tcar\nastLiScyoaQ_0\tperson\nasx2CkH0O6I_0\telephant\nas1twjKe3Cw_0\tskateboard\nas6Y3-EaaCg_0\tperson\nas6Y3-EaaCg_1\tperson\nBrgRnN_LBGk_1\tperson\nBrgRnN_LBGk_0\tperson\nBrhMkJ6n-hQ_1\ttrain\nBrnBTne3NBw_0\tbear\nBrnBTne3NBw_1\tbear\nBroiAN_qtCI_0\tperson\nBrpRmX410DU_0\tperson\nBrrAlsmwDnk_1\tperson\nBrrAlsmwDnk_0\tperson\nBrrlyds8g1A_0\tperson\nBrwABvccCWs_0\tperson\nBrzEfM8nWCw_0\tcow\nBr3M-xsvXFQ_0\tperson\nBr9CVteHFEc_0\tperson\nBsCH_ABy0WE_0\tperson\nBsRC5xbG6uY_0\tperson\nBsXphFpnOxE_0\tbird\nBsXwLsR6dm8_0\tperson\nBsv8dNYzPkY_0\tbear\nBs1rRAtP7bw_1\tbear\nBs3BPJZMD9E_0\tperson\nBs94h8vMmwg_0\tperson\nBs_9E_Rq524_0\tperson\nBs_9E_Rq524_1\tperson\nBtFwcgeJjsY_0\tperson\nBtKVAhU1LdI_0\tknife\nBtKl-iqkgoY_0\tcat\nBtN0FlaISuY_0\tperson\nBt19SM8BenY_0\tperson\nBt41QF0ze6E_1\tperson\nBt7B7nkGO_4_0\ttruck\nBt7B7nkGO_4_1\ttruck\nBuFYI1vYj1k_1\tperson\nBuH65mVX5yM_0\tperson\nBuPWtDPEJ-0_0\tperson\nBuXvxclES0s_0\tbird\nBuco16wWyFA_1\tmotorcycle\nBuco16wWyFA_2\tmotorcycle\nBuco16wWyFA_3\tmotorcycle\nBuco16wWyFA_0\tmotorcycle\nBufY7NdKUlM_2\tmotorcycle\nBufY7NdKUlM_4\tmotorcycle\nBunvBFXoGPg_0\tbus\nBuqljdjPWWc_0\tknife\nBuqljdjPWWc_1\tknife\nBuumm7rgDPY_0\tperson\nBu0gJwoDkRw_0\tcat\nBu5Bgr9asUU_0\tperson\nBu_HdLSyLSI_0\tperson\nBu_3ep-qAi0_0\tperson\nBvEAIc3hmkk_0\tmotorcycle\nBvHzGHjR6rk_0\tperson\nBvLCgNWIHfA_0\tperson\nBvLJZAhIR3A_1\ttruck\nBvTLdUcIH5I_1\tperson\nBvTbuvBeunI_0\tairplane\nBvTjf9mG5MU_0\tperson\nBvZ8DqslB-U_1\tairplane\nBvZ8DqslB-U_2\tairplane\nBviGbtAujq0_0\ttruck\nBvrORC4d2yg_0\ttrain\nBvrORC4d2yg_1\ttrain\nBv4rjfW9RsM_0\tdog\nBv9IXbrDYLk_0\tbird\nBwDccOS7_vw_0\tperson\nBwIoxW7Ee8M_4\ttrain\nBwUYR-ZnpX8_0\thorse\nBwW4Fs1eTRg_0\tairplane\nBwW4Fs1eTRg_1\tairplane\nBwergWBqOOs_2\ttrain\nBwgJmjOzlRk_0\tperson\nBwoTsoC3hvQ_3\thorse\nBwo1MaJvxRs_0\tperson\nBwrh4q5KLVg_1\tdog\nBwsHsSpS0dQ_0\tbird\nBw2RhmesY5g_0\tperson\nBw5iwcbP4eM_0\tgiraffe\nBw6f2OXYtSo_0\tcow\nBxHIRvoGZMM_0\tperson\nBxMoEE7XwL8_0\tperson\nBxNE34BGZ-4_0\tperson\nBxQp3-SCUGs_0\tperson\nBxQp3-SCUGs_1\tperson\nBxWs9aINEEI_0\tperson\nBxWs9aINEEI_2\tperson\nBxWs9aINEEI_1\tperson\nBxYdU6vB2YQ_1\tmotorcycle\nBxaEaD7zeX4_0\tperson\nBxhktnvjtLA_0\ttruck\nBxmeqCev3Kw_2\tboat\nBxmeqCev3Kw_3\tboat\nBxm3EvRZAI0_0\tskateboard\nBxvlWueS9vA_0\tmotorcycle\nBxwmNnxcI7o_1\tperson\nBxzVlf9-SLc_14\tbicycle\nBxzVlf9-SLc_4\tbicycle\nBxzVlf9-SLc_6\tbicycle\nBxzVlf9-SLc_8\tbicycle\nBx2YQSFETcw_1\tperson\nBx4ELKBw9PU_0\tcow\nBx4ngxnRjvM_0\tmotorcycle\nBx-is-dL1ko_0\tperson\nBx_z_4bt8O4_0\tperson\nBx_z_4bt8O4_1\tskateboard\nByBWtiJJNqk_0\tperson\nByBWtiJJNqk_1\tperson\nByFCiUvKd4E_0\tcow\nByFCiUvKd4E_1\tcow\nByFCiUvKd4E_2\tcow\nByJNGLp-Q1Q_0\tboat\nByRne1VtDow_1\tperson\nByfeHjkm0NA_0\tbus\nByhpLi9sRUs_4\ttrain\nByhpLi9sRUs_5\ttrain\nByhpLi9sRUs_0\ttrain\nByn2Qo7ghaQ_1\tperson\nByvWskJDMGg_0\tairplane\nByvW2VADH6w_0\tmotorcycle\nBy1cSo8DcUw_0\tbicycle\nBy8jq7bVrkw_0\tperson\nBzKADkfj5sM_0\tcow\nBzNlO4ccRRY_0\tperson\nBzOo01dGJkw_0\tperson\nBzT8xDTB14c_2\ttruck\nBzWiQPw-vQc_0\tperson\nBzX2DmrGvp0_0\ttrain\nBzeW7KdQ818_0\tskateboard\nBzeW7KdQ818_1\tskateboard\nBzehenf5vSI_0\tairplane\nBzgqI8VBlSE_0\tperson\nBzpY-JMNW4c_0\tperson\nBzrM5QG9q2o_0\ttrain\nBzr3gVS8SzI_1\tboat\nBz5rpBZ1dzs_0\tperson\nBz7A9QxD1nY_0\tknife\nBz9MqNlU7KM_0\tperson\nB0AazXeFQIU_0\tperson\nB0BXcxFMgrk_0\tknife\nB0EZ9LIObGc_1\tmotorcycle\nB0FupWyYbG8_1\tperson\nB0NJSrhuWwA_1\tperson\nB0NJSrhuWwA_0\tperson\nB0QFrtXczzE_0\tperson\nB0SYog80Y78_0\tpers
on\nB0WaLst2GGg_1\tperson\nB0YrdZ7s3UY_1\tperson\nB0YrdZ7s3UY_2\tperson\nB0aFuZP3nYE_0\tperson\nB0aFuZP3nYE_1\tperson\nB01lwUoyl90_0\tperson\nB03gLj0lJrk_0\thorse\nB0-L6VbxLcU_0\tcat\nB0-lAJ4tBN4_0\ttrain\nB0-lAJ4tBN4_1\ttrain\nB1IQyTNE7eg_0\tskateboard\nB1Ojfucympw_0\tperson\nB1Ojfucympw_1\tperson\nB1YzUGPZQWo_0\ttrain\nB1hkAet1OQI_0\tperson\nB1isEeljBFI_0\tperson\nB1pC6hfF_Do_0\tperson\nB1qSE-7JgXE_0\tperson\nB1yiSrv4Ocw_1\thorse\nB1zPD20nhTg_0\tperson\nB12C84by_eA_0\tperson\nB12C84by_eA_3\telephant\nB12C84by_eA_1\tperson\nB12C84by_eA_2\tperson\nB12C84by_eA_4\tperson\nB12C84by_eA_5\tperson\nB12C84by_eA_7\tperson\nB12C84by_eA_10\tperson\nB12C84by_eA_11\tperson\nB2EMVGU5pNA_4\ttrain\nB2VryVb5p54_0\thorse\nB2VryVb5p54_2\tcow\nB2V7kk7fqSc_0\tperson\nB2X9JzMNZb0_0\tperson\nB2ZpqEJpVX0_0\tperson\nB2fTIk9eCNc_1\telephant\nB2gJVve4I58_0\tperson\nB2hKNbDmBtM_0\tcat\nB2lAxi3jIR0_0\tperson\nB2lAxi3jIR0_1\tperson\nB2lAxi3jIR0_2\tperson\nB2xcdU4Qoz8_0\tbicycle\nB2xcdU4Qoz8_12\tbicycle\nB23TpirETNE_0\thorse\nB26AQtx7Xic_0\tperson\nB3HZSrALQYc_0\tskateboard\nB3IjPORG3_w_1\tbird\nB3J2umsYK7E_0\tperson\nB3QykPv8TnI_0\tperson\nB3X5wDENAUw_0\tcat\nB3kTu0B4OjM_0\tperson\nB32uNSxqzgs_0\tcow\nB33seWCiea4_1\tperson\nB33seWCiea4_0\tperson\nB4Q6pRC_mZ8_0\tbicycle\nB4Q6pRC_mZ8_1\tbicycle\nB4Srj2O1AWQ_0\tcow\nB4dFepwxEOU_0\tperson\nB4iP6lAoNYo_0\tperson\nB4jbThMFW00_0\tperson\nB4mWkc8-_6A_0\tbird\nB4oO-miJ6VU_0\tumbrella\nB4vM2iKb8cs_0\tperson\nB4_mRuPC7o0_0\tperson\nB5BNEoIaQL4_0\tperson\nB5GwJoM3aX8_0\tperson\nB5NgN9mocgI_0\tperson\nB5PHI2HVtuc_0\tperson\nB5fv91yB4Gw_0\tbicycle\nB5qSvRpXLS8_0\tcat\nas7rVUFzyzg_0\tskateboard\nas_Rz9F3slw_0\tcat\natA-Cgv2XHY_0\tperson\natE1O6J4Wls_0\tperson\natLGWZUbEuM_1\ttrain\natMjLEIbsBI_0\tcow\natxnLL4Vjuo_0\tperson\nat2dmAEDdmg_1\tperson\nat4pXKjEDic_0\tperson\nat4pXKjEDic_1\tperson\nat5edW3lMVA_0\tperson\nauA-q9fWwn4_0\telephant\nauDJ1xtxFlw_0\tperson\nauDJ1xtxFlw_1\tperson\nauFLAZb-gD8_4\ttruck\nauGyhsy8iLA_0\tcow\nauNciV4eLVo_0\tbus\nauOl1mbGUlk_0\tbicycle\nauOo1Lg_wvU_0\tdog\naubLDLbxxsk_0\tperson\naueT5WO4e_c_0\tgiraffe\naueT5WO4e_c_1\tgiraffe\naugKp60fa5Q_1\tcar\nauiPa0HNOEQ_0\tperson\nauu_tYb3G1Y_0\tperson\nauzy4oPzM5Q_0\tmotorcycle\navCqOSeS7WU_0\tperson\navC67gaD1NM_0\tcat\navHbY1Q3vyw_1\telephant\navLxYBedm_c_1\telephant\navT7Q6Wibdg_0\tperson\navl9d-bL57Q_0\tairplane\navl9d-bL57Q_1\tairplane\navob12vGzmU_0\thorse\navonCFmxPyg_0\tperson\navonCFmxPyg_1\tperson\navpWY3czerE_1\tcar\navpf9VVT6CU_0\tmotorcycle\navvQ5wNPiew_1\tperson\nav475qBV4QY_0\tskateboard\nawC9zxAeP54_0\tperson\nawQ1n9aQEco_0\tperson\nawVBieSP5Zw_0\tperson\nawVa7pqR9DU_0\thorse\nawfg9NsCVQ0_0\tperson\nawjHSQ5uPi4_0\tbus\nawkpYVN-fJw_1\thorse\nawmHGFkxxlw_0\tperson\nawwWMuOKe3c_0\tperson\naw059qHbVm0_0\tbus\naw2lOvXUAPg_0\ttruck\naw5C9nQgLcA_0\tperson\naxB1Gk85UtQ_0\tperson\naxEK7nZ8W3I_0\tperson\naxJZ92uWnkA_0\tperson\naxXs2oUd4ow_0\tbear\naxcDoOd0G0s_0\ttruck\naxjSgDsN6t8_0\thorse\naxltu5Qf6ok_0\tskateboard\naxn6QuPBPqA_0\tperson\naxulii3UXSQ_1\tperson\naxulii3UXSQ_0\tperson\nax4YUE-PcF8_0\tairplane\nax4YUE-PcF8_2\tairplane\nayD3RJIjplM_0\tdog\nayRmnUb2LAI_0\tairplane\nayax5k3PJMs_0\tperson\naybdlOdul0U_1\tperson\naybdlOdul0U_0\tperson\naydxF0r6n9s_0\tperson\naydxF0r6n9s_1\tperson\nayg0x1glF2s_2\thorse\nayg0x1glF2s_0\thorse\nayg0x1glF2s_1\thorse\naylBB_8cv60_0\tumbrella\naysqPEtZvsg_0\tperson\nayuF_8chcKM_0\tperson\naywW_Wvo49w_0\tperson\nayzzG8M0fzo_1\tperson\nay1d8NBbrl0_2\tbird\nay1d8NBbrl0_3\tbird\nay5RnrQple4_0\ttrain\nay5tx1Rovwk_0\tcat\nay7LLDO9Ecc_0\tdog\nazC7-_wC8N8_0\tbus\nazDn4DU7cGA_0\tp
erson\nazKKcIb4Ufw_1\tboat\nazOInI_CMHM_0\tbus\nazbls7-iaEU_0\tperson\nazbls7-iaEU_1\tperson\nazbls7-iaEU_2\tperson\nazfLb8VvI-4_0\tperson\nazfLb8VvI-4_1\tperson\nazfLb8VvI-4_2\tperson\nazlRI_Jydpw_4\tcow\nazmZDijLihI_0\tperson\nazmZDijLihI_1\tperson\na0FDxoXtFyM_1\tairplane\na0NOwUio_n8_1\tperson\na0NOwUio_n8_2\tperson\na0NdjlW5H_U_0\tcow\na0N_vetshbg_0\tperson\na0N_vetshbg_1\tperson\na0OjB7xzRx4_0\tperson\na0RusP9ATfw_0\tperson\na0dHPtoBS3U_0\tperson\na0hRgBpppWs_0\tperson\na0jpiOFS7eM_0\tbear\na0oeBV6-20U_0\tperson\na0uoJdAwobA_0\tperson\na085oeXd0RE_0\tperson\na0-Pmmyi8js_1\tperson\na1ADw1megCI_1\tairplane\na1Fzn7iUHO8_1\tmotorcycle\na1RVXQl4rlY_1\tcat\na1RinDI9Hgw_2\tknife\na1SaKvoO2Og_0\tcow\na1U6U_pntMo_0\tperson\na1XDxiP1hNA_0\tperson\na1ctjjNUZ-4_0\tdog\na1kLNA-KACs_1\tbicycle\na1lQwuhicQI_0\tperson\na1lQwuhicQI_1\tperson\na14VlgxHS3M_0\tperson\na2AT0Xo7uLY_0\tperson\na2Osa5aleJ0_1\tbus\na2Qp2Grx3_8_0\tperson\na2XMK6mjiZg_0\tdog\na2XvXs2guuE_1\tperson\na2XvXs2guuE_0\tperson\na2fEq8oS3M8_0\tbus\na2gYRtJhP1E_0\thorse\na2gYRtJhP1E_1\thorse\na2hv4szlq-Q_0\ttrain\na2kH2_9zoWU_0\tairplane\na2o_-GSpXXk_0\tcat\na2qmS6AhUYk_0\tmotorcycle\na2vx_F1NOas_0\tperson\na26mRIQUPoU_0\tdog\na26mRIQUPoU_1\tdog\na26mRIQUPoU_2\tdog\na27UC8vu1hI_1\ttruck\na29AS00WJrY_0\tcow\na3AIwQnG0Ek_0\tcow\na3FLLhQu768_0\tperson\na3THrQYDkqw_1\tbird\na3UCtF8nZIY_1\tskateboard\na3dbdHben-o_0\telephant\na3dbdHben-o_3\telephant\na3dbdHben-o_9\telephant\na3dbdHben-o_1\telephant\na3dbdHben-o_2\telephant\na3dbdHben-o_4\telephant\na3dbdHben-o_6\telephant\na3rGEI8MdMs_0\tcow\na3uvEIsI1no_2\tperson\na32oJ0GsAYw_0\tperson\na35UuVw16Ks_0\tperson\na37D3FoqIJA_1\tknife\na3-oi7T-Lw0_1\tzebra\na3-tURw95Xo_2\tperson\na4IU4va7hp0_1\ttruck\na4LYVAPbEwI_0\tmotorcycle\na4LaeeZXIc0_2\tskateboard\na4Nt5QxFqmY_1\tboat\na4PwZfJZVPA_2\tbear\na4arqJgXHDA_0\tperson\na4pR_YBd4yY_1\tbicycle\na4uNoGpllg4_3\tbear\na4v1ptMpyi0_0\tcow\na4v1ptMpyi0_3\tcow\na41TZwhyyP0_0\tcow\na46BqT5Mo5I_0\tcow\na5HZnFcvdyA_1\thorse\na5P8pVrcSRk_0\tmotorcycle\na5brvs-fct0_0\tperson\na5tSaF5GCKE_1\tcat\na5ye5BUJFlY_1\tperson\na5znd3aNwLk_3\tbicycle\na58tMy0mhIk_0\tperson\na6G_DBEFdFA_0\thorse\na6ZXi7Qqls0_0\tperson\na6fBYYEgBvs_0\tdog\na6jDeIJbF7Q_0\tperson\na6uyjrBkkXs_0\tboat\na61piN6ffE4_0\tperson\na67zz0CSEpk_0\tperson\na67zz0CSEpk_1\tperson\na7B81Zeqgfw_2\ttruck\na7HKuyv2qLQ_0\telephant\na7HKuyv2qLQ_1\telephant\na7Q6eb6feT8_0\tperson\na7S9rFNKVMI_7\tmotorcycle\na7Zr0-1LIPc_1\tdog\na7hwm4TORvY_0\tperson\na7pC7IjO2ik_0\ttruck\na7peWR4xJwQ_1\tcow\na7ygZsaDMis_0\tperson\nB5_Hyk-p7kE_0\tcat\nB6E15pe4UR8_0\thorse\nB6LGwD1E9SQ_1\tperson\nB6P8B8BO-6U_0\tgiraffe\nB6SaDYczlDQ_1\tperson\nB6U92N9hh6k_2\thorse\nB6V4xqX67OA_0\ttruck\nB6bDVhRNw00_0\tairplane\nB6cEdaWTjeU_0\tperson\nB6dBkoOhfBU_0\tcar\nB6lU93wtaDA_1\tboat\nB6mP9KsnQPc_1\tbear\nB6mngUQtFJ4_0\tcow\nB6nlTJYtmws_0\tcow\nB6pXMjH4geU_3\tboat\nB6qshzfLYzs_0\tperson\nB6x2dNbgPjM_0\tcow\nB6y439-imys_0\tperson\nB6z7eCsgfM0_0\tbear\nB61Wf8NFvcU_0\tairplane\nB645r0hkdmg_0\tperson\nB645r0hkdmg_1\tperson\nB67FwwZfIEA_0\tperson\nB6_IcyhOHpE_0\tperson\nB7BjhnnQ2K4_0\tperson\nB7GRNv2opSY_0\tbird\nB7MHQOUO4f8_0\tumbrella\nB7Z9UV6aQuM_0\tbird\nB7a8WkaWmH4_0\tperson\nB7a8WkaWmH4_1\tperson\nB7cXCz7jJKQ_0\tcow\nB7gX18_mDyQ_0\tperson\nB7hmqrwe88o_1\telephant\nB7hmqrwe88o_2\telephant\nB7iAvi5riV8_0\tmotorcycle\nB7nwfSMbEL8_0\tcow\nB7pEEUJ-J1g_1\tmotorcycle\nB7rCxgg3F_s_0\ttrain\nB8Bp9yKWV9c_0\tperson\nB8D4fPARFvo_0\tperson\nB8HQglK444U_2\tairplane\nB8HQglK444U_0\tairplane\nB8HQglK444U_4\tairplane\nB8LGL1Tt_wg_0\tperson\nB8MxJKDkvkE_0\t
person\nB8eeoykmq1E_1\tperson\nB8eeoykmq1E_2\tperson\nB8f7NnYq5sg_0\tperson\nB8f7NnYq5sg_1\tperson\nB8sWL2syyA8_0\tperson\nB8uIyRkm9YA_0\tairplane\nB8zGkBkQw4c_0\tperson\nB87W__RIE-E_0\tperson\nB8_Z7m50I_E_0\tmotorcycle\nB9AXF91pIUs_0\tairplane\nB9Ed_vAN9mc_0\tdog\nB9Y_LrDVbg4_0\tperson\nB9aqDsvGy5Q_0\tperson\nB9aqDsvGy5Q_1\tperson\nB9j233QxEuQ_0\tperson\nB9oJSA_NJ2s_0\tbicycle\nB9z17FOPd5A_0\tperson\nB99mIPKaChY_3\tcow\nB-CR7vl67W8_0\tperson\nB-QiQvJcSVk_0\tperson\nB-T1YNe09SU_4\tbear\nB-T1YNe09SU_3\tbear\nB-bDxAN93a4_0\tairplane\nB-dlnlRKA5s_2\tairplane\nB-dlnlRKA5s_7\tairplane\nB-tukWZbXp8_0\tperson\nB-wJpt4zl0c_0\tperson\nB-x2pu-ux3w_0\thorse\nB-z1uE4iuz4_0\ttruck\nB-0WNs2QYPk_1\telephant\nB-48lEXzIS8_0\tumbrella\nB-7cqxw95Ro_0\tperson\nB_BqrY2eeCY_0\tmotorcycle\nB_Gjc7J18qg_1\tperson\nB_Gjc7J18qg_0\tperson\nB_M6X41emhY_0\tperson\nB_O8idmfoCQ_0\tperson\nB_Tj79jaRXs_1\tperson\nB_Tmq51dx1g_0\tperson\nB_jGC2tlhRo_0\tperson\nB_k6vEEPHK0_0\tperson\nB_lEJv31TlI_1\tperson\nB_lEJv31TlI_2\tperson\nB_nZdcreecE_0\tperson\nB_wWPH9kbxM_0\tperson\nB_ylVg-TN2Q_0\tskateboard\nB_4Kfa8_9ms_0\tperson\nB_4eJYakoRY_0\tmotorcycle\nCAEqRvJLY-M_1\tmotorcycle\nCAe1SZKZ9T0_1\tcar\nCAq4CxCpeQE_0\tcat\nCA4UqnJCs58_0\tmotorcycle\nCA9SLI7TOKQ_0\tperson\nCBASqWyp4yk_0\tperson\nCBJQ5dL6Df8_2\thorse\nCBNqNe7G-QQ_0\tperson\nCBnYDFRfYgo_1\tbus\nCBqyVKttAwU_0\tcow\nCBtgGOzZtLQ_0\tperson\nCBz3ZOrTAjI_0\telephant\nCBz3ZOrTAjI_2\telephant\nCCAsEc2oRAM_0\telephant\nCCGg17i4vMU_0\tperson\nCCHay2RSnJI_0\tskateboard\nCCHay2RSnJI_1\tskateboard\nCCLRdGNDgdc_0\tcat\nCCoGim--jEg_0\ttrain\nCCp6NLBil8k_0\tbicycle\nCCwovjgEx1k_0\tperson\nCCwovjgEx1k_1\tperson\nCC0aX78fQFo_0\tcat\nCC-qoxEyocI_0\tperson\nCDCLLCkr87I_0\tcow\nCDY4TXCreQ0_0\tperson\nCDbWYF89944_0\tperson\nCDb6uyrYrZA_0\tcar\nCDfjcWI7iBQ_0\tboat\nCDgBHxiVkFw_0\ttruck\nCDnrG74PXbI_0\tperson\nCDpQZEjohRc_1\tcow\nCDpQZEjohRc_0\tcow\nCDrU-q6QdEs_0\tperson\nCD0cWR7d9yI_0\tperson\nCD4SGfIdfSg_3\telephant\nCEDTshbJOaI_0\tperson\nCEJoHSbb4gg_0\tperson\nCEJoHSbb4gg_1\tperson\nCEMCCDAYzQs_0\tperson\nCENd4xI4dnY_0\tperson\nCETUG_G0I4k_0\tcow\nCETUG_G0I4k_1\tcow\nCETUG_G0I4k_2\tcow\nCEUjuyvgrB0_0\tperson\nCEUqqi8y4sg_0\tcat\nCEVHrP5OzJ0_1\tknife\nCEafe_JTk8g_0\tknife\nCEqA0cqMfzg_1\tcow\nCEsjzJHOUBw_0\tdog\nCEzWiyTQOMA_0\ttruck\nCE1gHqc8aqU_0\tperson\nCE3KdY0X0QE_1\tperson\nCFD0NOl12CA_1\ttrain\nCFD6d4OweGQ_3\tmotorcycle\nCFD6d4OweGQ_1\tmotorcycle\nCFD6d4OweGQ_2\tmotorcycle\nCFD-UQW1aQU_1\tcar\nCFD-UQW1aQU_2\tcar\nCFRsGLeMJKc_0\tperson\nCFXkKgig7Io_0\tperson\nCFee6F2rbjc_1\tbird\nCFxObg2ebKQ_0\tairplane\nCFxObg2ebKQ_1\tairplane\nCF0JmXACTww_0\tperson\nCF01UBuV76Q_0\tperson\nCF7DZCaSqIg_0\tbird\nCF7DZCaSqIg_1\tbird\nCF7KYbTChlg_0\tperson\nCF71f3YLQ9U_1\tperson\nCF-cX0etaAw_1\tcat\nCF_NSKkrwjg_0\tperson\nCGCNTZsml7Y_0\tcow\nCGQoaYTzfaU_0\ttrain\nCGQoaYTzfaU_5\ttrain\nCGQoaYTzfaU_7\ttrain\nCGgxp3ycSWs_0\telephant\nCGoqd4n_qJg_0\tperson\nCGsUTzKzV4U_1\ttrain\nCGwrXZ2fUqg_0\tperson\nCGy0nn1MCqY_0\tperson\nCGy0nn1MCqY_1\tperson\nCG1sXlDy2Yg_4\thorse\nCG1sXlDy2Yg_5\thorse\nCHH1SlvOzfI_0\tperson\nCHIVYSnFst8_1\tbear\nCHJFpAcH8NM_8\tbicycle\nCHMzSMq0ui4_0\tskateboard\nCHZU6sP-loU_0\tperson\nCHZU6sP-loU_1\tperson\nCHbhzxurZNM_1\tperson\nCHbhzxurZNM_0\tperson\nCHnWGkGAnos_0\tperson\nCHo3jSv3HIA_0\ttrain\nCHwNoZ55z6c_0\tcat\nCH6ptLNxppU_0\tperson\nCH8zCsamj44_0\tperson\nCH-_pvq3am4_0\tperson\nCIJ-q_X_y7E_0\tperson\nCIKrCLz06-4_0\tcat\nCIQLvytEu6E_0\tperson\nCIQz5we_nHI_0\tperson\nCITgpk4GyMA_0\tbear\nCITgpk4GyMA_9\tbear\nCIV_VaLTf5c_0\tmotorcycle\nCIc1KbOeijU_0\tperson\nCIgzZOf3uA0_0\tperson\nCIgzZ
Of3uA0_1\tperson\nCIlb5C929mc_0\tknife\nCImmRnndBuo_0\tperson\nCItr4F49wO4_0\tperson\nCIxs-77bPrM_1\tperson\nCI2GrLRwQR4_0\tperson\nCI3rFXxUPtI_0\tbird\nCI6fYr7IJJM_0\tperson\nCI_9TEXzQE8_0\tperson\nCJD7b_dMrVE_0\tperson\nCJG8ou9QuY0_0\tperson\nCJIpdb7wZEc_0\tperson\nCJNAMf-R_J4_0\ttruck\nCJNj2wqp8QU_0\tbear\nCJOJBhvHmCE_0\tperson\na79_ETe4ego_0\tperson\na7_ixAbhsRI_0\telephant\na8MHgXPiRZU_0\tperson\na8as0DkifS0_0\tperson\na8eQTqlG-6o_0\tperson\na8insUA82jQ_1\tdog\na8insUA82jQ_2\tdog\na8insUA82jQ_3\tdog\na8r9Xss8Es0_0\tperson\na8wT4T21reQ_0\tperson\na8z4RhTT02c_0\thorse\na82uXl_fE7A_2\tcow\na82uXl_fE7A_3\tcow\na892r_pD5PM_0\tperson\na9FI5hfZsG0_0\tboat\na9GBRb_g82o_1\tbicycle\na9GBRb_g82o_2\tbicycle\na9YciDJw4wo_0\tdog\na9Y2Jm4-FDM_0\tperson\na9ZvcKL6lEg_0\tperson\na9fG2p2YO7k_0\tbus\na9fG2p2YO7k_2\tbus\na9g4dt8Lszw_0\tperson\na9g4dt8Lszw_1\tperson\na9riNB4_uhk_0\thorse\na90AssqciQk_1\telephant\na90AssqciQk_2\telephant\na-EIC5v0X4o_0\tdog\na-EIC5v0X4o_1\tdog\na-MNXAJ2mZo_0\tperson\na-NocjWzZtY_2\tperson\na-QTXZfMMT4_0\tperson\na-ZWAMyDG3o_0\tperson\na-iJ2J3oI-A_0\tperson\na-lm-MyKchM_0\tcow\na-s461-Ddxc_0\tskateboard\na-u5tm8bZnc_0\thorse\na-yRjCC5TTM_0\thorse\na-1bMCU5aj8_0\tmotorcycle\na-8RK3OMAOo_0\tskateboard\na-8RK3OMAOo_1\tskateboard\na_KVzTF1RIA_0\tperson\na_KZ5mevNfs_0\tbear\na_OkB8q7LMc_1\tperson\na_SryCna8Rk_0\tperson\na_UjbYab9UM_0\ttrain\na_YIQ1VvpcU_0\tperson\na_YIQ1VvpcU_1\tperson\na_gLFD5d04A_0\tperson\na_wdiSqtOK4_0\tairplane\na_xkGO87GsU_0\tskateboard\na_1zKb6B-bs_0\tperson\na_6uxh_4kb8_0\tperson\na_-WUUfn_l4_0\tperson\na__R_Y49D54_0\tperson\nbALr5X95BQ8_1\tperson\nbAMbXytHB7Y_0\tperson\nbAMbXytHB7Y_1\tperson\nbAdtKFYWQcE_0\tperson\nbAfpD53Vjic_0\thorse\nbAinSo2I3HI_0\tperson\nbAinSo2I3HI_1\tperson\nbAp653-8UZI_0\tperson\nbAtWugkhW88_0\tbus\nbAutb-z3rvw_0\tcow\nbAwVg4MVWds_1\telephant\nbAwVg4MVWds_0\telephant\nbAwVg4MVWds_5\telephant\nbAwVg4MVWds_9\telephant\nbAwVg4MVWds_10\telephant\nbAwVg4MVWds_11\telephant\nbA2bnjEnbus_0\tperson\nbA4v5gLC700_0\tperson\nbA5elX54rTQ_0\tcat\nbA6JRlAu2yE_0\tperson\nbA8lz4kTY-0_0\tbicycle\nbA8lz4kTY-0_3\tbicycle\nbA8lz4kTY-0_5\tbicycle\nbA8lz4kTY-0_6\tbicycle\nbA_NwRpP6Tw_0\tperson\nbA_6OElyKFo_0\ttrain\nbBPPJNf59kQ_0\tumbrella\nbBT4o_qtgWU_0\tperson\nbBgRYIPlqAQ_0\tperson\nbBm9VYnMO9g_0\tbird\nbBt5A6pwnxY_0\tperson\nbB1rIuXXQFA_1\tbus\nbB4Xm1LS9CI_0\tdog\nbB6PWM19eMo_0\tperson\nbCB5mMgiGnk_0\tperson\nbCRN4AZbr6o_0\ttrain\nbCbqiJ6Ales_0\tperson\nbCuWk5NSB0k_0\tperson\nbCuWk5NSB0k_1\tperson\nbCuuL9wxM7E_0\tperson\nbCuuL9wxM7E_1\tperson\nbCvbst3iM94_0\tmotorcycle\nbCwUgQIL5cE_0\tknife\nbCx54wbopXs_1\thorse\nbDBjT69DcT4_0\tcow\nbDEPo_ZJ8BY_0\ttruck\nbDJyFQqK69A_0\tperson\nbDOeksOYoHc_0\ttruck\nbDOeksOYoHc_1\ttruck\nbDOeksOYoHc_2\ttruck\nbDOeksOYoHc_3\ttruck\nbDO5jSIN9C4_0\tperson\nbDZrANNzYZY_0\tskateboard\nbDaTeoyWI4g_0\ttrain\nbDcapf9qqwU_0\tperson\nbDjiXPhFyUA_0\tperson\nbDu9DwJEoHs_0\tcow\nbDu9DwJEoHs_1\tcow\nbDu9DwJEoHs_2\tcow\nbDxvHkJLr2M_0\tbus\nbD9LGwYECDw_0\tcat\nbD-NwifgK0w_1\tskateboard\nbEDI6tCMZXU_0\tperson\nbEIh6sX-Tl4_0\tperson\nbEKdkY9RBEY_0\tperson\nbEM1_c0lvzs_0\tbear\nbEOBKFTwR2Q_0\tgiraffe\nbETxZfOvyHY_3\tbear\nbEUZ0kW5UxE_1\tperson\nbEawSJKPt-Q_0\tperson\nbEhFibV8au4_0\tperson\nbEqXwB3xaWk_0\tperson\nbErIbiSkE10_0\tskateboard\nbEwALd1GaT4_0\tbicycle\nbEzk1Y4QUKs_0\tbus\nbE2p5KejqaA_0\tperson\nbE54N9ho-us_0\telephant\nbE9RuKWeuuo_0\tperson\nbE--xARlZGI_1\tbird\nbFA9McooYzo_0\tcar\nbFCSt5rQdmU_0\tperson\nbFEO4MHzBto_0\tperson\nbFIAwyZ6uuE_2\tperson\nbFIAwyZ6uuE_0\tperson\nbFIAwyZ6uuE_1\tperson\nbFNUtoXNMlQ_0\tbus\nbFORQXIUbxA
_0\tperson\nbFXutLP--Cw_0\tcow\nbFXutLP--Cw_1\tcow\nbFXutLP--Cw_2\tcow\nbFXutLP--Cw_3\tcow\nbFYfbtcZvsM_1\thorse\nbFe5fer15nk_1\tbus\nbFm95kiEE_Q_0\tbicycle\nbFnZbMhDMQ8_0\tperson\nbFrVmI5XvFw_0\tperson\nbF2D0pMJqLQ_1\tknife\nbF65L0Tc9w8_0\tperson\nbF8lUYDQNgc_0\tperson\nbGCRyP03o54_1\tskateboard\nbGFqTDkSuTA_1\tbird\nbGMKF81Sy6c_0\tperson\nbGcugFPOZ98_0\tperson\nbGeFOznVAdA_0\telephant\nbGmggiJ7Hrk_2\tboat\nbGpuuVQyMOY_0\tperson\nbGsY4wldptk_1\thorse\nbGsY4wldptk_0\thorse\nbGyLNR-ZWRY_1\tcat\nbG7btkvllWc_0\tskateboard\nbG9Q1zv6YZ4_1\tperson\nbG-X3irBEO0_0\tperson\nbHALJVsPIWo_0\tperson\nbHBuapxTSS0_0\tperson\nbHB5zkcU4DY_0\tperson\nbHO746jxL2Y_0\tskateboard\nbHP9bh7-qNQ_0\ttruck\nbHWmtSkc1qY_0\tperson\nbHWmtSkc1qY_1\tperson\nbHbgFvCFkb0_0\thorse\nbHcNLuPTrTk_0\tperson\nbHcbcNIxs_o_0\tknife\nbHdxB4LnmGY_2\tmotorcycle\nbHdxB4LnmGY_0\tmotorcycle\nbHdypdEXRYY_0\tskateboard\nbHim6VG9R7E_1\tboat\nbHoVPJGd7EU_1\ttruck\nbHoVPJGd7EU_2\ttruck\nbHoVPJGd7EU_3\ttruck\nbHvVd9-u80E_0\tperson\nbH5d5crxmiw_0\tcat\nbIFUXEvQb_4_0\ttruck\nbIFUXEvQb_4_1\ttruck\nbIV7YZEPqTo_0\tperson\nbIiV4e5w280_0\tperson\nbInwFKVbP2c_0\tperson\nbIqcbjzOQ0Y_0\tcar\nbIslKUiw6YQ_4\tairplane\nbIslKUiw6YQ_0\tairplane\nbIyfjvesRuY_0\tboat\nbIzzvd9q2po_0\tcow\nbI19pnS1D7Q_0\tmotorcycle\nbI8htXUqQkI_0\tcat\nbJADjJacbIY_1\tperson\nbJAxqtGR-MY_0\tperson\nbJBnGIqBiuw_0\thorse\nbJDJ5yePi6M_0\tperson\nbJITjrxz5Ns_0\tperson\nbJI1844s-tU_0\thorse\nbJKrgOW0nMk_0\tperson\nbJMS4sT7XRo_5\thorse\nbJMS4sT7XRo_6\thorse\nbJMS4sT7XRo_8\thorse\nbJMS4sT7XRo_9\thorse\nbJMS4sT7XRo_0\thorse\nbJMS4sT7XRo_1\thorse\nbJWTtXkyZHg_0\tperson\nbJcrA1AOfI4_2\ttrain\nbJcrA1AOfI4_3\ttrain\nbJfHVvueTbo_0\tperson\nbJh3iPv6jYc_0\tcow\nbJqhWaDN0hQ_1\tdog\nbJ0SdP6bjnQ_0\tperson\nbJ24-WqB1xs_0\tperson\nbJ6hIJWstDo_0\ttruck\nbJ6-RBgHmRU_0\tperson\nbJ8k9v22vJA_0\tperson\nbKBLXhOMUi8_0\tdog\nbKCfbZIUSZI_0\tperson\nbKCjZrT7jIY_0\ttruck\nCJfXDO8EqQ4_0\tperson\nCJfXDO8EqQ4_1\tperson\nCJm40KxFN5E_1\tperson\nCJm40KxFN5E_0\tperson\nCJqFjtBvN9Y_0\tskateboard\nCJqHpmU9iSk_2\tperson\nCJqHpmU9iSk_0\tperson\nCJrxPkQa2GE_1\ttrain\nCJ0sXsga9bM_0\tbus\nCJ35smVDZW0_0\tperson\nCJ4qgeMiaOQ_0\tairplane\nCJ6n8mmO1b4_0\tcat\nCKB_--5AbfU_0\ttrain\nCKC6BopJKyk_0\tperson\nCKGpdOkI6P4_0\tperson\nCKNmSha1fz0_0\tperson\nCKQHLTDcKyk_1\tbird\nCKSN1SlM9ug_0\tcat\nCKZ1xRX4dh8_4\tknife\nCKcBs841bV0_0\tperson\nCKhADB_ssaI_0\telephant\nCKjQxzl__Fw_0\tbicycle\nCKkp1wLGtks_0\tperson\nCKmTbQn6J9U_1\tperson\nCKsvfQdlYfo_0\tperson\nCKuBMM3fZ84_0\tairplane\nCKxmvXSrPIg_0\tbicycle\nCKzh_WuJFng_0\tperson\nCK29cIxMNP0_0\tperson\nCK39c3vr6gc_0\tskateboard\nCLAjvvAM-K4_0\tperson\nCLB6UiAOkP0_1\tbus\nCLMUcOgZdNQ_2\tcow\nCLQOTITDBeo_0\tperson\nCLXlbsB7sLY_0\tperson\nCLdyznsISW8_2\tcar\nCLosaFzMFeI_1\tperson\nCLzV3TNXkFo_0\tperson\nCL1Bt58elWc_1\tperson\nCL1Bt58elWc_0\tperson\nCL1z2IBwWkA_0\tperson\nCL1z2IBwWkA_1\tperson\nCL4fc23TpVo_0\tperson\nCL5zmQikk-A_0\tperson\nCMBw6j8-QzY_0\tperson\nCMBw6j8-QzY_1\tperson\nCMIMzbsGXk8_0\tbus\nCMLOYaDEQ9g_0\tperson\nCMMGX4SFyIs_2\tperson\nCMOEwqoxxwo_0\tperson\nCMP-dHylUas_1\tperson\nCMlE5HjD19w_0\ttruck\nCMlNU8W7Lsk_0\tcow\nCMrJ3Hog9z4_0\telephant\nCMrJ3Hog9z4_1\telephant\nCMrJ3Hog9z4_2\telephant\nCMsMnTwn9o8_1\ttruck\nCMwy_JpVNwc_3\tbird\nCMwy_JpVNwc_1\tbird\nCMwy_JpVNwc_2\tbird\nCNDd5De0h98_0\tperson\nCNEdjudh1lE_0\tperson\nCNID7GMZCtU_1\thorse\nCNiuz-9TxDo_0\tperson\nCNqKVUmynPk_0\tairplane\nCNt_itMBqgs_0\tperson\nCNua3gOk0oM_0\tbus\nCNwRXN4wSAk_0\tknife\nCN6-VQgDfe4_0\tperson\nCN8AktLgwN8_0\tgiraffe\nCN8AktLgwN8_6\telephant\nCOAed-b3LTY_0\tperson\nCOFcQrVSFcc_0\tperson\nCOTylr
R16zU_1\tboat\nCOc8fmI9wQ4_0\thorse\nCOh7aoqTWjY_0\telephant\nCOj_p56dMLI_0\tmotorcycle\nCOksm121JZ0_0\ttrain\nCOxq73j4_rY_0\tperson\nCOyU6vUfxXQ_1\tperson\nCOyU6vUfxXQ_0\tperson\nCO2cK7r8MNQ_0\tperson\nCO33VpWw45s_0\tskateboard\nCO_0l5Z12kw_0\tcat\nCPManZ0i9vw_0\ttruck\nCPN9sc_XrbM_0\telephant\nCPOp_zZsQJk_0\tcow\nCPQXOFjv2LM_0\tperson\nCPXyJXYL8yY_0\tmotorcycle\nCPXyJXYL8yY_4\tmotorcycle\nCPYxpWVVj_M_0\tcow\nCPZSesZALiI_1\tcat\nCPuy90LHgrc_0\tbus\nCP3cZfEx36E_2\tbear\nCP3u7XjYteQ_1\tperson\nCP3u7XjYteQ_0\tperson\nCQEjDKzTc3Y_2\tperson\nCQE_vEzLzMQ_0\tperson\nCQPAMu_3qwY_0\tbear\nCQUUCXr0Idg_0\tperson\nCQU9LkJ1PlA_0\tperson\nCQU9LkJ1PlA_1\tperson\nCQbUivUBlJ8_1\tbear\nCQbUivUBlJ8_3\tbear\nCQihoSP1KLM_0\tperson\nCQite5jXihw_2\tperson\nCQlL5sCIaM4_2\ttrain\nCQlL5sCIaM4_0\ttrain\nCQlL5sCIaM4_1\ttrain\nCQmCFDEszdc_0\tcat\nCQyxRGB9-_o_1\telephant\nCQzQkumb_iw_0\tperson\nCQ0hdku_Mu0_3\telephant\nCQ0hdku_Mu0_4\telephant\nCQ0hdku_Mu0_6\telephant\nCQ0hdku_Mu0_8\telephant\nCQ0hdku_Mu0_11\telephant\nCQ2pa82Muc4_0\tperson\nCRGhEOLOPLw_0\tbus\nCRHfpplogUY_2\tcar\nCRHfpplogUY_1\tcar\nCRPfcUOT10Q_0\ttrain\nCRQ8kzUgpGE_0\tcat\nCRS3P9ePDug_8\ttrain\nCRS3P9ePDug_0\ttrain\nCRS3P9ePDug_4\ttrain\nCRS3P9ePDug_7\ttrain\nCRS3P9ePDug_9\ttrain\nCRS3P9ePDug_1\ttrain\nCRYLa0UnCJY_0\tdog\nCRZQQc-7Cr4_0\tperson\nCRZQQc-7Cr4_1\tperson\nCRcL9sc8Z_Q_0\tperson\nCRihNgUldQg_0\tperson\nCRpG5Auclh4_0\ttrain\nCRscoQhOT24_0\telephant\nCRteSMMhdfo_1\tperson\nCR2Qbth78ug_0\tperson\nCR7gNMR7aFk_0\tperson\nCSBnYbN-fwQ_0\tperson\nCSBnYbN-fwQ_1\tperson\nCSCN35ZL4gk_0\tperson\nCSCmLaLpgec_1\ttrain\nCSGkGWkJnIo_0\tperson\nCSKOzx-8MRM_0\tperson\nCSKhQtYbLiY_0\tperson\nCSTEfDaVq_w_3\thorse\nCSgIyZrF2Xw_6\tbear\nCShE1WLp4V4_0\tperson\nCSlYtyS3ekI_0\tcat\nCStjlkpuH8I_0\tknife\nCSwiprmAnWk_0\tperson\nCS4LhFaTdRc_1\tperson\nCS4TVHuh-OI_1\tperson\nCS4TVHuh-OI_0\tperson\nCTBCSXpoCNw_0\tknife\nCTGjM7vaWkc_0\tcar\nCTNN0vCWthk_0\tcow\nCTOTTFDvM9g_0\telephant\nCTOTTFDvM9g_1\telephant\nCTpK5Ywqj4E_0\tperson\nCTtActqncZs_1\tperson\nCTtActqncZs_0\tperson\nCTty0Fesx4k_1\telephant\nCTty0Fesx4k_2\telephant\nCT6O84zfmoY_0\tperson\nCT8VKdB074U_0\tdog\nCUB_Y4U0gNU_0\tperson\nCUE1Oj2b7oo_0\tperson\nCUIv9zU0_7M_2\tdog\nCUQZtS7SlyM_0\ttruck\nCUVQtlpfthI_0\tperson\nCUVqn-7LP_k_0\tcat\nCUjEVN0BT58_0\tperson\nCUjbAz30mdA_0\tperson\nCUvi-gOiEak_0\tairplane\nCUvi-gOiEak_1\tairplane\nCUzrNlKejnA_0\tperson\nCU-5HeXnZag_0\tperson\nCU_cxu2KrzY_1\tcow\nCU_4MsJSWGw_0\thorse\nCVCPdF3TevY_0\tcar\nCVJEcVS63rM_0\tperson\nCVJu9kpxa0o_0\tskateboard\nCVQq3Lnsmb8_0\tskateboard\nCVRQkAzvHOI_0\tcat\nCVXbWRarjGI_3\tbicycle\nCVa-tmxG3G8_0\tbus\nCVfXcK9LvU4_0\tcat\nCVnQzQjIfdo_0\tperson\nCVnQzQjIfdo_1\tperson\nCVtUo7t1tg4_0\tknife\nCVtdQUWrMFo_0\tperson\nCV1gdpxyUvQ_0\tumbrella\nCV7yBA-RY-s_0\tperson\nCV9Mv-Z5ywo_1\tknife\nCV9_qaQ3bOc_0\tdog\nCWNPg3hbbCc_0\tperson\nCWRUw47fnHQ_0\tdog\nCWcpGIObSb4_0\tperson\nCWcpGIObSb4_1\tperson\nCWhtecFS3Ps_0\tperson\nCWh66yU69HI_1\tperson\nCWq2nbpnjkw_0\tperson\nCWsgkyp-Wv8_1\tperson\nCWsgkyp-Wv8_0\tperson\nCWu6nT2qW2Q_0\tperson\nCWydCxGJyck_0\tcat\nCW0GVWegie4_0\tperson\nCXEi_k33z08_0\tperson\nCXF-MNV21Uw_1\tperson\nCXF-MNV21Uw_2\tperson\nCXF-MNV21Uw_0\tperson\nbKIEzYSD9LU_0\tbird\nbKM4LmiXX5k_3\tknife\nbKM4LmiXX5k_0\tknife\nbKQQdBiIraA_0\tdog\nbKT6s25xsS4_0\tperson\nbKh8FyKvOq8_0\tumbrella\nbKic74m-XKg_0\thorse\nbKnsY1ytgqc_0\tperson\nbK0HzQHKqhg_0\tmotorcycle\nbK0HzQHKqhg_1\tmotorcycle\nbK0IN2qoSjQ_1\tperson\nbK7Wo0UxDyQ_0\tperson\nbLBmIVS2T-0_0\tperson\nbLLFtAMqoF0_0\tperson\nbLOW53I2oWw_0\tknife\nbLU0G55kWgs_0\tcar\nbLU0G55kWgs_1\tcar\nbLYGpY
iiF7Q_0\tperson\nbLg0SdwRkKc_0\tperson\nbLneVyHWwdk_0\tperson\nbLoyRVgQcTk_0\tcat\nbLoyRVgQcTk_1\tcat\nbLoyRVgQcTk_2\tcat\nbLs4dUFZzcQ_0\tperson\nbLs4dUFZzcQ_1\tperson\nbMEbcFBdRsA_0\tairplane\nbMM1OZMZ_WY_0\tperson\nbMNzE6F4WK4_0\ttruck\nbMPPnTHvu8c_1\tcow\nbMQlfzj9vCE_0\tperson\nbMZPcnVc1K0_0\tperson\nbMakr2vwfqQ_0\tperson\nbMdfLBSo6jw_0\tbicycle\nbMfQw6tBALo_0\tcow\nbMgWjlwilqA_0\tbicycle\nbMk8JyTyvUo_0\tskateboard\nbMojajeogfY_0\tperson\nbMphaUsZuqU_2\telephant\nbMrDB2JI0QM_0\telephant\nbMuSXdxvaWY_0\tbicycle\nbMumJTM0f28_0\tperson\nbM3OcevX9F4_0\tperson\nbM6fRimkPZg_0\tcow\nbM6peJ4lQyU_0\telephant\nbNGoGllCEj0_0\tcar\nbNGoGllCEj0_1\tcar\nbNJ5ygVB-GI_0\tperson\nbNPtMp-AuhY_5\ttrain\nbNPtMp-AuhY_4\ttrain\nbNR89JLsh7Q_0\tmotorcycle\nbNZe9vwuE8E_0\tcar\nbNcTCIgwqNY_0\tboat\nbNinDD5s0LQ_0\tperson\nbNo2RseLYYs_0\tperson\nbNqXgNLQX3s_0\tperson\nbNtivYIWtQE_0\tperson\nbNtivYIWtQE_1\tperson\nbNtivYIWtQE_2\tperson\nbNyyHqBZnmQ_0\tairplane\nbN4vggzwxWI_0\tperson\nbN-epcJfRJ8_0\tperson\nbOPvxhSlnZI_0\ttruck\nbORQv_d22gA_0\tbear\nbOTYFfq_264_0\tperson\nbOXM6ibmbG0_0\ttruck\nbOarvmUMdLs_0\tperson\nbOarvmUMdLs_1\tperson\nbOb4k6pTF-k_0\tmotorcycle\nbOeUzXPOIWw_0\tmotorcycle\nbOfrPHjROWI_0\tdog\nbOm9Qgnl2KI_1\tumbrella\nbOor15z5M5Y_1\ttruck\nbOuuxRt7ugE_0\tbear\nbOwOVcqeajs_1\tboat\nbPAO0nyCO8Y_2\tcow\nbPLKx5uJaZY_0\tbear\nbPTTPAsH7v8_0\tairplane\nbPZdC3oRr1c_0\tdog\nbPanGwtU82U_0\tairplane\nbPavgNJxZnI_0\thorse\nbPavgNJxZnI_4\thorse\nbPcXQrlHs60_0\tzebra\nbPddyJH2fm4_0\tcow\nbPeFwxV66_s_1\tcow\nbPfaS8RIHVw_1\ttrain\nbPjZsDes9ck_0\tbird\nbPvvA8Wm5Ts_0\tperson\nbPw91vtx0rY_0\tdog\nbP17881jyH4_0\telephant\nbP17881jyH4_2\telephant\nbP17881jyH4_1\telephant\nbP6QvQUfZSI_0\tperson\nbP7ZU4wl_xs_1\tperson\nbP7lN2WyBTg_2\tbird\nbP7lN2WyBTg_0\tbird\nbP7pux4nQa4_0\tperson\nbQJQKEfdctc_1\tperson\nbQKuVB3YmRI_1\tknife\nbQNLK-43XKM_0\tperson\nbQNXrSVq4r4_0\tperson\nbQQS-amRhxU_0\tperson\nbQQr8FzMTHE_0\tperson\nbQR6KxB4qjg_1\ttrain\nbQWO4r5DLWY_7\tbicycle\nbQWO4r5DLWY_8\tbicycle\nbQZ8WQ2mS9o_0\thorse\nbQd1k1RNZZA_0\tperson\nbQwDt3XOok0_1\tskateboard\nbQy9W_tIPJg_0\tcat\nbQ7FEMZ309U_0\tbicycle\nbRElYolSzbI_2\thorse\nbRKfUmz_7hE_0\tbicycle\nbRKfUmz_7hE_5\tbicycle\nbRP4TElBetA_0\tskateboard\nbRUtCCY00Yw_0\tperson\nbRd_NGjRFpU_0\tcow\nbRgNc063rsk_0\tperson\nbRgNc063rsk_1\tperson\nbRiVaIWzo4k_0\tperson\nbRiVaIWzo4k_1\tperson\nbRpbblTb1VU_1\tperson\nbRq06zdCv4k_0\tdog\nbRsjD1GTjeE_0\ttruck\nbRuSrTOibGY_0\tskateboard\nbRw2PFlL8l8_0\tcat\nbRxyuZTXkWo_0\tperson\nbR61bP65wdI_0\tperson\nbR_EeaX8Kns_0\tcat\nbSC7MwTZ0Og_0\tperson\nbSJbBDA3-rI_0\tperson\nbSJbBDA3-rI_1\tperson\nbSSSYoS7HhY_2\tperson\nbSSX8qJnGak_0\tperson\nbSVCTx_L7lU_0\tperson\nbSbZuDkimC8_1\tcow\nbScFgdC-DH8_0\tmotorcycle\nbSkEsUu7aBI_0\tcat\nbSqX5D_GrEc_0\tperson\nbS4mTtP-Ud4_0\tperson\nbS4mTtP-Ud4_1\tperson\nbTAxiISsPNE_0\tcow\nbTHRXr-yw54_0\tperson\nbTOZp15gd24_0\tairplane\nbTOZp15gd24_1\tairplane\nbTOZp15gd24_2\tairplane\nbTO9Pid9808_1\tcow\nbThFysASYJg_0\tperson\nbThX-5t7OWM_3\tbus\nbTl-dt761p8_2\tbird\nbTp1hk4dhPE_0\tperson\nbTuho6CpJpg_0\thorse\nbT7mzx9P1Yo_6\tbird\nbT7mzx9P1Yo_8\tbird\nbUDYPhSFyyw_0\tairplane\nbUFCsL247kY_1\tperson\nbUFCsL247kY_0\tperson\nbUIov_O62GU_0\ttrain\nbUVi7VVygmM_0\tperson\nbUa61WY6E38_1\tperson\nbUu6iW_nRvM_0\tperson\nbU8cBepgoMY_4\telephant\nbU8cBepgoMY_1\telephant\nbU8cBepgoMY_3\telephant\nbU8r7rNDaHQ_0\tmotorcycle\nbU8r7rNDaHQ_1\tmotorcycle\nbVCLNxl4PQY_0\tperson\nbVPgCZmg1CY_0\tperson\nbVTzUiTPtww_0\tperson\nbVZixqlT1AI_0\tperson\nbVbT4F3I0s4_0\tperson\nbVbdO8rj6TQ_0\tperson\nbVbdO8rj6TQ_1\tperson\nbVdjQbIzOGc_0\thorse\nbVgK
e0-_228_0\tbear\nbVkYqw1YJ6c_0\tperson\nbVnmeQsd3xk_1\tcar\nbVph6GZ3jLE_0\tskateboard\nbVrck_XYsR8_0\tbicycle\nbVtMukuPx9A_0\tmotorcycle\nbVtWuhD1L1s_0\tcar\nbVvVMOxHOT4_0\tcat\nbVwWkzYdrvk_0\tperson\nbVw9txmBeX0_0\tperson\nbVz-pHuWNfc_0\tperson\nbV3UXbGCshc_3\telephant\nbV3UXbGCshc_4\telephant\nbV3UXbGCshc_0\telephant\nbV3UXbGCshc_2\telephant\nbV8k_w0cphI_0\tperson\nbV9tUYWi-9o_0\ttruck\nbV9tUYWi-9o_1\ttruck\nbWCW4QZTIXE_0\tperson\nbWCxObc3uVo_0\tperson\nbWEnwFThRlA_0\tperson\nbWEtMBeQQCA_0\tbus\nbWEw8rNQ-kI_0\tperson\nbWJg9jatoBY_0\tperson\nbWLcKJauKIs_0\tperson\nbWO4NBx37Vk_4\tairplane\nbWdWgIB371Y_0\tperson\nbWdWgIB371Y_1\tperson\nbWkKy-_YzW8_0\tumbrella\nbWotjBNgmiA_1\tmotorcycle\nbWotjBNgmiA_2\tmotorcycle\nbWo4CzHWaZ8_0\tdog\nbWqayCqhqVQ_0\tperson\nbWtXkAzA6zE_0\tperson\nbWtXkAzA6zE_1\tperson\nbW1JoZnZpXs_0\tbicycle\nbW1JoZnZpXs_2\tbicycle\nbW2I1hUiWgg_1\tbear\nbW2I1hUiWgg_3\tbear\nbW2I1hUiWgg_2\tbear\nbW6PJACBEFo_0\tboat\nbW6PJACBEFo_1\tboat\nbW7x14tLsxU_0\tcow\nbW7x14tLsxU_1\tcow\nbXGa-FIGViQ_0\ttruck\nCXOKkaurfXo_0\tperson\nCXVmfrDfalE_0\tperson\nCXVyHpmc_fU_1\tcat\nCXXWvUVLBBE_1\ttrain\nCXXWvUVLBBE_3\ttrain\nCXaF0E3wEzI_4\tboat\nCXaF0E3wEzI_1\tboat\nCXaF0E3wEzI_2\tboat\nCXdGDPRtlo4_1\tcat\nCXdjIo4q-w4_0\tdog\nCXoeLQPShqU_3\thorse\nCXoeLQPShqU_0\thorse\nCXrwHki5ShI_0\tperson\nCXw5HMRQwEk_7\tbear\nCXxPPuZcT2k_0\tknife\nCXyujV2S5aE_0\tperson\nCX1US3Y-2jI_0\tperson\nCX5Y01eJ_g0_0\tknife\nCX838M4iPkw_1\tbear\nCX_YxpWurRk_0\tperson\nCYEtgx1uVTM_0\ttrain\nCYEtgx1uVTM_1\ttrain\nCYFtiy8FtgM_0\tperson\nCYGBUw8HZ8Q_0\tperson\nCYKbj5BgaiI_0\tperson\nCYPFpTJXCp8_1\tperson\nCYXd3muNlJ8_0\tperson\nCYcxxdqG02k_0\tperson\nCYcxxdqG02k_1\tperson\nCYghFhQySik_1\tperson\nCYghFhQySik_2\tperson\nCYghFhQySik_0\tperson\nCYg8fy66poA_0\ttrain\nCYjEASXRoys_0\tperson\nCYkow-sm2pA_0\tperson\nCYmpj4UFFtA_0\tcow\nCYsgb4GhJ_0_1\tcat\nCYtehjvIIIE_0\tcat\nCYw9ONxIi0M_4\tbear\nCY3-VTI7lQU_1\tcow\nCY48729zIgM_0\tbus\nCZAt34OJpoI_0\telephant\nCZGoAqWEDQM_1\thorse\nCZJz6zZt3cE_0\tperson\nCZXHqexGqfI_0\tcow\nCZduQndn_Eg_0\ttrain\nCZfMxEFk9hc_0\tmotorcycle\nCZfe1GuZxPI_1\tperson\nCZws8sfLA8M_0\tperson\nCZ8bjG4wdZU_0\tperson\nCZ9MT7tZZ2E_0\tknife\nCZ-Kodbg_2A_0\tbus\nCaA-PFuqaXw_0\ttruck\nCaFlo5YQHXw_0\ttrain\nCag3vCKRh6c_0\tbicycle\nCajF9IxbOvI_0\tperson\nCajF9IxbOvI_1\tperson\nCam_wHie6XQ_1\tperson\nCa4_dI-Ii8o_0\tperson\nCa5GzZ-rifE_2\thorse\nCa5GzZ-rifE_0\thorse\nCa5GzZ-rifE_3\thorse\nCa5mOzqFz70_2\tbear\nCa6g367yxss_3\tdog\nCa9JsTGifmQ_1\tknife\nCa-l5zpgIL0_0\thorse\nCa-wDaXxSn8_0\ttrain\nCa_LwXljv5I_2\tdog\nCbBrv9GkBDM_0\tperson\nCbKVR2EGoWU_0\tcat\nCbO4r5w5NEM_0\tcat\nCbTbpHHYfGo_1\tcow\nCbYQk8GFQwY_0\tperson\nCbYXzAv9G40_0\tperson\nCbZA75LYWsk_0\tboat\nCbZA75LYWsk_4\tboat\nCbZA75LYWsk_7\tboat\nCbZA75LYWsk_8\tboat\nCbZA75LYWsk_6\tboat\nCbbsxxHKQBs_1\tbicycle\nCbbsxxHKQBs_3\tbicycle\nCbfML92fBFc_0\tperson\nCbrOGI6D5oo_0\tdog\nCbz0hgvZtyM_0\tperson\nCb0EbSTABAw_0\tperson\nCb31aGVbcGE_0\tperson\nCcJ-51mUw00_0\tperson\nCcNfpk8tVxA_2\tperson\nCcNfpk8tVxA_0\tperson\nCcNfpk8tVxA_1\tperson\nCcadL-XHA8w_0\tperson\nCccC-FK79hM_0\tskateboard\nCceETksmvEc_0\tbus\nCcfAKl1kCRM_0\tperson\nCcl3EZzzNhc_2\tbird\nCcl3EZzzNhc_3\tbird\nCcmiWGPbuT4_0\tcar\nCcyRYeSG3sQ_0\ttruck\nCcyqd4ZzDtQ_0\tperson\nCc5DUip1-eE_0\tperson\nCc9-Kd--ejs_0\tcar\nCdA-Gg7O6d4_0\tperson\nCdD0W0pS7gk_0\tskateboard\nCdG8sd9UZFM_1\telephant\nCdG8sd9UZFM_3\telephant\nCdOwMZqCiMs_0\tbird\nCdRgo9V_e_U_0\tperson\nCdTDo40rdz4_3\tumbrella\nCdVnK1TcGcQ_0\tknife\nCdW2qTShGbY_2\tperson\nCdW2qTShGbY_1\tperson\nCdYkEASWMqQ_0\tperson\nCddXUsFqg4Q_10\tbicycle\nCddXUsFqg4Q_12
\tbicycle\nCdeUORbvfgs_0\tperson\nCdkbBdQwTX0_0\tperson\nCdmrCOVxj8c_0\tperson\nCdosWRXaOgY_0\tperson\nCdtY-oTmACc_0\telephant\nCd3qxnZC6s4_0\tairplane\nCd8dfcT-D9U_0\thorse\nCd8zY0wsrLc_0\tumbrella\nCd_ZgXZ7qKw_0\tperson\nCd_ZgXZ7qKw_1\tperson\nCeCnRUGvs9Q_1\thorse\nCeEMUoHNeVA_0\tperson\nCeICmGeQXOk_0\tmotorcycle\nCeICmGeQXOk_1\tmotorcycle\nCeVjsWpfoCY_0\tperson\nCekBpSMLr08_0\thorse\nCetmVa_LV2A_0\tbird\nCetw-N1I1bA_0\tdog\nCew6y9K7ynI_0\tcat\nCezGmkW4sRY_0\tperson\nCe1tW6uV_lw_0\tperson\nCe1tW6uV_lw_1\tperson\nCe_dgPawIkU_0\tperson\nCfC--i0DQ-o_0\tcar\nCfThv8Vk-oM_0\tumbrella\nCfbzDUZ6PyQ_0\ttruck\nCfqtCB_f_Z8_3\tskateboard\nCfwk3niR9Uc_0\tmotorcycle\nCfyvbbrxquI_0\tcat\nCf_GVLLQaTA_0\tperson\nCgB0fwUOZd4_2\tbus\nCgDcN1Lk7ag_0\tcar\nCgDcN1Lk7ag_1\tcar\nCgDcN1Lk7ag_2\tcar\nCgDyrbc-LLo_0\tperson\nCgHCCqADKys_0\tperson\nCgQl21vwrqk_0\tperson\nCgQv6o97KqY_0\tperson\nCglmlO92nKA_0\tperson\nCglmlO92nKA_1\tperson\nCgod2p17L48_0\tperson\nCgwHXWDGAak_1\tperson\nCgzt1Kv6Sqg_0\tcow\nCg9H20lr5Uk_0\tperson\nCg9H20lr5Uk_1\tperson\nCg9H20lr5Uk_2\tperson\nChBKKPEO8N0_0\tperson\nChOKPIVr5XE_2\tbicycle\nChPBGkSbJ0g_0\telephant\nChRNCk9Bq-k_0\tcat\nChZB3vAX8sk_0\tperson\nChc7poZ9r-k_3\tskateboard\nChmcE3Lz1Vc_0\tperson\nCh2_CQg4r1o_0\tperson\nCh-PosNzqZ8_4\telephant\nCh-PosNzqZ8_0\telephant\nCiCqdFq_a7U_1\tperson\nCiCqdFq_a7U_0\tperson\nCiLbnwjSJ9w_0\tperson\nCiQOmR8VCzs_0\tperson\nCiQOmR8VCzs_1\tperson\nCiQS0RMaLZQ_0\ttruck\nCiT09gfBJPA_1\tperson\nCiVwjoLvdAs_1\thorse\nCiWhBWV1zGM_0\tcow\nCiWhBWV1zGM_1\tcow\nCiYOn9VW1eY_0\thorse\nCihCAad2Duo_0\tperson\nCilRWTfS8e4_0\tperson\nCiwaaMNfvCo_0\tairplane\nCi0S27Qp1w4_0\tcat\nCi2vW1OGHe0_0\tcat\nCi6mTJ6BqYI_0\tperson\nCjJ3l2smqAc_0\tperson\nCjMaorKuwf0_1\thorse\nCjRX9J2BM4Y_0\tskateboard\nCjUf3D9IsCQ_0\tperson\nCje7Ip85T1I_0\tperson\nCjm9Wky44TM_0\telephant\nCjm9Wky44TM_1\telephant\nCjn-mt97y-w_0\tperson\nCjq3dda3PlA_1\tperson\nCjq3dda3PlA_0\tperson\nCjw2f0M_eB8_0\tbird\nCj1CpXDG_Qw_0\tperson\nCj3PTZcRbd4_0\tperson\nCj3ZEx4SDe4_0\tcow\nCj-a9t9yiiA_0\tperson\nCj-a9t9yiiA_1\tperson\nCkBGaJnF9vo_0\tperson\nCkC43WVctnk_0\tcat\nCkKQhDP2FGY_1\tperson\nCkKQhDP2FGY_0\tperson\nCkKQhDP2FGY_2\tperson\nCkLE-s6CsgY_0\tcow\nCkLwgOIBF_I_0\tperson\nCkLwgOIBF_I_1\tperson\nCkP_70u-2zU_1\tboat\nCkX8laawskQ_2\thorse\nCkZeki9RVDI_0\tperson\nCkZhHtevDk8_0\tperson\nCknHFY05prw_0\tperson\nCkoK8C4Rzj0_0\tperson\nCkvEr5T38Wc_0\tperson\nCkvEr5T38Wc_1\tperson\nCkvEr5T38Wc_2\tperson\nCkyU5jU74Js_1\tdog\nCkyU5jU74Js_0\tdog\nCk8GRgUrpoE_0\tperson\nClBCXl7l2pw_0\tskateboard\nClH2-R5LeVo_0\tcat\nClLZcmIHrTw_0\tperson\nClM3Ftm0S7o_0\tcow\nClRLFlpMUhU_1\thorse\nClSzHW4AuJ0_0\tperson\nClV1oHNuF9o_0\tperson\nClV6A8WNCvw_0\tcow\nbXcKQNGRBvw_3\tairplane\nbXcKQNGRBvw_0\tairplane\nbXcKQNGRBvw_1\tairplane\nbXcKQNGRBvw_2\tairplane\nbXjVvJ8eOJc_0\tskateboard\nbXkjwotai0Y_0\tbicycle\nbXnvGCFA9Dg_0\tperson\nbX9TcejzzTM_0\tperson\nbYCvd_BTMsk_0\tdog\nbYE-vUOh10s_0\tboat\nbYN8lkupLt4_0\tbird\nbYQiCAwebzs_1\tbicycle\nbYSbuWYiixQ_0\tperson\nbYVgzwF1hNw_0\tbicycle\nbYWGnwi8nDQ_0\tmotorcycle\nbYm9aUK2zzk_0\tperson\nbYpG750b7pE_0\tmotorcycle\nbYvzSXZ0w_I_1\tperson\nbYwwOO6vMAw_0\tperson\nbYwwOO6vMAw_1\tperson\nbYyFEbIGMfo_1\tdog\nbY3sDu5BZDI_0\telephant\nbY3sDu5BZDI_1\telephant\nbY3sDu5BZDI_2\telephant\nbY6vPIaJDGA_0\tperson\nbY8BdyCsCAw_0\tperson\nbZL41d9eFyc_0\tcow\nbZRpdnJtcT4_0\ttrain\nbZRpdnJtcT4_2\ttrain\nbZVMygQQgNg_0\tperson\nbZVZbn0oTjo_1\tgiraffe\nbZdq8Rk75M8_0\tknife\nbZgZihlL0IU_0\tperson\nbZsoMlw4CnI_2\tbus\nbZuOWV67gnY_0\tcat\nbZwJl6ye9Cc_0\tmotorcycle\nbZwJl6ye9Cc_1\tmotorcycle\nbZzzlD0C8Jg_0\ttrai
n\nbZ2u1x38Qbg_1\tairplane\nbZ6gk6FLGss_0\tperson\nbaDesUZ9Pyc_0\tbear\nbaRyXrRn_ls_1\tmotorcycle\nbaWLnj87FOc_0\tcat\nbabQ3FBdeqQ_0\tcow\nbagbzsb-tg4_0\tperson\nba1hwKdPRx8_0\tcow\nba3cGHmc_OA_0\tperson\nba5407XQYAQ_0\tcow\nbbHdRyrdpDA_0\tboat\nbbH4CQx07Go_2\tknife\nbbLW6902ITg_0\tperson\nbbLW6902ITg_1\tperson\nbbLW6902ITg_3\tperson\nbbLW6902ITg_4\tperson\nbbM0SbH_pgk_2\tbear\nbbZAdo3awRs_0\tcar\nbbZeVbzmLVw_0\telephant\nbbaUzB0Na2o_0\tperson\nbbfDHSIT9ys_0\tperson\nbbhyEgEjfvQ_0\tcow\nbbjuucY5QQc_0\tperson\nbbkjnF0iGrs_0\thorse\nbbkjnF0iGrs_2\thorse\nbbkjnF0iGrs_3\thorse\nbbkjnF0iGrs_6\thorse\nbbnb-beW0p0_0\thorse\nbb0DRm0ueKk_0\thorse\nbb4sgALviyc_0\tbear\nbb5OO1wMKr8_0\tperson\nbcJ1MAj_A_w_1\tperson\nbcLW7YqnUGs_0\tskateboard\nbcdQmV1-Z5k_0\tmotorcycle\nbcgTPCycRIw_0\tskateboard\nbcksTLjC1fs_0\tmotorcycle\nbcrQdxrU_vI_0\tperson\nbc1C8HrNVqE_0\thorse\nbc28CjoKODI_0\tperson\nbc28CjoKODI_1\tperson\nbc3rySF6iao_0\tperson\nbc6jeLN-DUo_0\ttrain\nbdU9JALjnmw_0\tperson\nbdYKw4SpkQQ_0\tzebra\nbdZpXHSW4Ps_0\tcat\nbdbVAdua3uI_0\tairplane\nbdbVAdua3uI_1\tairplane\nbdcoNmelRw4_1\tdog\nbdcoNmelRw4_2\tdog\nbdcwT2ufUBg_0\tbird\nbddes6RyfCI_0\tskateboard\nbddes6RyfCI_1\tskateboard\nbdeoe5gmCd4_0\telephant\nbdeoe5gmCd4_2\telephant\nbdgSMIY2A8Q_0\thorse\nbdoNsiMM1RY_0\tbird\nbdwlZMpXPJo_8\tbird\nbdwlZMpXPJo_7\tbird\nbd--DVCeT-s_0\tcow\nbeE5VOzxibM_0\tgiraffe\nbeLTv9YiY78_0\tdog\nbeLTv9YiY78_1\tdog\nbeLTv9YiY78_2\tdog\nbeQOHdCA8KM_16\telephant\nbeQOHdCA8KM_3\telephant\nbeQOHdCA8KM_6\telephant\nbeQOHdCA8KM_7\telephant\nbeQOHdCA8KM_10\telephant\nbeQOHdCA8KM_12\telephant\nbeSTl1azmTY_1\tskateboard\nbeVVM2pBQdA_0\tcow\nbeVVM2pBQdA_1\tcow\nbecTICXjrg4_0\tperson\nbeliMXc3JE8_0\ttrain\nbesXR1P9Oew_0\tcar\nbeu-edT1daM_0\tperson\nbe9BCy6kHvY_2\tperson\nbe9CXLatX9I_0\thorse\nbe-ggiVD4V0_0\tknife\nbe-5ARU_aHA_0\tperson\nbe_IhYef3hE_0\tperson\nbfBZLLwpNWA_0\tgiraffe\nbfJaD1qZ2gE_0\tbus\nbfJaD1qZ2gE_3\tbus\nbfJtapJ86Gw_0\tperson\nbfRgL9oanEc_1\tperson\nbfRgL9oanEc_0\tperson\nbfS8FB_HOlY_0\tperson\nbfZfMA1mLrQ_0\tdog\nbfZfMA1mLrQ_1\tdog\nbfaMdaYiK90_0\tcat\nbffC89pE6fo_0\tperson\nbffC89pE6fo_1\tperson\nbfkNVFr6Cwg_0\tcow\nbfkNVFr6Cwg_1\tcow\nbflVgDgAHSo_0\tumbrella\nbfrY2wEePwY_0\tperson\nbfrY2wEePwY_2\tperson\nbfwWF0XO7bE_0\tboat\nbf9YySHJcdQ_0\tperson\nbgAOYaooc18_0\tperson\nbgAo5vgwe2M_0\tzebra\nbgBK4sMnLig_0\tcow\nbgBK4sMnLig_1\tcow\nbgC-r6p-XHU_2\telephant\nbgE_uy3Ml6g_1\tumbrella\nbgHMLwWY4Qo_0\tperson\nbgV-FqQ8Tv8_0\tumbrella\nbgXZ3BpIOh8_0\ttrain\nbgaD7K2iEPI_0\tperson\nbgbS11O9lSw_0\tbus\nbgelX1blhpQ_0\ttruck\nbglPgA_0LAk_0\tmotorcycle\nbgpB-A04RLI_0\tperson\nbgyEHsMav4U_0\tperson\nbhBMa8wQ5KA_0\tbird\nbhGJ9gZmP90_0\tperson\nbhGJ9gZmP90_1\tperson\nbhH_pqCQ3Co_0\tcow\nbhJGFbgXlts_1\tperson\nbhNfsUPLKDg_1\ttrain\nbhWmpmnXSlc_0\tperson\nbhZZubkX8_o_1\tbird\nbhdtzsUvieg_1\tperson\nbhqr680CLr0_0\tperson\nbhrOzwB-7qA_0\tperson\nbhsCCw1J_JU_0\tperson\nbhuOX61sk8M_0\tperson\nbhz6HG2KpnI_0\tskateboard\nbh0ZZ4Z76cc_0\tperson\nbh3QacG9JYk_0\tairplane\nbh3QacG9JYk_1\tairplane\nbh3QacG9JYk_2\tairplane\nbh8aMNVny8s_1\ttruck\nbiAdsjypETI_0\tknife\nbiFm-y7gSrc_0\thorse\nbiGJ8vHOsZM_0\tumbrella\nbiLY6NMsqJU_0\tcat\nbiUFB3c0Ucc_0\tbus\nbiZU5SOHQvc_0\tumbrella\nbibJ3Bv5YmQ_0\tmotorcycle\nbik9GuCughc_1\tbird\nbiuEbYnn68k_0\tbus\nbiwbqbVsZeE_1\telephant\nbiyu3sxIOYc_0\tperson\nbi1kYvu5Irg_0\ttrain\nbi1kYvu5Irg_1\ttrain\nbi3GSUnfzd8_0\tperson\nbi5Bkz2MVP4_0\tbird\nbi5Bkz2MVP4_3\tbird\nbi6BNwvsR_0_0\tperson\nbi-GKlUZMR8_0\tmotorcycle\nbjBwCQ5z4IQ_0\tcat\nbjH2OQR68Vc_0\tperson\nbjRQ69TaeKs_2\tperson\nbjgooTfy3JM_0\ttrain\nbjgooTfy3JM_
1\ttrain\nbjgooTfy3JM_2\ttrain\nbjhEqucWULo_0\tcow\nbjq8de0pw5M_0\tperson\nbjq8de0pw5M_1\tperson\nbjrq_Kj-wSU_0\tairplane\nbjrq_Kj-wSU_1\tairplane\nbjrq_Kj-wSU_2\tairplane\nbjrq_Kj-wSU_3\tairplane\nbjwdTl5zyaI_0\tskateboard\nbjx96uw-Q24_0\tperson\nbj-Grf4s790_0\telephant\nbkElaSUqJjM_0\ttrain\nbkIBcqXKARI_0\tperson\nbkMU7xViDvA_0\tperson\nbkXBjOrn2yI_0\tperson\nbkggwniG4vc_0\tperson\nbkiQTbQF_TA_0\telephant\nbkigtjV1zA0_1\tmotorcycle\nbklheVvsfac_0\ttruck\nbkoOiNz6Zmo_0\tperson\nbkok3wr4188_0\tperson\nbk2l-O9wSEc_0\tperson\nbk8UlOzFy7U_1\tperson\nblAiGXbJxmI_0\ttrain\nblIpNvBakFI_0\tperson\nblW8z3TPVvo_0\tmotorcycle\nblhCjXE5cRo_0\tperson\nbli5Z83QY_U_0\tperson\nblnFzQdaVRc_0\tperson\nbluU1CAbJfo_0\tperson\nblubKbt8mLE_0\tcar\nbluqyqDv2eE_0\tcar\nblv0QslQ524_5\tbus\nblv0QslQ524_6\tbus\nblzDAgvxJMw_0\tperson\nbl1XJCtyP2E_0\ttruck\nbl2xZSpcZqs_0\tcat\nbl6wIjxfuJo_1\tbicycle\nbl6wIjxfuJo_2\tbicycle\nCloG2hcM5nU_9\tbicycle\nCloLHr7NJqg_0\tperson\nCloOQkTkYfY_0\tbus\nClpDLu1qCx4_2\tperson\nClpDLu1qCx4_3\tperson\nClpDLu1qCx4_1\tperson\nClvAi34e1zM_1\telephant\nCl1mEpQ3wy4_0\tboat\nCl1mEpQ3wy4_1\tboat\nCmEoz728tlo_2\tbear\nCmGSMnkcvrg_1\ttrain\nCmIXZuJDwt0_0\tperson\nCmNv_yKt5oM_0\tperson\nCmOIqZyQpPI_3\tbird\nCmOIqZyQpPI_1\tbird\nCmVoggJ6fxY_1\thorse\nCmYL2EyELbA_0\telephant\nCmezWT8A2i8_0\tbus\nCmjUCOwcOT8_4\tbicycle\nCmjUCOwcOT8_11\tbicycle\nCmjjEuS9_Ww_0\tbicycle\nCmjw8kbfDCw_1\tknife\nCmoknpL1cMA_0\tperson\nCmqXoT7CXJs_0\tdog\nCmq1qVX-Ugo_1\tcat\nCmsqpFOcosw_0\tperson\nCmtmoydPH08_0\tcow\nCmxhIEztsyg_1\tskateboard\nCm1y7USHcrg_0\tperson\nCm3tYZlSc0o_0\tskateboard\nCnBJ9TMTRAA_0\tperson\nCnBJ9TMTRAA_1\tperson\nCnCTVtsK5Kw_2\tbear\nCnEXHgq3AE4_2\telephant\nCnGp9Wq2rTs_0\tbear\nCniS9Q6Y200_0\tperson\nCn0UKsWocEI_0\telephant\nCn0UKsWocEI_1\telephant\nCn1dXZ_p3dw_1\tperson\nCn9Bj5B29UI_0\tmotorcycle\nCoBuNWx_OwM_0\tperson\nCoDB7ZeilsQ_0\tperson\nCoKMowfrd5Q_2\ttruck\nCoKMowfrd5Q_3\ttruck\nCoKVaYX3c1k_0\tperson\nCoKVaYX3c1k_1\tperson\nCoKVaYX3c1k_2\tperson\nCoOwm7ccDrs_0\ttruck\nCoSIyrW5lvA_1\tskateboard\nCoSSvI2-U_w_1\tbicycle\nCoZY8o0c-h8_0\telephant\nCoZY8o0c-h8_1\telephant\nCocSNWws-Qo_0\tperson\nCodelARKQ10_0\tskateboard\nCosYvoW04Uk_0\tperson\nCot7Xj8C308_0\tboat\nCoz9g_0N91c_0\tperson\nCo_XBpd6lxE_0\tperson\nCpDHwc5JmK8_3\telephant\nCpFiT_6KvM4_0\tperson\nCpF-80dM2aY_0\tperson\nCpF-80dM2aY_1\tperson\nCpxxxHYsJy8_0\tperson\nCp0lT2opaL0_1\tperson\nCqANE5ByBvY_0\tperson\nCqDjHjvw8T0_0\telephant\nCqDjHjvw8T0_1\telephant\nCqVeLNnA0vk_0\thorse\nCqZz9FnLLjk_0\tknife\nCqkhrld_7LU_0\tperson\nCqzahbOVzO4_0\tperson\nCq02-pFNn6w_0\tmotorcycle\nCq02-pFNn6w_1\tmotorcycle\nCq4KAVAWq7g_0\tperson\nCrAxPJajbcs_0\tairplane\nCrCNqDd18fw_0\tumbrella\nCrUmEDCjFtU_0\tperson\nCrUmEDCjFtU_1\tperson\nCraDHWuN4Q0_0\tperson\nCrgMhrCYmOo_2\tmotorcycle\nCriTKYemGmo_0\tperson\nCrmzwYKpLAY_0\tumbrella\nCrn24ZKAP1k_0\tperson\nCrsjxpJoY5Q_0\tperson\nCru8KBJqhng_0\tperson\nCrz3l2CEDzA_0\tperson\nCr0SWcS1qX0_0\tcow\nCr_B3I0QPEQ_6\tairplane\nCsM_GTD0TZE_0\tperson\nCsPLGd2dgl0_1\tairplane\nCsTntmE8EWs_0\tperson\nCsa542XNEXo_0\tperson\nCsfkuwD6-nA_0\tperson\nCsh_4yR8bFk_1\ttruck\nCsh_4yR8bFk_2\ttruck\nCsii4vkefsM_0\tboat\nCsii4vkefsM_2\tboat\nCsw3kLrhjoM_0\tperson\nCs38JY7Gqjo_3\tskateboard\nCs-Vx_ym23o_1\tbicycle\nCtC2yC9NGTk_0\tbird\nCtD4wnIU0Pw_0\tbicycle\nCtF9IxfLhaQ_1\tperson\nCtF9IxfLhaQ_2\tperson\nCtF9IxfLhaQ_0\tperson\nCtHIoS1lGKA_0\tperson\nCtLVK2j48gA_0\tperson\nCtO5dmTdzYQ_0\tperson\nCtPEAoFPnE4_0\tperson\nCtQPPKpIEIc_0\tperson\nCtTcyoZvRvU_2\tskateboard\nCtUPPSKU8cE_0\tbus\nCtVUqIFqqr8_2\tbus\nCtYDJRkhtpg_1\tumbrella\
nCtYDJRkhtpg_5\tumbrella\nCtfPPnpBKHs_2\tbird\nCtipU0GHAEo_1\telephant\nCtjTAe-FFe4_3\telephant\nCtkjh9fntpQ_0\tbird\nCtkjh9fntpQ_4\tbird\nCtkjh9fntpQ_5\tbird\nCtkjh9fntpQ_2\tbird\nCtkjh9fntpQ_3\tbird\nCtnjw80kgcw_0\tperson\nCtxK3wGlqx0_2\tmotorcycle\nCt1QrXUgBGg_0\tperson\nCt1QrXUgBGg_1\tperson\nCt8S9nC7sfk_1\tperson\nCt870xrnBGU_0\tperson\nCuDfCpgoIjg_6\tboat\nCuGfRQMwYd8_0\tcat\nCuHF9Hd0uwI_0\tperson\nCuIkNejeZrY_0\tcat\nCuUJUrjEcc4_0\tperson\nCuWdZPYMLww_0\tperson\nCvDW2A8hD78_0\tperson\nCvRJwKt7FfY_1\tskateboard\nCvVVS4SUiuw_1\ttrain\nCvZaA28QUK4_1\tknife\nCvajmAL3sjQ_0\tperson\nCvda-hutmbg_0\tdog\nCvqylkq9fwI_0\ttruck\nCvxsoaCV1_8_0\tperson\nCvzsX_s6tek_0\tperson\nCv2T8U0uQcQ_2\tperson\nCwAdBrBzIcA_0\ttruck\nCwBiMh4zHWQ_0\tperson\nCwFcmrnz1yw_0\telephant\nCwFcmrnz1yw_1\telephant\nCwFcmrnz1yw_2\telephant\nCwR2tJptu0Y_2\tmotorcycle\nCwVLRawns04_0\tperson\nCwVTSONqnVw_6\tknife\nCwnHi50fuuQ_0\tperson\nCwnHi50fuuQ_1\tperson\nCw22-zpE1UY_0\tperson\nCw3iLs4yV4g_0\tperson\nCxFRYsUCyWc_0\tcat\nCxH8vGqLVM0_0\tbicycle\nCxH8vGqLVM0_1\tbicycle\nCxH8vGqLVM0_3\tbicycle\nCxH8vGqLVM0_6\tbicycle\nCxJ7Uww1mSk_0\telephant\nCxN5CG94Q5Q_1\tairplane\nCxN-YEErXFg_0\ttrain\nCxPyIeBtRec_2\ttruck\nCxWaiU0rF9g_1\tcow\nCxWaiU0rF9g_0\tcow\nCxXdw0Cqr4Y_2\tairplane\nCxa8q3QXoRs_0\tperson\nCxgqklOxSfo_0\tairplane\nCxgqklOxSfo_2\tairplane\nCxnCTBBNWCY_0\tperson\nCxnCTBBNWCY_1\tperson\nCxoZT0--IBo_0\tperson\nCxooWldim98_0\tperson\nCxs-xZDDZWw_0\tperson\nCxug83tjWyc_0\thorse\nCxzJV_HYpAc_0\tairplane\nCxzJV_HYpAc_1\tairplane\nCx0XeFKQ06o_1\ttrain\nCx7ZY8oqOmE_10\tbicycle\nCx7ZY8oqOmE_6\tbicycle\nCx7ZY8oqOmE_8\tbicycle\nCx9efnltcUY_0\tperson\nCyE1kuECzfg_0\tperson\nCyH0woBc0zU_0\tboat\nCyI7nyp65bI_0\tperson\nCyI7nyp65bI_1\tperson\nCyLLTzV_lAg_0\tcat\nCyOXSqLm7ao_1\tperson\nCyb4-vF1WMM_0\tairplane\nCyedl__okwE_0\tperson\nCyedl__okwE_1\tperson\nCynfaDsQ1AI_0\tzebra\nCysFfEkdDT4_0\tbear\nCytiPd_Wbkg_0\tairplane\nCytiPd_Wbkg_1\tairplane\nCyvInNqvQyE_0\ttruck\nCy002CigJRQ_0\tperson\nCy_hvqOd0RY_0\tknife\nCzFRG22Jmvs_0\tcow\nCzHeIzQZUEg_0\tperson\nCzNFSb4N6p8_0\tperson\nCzQ03Z7Dv5U_2\tskateboard\nCzQ03Z7Dv5U_3\tskateboard\nCzQ03Z7Dv5U_6\tskateboard\nCza2-_wwpd4_0\tperson\nCza2-_wwpd4_1\tperson\nCzcwXF0Z1TQ_0\tcow\nCzt8McI8UTE_0\tperson\nCzze2Jy6Ook_0\tcat\nC0Tk6QryTA0_0\tbus\nC0Tk6QryTA0_1\tbus\nC0a9pkujXQg_1\tperson\nC0lvs-UEqKs_0\tperson\nC0pOQ36uosU_0\tperson\nC0pOQ36uosU_1\tperson\nC0qbh7OJTHI_2\tskateboard\nC0tGKqnFyZA_0\tperson\nC0xTDmlUYSA_0\tperson\nC0xZYHsXNws_0\tperson\nC0xZYHsXNws_1\tperson\nC0xjvq51pVA_0\thorse\nC0xl46ieUxg_0\tskateboard\nC0zUOQoeQrA_0\tperson\nC0zrmcMf8D4_0\tbird\nC05P4mCw-xA_0\tbear\nC1DCcNlUQDk_0\tboat\nC1DX9TjKTrE_0\tbus\nC1MfcNYih9c_1\tperson\nC1RCXQFjvvc_1\tperson\nC1RCXQFjvvc_0\tperson\nC1bdSMUVy2Q_1\ttruck\nC1bdSMUVy2Q_0\ttruck\nC16ZlJRDfUc_0\tbird\nC16_rFYBwUA_0\tperson\nC17jwrOnSCI_0\thorse\nC19rR4b8CSQ_0\tdog\nC1_gk-bIL6Y_0\tairplane\nC1_tauCAYjs_0\tperson\nC2GvHXU8mIc_0\tperson\nC2HZBTrCAf8_0\thorse\nC2Hcs2itPTc_1\telephant\nC2H_P7MX3zw_0\tbus\nC2H_P7MX3zw_1\tbus\nC2IJYHPWHJM_1\tcow\nC2K7zu49SKw_0\tperson\nC2K7zu49SKw_1\tperson\nC2LdkQMjxJk_0\tcow\nC2ROFMcXam4_0\tcat\nC2S4CV9mnC0_0\ttruck\nC2VjZHe3ID8_0\tperson\nC2r9VGslxTE_0\tperson\nC2v7hcs3Ax0_0\tzebra\nC2zRn25TBOo_1\tairplane\nC2zRn25TBOo_2\tairplane\nC2zRn25TBOo_4\tairplane\nC2zRn25TBOo_6\tairplane\nC23ZGYnWhgo_0\tperson\nC26HiGgIjYg_0\tperson\nC2-glFtt9Vw_0\tumbrella\nC3LbuiUjzvo_0\tcat\nC3LbuiUjzvo_1\tcat\nC3LbuiUjzvo_2\tcat\nC3Qu-KUydyg_1\tcow\nC3UX9hrlLeE_0\tperson\nC3YcvZKgCgY_0\tperson\nC3terpXzPm4_0\tperson\nC3z1zbkmwdU_0\tbi
rd\nC30B6KXg9vs_0\tperson\nC3399zrSQ6A_0\thorse\nC34_EkCWJaU_0\tmotorcycle\nC4HzsadhLW0_0\tboat\nC4QHknuNLYI_0\tperson\nC4RAj-omUMo_0\tperson\nC4W_g9eheB8_0\tskateboard\nC4XGGPoj4q8_0\tperson\nC4dV8SPq6Mk_0\tperson\nC4e-5QS1FmU_0\tumbrella\nC4e-5QS1FmU_1\tumbrella\nC4irKghQYTE_0\thorse\nC4jghf6KKYI_0\tskateboard\nC4vFHmzTY-s_0\tcat\nC4xJ3_Wrrn4_0\ttrain\nC4yVuAqcr0U_0\ttrain\nC409K0fAxiM_0\tperson\nC42397qio9c_1\tskateboard\nC4317zxtzKA_0\tperson\nC4-k1XW5O3U_0\tdog\nC5DAyL_gEQU_0\tcow\nC5GJx1VFRm8_2\tcow\nC5HT9La1jDY_0\tperson\nC5JobuZa590_0\tskateboard\nC5MJ8fSfmLw_2\tbear\nC5dPwnswp8Q_0\tcat\nC5jo-fCBqmA_0\tperson\nC5jo-fCBqmA_1\tperson\nC5jo-fCBqmA_2\tperson\nC5pop0SvnOM_0\tperson\nC5r41vkLsKE_0\tperson\nC5sXGZRLfmU_4\ttruck\nC5sXGZRLfmU_6\ttruck\nC5umaWklWFQ_0\tboat\nC5ybfGh51LM_0\tcat\nC55z9Fe6H7A_0\tdog\nC56Bp4toMG8_0\tperson\nC6NYuB7zIzs_0\tperson\nC6NYuB7zIzs_1\tperson\nC6XCgppHkHA_0\tbus\nC6Yy8uEd0bQ_0\tperson\nC6aB6M0DHrU_0\tperson\nC6cOmWIisxU_0\tperson\nC6eN6sMtuXY_1\tboat\nC6gNbZUU7xg_0\tperson\nC6ia-W4TV1U_0\thorse\nC6nHtSy67OY_0\tcow\nC6n6ECY5h84_0\tcow\nC6qWzx58kxo_0\telephant\nC6qWzx58kxo_2\telephant\nC6rqmPvlIlI_0\tperson\nC6upTeuDG4E_1\tskateboard\nC6xv6Wmy97M_0\thorse\nC62nD-_VXpM_0\thorse\nC62nD-_VXpM_1\thorse\nC66OM90TFXI_0\ttrain\nC66OM90TFXI_1\ttrain\nC66z-I_UHqQ_0\tairplane\nC6_p7BXwCTA_0\telephant\nC7CB2A_bxa0_0\tperson\nC7COsB9pcOQ_0\tperson\nC7CXGBdoJWo_0\tcat\nC7KZnM_0j8s_0\tperson\nC7QYoT22ZYo_0\ttrain\nC7W0oxkg-nU_0\tbicycle\nC7kKR6pqYzw_0\thorse\nC7to6tRsC9U_0\tperson\nC72k6hv1NPM_1\tcow\nC72k6hv1NPM_0\tcow\nC7-sqpILAXM_0\tperson\nC7-sqpILAXM_1\tperson\nC7_HhvBNDSw_0\tperson\nC8ETc2K6ef0_0\ttrain\nC8G_kcqjspU_0\tknife\nC8IE7aLZvIA_0\tperson\nC8IUB4Opf44_0\tperson\nC8IUB4Opf44_1\tperson\nC8PqOHn0izQ_6\tbird\nC8Zex-ptYyk_0\tperson\nC8daRmtyPo0_0\tperson\nC8fcFW4HKGs_0\tairplane\nC8mEWe-TWYs_0\tknife\nC8n1dTEDWvk_0\tskateboard\nC8ukXeoRjbI_0\tcow\nC9Zq_rDHwgg_1\tcow\nC9dD6oS_Zs0_0\tperson\nC9je005HOlA_0\tbus\nC9jqFBMRyPs_1\tperson\nC9vG5qPPhzE_1\ttrain\nC9wgqGACPso_2\telephant\nC95TX0IOPa8_0\tskateboard\nC97oHqKqdBk_0\tperson\nC97t3TGT2oc_0\tperson\nC-AoVBwcBUw_0\tperson\nC-FX5hgFDd0_2\tperson\nC-Q9RDsPyOw_0\tperson\nC-Q9RDsPyOw_1\tperson\nC-S34-Drg7M_0\tcow\nC-TWHpbtVNY_1\tperson\nC-WsGZQoLx0_0\tboat\nC-cL2hzThKI_3\tairplane\nC-cL2hzThKI_6\tairplane\nC-omy9mzD7E_0\tperson\nC-q9nO8X1rs_0\tperson\nC-seg-BCK0U_0\tbird\nC-v3Ttrvuo8_0\tairplane\nC-38hraIyOs_0\tperson\nC-47EdafspI_1\tairplane\nC-54wttM4AA_0\tperson\nC-9LBJqCMm0_0\ttrain\nC-_ebeJtjyE_0\tperson\nC_BX3dg-lc4_0\tperson\nC_DOGAVETwk_1\tbird\nC_EMJm-Z2I8_1\tbird\nC_EMJm-Z2I8_2\tbird\nC_EwPB6zgIA_0\tperson\nC_EwPB6zgIA_1\tperson\nC_GnC_IEwJM_0\tperson\nC_GnC_IEwJM_1\tperson\nC_HBU7EUsoE_1\tperson\nC_HBU7EUsoE_0\tperson\nC_IjqR1NOxw_0\tperson\nC_POS7ndKw0_0\ttruck\nC_PXq5TsPRQ_1\ttrain\nC_TfufSsuEU_1\tperson\nC_VePcGhr10_0\tknife\nC_aP0fKyudQ_0\thorse\nC_aYcFttRC8_1\tperson\nC_aYcFttRC8_0\tperson\nC_cUky_0p2Q_0\tcow\nC_uGdKk79X0_1\tperson\nC_ykabkQ2U0_2\tperson\nC_2EFIuyDSA_0\tperson\nC_2p_N8Kvpk_0\tperson\nDAJkfl5W8Vc_0\thorse\nDANymtBuoIs_0\tdog\nDAOBGjTf7xI_0\tperson\nDAQ9-YTrpp0_0\tcat\nDAU6UNdxbRI_0\tperson\nDAn4fH-1Ucs_0\tperson\nDApkEgrJX0Q_0\tperson\nDAqHnZA6tBQ_0\ttruck\nDAtSTeTmg8I_1\thorse\nDAwdyKiZyzM_0\tperson\nDA1bsx2RsGA_0\tperson\nDA1bsx2RsGA_1\tperson\nDA4LF3u2VTI_0\tcar\nDA5X-ADHM1w_0\tperson\nDBFMXaS9LRg_1\tumbrella\nDBLaZSSthxo_0\tperson\nDBR0l2rW6Ew_0\thorse\nDBVbRonJkb8_0\tperson\nDBaAVcI4Ftw_0\tperson\nDBaAVcI4Ftw_1\tperson\nDBmVOTuCJ8Q_0\tperson\nDBvOm1qnWrA_0\tcow\nDBySPDEqsO8_0\tpe
rson\nDB1Cvyyike0_0\tairplane\nDB3lsf7fD84_0\tdog\nDB6TJh9r1Dw_0\tperson\nDCE8Dg_ycjo_0\ttruck\nDCHv6sxfCAs_0\tperson\nDCPk1uyVNlU_0\tperson\nbmHyfvCZWsg_0\telephant\nbmHyfvCZWsg_2\telephant\nbmHyfvCZWsg_3\telephant\nbmLLdC88ohM_0\ttrain\nbmMB6Mr1uKI_1\tperson\nbmPhh5NpV7U_0\tperson\nbmQbHpw-4fY_1\tbird\nbmUFMo3pjyo_1\tairplane\nbmhSkbKIg0U_0\tcow\nbmhSkbKIg0U_2\tcow\nbmhSkbKIg0U_1\tcow\nbmhfPSKCY8I_1\tdog\nbmqPIwMWGj4_0\tperson\nbmuIwo4T6rk_0\tcow\nbmvh7yxyWcY_1\thorse\nbm2eU4uLgQE_0\tskateboard\nbm8MRDfmerA_2\tperson\nbm8MRDfmerA_0\tperson\nbnOUoCjxIvA_0\tbird\nbnWQnn3a2xE_0\tcat\nbnZwZd6xdHY_0\tperson\nbnc1LyPUCLg_0\ttrain\nbnfN43NoRbA_0\tperson\nbnqbJR2oSPk_1\tperson\nbnqbJR2oSPk_0\tperson\nbnsuTEBQy44_0\tperson\nbnw6G0Prvc0_0\tbus\nbnyALwWqo4Y_3\tcow\nbn8epY7auRE_1\tperson\nbn8epY7auRE_0\tperson\nbn9y-iIDoUU_0\tperson\nbn9y-iIDoUU_1\tperson\nboHeJDDjRf4_1\tperson\nboIKCyPzxr8_0\tbicycle\nboNYwNYmh1E_0\tcat\nboVVWwoXNDw_0\ttruck\nboZ6xZrNpzc_0\tperson\nboadjC5Lci8_0\tperson\nbocql7vYA4o_0\tbus\nboja3N4XQVo_0\tperson\nborBr_AiOmM_0\tperson\nbornws-twE0_4\tairplane\nbosTHwpZ8Ao_1\tdog\nbo7P3hYkeog_0\tperson\nbo9sUjViaHQ_0\tperson\nbo-qyHCKssw_0\tbird\nbo-qyHCKssw_4\tbird\nbpI4nUgSqbE_2\tperson\nbpI4nUgSqbE_0\tperson\nbpI4nUgSqbE_1\tperson\nbpJNbivFLKE_0\tskateboard\nbpdgYRz5hPs_0\tperson\nbpiM4FHf540_0\tperson\nbpjVhXyB4M0_0\tairplane\nbpjVhXyB4M0_2\tairplane\nbpsMni7yj3M_0\ttruck\nbps3HXPsekI_0\tbear\nbpu9NYWxcEE_0\tskateboard\nbpyH8PRkBQM_0\tperson\nbp1zW8j_ajo_3\tbus\nbp26IdTs4XE_0\tperson\nbp3rDJju8n4_0\tperson\nbp3xwI_FfOI_0\telephant\nbp6K7EUtORo_0\tcow\nbqBtysMz94c_0\tperson\nbqEmBkEnR1c_0\tperson\nbqGkchWbZYE_0\tcar\nbqJcZwUB1Go_0\tperson\nbqPKigpT9AY_0\tperson\nbqQk37pcpVA_0\tperson\nbqaeUBH6J3Y_0\tperson\nbqhQG8t_2XA_0\tperson\nbqjcNzWyaC4_1\tairplane\nbqoG__OO_5g_0\tperson\nbquLxAXnaww_0\ttruck\nbqwFWjwCZas_0\ttruck\nbq6n9q-Qpv8_0\tperson\nbq6870eY1a8_7\tbicycle\nbrDq8RFzVTo_1\ttruck\nbrIIDuCmk-E_0\tperson\nbrLbzZeRz1o_0\tperson\nbrLeJHMfMXQ_0\thorse\nbrNR68fKeMk_0\tbus\nbrWg7FAeBEA_0\tperson\nbrZj8bv9oxY_1\tperson\nbrhA4NqjrgQ_0\thorse\nbrh4hrmrs0Y_1\tskateboard\nbrpbaoTNe4s_4\tbicycle\nbrpbaoTNe4s_0\tbicycle\nbr3e--6oH8Y_0\tairplane\nbsGmFJGua4w_0\telephant\nbsR9KXIHlCM_0\tumbrella\nbsVBX8u9pW8_0\tbus\nbsXpGvnXpmk_0\tcow\nbsa-G_HEllM_0\tperson\nbsbzpk_ejJk_0\tperson\nbsbzpk_ejJk_1\tperson\nbsgdfqE8ySk_0\tperson\nbspbqjb3wAg_0\tperson\nbsv_swJ9_KY_0\tknife\nbs2FVeXKiYQ_0\tperson\nbs3u00S0eu0_0\tperson\nbtI7FYFXsfI_0\tperson\nbtL1Ptjq7pM_0\tmotorcycle\nbtMmnZdL_uQ_0\tperson\nbtO34shZMZo_0\thorse\nbtSyjckocDA_0\tperson\nbtVQJbFp8Dw_0\tcow\nbtdt4lysW6U_0\tdog\nbtihrVidTTg_0\tcat\nbtk27mnJY_A_1\tperson\nbtrdQ6N7QJc_0\ttruck\nbtrdQ6N7QJc_1\ttruck\nbtsT4XRF0nI_2\tcat\nbtul_U3BMKI_0\tbus\nbtvg47tz3Ps_1\tperson\nbtvg47tz3Ps_0\tperson\nbtz7EwI5rYY_0\tperson\nbt75khQG0w8_1\tbird\nbuFiFNHj41w_0\tperson\nbuOqwfPnqkI_0\tcow\nbuRfiT3Mq6Q_0\tbear\nbuSgd-PrRmA_0\telephant\nbuSgd-PrRmA_2\telephant\nbuSgd-PrRmA_6\telephant\nbuSgd-PrRmA_8\telephant\nbuWf8ffXWTs_0\tperson\nbue8SUcqigE_0\tcat\nbugTv6zkE0Q_0\tperson\nbuh8d20UxNw_1\tairplane\nbulc7gZ_YQY_0\tboat\nbuqR3s7EZeQ_0\tperson\nbuq0_IIvQqc_0\tperson\nbusJdrzEeJU_0\ttruck\nbuyJwHRaSYc_0\tperson\nbuyJwHRaSYc_1\tperson\nbuzd3FYmwQQ_0\tbus\nbu6QE_qf8fw_0\tskateboard\nbvLQLfRAI9s_0\tperson\nbvW_ZJYSOLg_0\tperson\nbva98_iD8pI_0\tperson\nbvc6dUfKFpM_0\tskateboard\nbvg-QHsENSc_0\tumbrella\nbvnuyMz5Pk4_1\tperson\nbvnuyMz5Pk4_0\tperson\nbvqPJIDHXHI_0\tperson\nbvqPJIDHXHI_1\tperson\nbvwJ75OkrTk_0\tperson\nbvwJ75OkrTk_1\tperson\nbvwwPOK7lN8_0\tskateboar
d\nbvw4raRDAys_0\tperson\nbvxAWBUG1zk_0\tdog\nbv6ASjMljew_2\tperson\nbv6ASjMljew_0\tperson\nbv6ASjMljew_1\tperson\nbv7NOTxSDhg_0\tperson\nbv7lroHoMyE_0\tperson\nbv8CHN4kwyM_0\tperson\nbv9J7oplKjY_1\tbird\nbv-ps8hofSY_0\tperson\nbv_rrakMnsY_0\telephant\nbwB-cfh8UFY_0\tcat\nbwIBXBulTRg_0\tperson\nbwM3RKdZAd0_1\tairplane\nbwM3RKdZAd0_2\tairplane\nbwSSE1XeKkg_0\tperson\nbwSSE1XeKkg_1\tperson\nbwTJKRhesM4_0\tperson\nbwZEDD10b44_0\tperson\nbwd7bbxG4Kw_1\tperson\nbwjUOg-CI1E_0\thorse\nbwotbTZHoPA_0\thorse\nbwotbTZHoPA_1\thorse\nbwv4Q2VqV5A_0\tbus\nbwv4Q2VqV5A_3\tbus\nbwwud6bxEeY_3\telephant\nbw1HepCVmL8_0\tperson\nbw3c96BQrRU_0\tcar\nbw3c96BQrRU_1\tcar\nbw96DHOgI1I_0\tairplane\nbw_opOTzI6k_0\tdog\nbxRX_05rH9Y_0\tbus\nbxXWi1nvXjI_1\tbird\nbxYeOYlqDPc_0\tcow\nbxaC_opt7IU_0\ttruck\nbxjIDI2ZkO4_0\tcat\nbxnu-AITJt4_0\tperson\nbxoclb4AFb8_0\tperson\nbxsI00qOi6c_0\tperson\nbx0h8tvY6kw_0\tperson\nbx6BVBAcBtM_0\tperson\nbx6BVBAcBtM_1\tperson\nbx7PtvZe6O8_1\tairplane\nbx7-RzWnIe4_1\ttruck\nbyDPGQJdn1s_0\tperson\nbyQIRt1JF9I_2\tdog\nbyQIRt1JF9I_0\tdog\nbyQIRt1JF9I_1\tdog\nbycJD4U6rIs_0\tbird\nbyehVoG0_eg_0\tperson\nbye0FepI8wg_0\tbird\nbyi-4Qx3vx4_0\tperson\nbykN9ap_QTw_0\tbird\nbyvddKaL_kw_0\tperson\nDCRIRGz2xhc_0\tperson\nDCRIRGz2xhc_1\tperson\nDCUcxHDfYiE_1\tcow\nDCUvhnZnRGQ_0\thorse\nDCXrBMEdS4E_1\tperson\nDCrv8CyK9zM_0\tbus\nDCx698xXxjs_0\tperson\nDC0PPRyXlD4_0\tperson\nDC4ZTdVoj2o_0\tboat\nDC5fRZmUZV8_1\tairplane\nDC8lKdla6rE_0\tperson\nDC8lKdla6rE_1\tperson\nDC_Kd2iaw9U_0\tperson\nDDZILIDFFXc_0\telephant\nDDd8CfnxkYM_0\tperson\nDDgtm9B7Yj0_0\ttrain\nDDhlugZ-vro_0\tperson\nDDhlugZ-vro_1\tperson\nDDjUzAM4mLE_0\tbus\nDDjUzAM4mLE_1\tbus\nDDjUzAM4mLE_2\tbus\nDDjUzAM4mLE_4\tbus\nDDoBBLQQ1Mg_0\ttrain\nDDtWIKexWpM_0\tskateboard\nDDw2iF2W4HI_0\tbird\nDD4YGjlBsHc_0\tboat\nDD844YVVMXE_6\tbicycle\nDD844YVVMXE_0\tbicycle\nDD844YVVMXE_1\tbicycle\nDD844YVVMXE_3\tbicycle\nDD844YVVMXE_4\tbicycle\nDD844YVVMXE_5\tbicycle\nDEHHjz2xiz4_0\tperson\nDEI-qJD08Pc_0\tperson\nDELUfY3m37k_0\tperson\nDEVUyfQt_G0_0\tcow\nDEVUyfQt_G0_3\tcow\nDEVUyfQt_G0_1\tcow\nDEXhh5rt_24_0\tmotorcycle\nDEXhh5rt_24_1\tmotorcycle\nDEZHoMWiFBQ_1\tperson\nDEau5L3A9S0_0\tperson\nDEjPKQLASJg_0\tumbrella\nDEtj0Fb-Jbo_0\tskateboard\nDEuYWYNXbw4_0\ttruck\nDE3kl7rbakE_0\tskateboard\nDE6z5oB-0vo_0\telephant\nDFBlkKPYtl0_1\tcow\nDFBlkKPYtl0_0\tcow\nDFI7_dtUb0U_1\tgiraffe\nDFI7_dtUb0U_3\tgiraffe\nDFRmdyjR_Dc_0\tgiraffe\nDFb4KWUX31Y_0\tperson\nDFpZ6f1iWT4_0\tperson\nDFwPVEPK4-Y_0\tcat\nDFzgqOHlnAk_0\tperson\nDGC_pivLAEE_0\tperson\nDGMfSMlhL4w_4\telephant\nDGMfSMlhL4w_6\telephant\nDGMfSMlhL4w_13\telephant\nDGMfSMlhL4w_17\telephant\nDGM9CDF3ks8_2\tmotorcycle\nDGM9CDF3ks8_0\tmotorcycle\nDGM9CDF3ks8_1\tmotorcycle\nDGbZYKPp7XI_0\tperson\nDGc9VSWQUyQ_2\tperson\nDGc9VSWQUyQ_1\tperson\nDGp5vBVf28g_0\tperson\nDGsQAjKXPBw_0\tcat\nDGs0ZHnAtkg_1\tperson\nDGs0ZHnAtkg_0\tperson\nDGvsndSWlBw_0\telephant\nDGx5aC4h8wg_0\thorse\nDGygUuHcJhs_0\tperson\nDGygUuHcJhs_1\tperson\nDG8TJBoerZ0_1\tperson\nDG8TJBoerZ0_0\tperson\nDG93jIsco3E_0\tperson\nDG93jIsco3E_1\tperson\nDHB_RgHOHdo_0\tumbrella\nDHB_RgHOHdo_1\tumbrella\nDHLK8xDGwL0_2\tknife\nDHLg5KzzoOM_2\tcow\nDHLg5KzzoOM_0\tcow\nDHPWnuYI2qA_0\tperson\nDHSGQLguGZ4_0\ttruck\nDHdFVfp7SvM_1\thorse\nDHl_QoiyZ2I_1\tperson\nDHl_QoiyZ2I_2\tperson\nDHl_QoiyZ2I_0\tperson\nDHqrGwHgnAA_0\tperson\nDHr77uGYi-g_0\tdog\nDHsorh6ngMI_0\tumbrella\nDHs1KtWx2n4_0\tperson\nDH0OVsYB2vs_0\tperson\nDH5nSZZ6uJE_0\tumbrella\nDH_wEdP1Glk_2\ttrain\nDIFEQ3rorSw_0\tperson\nDILtO1oyoCY_0\tperson\nDIOuJC_mv_k_0\tperson\nDIO8l6DAJX0_0\tperson\nDIO8l6DAJX0_1\tperson\n
DIP8d1YC6vM_0\tperson\nDISU2i6bJqs_0\tcow\nDIaTXSXAfJM_1\tperson\nDIaTXSXAfJM_0\tperson\nDIpJyhb8gzw_3\tmotorcycle\nDI7rj5AAYEE_0\telephant\nDI801ysby74_0\tknife\nDJD4Xlf0eNg_0\tperson\nDJKFzJe6KAk_1\tskateboard\nDJKokwprK90_2\tskateboard\nDJLSHLPE0po_0\tperson\nDJQ8goQ4xyo_0\tperson\nDJV-ft_10HY_1\tperson\nDJjjrdYts2s_0\telephant\nDJ4oQ03HqyE_0\tbicycle\nDKBIz_MLIpw_2\tknife\nDKC58UBq-0w_1\tairplane\nDKEmSml-t4c_1\tperson\nDKEmSml-t4c_0\tperson\nDKHCjzNZE3U_0\telephant\nDKHCjzNZE3U_4\telephant\nDKICHseWnGQ_0\tperson\nDKJ3As_9Mlw_0\tperson\nDKKsGGUWero_0\tperson\nDKLxBVm3HHk_0\tairplane\nDKMUARFnh2Q_0\tperson\nDKShwn6Xk8w_0\tcat\nDKZ21QA0lBM_1\tperson\nDKcpPg_tEUU_0\tskateboard\nDKj3fFeAaL8_0\tperson\nDKq7d2C6gOI_0\tmotorcycle\nDKxIadOj4D0_0\thorse\nDKyckH3XY8Y_0\tbicycle\nDKydJWySeUw_0\tcar\nDLKE31mt2Qc_0\tbird\nDLLrkv1aF-k_0\ttrain\nDLMDzB4XBPg_0\tperson\nDLPmEX5pwY0_0\tcow\nDLT57E3vm98_2\ttruck\nDLct7_2tyWI_0\tperson\nDLd6kxxgSUM_0\tperson\nDLkx4w5oteM_0\tperson\nDLmCj6q5vD0_0\tperson\nDL3V2mhMX7M_0\tskateboard\nDL3eQSTbZ9Y_0\tskateboard\nDMB6Mr7lTSI_0\tperson\nDMEXGsc-PaU_0\tperson\nDMFEU87_IrU_2\tboat\nDMR4kX1M_zk_2\telephant\nDMR4kX1M_zk_1\telephant\nDMTP7OyjdJ4_4\tbus\nDMT_n1VJG80_2\tbird\nDMbwyGKLF4c_0\tperson\nDMb-AjUXKe8_0\tgiraffe\nDMiFC67o2P0_1\thorse\nDMiFC67o2P0_2\thorse\nDMiFC67o2P0_3\thorse\nDMn1JpU6MBE_0\tperson\nDMn-kaSNd5Q_0\tperson\nDMuLn7wJTcc_0\tperson\nDM7c57qvjgs_0\tperson\nDNAMMWkSfLY_11\tumbrella\nDNAjFU24eK8_0\tboat\nDNB4bgEP-8Y_0\tperson\nDNGlLqzJF6Q_0\tperson\nDNGlLqzJF6Q_1\tperson\nDNOZeC0gZzs_0\ttruck\nDNXuVh_X_qY_1\tperson\nDNXuVh_X_qY_0\tperson\nDNhOrRaOe2M_0\tperson\nDNul7ILzxkQ_0\tperson\nDNul7ILzxkQ_1\tperson\nDN0xWDfCAM0_0\tmotorcycle\nDN1ujoUaAKU_0\tperson\nDN1ujoUaAKU_1\tperson\nDN4TuB3csDg_0\tperson\nDN4e8ljPm1g_0\tbicycle\nDN5mGCGzOOY_0\tperson\nDN7FitWe9k8_0\tperson\nDN8yb60bxNc_0\tperson\nDOAU-JodN0U_1\tairplane\nDOAmtFxCuKA_1\tperson\nDODU9JghuAA_0\tcow\nDORauVZJhAU_1\tperson\nDORauVZJhAU_0\tperson\nDOhLqHOLbQY_0\tperson\nDOiUy3AGiKw_0\tperson\nDOiUy3AGiKw_2\tperson\nDOoTpSSHVho_0\ttruck\nDOoTpSSHVho_1\ttruck\nDOsVwDV787M_0\tbus\nDOuULWa1RKM_0\tperson\nDOvC_-Yrn5k_0\tcat\nDPAEt1AqwbQ_1\tcar\nDPCyQOQdLHE_0\tcat\nDPFO_O_f3hc_0\tcow\nDPIm8x0i2yo_0\tmotorcycle\nDPJ7ZSWY2Qs_0\tskateboard\nDPXJpAVtRfM_0\ttrain\nDPXJpAVtRfM_1\ttrain\nDPZi4DZaTmk_0\tperson\nDPZi4DZaTmk_1\tperson\nDPelBJ73uaU_0\tbicycle\nDPo9M61p8gI_0\tumbrella\nDPvxwOvedrQ_1\tknife\nDPz3CG4lD2Q_5\ttruck\nDPz3CG4lD2Q_6\ttruck\nDP2q1TrqjAE_0\tperson\nDP2q1TrqjAE_1\tperson\nDP6ZB5PxNfc_0\tperson\nDP-JZPR9HFc_2\telephant\nDQDV1Wr7qo8_0\tbear\nDQOglBZHFCs_0\tbear\nDQZiSQmMBnc_0\tbird\nDQcCfbTKP1s_1\tperson\nDQcCfbTKP1s_2\tperson\nDQcCfbTKP1s_0\tperson\nbywgcqNg6RU_2\tcar\nby7PLb7MqM0_0\tmotorcycle\nby_OJvQqlKE_0\tperson\nbzKVRbSQpZE_0\tknife\nbzLdvZQAWgA_0\tperson\nbzO5MBTTrdQ_0\tperson\nbzRELZo9WMU_2\tdog\nbzRELZo9WMU_0\tdog\nbzZgsynjAGk_0\tcow\nbzfE3U02_44_1\tperson\nbzfE3U02_44_0\tperson\nbzimWzymgu0_0\tperson\nbzquVP0NUms_2\ttruck\nbz5Ht4jyT0k_0\tbus\nbz66OedbeoI_0\tperson\nb0C_2T7-IfU_0\tcat\nb0GlXXGkfRQ_0\tperson\nb0GlXXGkfRQ_1\tperson\nb0HXAfyZ7Sk_1\tperson\nb0Q3EfK70fg_2\tairplane\nb0Q3EfK70fg_4\tairplane\nb0Q3EfK70fg_5\tairplane\nb0Q3EfK70fg_6\tairplane\nb0a7ewqE8S4_0\tdog\nb0nOQfZSaUo_0\tperson\nb0nt17hBmDw_0\tboat\nb0qXUUs3-WE_1\tperson\nb0t8uuynzIM_0\ttrain\nb0xQRq8njAI_0\tcat\nb0z1nalEX08_0\ttruck\nb0-UOt-DT1A_0\tperson\nb1ETK4nP9ag_0\tdog\nb1EnXvOZQbQ_0\ttruck\nb1Gd5IWJBRI_0\tperson\nb1R3uk0VLc4_0\tperson\nb1SyeZsSk80_5\telephant\nb1SyeZsSk80_3\telephant\nb1UAPTD4s74_0\tperson\nb
1UpjRRBrTw_0\tcat\nb1cpAYk99_U_0\tperson\nb1cpAYk99_U_2\tperson\nb1cpAYk99_U_3\tperson\nb17OiOMReIs_0\tperson\nb1-WFxZ7Lcs_0\ttruck\nb2DqNP9s4t0_0\tperson\nb2Tm_7DUimQ_0\tperson\nb2Y6KLIX5vE_1\tmotorcycle\nb2Y6KLIX5vE_0\tmotorcycle\nb2azzMxEH84_0\tmotorcycle\nb2fq5Ba1L8M_0\tperson\nb2fsE3wZfWM_1\tperson\nb2m2gaVpjNE_0\tperson\nb2qNS9qjYbE_1\tperson\nb2tlrwd_LIg_0\tperson\nb28pEbOSeUs_0\tdog\nb2_dSc2NxNI_0\tperson\nb3KP0d-WX38_0\tbicycle\nb3KP0d-WX38_1\tbicycle\nb3KP0d-WX38_2\tbicycle\nb3R6fHlRZu4_1\tbicycle\nb3R6fHlRZu4_3\tbicycle\nb3R6fHlRZu4_4\tbicycle\nb3SsKosfjOA_0\ttrain\nb3SsKosfjOA_1\ttrain\nb3SsKosfjOA_2\ttrain\nb3UOZHA5jRI_0\tcat\nb3Z1Ay2o1zQ_0\tknife\nb3bkNCYQbwc_0\tcow\nb3p-fFVYM4E_2\ttrain\nb3p-fFVYM4E_4\ttrain\nb3p-fFVYM4E_6\ttrain\nb3tgGsan2vc_0\ttruck\nb3x6f5xFPTQ_0\thorse\nb3x6f5xFPTQ_1\thorse\nb3x8Gwk4V8o_1\tperson\nb3x8Gwk4V8o_0\tperson\nb323CLKf_vM_0\tperson\nb34Cdm6l5_k_1\tairplane\nb34JUq19S0E_2\tmotorcycle\nb34JUq19S0E_0\tmotorcycle\nb34JUq19S0E_1\tmotorcycle\nb344je6lVYA_0\tairplane\nb35ihWGyz_4_0\tcat\nb37tPdAEkEw_0\tperson\nb39uBVwcm48_0\tmotorcycle\nb4E8uT19QkY_0\tbus\nb4E8uT19QkY_1\tbus\nb4FBbr4Pud8_0\tperson\nb4GXrkSKAdA_0\tcat\nb4HAPQ_xX5E_0\tperson\nb4HAPQ_xX5E_1\tperson\nb4KwBIif5OY_0\tcow\nb4KwBIif5OY_2\tcow\nb4KwBIif5OY_3\tcow\nb4KwBIif5OY_4\tcow\nb4UXSjdnqZ0_0\tperson\nb4Xn8--nfvI_0\tperson\nb4aEJNvYqtU_0\tbear\nb4j8lkkY_lE_0\tzebra\nb4tTUDVt6Gk_0\tperson\nb42WUwHAKPs_0\tboat\nb455pPKgTj4_0\tperson\nb5D9lQq3uf8_0\tbear\nb5IshxZjL7o_0\tmotorcycle\nb5NxbNaAo_8_0\tperson\nb5R1HVvc040_1\ttrain\nb5S8Db1Gu7I_1\tbicycle\nb5S8Db1Gu7I_3\tbicycle\nb5T_VSM7nbg_0\tmotorcycle\nb5nwFyniymA_0\tdog\nb5ud9dsnS1c_1\tperson\nb5ud9dsnS1c_0\tperson\nb51dSWD8MF4_0\telephant\nb59pPUKW_78_0\tcar\nb5-eXPHW4Mg_0\tperson\nb6AoStVIzkw_2\tperson\nb6IE2imnfp4_0\tperson\nb6MtzhRufn4_2\tskateboard\nb6MtzhRufn4_0\tskateboard\nb6RIavVJ660_1\tperson\nb6dVZMAHwro_1\tairplane\nb6gsIu7Pxbc_0\tdog\nb6ndIInoIzU_0\tboat\nb6xUAyNCbdY_0\tperson\nb61MghNCCTI_0\tperson\nb61MghNCCTI_1\tperson\nb65S2P2Pfms_0\tperson\nb66BE9WdQP0_2\tbicycle\nb7HqfhRNtAQ_0\tcow\nb7H_n_w2eFQ_0\tperson\nb7Igw_OO-P4_0\tperson\nb7LHlx86tk0_0\ttrain\nb7RYkf4oXv0_0\tskateboard\nb7WQe48-0NI_1\tgiraffe\nb7WQe48-0NI_0\telephant\nb7WiE1a8IAM_0\tperson\nb7go-l8jA5s_1\tboat\nb7hJ62ORLHc_0\tperson\nb7iLQoOKVrM_1\thorse\nb7ivqvv6s6A_0\tmotorcycle\nb7mawJlPASQ_0\tperson\nb7u0NZEc8OI_1\tperson\nb7ycKg8GLHA_0\tperson\nb71SThzfrDg_0\tbird\nb78PYqyYWZA_0\tperson\nb8LqaxvNRHw_0\tperson\nb8LqaxvNRHw_1\tperson\nb8VoRclgULc_0\tcat\nb8aWJIa4RFI_0\tgiraffe\nb8es8BWiC5c_1\tperson\nb8g4M9Yov8M_11\tbear\nb8g4M9Yov8M_3\tbear\nb8xtOCMwjJM_1\tbird\nb8x1qHT8nvE_2\tboat\nb8yA8bHlrtQ_0\tbus\nb8yqEFXS8Ck_0\thorse\nb82N91HYnUo_0\tknife\nb9O_mJTNj2A_0\ttrain\nb9SLHObDJzQ_0\thorse\nb9Y5tpPv-LQ_0\tcar\nb9iCmG9fIHc_1\tmotorcycle\nb9melHkIeV4_0\tbird\nb9oiO21MJh0_0\thorse\nb9oiO21MJh0_1\thorse\nb9u4WV9ft4s_0\tmotorcycle\nb9wwfAu5DCs_0\tskateboard\nb96WdT0DXKk_2\tbicycle\nb96WdT0DXKk_0\tbicycle\nb96WdT0DXKk_1\tbicycle\nb98Gs0d8AKo_0\tmotorcycle\nb9-xiVm1Xck_0\tskateboard\nb9-2bW13faI_0\tperson\nb-Cp0i6fBOU_0\tperson\nb-Cp0i6fBOU_1\tperson\nb-S7G5A0MNI_0\tperson\nb-T0AS7CuxI_1\tknife\nb-VYy9eEU6w_0\tperson\nb-W1PY33nQg_0\tperson\nb-hT8zKObfM_0\tperson\nb-hqwYjKCH8_0\ttruck\nb-i49sLOjBo_0\tperson\nb-i49sLOjBo_1\tperson\nb-mQajOHUAA_0\tperson\nb-mQajOHUAA_1\tperson\nb-mQajOHUAA_2\tperson\nb-ncxt38EFw_0\tperson\nb-wiIOBccF0_1\tperson\nb-x--HjbnpM_0\tknife\nb-5K7RwiHdw_3\tboat\nb-8ARNgk-Tw_0\tperson\nb-_FeNpM_wI_0\tperson\nb_B3oYiBWi4_1\tskateboard\nb_KBD-NL4Vo_0\ttra
in\nb_ZVDwMrcEU_0\tairplane\nb_exMPY7gnM_0\tperson\nb_fR7aS10Z0_0\tbear\nb_h4xugql44_0\tumbrella\nb_kksCK6cbw_0\tcat\nb_n776bwyJo_0\tboat\nb_n776bwyJo_1\tboat\nb_vDLf3193s_0\tbus\nb_1TwBIgwKE_0\tcar\nb_7EvlxDWFc_0\ttruck\ncAARR6q3Qq8_1\tskateboard\ncAARR6q3Qq8_0\tskateboard\ncAFqK_6ltXw_0\tcat\ncAJsxlkMG_s_0\tdog\ncAJsxlkMG_s_2\tdog\ncAJsxlkMG_s_1\tdog\ncAKfCLDFg34_1\tperson\ncASL6wZ33vA_0\tboat\ncAYIECe6Bvs_0\ttruck\ncAnDryag2FA_0\ttruck\ncAqs3d9KNzk_0\tperson\ncArYvJEUdOg_0\thorse\ncA0HCmGOK84_8\thorse\ncBAG9pjaV70_0\tcow\ncBBDfwkH23A_5\thorse\ncBBDfwkH23A_2\thorse\nDQk3Xvbv57I_0\tcat\nDQqBXfTgqTE_0\ttrain\nDQ04rtHIqHQ_0\telephant\nDQ7GZOJxra8_0\tperson\nDQ-vQygnOx0_0\ttrain\nDQ-vQygnOx0_1\ttrain\nDQ-vQygnOx0_2\ttrain\nDQ-vQygnOx0_5\ttrain\nDQ-vQygnOx0_7\ttrain\nDQ_yyvagS0g_0\ttruck\nDRMoOpmUgn8_0\tperson\nDRO4MalcQFk_0\tperson\nDRSSiSNzV7Y_0\tperson\nDRXxJArWrQA_0\tperson\nDRaIGIiQXd0_1\ttrain\nDRaX3P2ysBk_0\tperson\nDRhRKwI26n8_0\tbear\nDRhRKwI26n8_1\tbear\nDRseWxukwaI_0\tperson\nDRsoi5DxAJk_0\tcar\nDRuDqkZ0zfE_0\tperson\nDRuDqkZ0zfE_2\tperson\nDRuDqkZ0zfE_1\tperson\nDRxLQ6we5YU_0\thorse\nDRybt0Cgr_U_1\tbird\nDR0QGL0n_wM_0\tperson\nDR4mzyMklY8_0\tskateboard\nDR82KhNzs1w_0\tperson\nDR-AMnnLCCQ_0\tcat\nDR_jo4aSqn0_0\tperson\nDR_jo4aSqn0_1\tperson\nDSAbzYpUW5w_0\tcow\nDSB9X3bgG2A_0\tperson\nDSCt67aveiw_0\ttruck\nDSCt67aveiw_2\ttruck\nDSEt02E1kJE_0\tperson\nDSM_BlK-ggg_1\tperson\nDSM_BlK-ggg_2\tperson\nDSRGbK9rPbo_0\ttrain\nDSWlLGL3xj8_0\thorse\nDSZkEwhJEI4_0\tskateboard\nDSaSooZZeAg_2\tbus\nDSn5-dKW_P0_0\tperson\nDSoRmFNRxiE_0\tperson\nDSoRmFNRxiE_1\tperson\nDSqy2MlVOxE_0\tperson\nDSq0q8dCuCw_0\ttruck\nDS5z-K8Cpzs_0\tperson\nDS-V_NKOawo_0\tknife\nDTBhYAFcQ94_0\tskateboard\nDTFg8SeWhbE_3\tskateboard\nDTYiSIRTXW8_0\tknife\nDTZkCYvGZ9E_0\tperson\nDTm5L6IAHC4_0\tperson\nDTnIC_Q8YoY_1\tboat\nDTs2uXh47Xw_0\tperson\nDTtejx1VYBs_0\tperson\nDTvjWj60ixI_0\tperson\nDTvzQwX0KRQ_1\thorse\nDT4KxrhD89E_0\tperson\nDT7TSCbFXek_0\tperson\nDUAhVOWkluQ_0\tperson\nDUAhVOWkluQ_1\tperson\nDUBzIIKht_w_0\tperson\nDUBzIIKht_w_1\tperson\nDUB3OOi7dQc_0\tperson\nDUHEv94Tyno_0\tperson\nDUHEv94Tyno_1\tperson\nDUHEv94Tyno_2\tperson\nDUPQ3fPhomY_0\tperson\nDUQa7q5NTQI_1\thorse\nDUZhPq4FiJM_1\tperson\nDUb6-VQcokc_0\tcat\nDUlYPwiuBrw_0\ttruck\nDUlYPwiuBrw_1\ttruck\nDUmKu-rc7jI_0\tperson\nDUwVOy7IYvA_0\tperson\nDUxGnuYB_GI_0\tcow\nDU1ww3ryP7s_0\tperson\nDU4acd1_vuI_0\tperson\nDU8jvzO9tEA_0\tzebra\nDVFfZw4HW3E_0\ttrain\nDVFfZw4HW3E_1\ttrain\nDVK9BrG_Y_8_0\tperson\nDVOFKTeh9BY_0\tperson\nDVgCgSDZVw0_0\tperson\nDVjOMylPUfU_0\tperson\nDVlEnd5Ra2Y_0\tperson\nDVm_-u6oWwA_0\tcar\nDVqsCPYrMrg_0\tperson\nDVqsCPYrMrg_1\tperson\nDV4GPAloBks_1\tperson\nDV4GPAloBks_0\tperson\nDV79-MpnE1Y_0\tperson\nDWQ0kmCIT0E_0\tperson\nDWZNfCg0W8o_0\tperson\nDWjj9U_lr30_0\tperson\nDWoRZEAFpUI_0\tperson\nDWqyeu4eovM_0\thorse\nDWuaB5j6-CQ_0\tperson\nDWwGWBcxL0k_0\tperson\nDW1iqzQEWkE_0\tperson\nDW4OTTF7Jc4_0\tperson\nDW8G3A0trOk_9\tbear\nDXEqDJWN72E_0\tperson\nDXEqDJWN72E_1\tperson\nDXI2AmrILgw_1\tcat\nDXa15hEKLAc_0\ttruck\nDXgs-pfW-0M_0\ttrain\nDXpyVrXMs1w_0\tperson\nDX5AP4s6u0k_0\tbird\nDX867I2CNRk_0\tairplane\nDX-PbjeeB6o_1\tgiraffe\nDYJJBRoUlnU_0\tknife\nDYUiMLisOzs_0\tperson\nDYbb8_mMeLs_0\thorse\nDYhTdNMuv5g_0\tknife\nDYkV2TPfOBk_0\ttruck\nDYlrCUMDv_g_0\tcat\nDYpBOmbclGY_0\tperson\nDYqIQv97tuE_0\tperson\nDYvHdc4rnxk_2\tperson\nDYvHdc4rnxk_1\tperson\nDY0ggbU0cIk_0\tperson\nDY3h0Y3ijmo_0\telephant\nDY3h0Y3ijmo_2\telephant\nDY6eQdk8jaE_0\tperson\nDZESlirYB3I_1\ttrain\nDZGEjl9U78c_0\tperson\nDZIFKtO6y2Q_0\tperson\nDZIFKtO6y2Q_1\tperson\nDZMd9NPNnLE_0\t
person\nDZRZg1gGn1g_0\tbus\nDZWsGelqCPg_0\tperson\nDZXldsAgY7o_4\tskateboard\nDZYjfZMMVAE_1\tperson\nDZgbeXD-bZg_0\tbear\nDZqs7ie6HPU_0\tperson\nDZ3JlgmRHQ8_0\tperson\nDZ4G9EBImOM_1\tperson\nDaMdWu7CyRE_0\tperson\nDaRYBq6zsmY_2\telephant\nDagKzwyphZY_0\tperson\nDapmUIRDw3o_0\tairplane\nDaqVTidNtg0_1\tperson\nDatNYbTqxlw_0\tperson\nDaz5kZBXn5c_1\telephant\nDa10JheIcaw_0\tperson\nDa25bjhf1WQ_0\tperson\nDbAZPBnTh3U_1\tperson\nDbGX12xMbWM_0\tperson\nDbNOHXsDP5I_1\tboat\nDbSGsjNmQ8A_0\tcat\nDbXz_8anwSM_0\tperson\nDbZGV4ixs2E_0\tbird\nDbdZugU9GWk_0\tbus\nDbeCxvMCD-Q_0\tperson\nDbfJ2s7qQJ8_0\ttruck\nDbivV-It_rM_0\tperson\nDbmwr1_ObHM_0\tperson\nDbnhReILFSs_0\tperson\nDboUAm-F7Rg_0\tperson\nDbpte835xwc_0\tperson\nDbqj1XCvcGw_1\tcow\nDbrGY3BalZ0_0\tskateboard\nDbrGY3BalZ0_3\tskateboard\nDbrGY3BalZ0_2\tskateboard\nDbvkTKJjRj8_0\tperson\nDbwEevYFGrg_0\tperson\nDbzakdG34mg_0\tcar\nDbzakdG34mg_1\tcar\nDb3OG025sz0_0\tperson\nDb74WjMmf-0_0\tbear\nDb74WjMmf-0_1\tbear\nDcAxPsNVe28_0\ttrain\nDcFWetycnqY_0\tperson\nDcKjrocJ8iM_0\tperson\nDcKjrocJ8iM_1\tperson\nDcOl0Ec1kuI_0\tperson\nDca5CTtFQZ8_0\tmotorcycle\nDcexSE28IOA_2\tperson\nDcexSE28IOA_0\tperson\nDcexSE28IOA_1\tperson\nDcfs-bFQcxk_0\tperson\nDcj-1vKe6iI_0\telephant\nDckRd1CpSm0_0\tskateboard\nDckTHE_Pn5Q_0\tperson\nDcknQtmjIDA_0\telephant\nDclr-tDJMO8_0\tperson\nDcpuJSx5z78_0\tperson\nDcpuJSx5z78_1\tperson\nDc3yhv5mfN8_0\tperson\nDc4EXPP0fqU_0\tcat\nDc9dWfPxIEM_0\tbicycle\nDdGvFcujfxo_0\tperson\nDdHWfz7kw4I_0\tperson\nDdJuIi7LexI_0\tbus\nDdKvI-6rMII_1\tperson\nDdNpi-Pmvgc_0\tperson\nDdNpi-Pmvgc_1\tperson\nDdNpi-Pmvgc_2\tperson\nDdOk9lG9b1k_0\tknife\nDdUa-CozM14_0\tperson\nDdUa-CozM14_1\tperson\nDdYyeGgXLKw_0\tperson\nDddB5joJQC4_0\tairplane\nDddRHyvYqFI_0\tperson\nDddRHyvYqFI_1\tperson\nDdf4T9I0sdI_0\tperson\nDdz7VVJXgHs_0\tperson\nDd2qrXASEzk_1\tperson\nDd2qrXASEzk_0\tperson\nDeCtt_QZqjk_0\tperson\nDeCtt_QZqjk_2\tperson\nDeFuoRV0yCw_0\tperson\nDeFuoRV0yCw_1\tperson\nDeHiMvczAD4_0\tperson\nDeIpwOsUzjw_0\tperson\nDeVZ83g93sE_1\tbird\nDeViLrLvD1Y_0\thorse\nDefHSc2VTOo_0\tperson\nDfGzSVv2ELQ_4\thorse\nDfGzSVv2ELQ_1\thorse\nDfGzSVv2ELQ_3\thorse\nDfS7lvAcDQc_0\tumbrella\nDfS7lvAcDQc_12\tumbrella\nDfT_7BUGNQA_0\tperson\ncBI2gZhpA-8_0\tperson\ncBMnKBVcoOE_0\tperson\ncBMnKBVcoOE_1\tperson\ncBQJU95uwwM_0\tperson\ncBQJU95uwwM_1\tperson\ncBSbDKv-Z_o_0\tcar\ncBb6VPKgF1M_0\tknife\ncBeH0xcCCWE_1\tperson\ncBhDn0TkAdc_0\telephant\ncBhDn0TkAdc_2\telephant\ncBhDn0TkAdc_3\telephant\ncBhDn0TkAdc_1\telephant\ncBlqBEElvDI_0\tperson\ncBpFzTn_uOo_0\tperson\ncBvZAwlCN4M_1\thorse\ncBvZAwlCN4M_2\thorse\ncB1RhnpteUg_3\tairplane\ncB9XRu3bb_0_0\tperson\ncB_RQN9IXg8_2\tskateboard\ncCA7llOU4HQ_0\tperson\ncCEUd1IZ6OQ_0\tperson\ncCEUd1IZ6OQ_2\tperson\ncCMe4KdqzeI_0\tperson\ncCaz75u-bCM_0\tmotorcycle\ncCfInBOvqkk_0\tperson\ncCfVriTflG8_0\tperson\ncCnjh5F8dvM_2\tboat\ncCvpQCZ33xQ_0\ttrain\ncCwB7O-yg4Q_1\tairplane\ncCxZRIxh_yk_0\tcow\ncC2UgNbG7Rs_0\tcat\ncC3-bziiNKk_0\tcow\ncC3-bziiNKk_4\tcow\ncC4nZNGoC-g_1\thorse\ncC4nZNGoC-g_2\thorse\ncDGz5cnIzK0_0\ttrain\ncDIc8cs3igI_1\tperson\ncDL0YZ_vXOk_1\tperson\ncDaR5WdXvIo_0\tdog\ncDfSk2g6wRM_0\tdog\ncDg-vYWO3AI_0\tumbrella\ncDvCYN97QYU_0\tdog\ncDvWWER9oeI_0\tperson\ncD_EAISZcwM_0\tperson\ncD_zwwrcvkI_1\tperson\ncEAwCEnfITY_2\thorse\ncEFLP7rdZSU_0\tperson\ncEIAg54WPCs_0\tskateboard\ncEOHFcu3Uic_0\tperson\ncEOqnkbgfMQ_0\tperson\ncEXYVwmcpSg_0\tperson\ncEdeOfPvcQ0_0\tperson\ncEomNeUqQUI_0\tumbrella\ncErRs5qv8mc_0\telephant\ncEyCX-t8Jlo_0\tbird\ncEyCX-t8Jlo_1\tbird\ncEzC3hwdO_o_0\tperson\ncE7AS1hrlYA_1\tperson\ncE7AS1hrlYA_0\tperson\ncFBoLads7vA_0\
tperson\ncFHTt7uFxH4_4\tumbrella\ncFOk-AMS2Aw_0\tmotorcycle\ncFOk-AMS2Aw_1\tmotorcycle\ncFkmNa2nYEk_0\tperson\ncFoUf9UmoZ0_0\tperson\ncFq4fzO00qE_0\tcat\ncFtfKwaxphA_0\tperson\ncFuoJPf6prU_0\tskateboard\ncFzjl_SiNhg_2\tdog\ncFzjl_SiNhg_0\tdog\ncF0SM2Lf82s_0\tperson\ncF7uQwB8sEg_0\tperson\ncF9YklqKEp0_0\tcow\ncGBOBTCgzP8_3\thorse\ncGBOBTCgzP8_4\thorse\ncGCbcyeQqG8_0\tperson\ncGCbcyeQqG8_1\tperson\ncGC4pGWPOUk_0\tperson\ncGC732t-itM_0\tperson\ncGEvxRn1UtQ_0\tperson\ncGNmKg25XMs_0\tboat\ncGUXUioIa4o_0\tperson\ncGVaIIV18ug_0\tperson\ncGcyxMp1ZQc_0\tperson\ncGcyxMp1ZQc_1\tperson\ncGdeftwBWL4_0\tperson\ncGiVzhQI2a0_0\tperson\ncGpNQ9Vk-5E_0\tperson\ncGtaJVgvTJg_0\tperson\ncG1_sZqy7lU_0\tperson\ncG2fL1nRZmE_0\tperson\ncG5TxH-1Sf4_0\tperson\ncG65cBtyj20_0\tcow\ncG7BBtumZnQ_0\tdog\ncHCYX0EqsfE_0\tperson\ncHQLun1YTiM_1\tperson\ncHQLun1YTiM_0\tperson\ncHSjCxvPumA_0\tmotorcycle\ncHWE72lnzZo_0\tperson\ncHYcXW7HAkA_0\tperson\ncHaBQgTFdr4_2\tknife\ncHjKy80ojXM_2\tbear\ncHkm25QAG8A_0\ttruck\ncHnV0yZTha4_0\tcar\ncHpaD5PtHnM_0\tcow\ncHv3ulnF1fo_0\tperson\ncHyjhzLIeO0_0\tperson\ncH2A35uULdc_0\tperson\ncH2g9vV4SyM_0\tbird\ncH27awicc50_0\tperson\ncH8zYhvzdb8_0\tperson\ncICrfFzHoZs_0\tperson\ncIFXOWG5Dd0_1\tperson\ncIF9coXttVs_0\tperson\ncIIlWssV9Sk_0\tperson\ncIJSKwcTQ10_2\tbicycle\ncIJSKwcTQ10_3\tbicycle\ncIPlCULXXHQ_3\telephant\ncIPlCULXXHQ_2\telephant\ncISwax-t_78_0\tperson\ncIVGJQrNkT8_0\tperson\ncIV9T5ZQmdI_0\tperson\ncIh9baL5Hzw_1\tperson\ncIjMwiaApEc_0\tperson\ncIvqOdvwX6w_0\tperson\ncIwDGqmKrfY_0\tperson\ncJH4RK9aVR0_0\telephant\ncJJDfdbopiQ_0\tperson\ncJSjHpF7ILg_0\tairplane\ncJUj9q6wgis_0\tperson\ncJfW0Gfkzrg_0\tknife\ncJjaVdNaUko_0\tbus\ncJnihDxg0wg_1\tdog\ncJtGcHMJlMA_0\tperson\ncJ0hAba-pck_2\tgiraffe\ncJ0_u3Ta6kU_2\tskateboard\ncJ0_u3Ta6kU_0\tskateboard\ncJ2f7qDBm7M_0\thorse\ncJ41GQMsJIA_0\tdog\ncJ6BfbrgwDM_0\tperson\ncJ7Akre7-Sc_1\tcow\ncJ7ZHI-8gU0_0\tperson\ncKO8G1ZXQgo_0\tperson\ncKdank8BDik_0\tperson\ncKgqIdOoBmE_0\tperson\ncK4yj3jgWek_0\tperson\ncK5MabT7iIA_2\ttrain\ncK5MabT7iIA_0\ttrain\ncK5MabT7iIA_1\ttrain\ncK9R8KdVuIE_0\tperson\ncLKgng5yuC4_0\tperson\ncLKgng5yuC4_2\tperson\ncLKgng5yuC4_1\tperson\ncLPSEK3_jEE_2\thorse\ncLPSEK3_jEE_3\thorse\ncLPSTXefj2Y_0\tperson\ncLY_N1jEC8E_0\tperson\ncLg1pn5Oh1k_0\tperson\ncLlL2uHDyBw_0\tbird\ncLnQAhX42Eo_1\thorse\ncLnQAhX42Eo_0\thorse\ncLn0Kz_p2U0_0\ttrain\ncLrXQvFZ-y0_0\tknife\ncLvgs19Vm18_1\tperson\ncL2jFa-Zd_M_0\tperson\ncL4k6bdNmbs_0\tboat\ncL6G_y5LoDo_0\tmotorcycle\ncMGnmOyYWcM_1\tperson\ncMIyGPpW9Xw_0\tperson\ncMJhk7y1Nng_2\tbird\ncMJhk7y1Nng_0\tbird\ncMJhk7y1Nng_1\tbird\ncMOULCqujvs_0\tcat\ncMRhR707ZfA_11\tbear\ncMRhR707ZfA_13\tbear\ncMeXNjQUwe0_0\thorse\ncMg1O__kPFA_0\thorse\ncMwsAfZMG1c_0\tperson\ncMwt7xBZ9i4_1\tperson\ncM6-id-uhMg_0\tperson\ncM6-id-uhMg_1\tperson\ncNLuZxPpWho_9\telephant\ncNLuZxPpWho_14\telephant\ncNLuZxPpWho_1\telephant\ncNLuZxPpWho_4\telephant\ncNLuZxPpWho_8\telephant\ncNLuZxPpWho_11\telephant\ncNLuZxPpWho_13\telephant\ncNalYSGXOkM_0\tperson\ncNnMvF7oiUo_0\thorse\ncNr9rjOJ0ps_0\tperson\ncNxEreBWMRc_0\tperson\ncNxEreBWMRc_1\tperson\ncOD8xhwGfME_0\tperson\ncOD8xhwGfME_1\tperson\ncOYK17trE9k_0\tperson\ncOYK17trE9k_1\tperson\ncOZOzY6XDLU_0\tperson\ncOalncX8fwg_0\tairplane\ncOalncX8fwg_1\tairplane\ncOalncX8fwg_2\tairplane\ncOalncX8fwg_3\tairplane\ncOalncX8fwg_4\tairplane\ncOkVxYbnFRs_0\tperson\ncOkiG4LRtQU_1\ttruck\ncOp33oi4C8E_0\tskateboard\ncOzNmIBhiMY_0\tperson\ncO1F_0l1vSU_0\tperson\ncO1MbnbgUbU_0\tdog\ncO3WA2g_UeM_4\tbear\ncO3WA2g_UeM_2\tbear\ncO5xsG3ud_0_0\ttrain\ncO7nCAZ-uLk_0\tperson\ncPBvSHKPNvk_0\tperson\ncPdRddyxsVA_0\tc
ow\ncPdjr1zTQQ4_0\tperson\ncPeGSXSLepg_0\tperson\ncPkbg5bdpcE_1\tperson\ncPkbg5bdpcE_0\tperson\ncPn5c5t2g6w_3\tskateboard\ncPqAK1E1Ajo_1\tdog\ncPqAK1E1Ajo_0\tdog\ncPsXS3_4zOk_0\tbus\ncPu-riLrt1c_0\tperson\ncPu-riLrt1c_1\tperson\ncP-gl2IN_AI_1\tperson\ncP-gl2IN_AI_0\tperson\ncP_nenKIU4g_2\tbear\nDf70QgKA_Hc_0\tperson\nDf70QgKA_Hc_1\tperson\nDgSwJVCLkYM_0\tperson\nDgcSsQKaX7Q_0\tperson\nDgoFmJFWpUw_0\tbear\nDgtiaphLkMc_0\tperson\nDguiMPx8nn0_2\tperson\nDgvI1azs_0E_0\tairplane\nDgwM5b-eKvc_0\tperson\nDg2sU0bmBho_0\tperson\nDg8r8QlJw80_0\tperson\nDhAkswxLuAs_0\tperson\nDhJZwbql4dc_1\tperson\nDhLD44-KIUU_0\tperson\nDhYbvvwSsEA_1\tperson\nDhYbvvwSsEA_0\tperson\nDhd-0-xOF6I_0\tcow\nDhl-jIQaam0_3\tperson\nDhl-jIQaam0_0\tperson\nDhl-jIQaam0_1\tperson\nDh6APdqkNZ0_0\tperson\nDh_6tF8ndZs_0\tperson\nDiAj24Xsadk_0\tperson\nDiDELcBJWh4_0\tperson\nDiPjO5frbNc_0\tperson\nDiQ-VgXIDMo_0\tperson\nDiVX_-kQv0k_0\tperson\nDiVX_-kQv0k_1\tperson\nDiWi-oWT9EI_0\tboat\nDiXsD6VHEr4_0\tperson\nDiZ4OCT30AM_0\tperson\nDia6QIxORbM_4\tairplane\nDihnxPkojnQ_0\tgiraffe\nDihnxPkojnQ_1\tgiraffe\nDi41WoS7T1M_1\tbear\nDjAQs68BiwA_1\tgiraffe\nDjB4dpC4TVs_0\thorse\nDjD15NlLBYI_1\ttruck\nDjD15NlLBYI_0\ttruck\nDjK1R_LBqgM_0\tperson\nDjMnoAbMiIU_0\tperson\nDjMnoAbMiIU_1\tperson\nDjQF34GUthk_0\tperson\nDjS-0VOep0Y_2\tperson\nDjXtIIwfITI_0\tperson\nDjb2blFeoNM_0\tperson\nDjdAxUWgSdk_0\tknife\nDju4Bl2fx88_0\tbicycle\nDjyldIzPJbA_0\thorse\nDjy5UE0Ofa8_0\tperson\nDjy5UE0Ofa8_1\tperson\nDj7DVsCVqqY_0\tcow\nDj9npayKJqk_0\telephant\nDkAG7dFDk94_0\tperson\nDkC_iJTIrYc_1\tperson\nDkC_iJTIrYc_0\tperson\nDkF-LqA7wSk_0\tbus\nDkNY4yun6ek_0\tboat\nDkPYbKRQBE4_1\tmotorcycle\nDkTfU9q9U_I_0\tcat\nDkTqTY04y30_0\tperson\nDkTqTY04y30_1\tperson\nDkbRBY4ZlFY_0\tbicycle\nDkbRBY4ZlFY_5\tbicycle\nDkbRBY4ZlFY_6\tbicycle\nDkbikYoLycQ_0\tbus\nDkmab-wxSy4_0\tperson\nDkmab-wxSy4_1\tperson\nDknRMqifZFE_0\tskateboard\nDkpZP7RtrJM_1\tbus\nDkqy-okNDVM_0\tperson\nDkrkY6blx3U_1\tperson\nDkrkY6blx3U_0\tperson\nDk0wXCp-USs_0\tboat\nDk1QPiNji5I_0\tskateboard\nDk4V0c6Yzbs_1\tboat\nDk47lOWl3NM_2\tcat\nDlCMYyDhSVY_1\tperson\nDlCMYyDhSVY_0\tperson\nDlDFQ88ui2A_0\tperson\nDlDJpNWKuPM_0\tknife\nDlFJTfO-mc0_0\tcat\nDlG-VsdsPCk_0\tmotorcycle\nDlTE01-45gQ_0\tairplane\nDlX2Yvp20gY_0\tperson\nDldXGda7zfE_0\tperson\nDldXGda7zfE_1\tperson\nDlg5BFm20wI_0\tperson\nDlg5BFm20wI_1\tperson\nDl3fDWG23zU_0\tperson\nDmG9v9xVPbg_0\tperson\nDmIeMGzqZEc_0\tcow\nDmJ9x-DFdqA_0\tperson\nDmJ9x-DFdqA_1\tperson\nDmLGGv6YNEo_1\tbus\nDmL_6_a_54g_0\tbird\nDmNmgatXwU8_1\tknife\nDmSRZp63qTo_1\ttruck\nDme3Rfsqbz8_0\tperson\nDmiucPhqXMg_1\tbus\nDmiucPhqXMg_4\tbus\nDmlMgF-BuRo_0\tperson\nDmt8pgQG3M4_1\tskateboard\nDnLVGRyXAR4_0\tperson\nDnN9tjwPn-0_0\tperson\nDnR4VFNo44s_1\tairplane\nDndaJVRuOoo_0\tperson\nDniy3zze90s_0\tperson\nDniy3zze90s_1\tperson\nDnj_fhGXHC8_1\tbird\nDnkUzsPqjE8_1\tperson\nDnkUzsPqjE8_2\tperson\nDntJ297deXI_1\tperson\nDntJ297deXI_2\tperson\nDntJ297deXI_0\tperson\nDnx6TlTvRfI_0\tperson\nDn80jV69sbs_0\tperson\nDoEWhY2BkZo_0\tperson\nDoOq_FhWze0_0\tperson\nDoPKGr2HJwM_3\tbird\nDoRoLk97UqY_0\ttruck\nDobAdZVysXc_0\tcow\nDohloSZ6YdA_0\tperson\nDomgj6ptFOs_0\tbus\nDpH2eSmcTk4_0\tbus\nDpJA_qYLobk_11\tbicycle\nDpJA_qYLobk_0\tbicycle\nDpJA_qYLobk_2\tbicycle\nDpJA_qYLobk_5\tbicycle\nDpJA_qYLobk_6\tbicycle\nDpJWhFnF2Fo_0\tdog\nDpR63uhHTjo_1\thorse\nDpWw1SaCdTQ_0\tperson\nDpbGsvglx7Q_0\telephant\nDpbGsvglx7Q_1\telephant\nDpimIW1T2Sw_0\tperson\nDpp32dLn0hQ_0\tperson\nDpvuhymOiUM_0\tperson\nDpwjQ_KcYAc_0\tperson\nDpxoJ_GWJA4_0\tgiraffe\nDpxoJ_GWJA4_3\tgiraffe\nDpxoJ_GWJA4_4\tgiraffe\nDpxoJ_GWJA4_1\tgira
ffe\nDpz-s6E9VWg_0\tperson\nDp2pGcutqDQ_0\tperson\nDp2pGcutqDQ_1\tperson\nDp4XaG6247k_0\tperson\nDp5KRKUJBGE_0\tcow\nDp6qJvgV4fQ_0\tperson\nDp71z8eyq7o_0\tbus\nDqBNoutsr4M_0\tperson\nDqBNoutsr4M_1\tperson\nDqDElT9H4Tg_0\tboat\nDqESUtRuhPw_0\tdog\nDqVUeH6XI2Q_0\tperson\nDqegnRXQd5Q_0\tairplane\nDqi5KTmt04s_0\tbus\nDqy6NbRkVPE_2\tskateboard\nDrAnw0S9Pmc_0\tperson\nDrCKp4YB7rI_0\tperson\nDrE7aW7O0eQ_0\tperson\nDrFxlXYC6-o_0\tperson\nDrGCtlmxxVc_0\tperson\nDrPpkd-UxFY_0\tcat\nDrc0Grdb_LU_0\tcat\nDrgjySu3e-c_0\tmotorcycle\nDr9XXUA4UKc_0\tperson\nDr9XXUA4UKc_1\tperson\nDr--We7lD3I_0\tperson\nDsA5QOOIZJw_0\tperson\nDsP87b0IuoU_0\tperson\nDsZ6Cf42EdQ_0\tperson\nDsiAcCUi8iE_2\tbear\nDsm48Msjw6k_0\tbird\nDsxyH6AKBd0_0\ttruck\nDs0GIUe1AFo_2\tperson\nDs0GIUe1AFo_0\tperson\nDs0GIUe1AFo_1\tperson\nDs3E7n1kRQk_0\ttrain\nDs44yYfSEr8_0\tbird\nDs8xwquSVkw_0\tskateboard\nDtKSEQhjq2I_1\tcat\nDtQGDwZ1PIU_0\ttruck\nDtQGDwZ1PIU_2\ttruck\nDtSpyLMbD9o_1\tmotorcycle\nDtU93_s53sI_0\ttrain\nDtc3hZBmn9Q_0\tperson\nDteEg93cINc_0\tperson\nDtf2WRyd4OA_0\tairplane\nDtgUpKmdw_g_0\tperson\nDtuRiD_E6HU_0\tperson\nDtyatJX8J1A_0\tbicycle\nDt1MDqN3TCs_1\telephant\nDt1PLFoRvoM_7\tairplane\nDt1PLFoRvoM_0\tairplane\ncQAr7IVeBrU_0\tperson\ncQC7jBc1pC0_0\tperson\ncQIviFGN-_M_0\ttrain\ncQOFvBNN9to_0\tairplane\ncQOFvBNN9to_1\tairplane\ncQPP6SqX-uk_0\ttruck\ncQbqByuUnW8_1\tcar\ncQgUGmyvkJ8_0\ttrain\ncQttS-GIM5c_0\tperson\ncQttS-GIM5c_1\tperson\ncQw1wXvFnLM_0\tperson\ncQ29m5z8Cnk_1\tcow\ncQ4aR8OLr74_0\tmotorcycle\ncRGrqg7y9tE_0\tboat\ncRVqyVvxjHI_0\ttrain\ncRczdkzrJ-w_0\tcat\ncRnDFinbH-s_0\tbird\ncRrjU515FKg_0\tperson\ncRvAv1Nn-WQ_0\tcat\ncR6qM7wjtDw_0\tknife\ncSDafQMsYwc_0\tcat\ncSJ2ISog6Pw_0\tbird\ncSJ2ISog6Pw_1\tbird\ncSLerMX3IBg_0\tperson\ncSNwXF8OcR8_0\tcow\ncSO-70KCypM_0\tskateboard\ncSVIvCYuDtU_0\tcow\ncSdBaGsGWKk_4\tbird\ncSdBaGsGWKk_9\tbird\ncSdBaGsGWKk_1\tbird\ncSdBaGsGWKk_3\tbird\ncSdBaGsGWKk_6\tbird\ncSdBaGsGWKk_7\tbird\ncSdUwiTGXPc_2\tmotorcycle\ncSor-u6VHHw_1\tdog\ncSqMDH0-sDs_2\tperson\ncS398dAyQ9k_0\tcow\ncS-QgqiUgLQ_0\tperson\ncS-QgqiUgLQ_1\tperson\ncTGOQnmi7bo_0\tperson\ncTLa1dxk76g_0\tperson\ncTUTNgp9rZ4_0\tperson\ncTUTNgp9rZ4_1\tperson\ncTayBCWq6xo_0\tperson\ncTiETDBrGv4_0\tskateboard\ncTiETDBrGv4_1\tskateboard\ncTk8pacLUcc_0\tbus\ncTmv-vp89sY_0\telephant\ncTmv-vp89sY_1\telephant\ncTsipIh7xF8_0\tcow\ncTvxGA-EvvY_1\tperson\ncTzz_ZCUpxc_0\tperson\ncT4Y0HSeBgg_0\telephant\ncT5UlPnc5MQ_0\tperson\ncT5UlPnc5MQ_1\tperson\ncT7LjXG7ByI_0\tairplane\ncT7LjXG7ByI_1\tairplane\ncT7LjXG7ByI_2\tairplane\ncT7kZP5B_2s_0\tbus\ncT_US5II64I_0\tperson\ncUEWtKzcAsM_2\tairplane\ncUEWtKzcAsM_1\tairplane\ncUM5ajI3KJg_3\thorse\ncUNExkBml18_0\tperson\ncUSRVmcbXxI_0\tperson\ncUS9QgCXcPo_0\tperson\ncUWmN_HuZiA_0\tperson\ncUYlfMGqB_8_0\tdog\ncU7JEUo5qdM_1\tperson\ncU7sT9UHs7s_0\tperson\ncVCqOzgt2vI_2\ttrain\ncVCqOzgt2vI_0\ttrain\ncVM2h5qbyUw_0\telephant\ncVXIaONp5o8_2\tperson\ncVYqiMXSh9g_1\tperson\ncVbcrOx7768_0\tperson\ncVfH0tFh5Kc_0\tperson\ncVfWBtl-qK4_0\ttruck\ncVq5VnfZtNw_0\tperson\ncVr16pInr5k_0\tperson\ncVsZMfMaxSM_0\tperson\ncVtyGQKWFcI_0\tmotorcycle\ncV0a2ScBxpE_0\tperson\ncV0a2ScBxpE_1\tperson\ncV1mBGRlLe8_0\tbird\ncV1szYodba0_0\tmotorcycle\ncV8BGLBROa8_0\tperson\ncWBCCAo3pUM_0\tbird\ncWBTkrImlLQ_0\ttrain\ncWBTkrImlLQ_1\ttrain\ncWGCbw5I6cI_0\tskateboard\ncWIDcoPB3Rg_0\tperson\ncWKf_KANUSM_0\tperson\ncWRO27zzxF4_0\tperson\ncWaVXNQ5cvg_0\tperson\ncWb-i8hj8uc_0\tperson\ncWcJrAQuNA4_0\tbird\ncWtIT6V98zc_1\tperson\ncWxELKsh43s_0\tperson\ncW2hQE3lS9k_1\tperson\ncW4fmuV2JuU_0\tskateboard\ncW7OrsSn-m8_0\tperson\ncXP1Lit5Pmk_0\tperson\ncXS9VytLIj
M_0\tcat\ncXT5_AFSI8Q_0\tperson\ncXUdqfIp-Hs_1\tperson\ncXUdqfIp-Hs_2\tperson\ncXWgDE6boPQ_0\tperson\ncXZt2UZe6QQ_0\tmotorcycle\ncXaAcHkHUzU_0\tperson\ncXsRP67GHA0_0\tperson\ncXsRP67GHA0_1\tperson\ncX0yQ5KIAKw_0\tperson\ncX3mnglolLE_2\telephant\ncX3mnglolLE_3\telephant\ncX6lyv1DI80_1\tairplane\ncX-s4BNxb0c_0\tperson\ncYHq8xoYMO4_1\tbus\ncYVLbgGxJMM_1\tperson\ncYnyDXx580I_0\tperson\ncYpas0B5zEo_0\tcow\ncYvyTVEqiEU_0\tgiraffe\ncYwkpA75A8Y_0\tperson\ncY1cmlwRnaE_2\tbicycle\ncY1cmlwRnaE_1\tbicycle\ncY6HDOEiINs_0\tskateboard\ncY_INarfLQ4_0\tperson\ncZA_Yoq3vy8_0\tperson\ncZB5MQY5kVA_0\tskateboard\ncZDoXwn5lv8_1\tperson\ncZPvtKaqRxc_0\tperson\ncZU2LAWtwUM_0\tknife\ncZZT6OJ6xGk_0\thorse\ncZZT6OJ6xGk_1\thorse\ncZe888DWA8M_0\tperson\ncZgt8s4mARc_1\tperson\ncZugy4cYVng_0\tcat\ncZz6eOuSV9Y_0\tperson\ncZ155yARalk_0\tperson\ncZ155yARalk_1\tperson\ncZ7siEIFHlI_0\tcow\ncaAnHYU-Gwk_0\thorse\ncaGQ2b4L930_0\tperson\ncaGzwv3HLKU_0\tskateboard\ncaLKu0yKW0Y_0\tdog\ncacCjMLNpIg_2\tbird\ncarYHHE3y3A_3\tknife\ncavT34ZvciI_0\telephant\nca4_gKs6MN0_0\tbear\nca8aNafTzeY_0\tperson\nca_weHSJH80_1\ttrain\ncbRztq6KZn0_0\thorse\ncbVll1hxlDA_1\tperson\ncbVll1hxlDA_0\tperson\ncbvbRxOMJ-A_0\ttruck\ncb6YFX4CVqc_2\tairplane\nccIWh5JBil8_2\tbear\nccIWh5JBil8_0\tbear\nccQ7JnYrTL8_0\tbird\nccQ7JnYrTL8_1\tbird\nccRdzj5Zi-U_0\tperson\nccR-h9z3bRI_1\tknife\nccR-h9z3bRI_2\tknife\nccVJXErLdOo_0\tdog\nccWTUq_mvsU_0\telephant\nccWTUq_mvsU_1\telephant\nccaCWXJ0jKY_0\tperson\nccaYdn2p4Uk_6\tknife\nccaYdn2p4Uk_10\tknife\nccfTQmE0zsA_0\tperson\nccfTQmE0zsA_1\tperson\nccwFXG9D98w_0\tperson\ncc0S9924O-s_0\tskateboard\ncc76qcSHNMM_1\tdog\ncc76qcSHNMM_0\tdog\ncdBO6xYUmzE_0\tperson\ncdBO6xYUmzE_1\tperson\ncdKEh34fsYk_0\tperson\ncdNWg2zU6bY_0\tperson\ncdOQ7lTQJBw_1\tcow\ncdOQ7lTQJBw_2\tcow\ncdSG1fcxNAA_0\tperson\ncdS-7_Egk88_0\tperson\ncdW8PgwFm6o_0\tmotorcycle\ncdZqtqh5PwE_1\tperson\ncdZqtqh5PwE_0\tperson\ncdZ1ODMJYKM_0\tbird\ncdbmvoa89QU_3\ttrain\ncdbmvoa89QU_4\ttrain\ncdbmvoa89QU_5\ttrain\ncdf-C-P2bW0_0\telephant\ncdkSgKIMQEM_0\ttruck\ncdkSgKIMQEM_1\ttruck\ncdoGDD6m8Og_3\tperson\ncdpYTik8eL4_0\tperson\ncdruQqCvfrI_0\ttruck\ncdxkCeoDX6Y_1\tperson\ncd80Ii4FB1Q_0\tbird\nceH46gqMWak_0\tperson\nceIoRNo5FBk_0\tperson\nceIoRNo5FBk_1\tperson\nceLI06w8-Yo_0\tperson\nceVkcz1wysc_2\tdog\nDt5UnNOUlZA_0\tmotorcycle\nDuMGrFowOWE_0\tairplane\nDuUmKpZym5U_4\tboat\nDuV6ahfZ_yw_5\tknife\nDupWsV-iiys_0\tknife\nDur1W4FemFs_0\tperson\nDu7sKt25RiA_1\tknife\nDu8hVxuK10c_1\tairplane\nDu8hVxuK10c_2\tairplane\nDu8hVxuK10c_3\tairplane\nDu8hVxuK10c_4\tairplane\nDu9r_1zpPkA_0\tperson\nDvEWbWxGJvQ_0\tbus\nDvEykMsNibg_2\tbicycle\nDvIS9FV5pag_0\tperson\nDvIS9FV5pag_1\tperson\nDvKLYYQzmas_0\tperson\nDvNTMqUwwWo_0\tperson\nDvR9Ctfk8lg_0\tperson\nDvWCGbG9LT4_0\tcar\nDvWDBQ9eMNQ_0\telephant\nDvWDBQ9eMNQ_2\telephant\nDvuQOS7UVI0_2\telephant\nDv1e0Y8A8yg_0\tcow\nDv4azGPr4YI_0\ttruck\nDv7eGdF004Y_1\tperson\nDv7eGdF004Y_0\tperson\nDwJntGNV4Gw_0\tperson\nDwWzbtiIs7k_0\tskateboard\nDwhCZK1eUPw_0\tperson\nDwi-kq9Gcsw_0\tzebra\nDwi-kq9Gcsw_1\tzebra\nDwlOBOv0IC8_1\tbicycle\nDwlOBOv0IC8_0\tbicycle\nDwvclcpHQNY_0\thorse\nDwzuhLu_Jew_0\tbicycle\nDw2QHLXWmos_0\ttruck\nDw7BXQFtH60_0\tperson\nDw8lXatl4wE_2\tperson\nDw8lXatl4wE_0\tperson\nDw8lXatl4wE_1\tperson\nDxAMNpw-4qg_0\tperson\nDxB962sZJ_c_0\tairplane\nDxB962sZJ_c_1\tairplane\nDxB962sZJ_c_2\tairplane\nDxFjGsjegtk_0\tperson\nDxHhkA1fVdA_0\tperson\nDxPOOsSCJpc_0\tcat\nDxU9ZTI7KzY_0\tbird\nDxXEapsjhOg_0\tcow\nDxYW3ZMCXUw_0\tperson\nDxegJbsalCo_0\tperson\nDxegJbsalCo_1\tperson\nDxl8-fknJjM_0\tbird\nDxl8-fknJjM_1\tbird\nDxmdjAoDhkE_4\tknife\nD
xpMePWSgjs_0\tperson\nDxsdKCCUvCY_0\tperson\nDxw3Y-UB0jk_0\tairplane\nDx0fgXYBRV0_0\tknife\nDx4a9ZiekrQ_0\telephant\nDx4a9ZiekrQ_1\telephant\nDx5VMmCltKo_0\tperson\nDx8eIjF--eo_0\tperson\nDx8eIjF--eo_2\tperson\nDx8eIjF--eo_1\tperson\nDyFNZgEaw24_1\tbird\nDyZHVNsbZeE_0\tperson\nDyceiTbkpMw_0\tbicycle\nDyd1Aj3RO3I_0\tcat\nDyfyfDI4jqk_0\tperson\nDytAOZD9DLU_1\tperson\nDy1-ch56AMc_0\tboat\nDy5kD11Wnbk_0\tperson\nDy5kD11Wnbk_1\tperson\nDzAi_cumPY4_0\tperson\nDzCPCgkI8XA_0\tmotorcycle\nDzCPCgkI8XA_1\tmotorcycle\nDzFhvnd07Ck_0\ttrain\nDzKdERTAA8U_0\tcat\nDzMXxF7XRaI_0\tperson\nDzW2oC31Gcs_1\tperson\nDzXDPH8p-6Y_0\tmotorcycle\nDziXgWdCrvY_3\thorse\nDzkCtRPiI-Q_0\tcat\nDzlPtZXxtpU_6\telephant\nDzlPtZXxtpU_4\telephant\nDzlfBATujA8_1\thorse\nDzp0BrJSMBU_0\tperson\nDz0d79BMerc_0\tmotorcycle\nDz34hVhjpzA_0\tperson\nDz7kWPDxgbg_1\tbicycle\nDz73CrM7pH8_0\tperson\nDz8_y0iOjLM_0\tskateboard\nD0DtV2eD7cs_0\tknife\nD0HGjOZ5XWU_1\telephant\nD0O-T4E2DVo_0\tcat\nD0R59ANL6o4_0\tperson\nD0TQLmGtPm4_0\tairplane\nD0TTR7qCVXQ_0\tperson\nD0WAC7ByU0M_0\tperson\nD0Yx5cLcrqk_0\tskateboard\nD0mf15dFGhk_0\tperson\nD0pcdPd6hwY_0\tdog\nD0qo2f2Cshk_0\tperson\nD0xc1K3BQnQ_1\tbicycle\nD0zhUpZhZi4_1\tairplane\nD04tMZ7n3YM_0\tskateboard\nD09x5ezi5hU_0\telephant\nD0-sW80X3kI_1\telephant\nD1Ct81qiyT4_0\ttruck\nD1Ct81qiyT4_1\ttruck\nD1DYQay-d_E_0\tcat\nD1IQfkEa2-8_0\ttruck\nD1KUzeiWmUE_1\tcow\nD1XPuPzMvv4_1\tbus\nD1cTj9Fy4yE_0\tdog\nD1dWoFMnKhc_0\tperson\nD1f92BE9HmI_0\tperson\nD1ktXwG0_jM_0\tperson\nD1plKiNFzvI_0\tcat\nD1tZzoBOWfA_0\tperson\nD1yVIEgFGrY_1\tairplane\nD10WSuM8eqU_0\tperson\nD19A7AUqZJ0_0\tperson\nD2CXHzxp1TU_0\tcow\nD2Iqqb3RP6c_0\tperson\nD2Iqqb3RP6c_4\tperson\nD2Iqqb3RP6c_2\tperson\nD2Iqqb3RP6c_3\tperson\nD2KcVzav3YU_2\tairplane\nD2KoBI6R7W8_0\ttrain\nD2Qw63hsi1E_3\tbear\nD2RT-qUSw_U_0\tdog\nD2RZP8Y6VT8_0\tdog\nD2Ri5Wy9XPQ_0\tperson\nD2RkdlTKlsE_0\tperson\nD2VABHjSM6E_0\tbus\nD2VABHjSM6E_2\tbus\nD2co1ZGkwCs_0\tskateboard\nD2rbERtPxNM_0\tperson\nD2t36StaDcc_0\telephant\nD2t36StaDcc_1\telephant\nD2wSgbAelUc_0\tcat\nD2yQaYJDNvs_2\tbicycle\nD2yQaYJDNvs_0\tbicycle\nD24GJS9nKC0_0\tperson\nD3EIh6pBTdQ_0\ttrain\nD3F3xWCoWD8_0\tperson\nD3IDGSQSrFY_3\tgiraffe\nD3IDGSQSrFY_4\telephant\nD3IDGSQSrFY_5\telephant\nD3IDGSQSrFY_7\telephant\nD3IDGSQSrFY_8\telephant\nD3OvvA5jYlM_2\tbird\nD3OxudXglSM_1\tcow\nD3XqhAXefSA_0\tperson\nD3Zg90Ib5GI_0\tcat\nD3b-w5J-wR0_0\tperson\nD3tuGaFbdbE_0\tperson\nD36Pwfuad5E_0\thorse\nD4CWBceBJEk_0\tperson\nD4OMvYw25w0_0\tbus\nD4aL-0UevEY_0\tperson\nD4do8kCWydY_0\tperson\nD4do8kCWydY_1\tperson\nD4goZXgzVC8_0\tperson\nD4oLradsvXE_0\tperson\nD4qq5Olmh24_0\tperson\nD410FuTGoPI_0\tbicycle\nD4_2g_M4CXM_1\tperson\nD5GNIcodIw0_0\tbird\nD5KLVLNs7-0_0\ttrain\nD5KWKhPhqWE_0\tdog\nD5OtHFsiXiI_0\tperson\nD5UGpkiG-CQ_0\tperson\nD5hYrAC2iIg_0\tperson\nD5jUPc4nQO0_0\tperson\nD5kSwHOWPBU_1\tbird\nD5kSwHOWPBU_0\tbird\nD5n4B-O8y8g_0\tperson\nD5tLtHWe0Jk_0\tperson\nD5uTmoMYXDE_0\tcow\nD5x402SaAk8_0\ttruck\nD537kaRoYEk_0\tperson\nD552mK5tfLU_0\tdog\nD59Eb3u0iPs_2\tperson\nD59Eb3u0iPs_0\tperson\nD6EDJA1bO3s_0\tzebra\nD6G1X8WFAA8_0\tperson\nD6LDq6Q1Aic_0\tperson\nD6NzaXWZGEA_1\tperson\nD6UsriFwkjQ_0\tperson\nD6XIhwBoaik_0\tperson\nD6XUUDKA1CA_0\tperson\nD6d20KAVyzk_0\tperson\nD6f2wdAt_Ug_0\tperson\nD6kIRV5rEPk_0\tperson\nD6qXaD6WnVQ_0\tbicycle\nD6zUwxeZ1zU_0\tperson\nD7c2tRlXz5k_0\tskateboard\nD7dAkMkQf4I_5\telephant\nD7kHPyS4Gw0_0\tperson\nD7r_HLTwhWY_0\tperson\nD71B5jrYOig_2\telephant\nD77yNiFrtmw_0\tperson\nD78FDAi2log_0\tskateboard\nD7_S2hp6aKI_1\tairplane\nD7_S2hp6aKI_0\tairplane\nD7_tUVFGy2o_0\tperson\nD7_zjfakeYM_0\td
og\nD7_zjfakeYM_3\tdog\nD7_zjfakeYM_4\tdog\nD8GQWYiVK1U_0\tdog\nceczRgI6HDM_0\tboat\ncev1umQFsVA_2\tperson\ncev1umQFsVA_1\tperson\nce8j1r_CDH8_0\tdog\ncfD9yGF5XmY_0\tcar\ncfFAjaziwn4_0\tperson\ncfWqngaDvvg_0\tperson\ncfWqngaDvvg_1\tperson\ncfex3QJFkTY_0\tdog\ncfex3QJFkTY_1\tdog\ncfpiw6KGB70_0\tdog\ncfyY4mfwN7A_0\tairplane\ncf0a6xp7r9s_0\tbus\ncf3VOLwZdKY_0\tdog\ncf6daxmvx6M_1\tperson\ncf6kCO9JdOM_1\tperson\ncf6kCO9JdOM_0\tperson\ncgAiH_9c5DU_1\tbird\ncgD7Gr2Y-c8_0\tperson\ncgQ_34JYUkU_0\tcar\ncgT26vQK-4A_0\tperson\ncgZo7nUeCNE_0\tbus\ncgjjdvXBsFI_0\tperson\ncgj_bzL4vsQ_0\tskateboard\ncgmkRlhxVQ8_0\tperson\ncgmkRlhxVQ8_2\tperson\ncgmkRlhxVQ8_1\tperson\ncgxIrs3ySiA_0\tskateboard\ncgyRQ1a79c0_0\ttrain\ncgyRQ1a79c0_1\ttrain\ncgzHPxfb-R4_2\tperson\ncg4GIYiUNiI_0\tperson\ncg9Y2DTUiDQ_0\tcow\nchc30sNO6KA_0\tperson\nchl-Wa4_hic_0\tperson\nchrXgx4NWck_0\tperson\nchrXgx4NWck_1\tperson\nchwYzLEqKp4_0\tperson\nchyVy1kdL5M_0\tperson\nch_yUR9RHIM_0\tdog\nciEhviIYSFY_0\tbicycle\nciFKNPdVskg_0\tairplane\nciUZ2LoiaCs_0\tperson\nciZNBF9RdaA_1\tknife\nciZNBF9RdaA_0\tknife\nciZNBF9RdaA_2\tknife\nciZNBF9RdaA_4\tknife\ncifpYBLq6dM_0\tperson\ncit4hdvCIp0_0\tmotorcycle\nci83tdO3GuM_0\thorse\ncjAhjjWOj24_1\tcat\ncjL-hMHdmN8_0\tperson\ncjdImYwFXEI_0\tperson\ncjlPNeNKoSo_0\tcar\ncjmps6UKu_Y_0\tperson\ncjtjQu1YoTc_0\tperson\ncjuRQJf1_qs_0\thorse\ncjvMLM_Uzbw_0\tperson\ncjye6t7P2XY_0\tperson\nckIaNsLDst8_0\tperson\nckJHbJCefVc_0\tbear\nckY7Izfnggc_0\tperson\nckfgZsmJEbs_1\telephant\nckyL1lkCzU8_0\tperson\nckzaUAcrtY4_0\tperson\nck6hJJVJfvQ_1\tperson\nck6hJJVJfvQ_2\tperson\nck6hJJVJfvQ_0\tperson\nclCQhmV8nf8_0\tperson\nclL4lyl6J7I_0\tperson\nclO2SRgOzAk_0\tperson\nclQ98CON1pE_0\tperson\nclUGOwaYaPg_0\tcat\nclaqhrkmhPg_0\tperson\nclmsmTFOSLo_1\tdog\ncl410aCQA8k_0\ttrain\ncl6C5KiOEHQ_0\ttrain\ncmAN1SqRkDM_0\tperson\ncmGz-63gi5Q_0\ttrain\ncmHjbUBM4q8_0\telephant\ncmKnHqPGlTw_0\tperson\ncmV1BLuEvpU_0\tcow\ncmeGuaSUg34_1\tcar\ncmqxX05lPiI_0\tperson\ncmtruoCpSec_0\tperson\ncmwRk4-z_BQ_0\tperson\ncmwzhxa6Kd8_0\tboat\ncm7Xd_WXZAs_0\tperson\ncnAC9g_fYUY_0\ttrain\ncnAC9g_fYUY_6\ttrain\ncnAC9g_fYUY_1\ttrain\ncnAC9g_fYUY_3\ttrain\ncnAC9g_fYUY_7\ttrain\ncnAC9g_fYUY_8\ttrain\ncnAC9g_fYUY_9\ttrain\ncnJKH5dTKyI_0\tskateboard\ncne8MAKWcjo_2\tperson\ncne8MAKWcjo_1\tperson\ncnoIwn3cQ7Q_0\tbird\ncnplEeb8Iuk_0\tmotorcycle\ncnp30cLXzq8_0\tskateboard\ncnrSdMSCW6w_0\ttruck\ncnrSdMSCW6w_1\ttruck\ncnrSdMSCW6w_3\ttruck\ncnryAbqs0sM_0\thorse\ncnryAbqs0sM_2\thorse\ncnt7MyeNlHA_0\tperson\ncnvzLGyGalU_0\tcow\ncoBLne1vSV0_0\tperson\ncoDrWV3qbQE_1\tcar\ncoIhjdND3yY_0\tperson\ncoVT-MPjIsc_1\tcat\ncobC6BjJahk_0\tperson\ncodE_-LtIRY_0\tboat\ncofwfK4F5ac_0\tperson\ncohdkT2S_oA_0\tskateboard\ncoh6clK_Q6A_0\tperson\ncomEv_WJ4Uc_0\tperson\ncousEghehEo_1\tperson\ncousEghehEo_0\tperson\nco17Vvf3bag_0\tknife\nco17rRdOvwc_1\tmotorcycle\nco5rBTsE2i0_0\tknife\nco7SR4bgOM4_0\tknife\nco9DJtEU4eg_0\tperson\ncpEYJnyJ9XM_0\ttrain\ncpLmgivniko_3\tknife\ncpLmgivniko_2\tknife\ncpO5pHTOelo_0\tcow\ncpQ9HawKR-Q_0\tairplane\ncpQ9HawKR-Q_1\tairplane\ncpUTjBksgdA_0\tperson\ncpmMEngbDHE_3\tperson\ncpmMEngbDHE_0\tperson\ncpmMEngbDHE_1\tperson\ncpnZFfnjGYs_0\tcar\ncpre_wIt0hs_0\ttrain\ncpre_wIt0hs_1\ttrain\ncptcOzotQ0E_0\tperson\ncpuYK9y7zu8_1\tboat\ncpxkLEREnwo_0\tcow\ncp4ttild7EA_0\ttrain\ncqEdqz5F7tg_0\tcat\ncqOLpxxqIBw_1\tperson\ncqOLpxxqIBw_2\tperson\ncqOclzkqkVg_0\tperson\ncqO2VRSBGGg_0\tbus\ncqRNPM3jgNs_0\tcow\ncqS_ZvZF4Kk_0\tperson\ncqS_ZvZF4Kk_1\tperson\ncqez5FuSf44_0\tperson\ncqf4Vh7Vy9M_0\tperson\ncqkZZqtr3z8_0\tperson\ncqkZZqtr3z8_1\tperson\ncq3TwUTSBFA_0\thorse\ncq84vJ
oKj0A_0\tperson\ncrXlnYSuCuw_0\tperson\ncrgSyPjbLBw_0\tperson\ncrh-ncEjMd8_0\tumbrella\ncriMO4N0K5E_0\tperson\ncrmw_2KCRlY_1\thorse\ncrmw_2KCRlY_0\thorse\ncruWABLWvD0_0\tperson\ncrzo7x07GTs_1\telephant\ncr02TlSWnkI_6\telephant\ncr5ddm3njdQ_1\tbird\ncsGJS_sNJx4_0\tperson\ncsKSGFZyk04_0\thorse\ncsTChnltOdg_0\tcow\ncsiWQna-zcg_0\tskateboard\ncsl1NFlhS0I_0\tperson\ncswk8vZ6th8_0\tperson\ncs16RhEpmu4_1\tperson\ncs16RhEpmu4_2\tperson\ncs3PfcpDro8_0\tcow\ncs_yLDexfXk_0\tperson\nctAtCH6V1Dw_1\tperson\nctAtCH6V1Dw_0\tperson\nctCQsTBheHg_1\tperson\nctJATSvGLTo_0\telephant\nctJATSvGLTo_4\telephant\nctJATSvGLTo_1\telephant\nctJATSvGLTo_2\telephant\nctK8CQu6Nvg_2\tboat\nctLUri8cnqU_0\tbear\nctNE8tj4Z18_0\ttruck\nctOTsI_RZps_1\tperson\nctOTsI_RZps_0\tperson\nctPfu5shFA0_0\tperson\nctPfu5shFA0_1\tperson\nctRpeLVhC50_0\tbicycle\nctWUEkluOFo_0\ttruck\nctWrHmTAoxw_4\tdog\nct24BXc-tWg_0\tperson\nct8_KhvMuHo_0\tmotorcycle\nct_TbfWVBQc_0\tperson\nct_TbfWVBQc_1\tperson\nct_TbfWVBQc_2\tperson\nct_TbfWVBQc_3\tperson\nct_vznHYblc_0\tairplane\ncuHFcWEuUNo_0\tskateboard\ncuQ5swAtzfk_0\tperson\ncuRuiFR7bNY_0\tperson\ncuU3htRHPgM_0\tperson\ncuWjLEIrs8k_5\tbus\nD8btdwmdRNU_0\tknife\nD8sBFUu104g_1\tknife\nD8urBZQXl6o_0\tperson\nD8wVRKGVcLw_0\tdog\nD804JptI7_4_0\tmotorcycle\nD8-J5NgmOQg_0\tperson\nD9J-SuKzTU4_0\tbicycle\nD9RlyV_QhoQ_0\tbear\nD9WsxKDzM80_1\thorse\nD9WsxKDzM80_3\thorse\nD9WsxKDzM80_5\thorse\nD9XDsr6tkug_0\tdog\nD9XDsr6tkug_1\tdog\nD9XwHuLUv_E_0\tcar\nD9ixoNe1mQ8_0\tperson\nD94_XdBnfjQ_0\thorse\nD97nupvam-4_0\tperson\nD97wkVsbfJk_0\tperson\nD97wkVsbfJk_1\tperson\nD98TSSeEEXc_0\tperson\nD9-PVz9eRtA_0\tperson\nD9-PVz9eRtA_1\tperson\nD-DNyYPMTvE_0\tcar\nD-EA0oKq0qI_0\tcat\nD-UToJ9lT9w_0\tperson\nD-YgpB48Efg_0\tperson\nD-YtknfK7cQ_0\tperson\nD-a0sdpLGlI_0\tumbrella\nD-gTVzHdFAE_0\tbus\nD-gxEOUdm98_0\tperson\nD-jl7sUktcE_1\tperson\nD-pfJT6Nyfo_0\tperson\nD-pfJT6Nyfo_1\tperson\nD-u2wEUntuI_0\tperson\nD--GMbo7meg_0\tperson\nD_FozyNGP_g_0\tperson\nD_OvU_wvmsg_0\tskateboard\nD_QDxlwnenM_0\tbird\nD_TbGwH_U4I_0\tperson\nD_XHitiDPXI_0\tperson\nD_XwOiOHuZU_1\tperson\nD_XwOiOHuZU_0\tperson\nD_g7kf5F2CE_0\tmotorcycle\nD_kMPno6xDw_1\tperson\nD_r43ev6HHs_0\tairplane\nD_uO4kxnCwM_0\ttrain\nD_vXQa4wYoY_0\tperson\nD_vxl0ffX4U_6\tbicycle\nD__WGD95lSY_0\tcat\nEABbbYMrVPo_0\tperson\nEABxiYRLhro_1\tknife\nEANBKNPscIU_1\tdog\nEANBKNPscIU_0\tdog\nEATgn3uQFCc_0\ttruck\nEAecqVilQ60_0\tairplane\nEAh-eJriiEM_0\tcat\nEAlTNLBenas_0\telephant\nEAmeB0UClfE_0\tperson\nEAoS9E3JQM0_0\tknife\nEApLpwcDY04_0\tcow\nEApLpwcDY04_1\tcow\nEAvGskBbSsI_0\tperson\nEAvUn45orps_0\tperson\nEAvhz7EUrHs_1\tperson\nEAvhz7EUrHs_0\tperson\nEA2Zq7j78Zw_2\thorse\nEA33eNV3TsM_0\tbus\nEA4Pppxm9q8_2\tairplane\nEA9IwJGPZFo_0\tperson\nEBBWzGDSfhQ_0\ttrain\nEBCEcy1RAZU_0\tbear\nEBDSyGzaeVM_0\tperson\nEBDSyGzaeVM_1\tperson\nEBGwUwk8_KI_0\tmotorcycle\nEBL5WSEhHwQ_0\tcow\nEBTH0ShVz5s_1\thorse\nEBYJEkaJizQ_0\ttruck\nEBmABlnU3Ns_0\tperson\nEBpvJEz7GAs_0\tcow\nEBqxBh52uek_1\tperson\nEBrNePUYA80_0\tcat\nEB0XdJ6nl5Q_1\tbear\nEB5sThk9G-k_0\tperson\nEB7yZ9myXmo_2\thorse\nEB7yZ9myXmo_1\thorse\nEB-GUW188Kc_0\tperson\nECDxDS-R1ZU_0\ttrain\nECEv0inW5Cs_1\tdog\nECKwTK9kBHk_0\tcat\nECLYb63wsdY_0\tperson\nECT7_2qKJJw_0\tperson\nECUpMJzxafs_0\tperson\nECXdLGCGSRU_1\tperson\nECdvMn526ho_0\tskateboard\nEChWuqD2kxc_0\tperson\nECofUr-jIIU_0\tperson\nECpmJNOAfZU_0\tperson\nECuo32_WqfU_0\tperson\nEC0Q7uMrJh0_0\tcow\nEC1pupdSC2Y_0\tperson\nEC-RADUn0SA_0\tskateboard\nEC-RADUn0SA_1\tskateboard\nEDBYWaa97hs_0\tperson\nEDUY2xl1Jkw_0\tcat\nEDYGYkJTUAw_0\tperson\nEDZ9Cu6WUAU_1\thorse\nEDcpyGbwAVs_1\tt
rain\nEDcpyGbwAVs_2\ttrain\nEDqFOrLwfpE_0\telephant\nEDqFOrLwfpE_1\telephant\nEDrX2_SzLF8_0\telephant\nEDtN3eOjUXg_1\tmotorcycle\nEDvdnYUw9b0_0\tperson\nEDxj4RwQr7k_0\ttruck\nEDxj4RwQr7k_1\ttruck\nEDxj4RwQr7k_2\ttruck\nEDxj4RwQr7k_3\ttruck\nEDxj4RwQr7k_5\ttruck\nED-QWlNA_QI_1\tperson\nED-QWlNA_QI_0\tperson\nEEFgTj2V6IY_0\tperson\nEEMkBuPFopc_0\tperson\nEENkey7gvFA_0\tcat\nEENyo-VOtiA_0\tperson\nEEQVWkmTS6A_0\tperson\nEEQVWkmTS6A_1\tperson\nEEfWTq58rX0_0\tmotorcycle\nEEfWTq58rX0_1\tmotorcycle\nEEiUwF9ID5k_1\telephant\nEEiUwF9ID5k_0\telephant\nEEiUwF9ID5k_2\telephant\nEEiUwF9ID5k_5\telephant\nEEnpnVNwpgk_0\tperson\nEEnpnVNwpgk_1\tperson\nEEn1JwzcH7Y_0\tperson\nEEtv5FqPqG0_0\tmotorcycle\nEEx5nPfhJdI_0\tbear\nEE5owiH92Io_0\tbird\nEFHnwo5U2Bc_0\tbird\nEFHnwo5U2Bc_1\tbird\nEFTcDwwNw_M_0\tperson\nEFd6XVMNdEk_0\tumbrella\nEFpWVH06Tf4_3\tmotorcycle\nEFpWVH06Tf4_1\tmotorcycle\nEFpWVH06Tf4_2\tmotorcycle\nEFryCLs5aWc_0\tperson\nEFwar_GkK6Q_0\tcow\nEF0hPkNXnoA_0\tskateboard\nEF1htFUPo80_1\tbus\nEF23dhLqzKk_1\tperson\nEF23dhLqzKk_0\tperson\nEF4KGrH7s08_1\ttrain\nEF4KGrH7s08_2\ttrain\nEF4KGrH7s08_0\ttrain\nEF8PHVKHaq8_0\tperson\nEF9VafNyS20_0\tperson\nEGCQIKdLkIU_1\ttrain\nEGHYSrxI1Ek_1\tperson\nEGIhtnFv2f4_0\tperson\nEGI5Yk7IU8s_0\tboat\nEGOtOZyUpk4_0\ttrain\nEGOtOZyUpk4_1\ttrain\nEGOtOZyUpk4_2\ttrain\nEGZ7-ChFJQI_2\tknife\nEGd19Lwe3vM_0\tperson\nEGgvoXoby8c_0\tperson\nEGgvoXoby8c_1\tperson\nEGiEfcahLzY_0\tperson\nEGsRldGZ4Bc_4\ttruck\nEGsRldGZ4Bc_5\ttruck\nEGsRldGZ4Bc_0\ttruck\nEGsRldGZ4Bc_1\ttruck\nEGsRldGZ4Bc_2\ttruck\nEGvzZJ10zwQ_0\ttrain\nEG7cF7KMqs8_0\tmotorcycle\nEG-A5-_1i-o_0\tcar\nEHD613XdEQc_0\tperson\nEHMEQV26qfk_0\tboat\nEHUgk_5vbps_0\thorse\nEHafuO8IcpI_3\tbird\nEHcP0uDfEyE_0\tumbrella\nEHft6kH6siE_0\tperson\nEHft6kH6siE_1\tskateboard\nEHtU4jYmFWw_0\telephant\nEHvP9Bwmq7M_2\tperson\nEHvP9Bwmq7M_0\tperson\nEHvP9Bwmq7M_1\tperson\nEHv9RwkIPXM_0\tskateboard\nEIIC6lIbxO4_0\tcat\nEIRbrmP8N9U_1\telephant\nEISmAs76j_g_0\ttrain\nEIUHtk1IdtA_0\tcow\nEIcGpS1nsXk_6\telephant\nEIcGpS1nsXk_4\telephant\nEIdaSifBFgk_0\tperson\nEIdaSifBFgk_1\tperson\nEIe7fhZxKpQ_0\tperson\nEIe7fhZxKpQ_1\tperson\nEInkqD_T5Os_0\ttrain\nEIwa8hvMQ9g_2\tbicycle\nEIwa8hvMQ9g_0\tbicycle\nEI8OMIBxEOo_0\tperson\nEI-G2_K6zus_0\tperson\nEJE1AAlhjcQ_0\tperson\nEJE2EqHSaLA_0\tairplane\nEJJefx2O7lo_0\tperson\nEJJ0aK1Mefo_1\tbird\nEJMke8tdD9c_0\tperson\nEJMp6Gszq8M_0\tperson\nEJM15lQ1nds_0\tbus\nEJM15lQ1nds_1\tbus\nEJNv-W_Wh3s_0\tairplane\nEJNv-W_Wh3s_1\tairplane\nEJOO-gnqZOQ_0\tperson\nEJQZBc87T7Q_0\tperson\nEJTbpxYS19w_0\tperson\nEJdJUArfCgA_0\tperson\nEJdJUArfCgA_1\tbicycle\nEJ2XL046J4A_0\tperson\nEJ3IJ7_jx0s_0\tknife\nEJ3IJ7_jx0s_1\tknife\nEKDm7Y7dQ-g_1\tbird\nEKETFVqhfZI_0\tperson\nEKOgJfGpWw8_0\thorse\nEKPKBwGLkg0_0\tperson\nEKR2BQWkMTI_1\tperson\nEKf-TzUsoG8_0\tperson\nEKsbh9eVG0w_1\tairplane\nEKv1nvgLQLc_0\tmotorcycle\nEK2VY_FFN04_0\tperson\nEK56Obpu5ME_0\telephant\nEK56Obpu5ME_4\telephant\nEK5-ZuOavbM_0\ttrain\nEK5-ZuOavbM_1\ttrain\nEK5-ZuOavbM_2\ttrain\nEK7wRGel2vk_0\tperson\ncuXky9bc80o_1\telephant\ncuXky9bc80o_3\telephant\ncuXky9bc80o_0\telephant\ncuYker921kg_0\tperson\ncuZPt_f2GfE_0\tperson\ncusvncJOcwQ_0\thorse\ncu0Z8d-ioZA_0\tairplane\ncu_YsyYcbL0_0\tcat\ncvBKWYZidIs_0\tperson\ncvFAAQuXQR8_0\tperson\ncvJuXsDfcUY_0\ttrain\ncvUktXqTBBA_0\tcar\ncvUktXqTBBA_1\tcar\ncveuhB6Z_D8_1\tbicycle\ncveuhB6Z_D8_6\tbicycle\ncvfI6ccn-J4_0\tperson\ncvgZ-1Uaigk_0\tperson\ncviAzkIEA00_0\tskateboard\ncvlOlYpovm8_0\tperson\ncvyLalOdUEY_0\tperson\ncvyTQ9oFD8s_0\telephant\ncv9PMwKXLoA_0\tperson\ncwBgT8f3504_0\tperson\ncwB99KCLazI_3\tperson\ncwEuIwe
cOZA_0\tcar\ncwHQZi15U3s_1\tbear\ncwHQZi15U3s_2\tbear\ncwKndGwjXho_0\tperson\ncwKndGwjXho_1\tperson\ncwPtR7LsWag_1\tperson\ncwPtR7LsWag_0\tperson\ncwTq-wB6R3U_0\tskateboard\ncwe2t4eoAs0_0\tperson\ncwf1OksNfQ0_1\thorse\ncwjK5oxoq5Y_1\tperson\ncwjK5oxoq5Y_2\tperson\ncwmY9UYaukc_0\tperson\ncwnltT3Eelo_2\tbicycle\ncwp0G17bk0I_0\ttruck\ncwp8Oe0F6y0_0\ttruck\ncwsLz_ppMx8_0\ttruck\ncwsx0Rs732s_0\tperson\ncwyDOlWxH00_0\tbus\ncwzHLMKmpWM_0\thorse\ncw054hU6MdM_0\tperson\ncw4vlk-0siU_1\tboat\ncw45Y0beNG4_0\tbus\ncw55i8mKHnE_0\tperson\ncw55i8mKHnE_2\tperson\ncw57dOs_v5A_0\tbear\ncxAcLoLkk2g_0\tperson\ncxJp5-r_mjQ_0\tperson\ncxLrrWl89wo_0\tperson\ncxMcoeT1INo_0\tperson\ncxQENdEkIVQ_0\tskateboard\ncxSj2n8O4Vk_0\tperson\ncxUXpTWO4iY_0\ttrain\ncxbTIQtmtLs_0\tperson\ncxiI7jApblc_0\tboat\ncxkH0GxPEqU_0\tmotorcycle\ncxm8wGi_pl4_0\tperson\ncxsitsK8l9w_0\thorse\ncxsitsK8l9w_1\thorse\ncx0cCIp1KeU_0\tperson\ncx0cCIp1KeU_1\tperson\ncx0tj_0g0-k_0\tperson\ncx2bUajKTrw_0\tperson\ncx4EC6uXkkY_3\tboat\ncyBgPXda4lw_0\tperson\ncydwQgvjXlk_1\tperson\ncyd0m3k4Iv8_0\tcat\ncynwjNSXfDs_0\telephant\ncyz45rMhH9E_0\tperson\ncy4xwLUwDN4_0\tperson\ncy4xwLUwDN4_1\thorse\ncy4xwLUwDN4_3\thorse\ncy5IjIQ0UNQ_0\tmotorcycle\ncy58Sr7mA_Q_0\tknife\ncy6woAEQ0aU_0\tknife\ncy8BRHRLKa4_0\ttrain\ncy9CeQwHsws_0\tbird\ncy9kq-lD2Q8_0\tskateboard\nczD_BiifXv4_0\tknife\nczLen_XZrRo_0\thorse\nczUjYoRVVYw_1\thorse\nczec9DaQ1sQ_0\tperson\ncze3sm-N48s_0\tperson\nczjU6Q4s1jc_1\tperson\ncznO_APZ6xQ_0\tbear\nczpxbOFiY_Q_1\tperson\nczpxbOFiY_Q_0\tperson\ncztHS4laeBQ_1\tbicycle\nczto2OaEIww_0\tperson\ncz0dXFpjC6o_6\tbear\ncz0dXFpjC6o_4\tbear\ncz0dXFpjC6o_5\tbear\ncz5kAZB6n0k_3\tbear\ncz6eGvs1xNE_1\tmotorcycle\ncz8sE1Vn4Gw_0\tperson\ncz83QPHVLnk_0\tumbrella\nc0GrJULqad0_0\tperson\nc0GstZDjoNM_0\tperson\nc0IYOMYovRo_1\tperson\nc0J3zJ8n3SI_0\thorse\nc0LibLcues0_0\tbear\nc0MEfCeuV5U_0\tbird\nc0MdSWVdmqY_0\tbus\nc0PyfX2HFqE_0\tperson\nc0TJZWOz78g_1\tdog\nc0XKBQNwSlg_0\ttruck\nc0aHKGTYgeo_0\tperson\nc0bZsiE4obs_0\thorse\nc0jq_aReY5M_0\tmotorcycle\nc0kH2qIyG7E_6\thorse\nc0kH2qIyG7E_4\thorse\nc0lBfqi79wQ_0\tcow\nc0lDR6ABjTE_2\tperson\nc0nRMc9KiiQ_0\tdog\nc0nXpd7yJsk_0\tperson\nc0o_nv0BL6Y_1\tbear\nc0pzN4lVApI_1\tperson\nc0qkbu5wLF8_0\telephant\nc0qkbu5wLF8_2\telephant\nc0wve_629pA_0\tperson\nc0yrclVs1YA_0\tcat\nc02KdAN0Hwg_0\tbird\nc04Vd9VQao8_0\tperson\nc04ixznYflE_1\tgiraffe\nc07Yqknz4KI_0\ttrain\nc07k0EtqcVs_1\tcar\nc08cFHAOc7I_0\ttrain\nc0_M6VhGXOo_0\tperson\nc0_M6VhGXOo_1\tperson\nc1JGF-ltiJ8_1\tbicycle\nc1JGF-ltiJ8_2\tbicycle\nc1PUETYl8Lk_0\tairplane\nc1QAgByBiYE_0\tperson\nc1WZ6dEz6kw_0\tairplane\nc1XMeGkSwJQ_0\tperson\nc1XfiRiOTb0_0\thorse\nc1a_E7CZsVk_0\tperson\nc1djg96PnM0_1\tperson\nc1djg96PnM0_0\tperson\nc1hBqL_LWE0_3\tbird\nc1j8TlZsEmQ_0\tboat\nc1laLoj4fM8_0\tperson\nc10eOkpL080_0\tperson\nc10eOkpL080_1\tperson\nc2B7cQwr4Pk_0\tperson\nc2EIdJJnku0_0\tmotorcycle\nc2E5_n_bZKc_0\ttrain\nc2Kh-3yj9Ak_0\tperson\nc2MTwEvPGGk_0\tperson\nc2MUYY-qPhA_1\tbus\nc2MqPrUNXQ4_0\ttrain\nc2UDI136z20_0\telephant\nc2UDI136z20_4\telephant\nc2UDI136z20_5\telephant\nc2UDI136z20_7\telephant\nc2YlmT-aFE4_0\tcat\nc2a9uwUCFK8_0\tcow\nc2dk3AjUcYs_0\tperson\nc2gJYqYcsZg_0\tperson\nc2luSxdPZ6A_0\tperson\nc2m_PmRSEmw_0\telephant\nc2qJhOvlIUU_0\tairplane\nc2xTBZttrzA_0\tperson\nc22HGSTHjBA_2\tknife\nc22HGSTHjBA_1\tknife\nc22yvcXZcM0_0\tbird\nc2_qHguvZbI_2\tbear\nc2_qHguvZbI_0\tbear\nc3E9z6F-Txk_0\ttrain\nc3J2U0kR6Hg_0\tperson\nc3TisGCbmoU_1\tperson\nc3Ur6j05SgQ_1\tbicycle\nc3YFgnDBuXw_0\tperson\nc3bCGnwqGxc_0\tcar\nc3eo0_ftrn4_0\tcow\nc3pP__Uybq8_0\tperson\nc3wt1MUbgD4_0\tperson\nc3wt1
MUbgD4_1\tperson\nc37EOoRHd7E_2\ttruck\nc4A01X82TfI_0\ttrain\nc4FmSUmvYbo_0\tperson\nc4FmSUmvYbo_1\tperson\nc4Hh2XdTBGY_0\tcow\nc4ICOFVvcTs_0\tperson\nc4e-qA4esVY_1\tperson\nc4iCXPdqm6c_0\telephant\nc4jbOCZyGsQ_0\tperson\nc4k8Yk1x3H8_1\tperson\nc4k8Yk1x3H8_0\tperson\nc4xRJS9_5Fk_0\ttrain\nc4xRJS9_5Fk_1\ttrain\nc40Mwg88VJI_0\tperson\nc43ihGsR1eA_1\tperson\nc5AKIs1XUhc_1\tbicycle\nc5AKIs1XUhc_2\tbicycle\nc5AKIs1XUhc_3\tbicycle\nc5BYdZTaBgc_0\tperson\nc5CmxgLHcxA_0\tbus\nc5Fw-Fi4daE_0\tcow\nc5GANV8PlSM_0\tperson\nc5GIQcIJ9Tc_0\ttruck\nc5GOwfkZXFk_0\tperson\nc5GOwfkZXFk_1\tperson\nc5Q2ZeMDx3o_0\ttrain\nc5TlkWtFymE_3\tdog\nc5WT0W8SfGg_0\tcow\nc5WT0W8SfGg_5\tcow\nc5WT0W8SfGg_1\tcow\nc5WT0W8SfGg_2\tcow\nc5WT0W8SfGg_3\tcow\nc5WT0W8SfGg_4\tcow\nc5cooFy7-SM_1\telephant\nc5hEygqOXOU_0\tperson\nc5oiA5xy15M_0\tperson\nc56nid2YSes_6\tbird\nc56nid2YSes_0\tbird\nc56nid2YSes_1\tbird\nc56nid2YSes_2\tbird\nc56nid2YSes_5\tbird\nc56nid2YSes_8\tbird\nc56nid2YSes_9\tbird\nc5_dNG2vWXg_0\tcar\nc6EIognIYWs_0\tbird\nc6ZQRNXfcZA_1\tperson\nc6a4xySAJ0o_0\ttruck\nc6niMRNXDeo_0\tperson\nc6qKbpvd-iw_0\tperson\nc6rbqnU4LXs_2\tmotorcycle\nc6rbqnU4LXs_0\tmotorcycle\nc6s839WnVhE_0\ttruck\nc6yBOD3Wo5A_0\tperson\nc7B-3x-3V34_0\tperson\nc7ILC5wYs8A_0\tperson\nc7KoGv5Ha7k_0\tperson\nc7PMPnuPjp8_0\tperson\nc7RFexe2Ba4_1\tbicycle\nc7RFexe2Ba4_3\tbicycle\nc7RFexe2Ba4_0\tbicycle\nc7RFexe2Ba4_2\tbicycle\nc7SMRurbkY4_0\tbus\nc7bKlPVR5pI_0\tboat\nc7hVbIhp0Wc_0\tperson\nc7jWXqWoMz0_4\tbicycle\nc7s8weR8lEY_0\tperson\nc7v4ZFCK-A4_0\tperson\nc70kaPblMLU_0\tcow\nc74hYNtpwdA_0\tdog\nc75cllxWxZE_0\tperson\nc7_op6G05l0_0\tairplane\nc8B4ZVLv364_0\tperson\nc8Cl-5olqWk_0\tmotorcycle\nc8Gaja-xUeQ_1\tperson\nc8I3JAxoLTs_0\tbicycle\nc8I3JAxoLTs_1\tbicycle\nc8I3JAxoLTs_3\tbicycle\nc8LHqWmKrJU_1\tairplane\nc8LHqWmKrJU_2\tairplane\nc8Mo16hH7qs_0\tperson\nc8UrmdREAO8_0\tperson\nc8Y7MJRWFqE_0\tcat\nc8Y8y9BsPHw_0\tcow\nc8b9qqF9Xvw_0\tperson\nc8b9qqF9Xvw_1\tperson\nc8ezNTNUXqc_0\tcat\nc8wbvQnndJc_1\tbicycle\nc8wdGQw1jB4_1\tbus\nc8wdGQw1jB4_2\tbus\nc8y3bmW0X9s_1\tcow\nc8zphqgYcJM_0\tperson\nc80SYyKXCCw_0\tperson\nc8_fHVnrzZ8_2\telephant\nc9EDbgCRGP0_0\tperson\nc9GKsfyRkmE_0\tperson\nc9IdrMV-Y_Y_0\tperson\nc9Q9LPaqyug_0\tumbrella\nc9SbfXgAoO8_1\tairplane\nc9Somjq2gLs_0\tumbrella\nc9WDXLFtYLU_0\tbus\nc9XaEHVxu4M_0\tperson\nc9Y9a6KVWRE_0\tbird\nc9Y9a6KVWRE_1\tbird\nc9ZWCwVv6Q0_0\tperson\nc9dPiEkCwR4_0\tmotorcycle\nc9gCDztKasg_0\telephant\nc9pYz2lTh3I_1\tperson\nc90ldeMSfL0_0\tcat\nc94gzpjmj24_0\tperson\nc9_87BKOW1I_0\tcow\nc-CCw_cyicE_0\tcow\nc-G0LV4kyY0_0\tcar\nc-T9ITcEW9c_0\tperson\nc-T9ITcEW9c_1\tperson\nc-ZnwBvVFGE_0\tperson\nc-gH6T1q-sk_0\tperson\nc-pKAy_3arM_0\tperson\nc-uOjPSq-10_0\tcow\nc-vwn6zqogs_0\tperson\nc-vwn6zqogs_1\tperson\nc-4uPwFKBdY_0\tperson\nc-_iMD-ihnE_0\tmotorcycle\nc-_94CuEo_M_1\tperson\nc_SQI7NirwY_0\tperson\nc_THUYYi_-k_0\tairplane\nc_YojhaB5pI_0\tmotorcycle\nc_jNM33kJuA_0\tperson\nc_rUQgBtHY4_0\tperson\nc_rUQgBtHY4_2\tperson\nc_rUQgBtHY4_1\tperson\nc_wkIYzEEDk_0\tdog\nc_6OcDyZ93k_0\tbus\nc_9GO2BbPz4_0\thorse\ndAQu2GQSyrY_0\tcat\ndAS6SqC7TCw_1\telephant\ndAVIZQJ5Af4_0\tperson\ndAqurx13i7I_0\tknife\ndAynVVxxb_o_0\tperson\ndA7mx3mrJeA_0\ttrain\ndA_ZtitJeMA_0\tperson\ndBDSqZ8rirA_0\tperson\ndBGKqrEvsIE_0\tboat\ndBGKqrEvsIE_4\tboat\ndBKexOUQSQA_0\tcow\ndBKexOUQSQA_1\tcow\ndBKexOUQSQA_2\tcow\ndBKexOUQSQA_4\tcow\ndBKexOUQSQA_5\tcow\ndBKexOUQSQA_6\tcow\ndBOrrvJDv54_1\tskateboard\ndBPu5iVlw1Y_2\thorse\ndBSryinfjiI_0\tperson\ndBS9maEElcw_0\tperson\ndBUpfcdFDUQ_0\tbicycle\ndBWeUQd06l4_0\tperson\ndBWeUQd06l4_1\tperson\ndBiGneGqmh0_0\tcow\ndBk2FwZ
grtk_0\tcow\ndBq77lvujCk_0\tbird\ndBuvGegR_vA_0\tperson\ndByVvpTlwL4_1\tknife\ndB29dsCcN9s_0\ttrain\ndB43vSgLY2M_0\tperson\ndCG24UL_NeM_0\tperson\ndCSF80Y6lso_0\tperson\ndCSF80Y6lso_1\tperson\ndCZ9suBocXk_0\tperson\ndCgz-7OgwMQ_1\tperson\ndCl8hSleXYQ_0\tcow\ndCoi3rXWgbM_0\tperson\ndCqdvmS1jts_0\tperson\ndCqdvmS1jts_1\tperson\ndC9rTC3kzsI_0\tcow\ndDADJZV4i74_0\thorse\ndDA5p5TJ03g_0\tperson\ndDB84W_zVOI_0\tskateboard\ndDB84W_zVOI_1\tskateboard\ndDE3p8Gs878_0\telephant\ndDGiQLFJtPs_0\tbicycle\ndDIbBZtEJ2w_0\tknife\ndDLgQQ2XRc8_5\thorse\ndDLgQQ2XRc8_3\thorse\ndDLgQQ2XRc8_6\thorse\ndDO-RlSt3Gw_0\tperson\ndDQ58wciink_0\tcow\ndDZYTPEd9KE_1\tairplane\ndDacKPH4sOw_0\tcar\ndDacKPH4sOw_1\tcar\ndDcBtNpmCeU_0\tperson\ndDgcHWpKMeo_0\tperson\ndDkaPLEvAwM_0\thorse\ndDkaPLEvAwM_1\thorse\ndDkaPLEvAwM_2\thorse\ndDqe9sBGR24_0\tbird\ndDx0MqaKT2w_0\tperson\ndDx0MqaKT2w_2\tmotorcycle\ndD-AlVwxf-g_1\tcow\ndD_Ew85jXzk_1\ttrain\ndD_PbxvCBcA_1\tperson\ndECTTSpEUKg_0\tperson\ndEW9ZwvMsDE_0\tcat\ndEc5fHlEXCo_0\ttruck\ndEuzpQL0tNo_7\telephant\ndEuzpQL0tNo_1\telephant\ndEuzpQL0tNo_2\telephant\ndE7OwbOHsu8_0\tperson\ndE7WsfeVkI8_0\tperson\ndE7X93gdVPQ_0\tcat\ndFCUyBTrvNM_0\thorse\ndFCu7E6aYM4_0\tperson\ndFCu7E6aYM4_1\tperson\ndFEo5YKHAcA_2\tskateboard\ndFEo5YKHAcA_0\tskateboard\ndFMPz16FOzE_0\tmotorcycle\ndFZSSPvMBqE_0\tzebra\ndFZSSPvMBqE_1\tzebra\ndFa7TcQRCUU_1\tbird\ndFbZxetmjCQ_0\tskateboard\ndFkNDweVNFU_0\tcat\ndFpJq9s5fec_1\tbicycle\ndFpJq9s5fec_2\tbicycle\ndFsDjjWW00Q_0\tknife\ndFth5-8MEhM_0\tperson\ndF7OkxFt3I8_0\tperson\ndF_aGgW1jcM_0\tperson\ndGE7t6KgXHc_0\tperson\ndGFrWX61Zk0_0\tperson\ndGS01inQU1U_2\tperson\ndGS01inQU1U_0\tperson\ndGS01inQU1U_1\tperson\ndGZBUkIXMpo_0\tperson\ndGZ_pzDrl70_0\tperson\ndGdh_BHleU4_0\tboat\ndGh51ZQ9QAg_0\tbird\ndGk8D_De-2E_0\tperson\ndGk8D_De-2E_1\tperson\ndGpbPaorWys_1\tbear\ndGq1bpRxbiA_0\tperson\ndGyR5TWO-p4_1\tperson\ndG0CtnphYzg_0\tperson\ndG5mjfvTY7c_0\tboat\ndG7DSOtetMY_0\tknife\ndG9J5UpxeyY_0\tperson\ndG9J5UpxeyY_1\tperson\ndHCgtjlT_Lg_4\thorse\ndHCpH8dTwfw_0\thorse\ndHF9NIqrx6Q_0\tcar\ndHGIXivupi4_0\tperson\ndHGIXivupi4_1\tperson\ndHGIXivupi4_2\tperson\ndHJkOetpjQw_0\tbus\ndHO6vTrB66w_0\tperson\ndHO6vTrB66w_1\tperson\ndHVDjpivOKw_1\tperson\ndHVDjpivOKw_0\tperson\ndHVgQCO07SU_1\tperson\ndHVgQCO07SU_2\tperson\ndHfs5GT-YpY_0\tcow\ndHg1Xorklm0_0\tperson\ndHimuOjriUc_0\tcow\ndHnk6ulSNSo_0\tperson\ndHnsZs2Riqk_0\tperson\ndHnsZs2Riqk_1\tperson\ndHsD3F8dTpc_0\tbird\ndHvlIrb2Q-k_0\tperson\ndHwR5d4xGEk_0\tknife\ndHwR5d4xGEk_1\tknife\ndHwR5d4xGEk_2\tknife\ndHwR5d4xGEk_3\tknife\ndHwR5d4xGEk_4\tknife\ndHxmY1bGbNc_4\tbird\ndH89qyunr6s_0\tperson\ndH94i4xFlZU_1\telephant\ndH94i4xFlZU_6\telephant\ndH94i4xFlZU_0\telephant\ndH94i4xFlZU_5\telephant\ndH94i4xFlZU_7\telephant\ndICl73jYZ3M_0\tperson\ndICrafh45_I_3\tairplane\ndIDxqrhmBE4_0\ttruck\ndIDxqrhmBE4_2\ttruck\ndIEZ2kfTzzY_0\tboat\ndIJk0w4SnH8_0\tbird\ndIVtaleUNWI_0\tperson\ndIVtaleUNWI_1\tperson\ndIX81Ov0fUY_0\tperson\ndIZM-9d8bSQ_0\tperson\ndIZM-9d8bSQ_1\tperson\ndIm0Sv_iE2E_0\tmotorcycle\ndIqYGVVgYsU_0\tperson\ndIzMmAGaF6U_1\tskateboard\ndI93uXfSaRM_0\tbird\ndJB-DXpgq2U_1\tbird\ndJKAhixNM9Y_1\ttruck\ndJYNs94fv_0_0\tperson\ndJgqX3uy6z4_0\tperson\ndJg4R9cpbjI_0\tperson\ndJisrPH71tE_0\tperson\ndJi_dOrUZnw_0\tperson\ndJjrFTy9H3c_0\tperson\ndJkzzYh6BkY_1\tcat\ndJnRg-1zO1g_3\tknife\ndJqGj0FeC9I_0\tcat\ndJvoaqZjIDw_0\tperson\ndJ2B9A0mYl0_1\tdog\ndJ2kWscI-tc_1\tdog\ndJ4PR9zme-s_0\tperson\ndJ6S9bSEYDc_0\tcow\ndJ8J7WOLZtk_0\tskateboard\nELDxjZXMtCg_0\tperson\nELLTxQ47f90_1\tperson\nELLTxQ47f90_0\tperson\nELNgTt9Jswc_0\ttrain\nELOZutiZKMM_0\tperson
\nELOZutiZKMM_1\tperson\nELPpy9ABb3s_1\telephant\nELTeW4X2mGY_1\tcow\nELbg8i93W8I_0\tperson\nELbjX2Ya0_o_0\tdog\nELmktutrkDk_0\tperson\nELqA6fb0un8_0\tperson\nEL8H94Lycf8_0\tperson\nEMAVfcO6JFE_0\tperson\nEMKcTJp7ehY_0\tperson\nEMOpCv3vVfE_1\tskateboard\nEMP7p3FNxZU_0\tperson\nEMU8vGL7ZFQ_0\tperson\nEMb28oLn66k_0\tairplane\nEMgh3pwtnXg_0\tperson\nEMiRla730lM_1\tperson\nEMiRla730lM_0\tperson\nEMmg9OKgyBE_1\tboat\nEMmmZ6ADzfI_0\tskateboard\nEMngQ4YMTv0_0\tmotorcycle\nEMorunu9Ik8_0\ttruck\nEMqd3lVNUxg_7\tbus\nEMuGAIADn3s_0\tperson\nEMwcDTRPPMw_0\tairplane\nEMyQWQ_Yobc_0\tdog\nEM0yGxKJWqY_0\telephant\nEM1R3HXt7DY_0\tperson\nEM1z9o601v4_0\tknife\nEM3tBaIyR0o_0\tmotorcycle\nEM5e1snhsCs_0\tperson\nEM-k8ZAva6k_0\tperson\nEM-zjCQyGAc_0\tdog\nENAr6j6fcWU_0\tbird\nENCHiWUV4dk_0\tperson\nENI-JuSPNQA_0\tmotorcycle\nENSEWig-4ZM_0\tknife\nENXXFcrrxGM_0\tcar\nENc0uxXKsaI_0\tperson\nENkqstdLKl4_0\tperson\nENk4JRIbEaE_1\tperson\nENnPjtPjU6c_0\tperson\nENtoAci6OwQ_0\tcow\nENvdCzm4whM_0\ttruck\nENvdCzm4whM_1\ttruck\nENvdCzm4whM_2\ttruck\nEN0Klsi-AKY_0\tbicycle\nEN4IIJjhBeI_0\tzebra\nEN-QCSvtEd0_3\telephant\nEN-4SsZnn-k_0\tperson\nEOEXVXG1TDk_0\tperson\nEOVNlasJhIo_1\tperson\nEOdHjLYopi0_1\tbird\nEOedzXaVI4U_2\tbird\nEOe3CfOT53g_0\tperson\nEOmVKXeoKBc_1\tairplane\nEOq-3ZRn0SQ_0\tskateboard\nEOt6j5ecODw_0\ttrain\nEO7NccQDQyM_0\tcat\nEO8Dpvy4oXs_0\tzebra\nEO8mQrkIZuY_0\tperson\nEO_DwtyWh0s_3\tperson\nEO_DwtyWh0s_0\tperson\nEO_DwtyWh0s_1\tperson\nEO_DwtyWh0s_2\tperson\nEPOXqdKNjKg_2\tgiraffe\nEPU630RSI5c_2\tperson\nEPU630RSI5c_0\tperson\nEPWmdYKJaXk_0\tbird\nEPycDWf2vY4_0\tskateboard\nEP_ezteElzk_0\tperson\nEQBFPIdI8gY_0\tperson\nEQC8eEghvs8_0\tperson\nEQNSjjkyRBg_0\tperson\nEQNSjjkyRBg_1\tperson\nEQTee9qqTZs_0\tperson\nEQVCizuJQFY_0\tumbrella\nEQdEm5HuPG4_5\ttrain\nEQx1XHc0mRM_1\tmotorcycle\nEQzXCoQRbas_1\ttrain\nEQ5rBLoiT78_0\tbus\nEQ9-lbsee1s_0\tperson\nERCvzMzkDhg_0\tskateboard\nERGwo6vIXdQ_0\tperson\nERJR-zQYyH4_0\tperson\nERR-qjVJ3lY_0\tperson\nERVp_cX1juc_0\tperson\nERev6rrd5XA_3\tmotorcycle\nERyyYMb2fFk_0\tcow\nERzh41uuxUE_3\tbicycle\nER0IdSeymeI_0\tperson\nER0IdSeymeI_1\tperson\nER03PLUBt4c_0\ttrain\nER03PLUBt4c_1\ttrain\nER03PLUBt4c_2\ttrain\nER03PLUBt4c_3\ttrain\nER53sUYwz1I_0\tzebra\nER6vMbAyQ6E_1\tskateboard\nER6vMbAyQ6E_0\tskateboard\nESDQMC_70Pk_0\tbear\nESInVf3ioiA_1\tdog\nESMdbpGXk4I_0\tperson\nEST4CUX19Eg_0\tperson\nESokfN84OYk_0\telephant\nESokfN84OYk_3\telephant\nESokfN84OYk_4\telephant\nESpwZsbwQGA_1\telephant\nESpylyha7g0_0\thorse\nESt5TEXuGIM_0\tperson\nESt5TEXuGIM_1\tperson\nESwsyjITYGM_0\tskateboard\nETBia7K3ZHw_0\tmotorcycle\nETBia7K3ZHw_2\tmotorcycle\nETQTZgnfRK4_1\tperson\nETQi93bP3YQ_8\telephant\nETQi93bP3YQ_2\telephant\nETTgj1pxvME_2\tperson\nETWI4nXFANg_0\tperson\nETcmjY7Jigo_1\tmotorcycle\nETgN7EcVVQI_1\tperson\nETmYIq5CF2k_0\tmotorcycle\nET4xC8Wl_CA_0\tperson\nET4yAsJTvlk_0\tcow\nEUH3oSBX950_0\tperson\nEUH3oSBX950_1\tperson\nEULIYiiV-O0_0\tperson\nEULIYiiV-O0_1\tperson\nEULchAlLDfM_0\ttrain\nEURUU5P5flo_0\tperson\nEUcHraiUCjA_0\tbicycle\nEUcWvzarnb0_0\tumbrella\nEUdNEi4myuA_0\tperson\nEUtfoblvHn0_0\tperson\nEUuCDfb8lf4_2\tperson\nEUuCDfb8lf4_1\tperson\nEU93Mw9WGkc_0\tskateboard\nEVBHY1qGVos_0\tperson\nEVBHY1qGVos_3\thorse\nEVElggpPSCM_0\telephant\nEVE2SBJ-2S8_0\tperson\nEVH8Ql7_pYE_0\tperson\nEVTW6Ka7-NU_0\tperson\nEViJ_JQcv5c_0\ttrain\nEVmGPGaP6bY_0\tperson\nEVnnSfmb4go_0\tgiraffe\nEVn52FBjG9E_0\tperson\nEVn52FBjG9E_1\tperson\nEVxEEc26TWg_1\tgiraffe\nEWLiwu56oQc_1\tperson\nEWNd02yWiYw_0\tperson\nEWP0Hhxsf58_0\tperson\nEWQo_1YXfYM_1\tperson\nEWQo_1YXfYM_0\tperson\nEWTvjjpAUm0_0\tai
rplane\nEWXyQ1tS3jI_0\telephant\nEWdNgXvr54s_0\tdog\nEWfPRTjQO9k_0\tdog\nEWgsivaLhl0_6\telephant\nEWgsivaLhl0_1\telephant\nEWgsivaLhl0_2\telephant\nEWi25l2D0cw_0\tcat\nEWkndzLXvLc_0\tbicycle\nEWuOSRFWTzg_1\telephant\nEW0Mgele6Gc_0\tperson\nEW0Mgele6Gc_1\tperson\nEW6FHYagN0Y_0\tperson\nEW98OEvTxM8_0\tperson\nEW-Zuo7ArI4_0\tdog\nEXDDO7gLoL4_1\tperson\nEXDDO7gLoL4_2\tperson\nEXDDO7gLoL4_3\tperson\nEXDDO7gLoL4_4\tperson\nEXGwKMtyR1M_0\tperson\nEXHZgqkcXG8_1\tcow\nEXJITC62tU4_0\tumbrella\nEXSMz4HnWfg_0\tdog\nEXaiYiUQrMI_1\tdog\nEXfiGeKWKTk_7\tairplane\nEXfiGeKWKTk_1\tairplane\nEXiGyq1TD80_0\tperson\nEXiGyq1TD80_1\tperson\nEXkbZbo1n5U_2\telephant\nEXkbZbo1n5U_0\telephant\nEX817S50E5U_0\tperson\nEX-dqihLUwY_0\tmotorcycle\nEX-dqihLUwY_2\tmotorcycle\nEYCaJR9md8k_0\tairplane\nEYEWPdaJuL0_4\tbird\nEYEWPdaJuL0_5\tbird\nEYEwLM8YTwc_0\tperson\nEYFMOBeF9UE_0\tknife\nEYHtNGztiRQ_1\tcar\nEYKrEDelAdU_1\tbear\nEYM1oXAmBq0_1\tbus\nEYRf00qGMVU_0\ttrain\nEYV6D6G6t2c_1\tperson\nEYZsYCSedGw_0\tperson\nEYd9lSK7Bbk_0\tperson\nEYhtY59whvs_0\tperson\nEYmWVBDEutA_0\thorse\nEYnEMtlMaPY_0\tperson\nEYoj8D64YLA_0\tskateboard\nEYuLodJTgYs_0\ttrain\nEY2pZ9A48ng_0\ttruck\nEY2pZ9A48ng_1\ttruck\nEY2pZ9A48ng_3\ttruck\nEY25PJWD2j4_0\tperson\nEY36YeIgOYI_0\tperson\nEY36YeIgOYI_1\tperson\nEZWcsRlXIA8_0\tperson\nEZbOH9yEe-A_0\tdog\nEZh1lf4yfCg_0\tperson\nEZ5Wa2duCoM_0\tperson\nEZ5Wa2duCoM_1\tperson\nEZ7d9ab31ys_0\tgiraffe\nEZ9-_7o9Vds_0\tbird\nEZ9-_7o9Vds_1\tbird\nEZ_xC5EBwvk_0\tbus\nEaBdeSUjDYs_0\tdog\nEaFSd7_S8kc_0\thorse\nEaQ1P4QyRsY_0\tperson\ndKEVBoMMD2w_0\tboat\ndKJz_EakSc4_0\tperson\ndKMb2S2SSfI_0\tskateboard\ndKTgMjbnNPQ_0\tskateboard\ndKiwficH2d4_0\tperson\ndKi4xI4vB-k_0\tumbrella\ndKlCFQxk5Dc_3\tperson\ndKlCFQxk5Dc_5\tperson\ndKlCFQxk5Dc_0\tperson\ndKlCFQxk5Dc_1\tperson\ndKlCFQxk5Dc_2\tperson\ndKq4S1IVjlA_0\tperson\ndLFWcgSewxs_0\ttruck\ndLH8fBNk89Y_0\tcat\ndLIld9ux7p4_0\tairplane\ndLT61O_htwI_0\tcat\ndLUCKkji5wo_0\tperson\ndLUCKkji5wo_1\tperson\ndLV2VJkpyMI_0\tairplane\ndLbhzrFtNC0_0\tperson\ndLhVV7DMXkw_0\tperson\ndLoxdmLuphk_0\tdog\ndLq5OW1xY54_0\telephant\ndLq5OW1xY54_3\telephant\ndLq5OW1xY54_2\telephant\ndLtQB9P_7BU_2\tbear\ndLty27VgJcc_0\ttrain\ndLvr7BjgsHg_0\tperson\ndLwXzYr8beg_0\tcar\ndL3dSZMnBko_0\tperson\ndL3vGWsRVCg_0\tknife\ndMDGwTdSHIo_0\tmotorcycle\ndMJQi7oYiqQ_1\tperson\ndMS5hB4uWdk_0\tbird\ndMWgiVqknaE_2\tperson\ndMWgiVqknaE_0\tperson\ndMZONdbNFbk_4\tbicycle\ndMZONdbNFbk_2\tbicycle\ndMdUZi9lxrU_0\tcat\ndMiwR-DS6UE_0\tcar\ndMsIDwHkWNE_0\tperson\ndMulBz-N8oA_0\thorse\ndM7lOj89YZE_0\tperson\ndM7-xh2kSmc_0\tperson\ndM7-xh2kSmc_1\tperson\ndM9u0c0qSV0_0\tcow\ndNCm5MtFcp0_0\tperson\ndNEAY77it7o_0\tperson\ndNShS9OdIoA_1\tperson\ndNShS9OdIoA_0\tperson\ndNSlL572gMU_0\ttruck\ndNSlL572gMU_1\ttruck\ndNVvIPWEH1Q_0\tperson\ndNVvIPWEH1Q_1\tperson\ndNdTs9Qa1A0_0\ttruck\ndNeF_3qppZQ_0\tskateboard\ndNj_77jiPcs_1\tcow\ndNknNwahiv4_0\tgiraffe\ndNoz32bgN0U_0\tcar\ndNpQfDg_dIg_0\tperson\ndNqdMh44imM_0\ttrain\ndNs2JO9SgGo_1\tairplane\ndNs2JO9SgGo_2\tairplane\ndNyMDstraS0_0\tperson\ndN1cn1CPEa8_0\tperson\ndODPVlzMR1A_0\tperson\ndOHuuTREVQk_0\tperson\ndOHuuTREVQk_1\tperson\ndOHuuTREVQk_2\tperson\ndOHuuTREVQk_3\tperson\ndOMW6BLHI2s_0\telephant\ndOMW6BLHI2s_1\telephant\ndOOQ32tmk14_0\telephant\ndORLSKDLr1w_0\tcat\ndOUVBpTWHzc_0\tperson\ndOVzO5pkY2o_0\thorse\ndOWhuaTBmr8_0\ttruck\ndOdX5nkOBoQ_1\tperson\ndOdYYCqd6i0_0\tperson\ndOdYYCqd6i0_1\tperson\ndOd-8kfbjz4_0\ttrain\ndOd-8kfbjz4_1\ttrain\ndOfNevz8wlc_0\tbus\ndO2CbXVpSl0_0\telephant\ndPA7g60qlnk_1\tboat\ndPJk57_DSuI_0\ttruck\ndPJ7_mdmjJo_4\ttruck\ndPJ7_mdmjJo_1\ttruck\ndPTnDrK0jl0_0
\tknife\ndPZPjPwJCnA_0\tperson\ndPiOaLH0K4Y_0\tbear\ndPiOaLH0K4Y_2\tbear\ndPma_hb-MR8_0\tskateboard\ndPnxUa8yPbw_0\ttrain\ndPpwBkl-F9k_3\tbicycle\ndPpwBkl-F9k_0\tbicycle\ndPp0no_eYOQ_0\tdog\ndPqheqisvs8_0\tperson\ndPvgWsIPDr0_0\thorse\ndP0jXsi0KUw_0\tskateboard\ndP_-3SJLP1Y_0\tperson\ndQB4GI0Bgus_0\ttruck\ndQCFCRTz2rc_1\tgiraffe\ndQCFCRTz2rc_4\tgiraffe\ndQCFCRTz2rc_5\tgiraffe\ndQCFCRTz2rc_0\tgiraffe\ndQIQv4YkBaM_0\ttruck\ndQI-ReUS1hk_0\tperson\ndQM_-V4jSpM_0\tcat\ndQNG1syFdKQ_0\tperson\ndQPdAoRj8vw_0\tdog\ndQWw3losmfA_1\tbicycle\ndQY2wbSJyOQ_0\tperson\ndQh9dmaqW3s_0\tperson\ndQh9dmaqW3s_1\tperson\ndQlybGW3tbw_1\tcat\ndQnNTlCD_AQ_0\telephant\ndQnNTlCD_AQ_1\telephant\ndQoX3OkaI4M_0\tperson\ndQzWZhDVLYk_1\tperson\ndQ4hJadqL_w_0\tperson\ndQ62PlC9Frc_0\tzebra\ndRBb5v_Fv3g_0\telephant\ndRDdBvl4olg_0\tperson\ndRHTO6H764g_0\tperson\ndRHYGXImEBk_2\tperson\ndRHYGXImEBk_0\tperson\ndRInM_HaQZs_0\tbus\ndRVEs1099F8_0\thorse\ndRcLZtR6KFs_0\tperson\ndRcrvTR9xIY_0\tperson\ndRiBVua-2Ck_0\tperson\ndRjzvcGshbA_1\tperson\ndRjzvcGshbA_0\tperson\ndRs8FcKuu6w_0\tboat\ndRt8H1uQ5Og_0\tumbrella\ndRt8H1uQ5Og_1\tumbrella\ndR7jBT3cxr8_0\tperson\ndR8kCc9XNJs_0\tboat\ndR-8FlykNZ0_0\tperson\ndSAODa472ys_0\tbird\ndSAYK4yUlDs_4\tperson\ndSAYK4yUlDs_0\tperson\ndSAYK4yUlDs_1\tperson\ndSAYK4yUlDs_2\tperson\ndSAYK4yUlDs_3\tperson\ndSEv_R8nQik_0\tzebra\ndSFMrnh2szI_0\tcat\ndSLakvIEH9o_0\tbear\ndSLmBYdUku8_0\tperson\ndSQTVC-RyAU_0\tperson\ndSWhe4RgQ_w_0\tcat\ndSZBg-Vcr7E_0\tmotorcycle\ndSojBtCOkqQ_0\tperson\ndSx4IloBWZs_0\tperson\ndSzAX5l_fs0_0\tperson\ndSzAX5l_fs0_1\tperson\ndS0mBDDgP_A_0\tperson\ndS0mBDDgP_A_1\tperson\ndS8x0l5I7f0_0\tboat\ndTDxzi0o_Qg_1\tairplane\ndTMe2Vse97w_0\tcat\ndTVBSXs5Me8_0\tperson\ndTVKs9m3eZU_0\tcat\ndTm_DRCtjCo_0\telephant\ndTm_DRCtjCo_1\telephant\ndTrt1C_90H0_0\tknife\ndTurjz-gJek_0\tperson\ndT6A3DwqZb0_0\tboat\ndT8wudfW9gg_1\thorse\ndT-INB6puFM_0\tskateboard\ndT-INB6puFM_1\tskateboard\ndUAtLBDfmBo_0\tairplane\ndUAtLBDfmBo_2\tairplane\ndUC_SF_mN_E_3\thorse\ndUC_SF_mN_E_1\thorse\ndUInMUIPtTs_0\tperson\ndUJH8d3CMU8_0\tbear\ndUMLWt99A7o_0\tperson\ndUP4OTLrOA0_0\tperson\ndUW_G_--wI8_0\ttrain\ndUXFUWivXPA_0\thorse\ndUXFUWivXPA_1\thorse\ndUbP54CBYd0_0\tairplane\ndUm9A-1AoMU_0\tperson\ndUqrowFcbD0_0\tperson\ndUx_UfS9cQI_1\tdog\ndUx_UfS9cQI_0\tdog\ndU-bQRDInro_2\tbird\ndU-bQRDInro_4\tbird\ndVAMoKYgrwE_0\tperson\ndVKQhCF8o8w_0\tperson\ndVTHVxh6Tro_1\tknife\ndVWAD4gOu-8_1\tperson\ndVd7OzbhOq0_0\tperson\ndViVbA7N_AE_0\tairplane\ndVqPo7-p71Y_0\tperson\ndVtqTTZTFDQ_0\tperson\ndWCqnck4Um0_0\tperson\ndWFVX1psRZI_0\tbird\ndWGkW13rQBY_3\thorse\ndWGkW13rQBY_5\thorse\ndWGkW13rQBY_8\thorse\ndWVJFIzIKEc_2\tbicycle\ndWVJFIzIKEc_0\tbicycle\ndWVJFIzIKEc_1\tbicycle\ndWXSWEaCId8_1\tperson\ndWdOl13DwwY_0\tairplane\ndWdl9RdXrHo_0\tperson\ndWdl9RdXrHo_2\tperson\ndWd0sszZOXc_0\tperson\ndWesodD0ff4_0\tairplane\ndWgfwKBrSiE_0\tperson\ndWgpYitSv0c_0\tperson\ndWkrnxWB1CU_0\tperson\ndWlDN9Hozgg_0\tdog\ndWtqRwEurDU_0\tperson\ndW1oE_LHALo_0\telephant\ndW4DX7lQoGg_0\telephant\ndW5aU0U7K28_0\tperson\ndW53l1sR_zM_0\tperson\ndXEH9QiCyHk_0\ttrain\ndXEH9QiCyHk_1\ttrain\ndXKi3ZHjgWM_1\tumbrella\ndXLyWGJxHnI_0\tperson\ndXOsaszlVY0_0\thorse\ndXSuppGXFeI_0\telephant\ndXSuppGXFeI_1\telephant\ndXdFEix8vu4_0\ttrain\ndXjUZeuzgaw_0\ttrain\ndXkmG8AR82Q_2\tairplane\ndXkmG8AR82Q_5\tairplane\ndX6W4-sxsX0_0\tcat\ndX9J6yDM5Q8_0\tperson\ndX-4XwYWv48_0\tperson\ndYGOSaGjHQU_0\tperson\ndYQMrQe1pSk_0\tperson\ndYRIEDyD9Qs_0\tairplane\ndYRKwU2TJYI_0\telephant\ndYVcalOS1SE_0\tdog\nEacR2o35-kc_0\tbicycle\nEaeD7utPpTQ_0\tperson\nEakGzU5UgWI_0\tperson\nEakGzU5UgWI_1\tp
erson\nEakGzU5UgWI_3\tperson\nEamZ8De_WFE_6\telephant\nEamZ8De_WFE_0\telephant\nEamZ8De_WFE_2\telephant\nEamZ8De_WFE_3\telephant\nEamZ8De_WFE_4\telephant\nEavqjWy5gag_0\tperson\nEaxszmfn7WA_1\tperson\nEaxszmfn7WA_0\tperson\nEay0MFBCdqY_1\thorse\nEazzsVK1-pM_2\tumbrella\nEbJV0e75xtk_1\tperson\nEbJV0e75xtk_0\tperson\nEbWt1hAb3LQ_0\tperson\nEbXzlcsBsfA_0\tperson\nEbYJAv5c_G8_0\tperson\nEblX3oKGsBA_0\tskateboard\nEb1n2o0YpOM_0\tcow\nEb3sGSIWtCw_0\tperson\nEb7juFDG3Dw_0\tcar\nEcMh5TIKmzY_0\tperson\nEcNpsheyrIU_0\tperson\nEcNpsheyrIU_1\tperson\nEcWrNFz5J-o_1\tdog\nEcpsBV2FEBE_3\thorse\nEcsiLHpIvL4_0\tperson\nEcu8VEIC2y8_2\telephant\nEcu8VEIC2y8_1\telephant\nEcvYBldDm_U_0\tperson\nEdE8zCwJ56g_0\tperson\nEdE8zCwJ56g_1\tperson\nEdIfx7lQxEw_1\tdog\nEdIfx7lQxEw_0\tdog\nEdOvSD40Tb0_0\tcow\nEdTkeITBkvY_0\tperson\nEdTkeITBkvY_1\tperson\nEdaY0DFamDc_1\tskateboard\nEdfKMOIOHtI_0\tperson\nEds-fi9s-O4_0\tperson\nEd486SKW0kM_0\ttrain\nEd-ENhlS7Dg_1\tboat\nEeCjxMzh5_A_0\tperson\nEeDhzR9I-Tc_0\tmotorcycle\nEeLllq2Zim4_0\tdog\nEeMUemitsFU_0\tperson\nEeRqVkQ1Z7Q_0\tcar\nEeRqVkQ1Z7Q_1\tcar\nEeTRT4j5GcQ_0\tperson\nEeYRHJuK3wo_0\tboat\nEeYqy9QZZTU_0\tairplane\nEeb2vPJsaN0_0\tperson\nEee6rmiMYKY_1\tcar\nEesk8VSxpIU_0\tcat\nEetKMgVh0Pk_0\tperson\nEexaBL5jDL4_0\tknife\nEexaBL5jDL4_3\tknife\nEeyjjk9-BvY_0\thorse\nEe7CW7lZfXA_1\tperson\nEe7CW7lZfXA_0\tperson\nEfE6r-Iq5CM_0\tperson\nEfG_eBrAjdI_0\tmotorcycle\nEfHCZUHt0d8_0\tperson\nEfMCesQKyoE_3\tairplane\nEfNSTkpl6dQ_0\tperson\nEfSMsLkasg8_1\tperson\nEfjC0VVD2Do_0\tperson\nEfvRGCuPoF4_0\tperson\nEf1Tm3dKzbY_0\tmotorcycle\nEf2GKdopP_A_0\tperson\nEf7-yzJqZto_0\tperson\nEf9YiYODEbg_0\tcat\nEf9q8mAPYZA_0\tperson\nEf_N7JmICUU_10\tbicycle\nEf_5u21WLbs_0\tcat\nEgDOCraAd64_2\ttrain\nEgHVReOnDpM_0\tperson\nEgPKMlxhH0A_0\tperson\nEgPxUnCFS10_3\tknife\nEgYCBIlDm98_0\thorse\nEgf4iNTfanU_0\tairplane\nEgf4iNTfanU_2\tairplane\nEghxGvj6pTs_0\tperson\nEgl_1FgGUyE_2\tbird\nEgpSSMkQOEE_0\tbicycle\nEgxlP5S15uQ_1\tmotorcycle\nEg6YUwqAQqM_0\tperson\nEg7bJ46L4Cg_0\tairplane\nEg7bJ46L4Cg_1\tairplane\nEg7bJ46L4Cg_2\tairplane\nEg82FN1vC3A_0\tknife\nEg9-5uBMrpc_0\tcat\nEg-cp7jgFA0_0\tperson\nEhF73HJvEWo_1\ttrain\nEhKAs4Z1JE0_0\tperson\nEhSaOGOPUns_0\tskateboard\nEhbaW6F3U6I_1\tperson\nEhbuzBK5bes_3\tgiraffe\nEhbuzBK5bes_2\tgiraffe\nEhcmJOG2Jws_0\tperson\nEhfmC9Wa8xs_0\tperson\nEho09eptX7c_0\tperson\nEhpwK0_8UJA_0\tboat\nEhpz_gcdCcY_0\tknife\nEhpz_gcdCcY_1\tknife\nEhpz_gcdCcY_2\tknife\nEh6FARrS1VY_0\tskateboard\nEh7f9wgtUns_0\tbus\nEh88_JdkWWs_0\tperson\nEh-x-OzZxGo_0\tperson\nEiE9eIJ-Rv4_0\tcar\nEiLWN5T6wko_1\tperson\nEiNTdTOmvDU_0\tperson\nEiUbGE2f6fU_0\ttrain\nEiUbGE2f6fU_1\ttrain\nEiZG3M9_EMc_0\tbird\nEiaYgqLcbqM_2\telephant\nEibdBvTND-I_0\tperson\nEibdBvTND-I_1\tperson\nEine_0RExlI_0\tperson\nEi1XBJFaUeI_0\tperson\nEi1XBJFaUeI_1\tperson\nEi6ZitRjwdA_0\tperson\nEi7n3944Ovs_0\tumbrella\nEi9d8OX0ui0_1\tairplane\nEi9d8OX0ui0_0\tairplane\nEi9724H_wUs_1\tperson\nEi9724H_wUs_0\tperson\nEjcMZ8Y0Oeg_0\tboat\nEjgxtJaNIH8_0\tskateboard\nEj2wn6JRBzA_0\tskateboard\nEj7xV32Trwc_0\tperson\nEj8UwQiT5jk_1\tknife\nEj8UwQiT5jk_3\tknife\nEj_zFc5qxRw_0\tcat\nEkMGStKSilE_0\tperson\nEkMdmPclE3k_1\tdog\nEkTrskvsL5c_1\thorse\nEkWd3wPBEyg_0\tairplane\nEkawSvsvh3g_0\tperson\nEkdP_pWa9s0_1\tairplane\nEke0rATHhX4_0\tperson\nEkh_cm7q1y8_0\tcow\nEklOuZWH-8Q_0\tmotorcycle\nEkyydrsMSkY_0\tperson\nEk1DlGGsUdY_0\tumbrella\nEk4323MkRYo_0\tbicycle\nElJtz3uv-AQ_0\tperson\nElLiin7Cda4_1\tperson\nElLiin7Cda4_0\tperson\nElNzy4USrLA_0\ttruck\nElR4MuOUYKM_0\tbird\nElgmQr70py4_5\ttrain\nElrxptn-Zqo_0\tperson\nEluRnlB_s6c_0\ttrain\nEluRn
lB_s6c_3\ttrain\nElwZ1M6McHo_0\tskateboard\nEl2nzuCxrGk_1\thorse\nEl5fRl-4vko_0\tknife\nEl9Efl32L8w_0\tperson\nEmDjVcaznIA_0\tzebra\nEmDjVcaznIA_1\tzebra\nEmDjVcaznIA_2\tzebra\nEmJeLKaG_hE_2\tbird\nEmJk7hDSzaM_0\tperson\nEmJk7hDSzaM_1\tperson\nEmJk7hDSzaM_3\tcow\nEmWzmxDjjOs_0\tperson\nEmkwHglcEKA_1\tmotorcycle\nEmlvoH2AxWs_0\tperson\nEmqEntvqLw0_0\tairplane\nEmsMjm0VXJc_0\tskateboard\nEm44RLa7Qp4_0\tperson\nEm_UT-f7q0E_1\ttrain\nEnJkvPAMuaM_0\ttrain\nEnJkvPAMuaM_3\ttrain\nEnJkvPAMuaM_1\ttrain\nEnL2FiVIuJg_0\telephant\nEnL2FiVIuJg_1\telephant\nEnS1Yte0Xzw_5\tknife\nEnS1Yte0Xzw_2\tknife\nEnUW7YSmli0_0\thorse\nEnVtYzkXwjM_0\tperson\nEnbXP2xywwk_0\tperson\nEnmwKpZJTQc_0\tperson\nEnoNrjMNAC0_0\tperson\nEnrcDrbyUxY_0\tperson\nEnrcDrbyUxY_1\tperson\nEoaeqRc88HU_0\tperson\nEoallCLchmo_0\tcow\nEodtHMtH9zw_0\tperson\nEojPQY8nQ2Y_0\ttrain\nEouV6Ut4NP8_1\tperson\nEouV6Ut4NP8_0\tperson\nEouZIHzCFq8_0\tairplane\ndYVtJPfJmf4_0\tperson\ndYgPc190feM_0\tperson\ndYgxCdKNrIo_1\tairplane\ndYjCbeBAgYs_0\tperson\ndYmREF5dDkw_0\tdog\ndYosdOz5mZo_0\tperson\ndYr1OKT1lCA_0\tperson\ndYyHudM-fQc_0\tperson\ndYyHudM-fQc_1\tperson\ndYzh49Wr9bQ_0\tairplane\ndY9dlzr4w0Y_0\tperson\ndZFiRqMkFPc_0\tperson\ndZHJc_1os9Q_1\tperson\ndZHJc_1os9Q_0\tperson\ndZHJc_1os9Q_2\tperson\ndZMQgxFHQPA_0\ttrain\ndZQ2o-4a5tU_0\tperson\ndZSQXDQcafc_0\tknife\ndZUOCWwr2xs_0\tknife\ndZaFo3C_1ts_0\tperson\ndZdvK41DxLI_3\tcar\ndZio0uN6DHY_0\thorse\ndZio0uN6DHY_1\thorse\ndZjnkqYO2lE_0\ttruck\ndZmG64W2CtM_2\tumbrella\ndZmG64W2CtM_0\tumbrella\ndZsXB4o-wdE_0\tairplane\ndZzfVDrmMj0_0\tbird\ndZzfVDrmMj0_1\tbird\ndZ1vVETiQAQ_0\tperson\ndZ6ub2CEvbg_1\tbicycle\ndZ6ub2CEvbg_2\tbicycle\ndZ6ub2CEvbg_3\tbicycle\ndaBl0Q92zLE_4\tbear\ndaBl0Q92zLE_0\tbear\ndaIJjuHo2EQ_0\tcow\ndaMcE2oorrE_1\tperson\ndaWo89I2Tuo_0\tskateboard\ndaWo89I2Tuo_1\tskateboard\ndaWywQD6R4g_8\telephant\ndaWywQD6R4g_0\telephant\ndaWywQD6R4g_2\telephant\ndaWywQD6R4g_4\telephant\ndaWywQD6R4g_5\telephant\ndaWywQD6R4g_6\telephant\ndaXEykL8UQ0_0\thorse\ndaZHZXfmY7k_0\tcat\ndaaHTdFcx5o_0\tboat\ndaaX2TXbYmo_2\tairplane\ndadAGYt0vS0_1\thorse\ndalHUNR5yAA_1\tperson\ndan-4YoB-Vw_0\tperson\ndaoysu5sfUQ_0\tperson\ndapxBMe8Wz8_1\tperson\ndaqWFFdK8Ck_0\tperson\ndawGJDtHlcs_0\tperson\nda4jNzO5wL0_0\tperson\nda61HPBGEwo_0\tbicycle\ndbU6Fn_5bHI_0\tbus\ndbXr-9m66-U_0\tperson\ndbdhdeVMuL0_0\tbird\ndbhGB6XW3fM_0\thorse\ndbxb42TzQ_g_0\tskateboard\ndbysY1V2TwI_0\tperson\ndby-fBGIPRU_1\tboat\ndby-fBGIPRU_4\tboat\ndb9i2fI8dv4_0\thorse\ndcADt99ndxg_0\tperson\ndcADt99ndxg_1\tperson\ndcBMrHLTvPo_0\tperson\ndcEW4y5AI1E_1\telephant\ndcHcm85hd5s_2\tbear\ndcH304rxwLY_0\tperson\ndcJN3WJZLOE_0\ttrain\ndcLR55c41rg_1\thorse\ndcLoVk60Gkg_0\tcow\ndcLoVk60Gkg_1\tcow\ndcLp5mtSkPA_0\tcow\ndcO5id4LTVE_0\tperson\ndcO5id4LTVE_1\tperson\ndcO5id4LTVE_2\tperson\ndcO5id4LTVE_3\tperson\ndcO5id4LTVE_4\tperson\ndcUA_Wf8vrc_2\tskateboard\ndcXdmOY1YCw_0\tcar\ndcXdmOY1YCw_1\tcar\ndcblbU5lyQU_0\tperson\ndcdXiEQkghM_0\tperson\ndcdXiEQkghM_1\tperson\ndcf4zn9wOjM_1\tperson\ndcj9u89LAu8_0\tumbrella\ndcoFS0-09xc_0\tperson\ndcoFS0-09xc_1\tperson\ndcwbXzJsVDw_1\tcar\ndcxhSnf9sg0_1\thorse\ndc1_WHDpL3w_0\tperson\ndc-BpV5fuQM_2\tcow\nddK4WXTyoWw_0\tcow\nddPN4QZuLBE_0\ttrain\nddPxOsA2Cro_0\tperson\nddPxOsA2Cro_1\tperson\nddW0MYEUWlc_0\tperson\nddaqR7COVYo_0\tperson\ndddKAnk7-hQ_0\tumbrella\nddlPux88liU_0\tperson\nddruq0KhCxM_1\tskateboard\nddsTE3NwHyM_0\tperson\nddtNIDCxqCk_0\tperson\nddw0wDJgJwM_0\tperson\nddxQR-NB6E4_0\tperson\nddzrzJEogWQ_4\tmotorcycle\nddzrzJEogWQ_6\tmotorcycle\nddzrzJEogWQ_0\tmotorcycle\nddzrzJEogWQ_1\tmotorcycle\nddzrzJEogWQ_2\tmotorc
ycle\nddzrzJEogWQ_3\tmotorcycle\nddzrzJEogWQ_5\tmotorcycle\ndd0CsqY6Fbo_0\tairplane\ndd8a6btF_B4_0\tperson\ndeDEnw72hQk_0\tperson\ndeNoMwyFOO4_0\tperson\nded6WOfO9O8_1\tperson\ndeep6EOo6ds_0\tperson\ndeihMrgBXEc_0\tperson\ndelKGPVRJsY_0\tperson\ndemxgFkqGxA_0\tbus\ndeqo50gGTBo_1\tairplane\ndew_lb_L9hE_0\tperson\ndezAUC4KbJI_0\tperson\nde1f8qTDYUI_0\tperson\nde2HZ6DBOuM_0\tperson\nde4mcJTPj48_0\tperson\nde4mcJTPj48_1\tperson\nde7-gbLffxs_0\tcow\nde8KeV2waGY_1\tperson\nde8V1ovs5eM_0\tperson\nde_fGa7Zxus_0\tperson\ndfAvID4lRsE_0\tperson\ndfAvID4lRsE_1\tperson\ndfDTR9mCUZI_2\tdog\ndfEF6SMFbGM_0\tskateboard\ndfKBB3-VicU_0\tbus\ndfK1HsVc2B0_0\tperson\ndfh2lETTLZI_0\tskateboard\ndfp4iVaXCpg_0\tskateboard\ndfqLJxxdinA_0\ttrain\ndfsTKKT5-UU_0\tperson\ndfseA2X5Cow_0\tperson\ndf_PzyC0gTw_0\tcat\ndf_SYY4pb3I_2\tcow\ndgGYa05XpYo_0\tskateboard\ndgIsZXSKACE_0\tperson\ndgOQKwvhLpE_1\tdog\ndgTYRveHMjM_0\tcat\ndgYN1OH5oc0_0\tzebra\ndgl2b2bRpq0_0\tperson\ndgtaJOOOtKg_0\tperson\ndgweyIjmmDY_0\tcat\ndgyGZqXgvag_0\tperson\ndg6u7R87Gh4_0\tperson\ndhFII58PWhI_0\tperson\ndhIL9wRZMm0_1\thorse\ndhIt9lg6Sbw_1\tboat\ndhUG1gnTlso_0\tdog\ndhZ-JmFNyak_0\tperson\ndhcVp1GmJyI_0\telephant\ndhcVp1GmJyI_1\telephant\ndhgs2glg_N8_1\tperson\ndhgs2glg_N8_0\tperson\ndhiYTV7DJLY_0\tcow\ndhjeKi58cuU_0\tcow\ndhkFVTvJ6ZU_0\tcat\ndhy85XNJT3c_0\thorse\ndh03d5vq1B0_2\tdog\ndh1XFXciUf4_3\tbus\ndh1XFXciUf4_2\tbus\ndh6zZFXD0_c_0\telephant\ndiDDNe-MVfs_1\telephant\ndiMmgSNBO8k_2\tperson\ndiRn1fE6zMg_0\tperson\ndiSTaGHORrc_0\tperson\ndiSZzd4jM0E_0\tperson\ndiUCxWmV084_0\tperson\ndiZ-mRLPpqI_0\tperson\ndidB6Es7Des_0\tperson\ndidTjworKXg_2\tumbrella\ndif0t09rdZg_1\tcow\ndioELry6bbk_0\tairplane\ndix7GRytfcw_0\tperson\ndix7GRytfcw_1\tperson\ndi1KJ0Mb5M8_0\tdog\ndi2TPYyIeWc_0\tdog\ndjIw9AQoU3o_2\tperson\ndjLPrNtPSY8_0\tperson\ndjLUJy1sWMg_0\tcat\ndjNzrBpqnnY_0\tcar\ndjSxYfG99k8_0\tcar\ndjaGBINLXTQ_0\telephant\ndjh9QeYLg7M_0\tairplane\ndjiTvgkjTW4_0\ttrain\ndjiTvgkjTW4_3\ttrain\ndjiTvgkjTW4_4\ttrain\ndjiTvgkjTW4_5\ttrain\ndjiTvgkjTW4_7\ttrain\ndjlet5--ZW0_0\tperson\ndjlet5--ZW0_1\tperson\ndjpCG2oprrA_1\tperson\ndjvQyzGNp7o_0\tperson\ndj2Qk--KIkk_0\tperson\ndj6yGGCBFWc_0\tperson\ndj8d91U-F_0_0\tperson\ndkQWD9hv4fo_1\ttrain\ndkQbDCav3eM_1\tperson\ndkSetHNXnNY_0\tcow\ndkb-6x7zo5E_0\tperson\ndkdCTCL5imo_2\ttruck\ndkdCTCL5imo_3\ttruck\ndkdCTCL5imo_4\ttruck\ndkiOcFZwrA0_0\tbear\ndknj-Sv4HUs_1\tperson\ndkpsViIYlsI_0\tcow\ndkw4aWG9l6E_0\tbear\ndkw4aWG9l6E_4\tbear\ndkw4aWG9l6E_5\tbear\ndkxLcr2kvIM_1\thorse\ndk3Nf8K3RzI_0\tboat\ndk4gT0vHgeU_0\tperson\ndk6h_GL9OZo_0\tperson\ndk7QISqnWZc_0\tbird\ndk7juEuA2is_0\tbear\ndk7juEuA2is_2\tbear\ndlAMvsjssrY_0\tperson\ndlDsSVM3JJ8_0\tperson\ndlG7MtSpAK4_0\tperson\ndlIG99k9Hoo_0\telephant\ndlIkYaty1Uw_0\tcar\ndlNMnGKJJjU_0\tcow\ndlQ1Gr54T74_11\tbicycle\ndlQ1Gr54T74_14\tbicycle\ndlQ1Gr54T74_5\tbicycle\ndlVOuZK_1bY_1\tperson\ndlVTSnDsl38_1\tknife\ndlW_HPbVriI_1\ttruck\ndlW_HPbVriI_3\ttruck\ndlW_HPbVriI_0\ttruck\ndlW_HPbVriI_2\ttruck\nEpH59JsxI3w_0\tcar\nEpIb8r7uBqM_0\tperson\nEpJ_M6rB_PA_1\tbird\nEpOaQjhIh_M_0\tairplane\nEpP_TLXxb7Y_0\tcow\nEpSURaF1BfY_0\ttruck\nEpT8zxDFPf8_0\tcow\nEpVdzlk5GYU_0\ttruck\nEpd3r6iiqVk_0\tbicycle\nEpeIZCFbjw0_0\tskateboard\nEpnttpyYTAo_0\tperson\nEpoqtu0Pqe4_0\tcow\nEp8bd1STWKw_0\tmotorcycle\nEp81Lk66O50_0\tperson\nEp84L7WDoyE_0\tperson\nEqBJeYu5f_E_0\telephant\nEqBJeYu5f_E_3\telephant\nEqHBjvHkvf0_0\tperson\nEqJR5UZAlSg_1\tcar\nEqLYPeo9ZC0_0\tperson\nEqMqvcHp8Ko_0\tcar\nEqMqvcHp8Ko_1\tcar\nEqSYKCxmeDA_0\tdog\nEqSYKCxmeDA_1\tdog\nEqh7XqsYl5M_2\tperson\nEqmnFPweBmk_1\tboat\nEquATbp9uL0_0\tperso
n\nEquATbp9uL0_1\tperson\nEqvMMBAZP2o_0\tperson\nErUllSQJNgI_4\telephant\nErUllSQJNgI_5\telephant\nErWUOje4g8Q_0\tmotorcycle\nErX04vJ-JcU_0\tcat\nErf0FkqYsTE_0\tperson\nEro36xFQKS4_0\ttruck\nEro36xFQKS4_1\ttruck\nEr4yJXTWNNo_3\tbicycle\nEr4yJXTWNNo_4\tbicycle\nEr4yJXTWNNo_5\tbicycle\nEr5D0fXZsjk_0\tperson\nEr9tboOA5k8_0\tperson\nEsEreMKZP7Q_1\tperson\nEsQ05q5ZZVM_3\tskateboard\nEsQ05q5ZZVM_5\tskateboard\nEsQ05q5ZZVM_2\tskateboard\nEsYZbF7hCTE_0\tperson\nEsZV26-jxX8_0\tmotorcycle\nEsbWwOYbT8Q_0\ttrain\nEskqA8x8mX4_2\tairplane\nEsrUSkNrqWs_1\tperson\nEs0O5wtTZ2Q_0\tperson\nEs9GOUryI0U_0\tperson\nEs9Yq8uZ4fA_0\tperson\nEs-W0AxQ5Us_1\tcow\nEtebDuK3fUY_0\tperson\nEtlKR9-Q2dk_0\tperson\nEtx8YkcrSF8_0\tperson\nEt0RRuaW-Rg_1\tdog\nEt1PKq61KAk_0\tperson\nEuETmswYRrs_0\tcow\nEuHJB5UXmZg_0\tumbrella\nEuHvelij5ao_0\tcat\nEuIGG3PoslE_0\tperson\nEuInxfWuqqA_0\tperson\nEuZnOeXR020_0\tperson\nEua2VIbXEMs_0\tboat\nEufXUqphYVw_0\tperson\nEumfsHXsVGk_0\tperson\nEunz2V1RXXo_0\ttrain\nEurWaA7qCDw_0\tbear\nEuwjSGtSYlY_0\tperson\nEuwjSGtSYlY_1\tperson\nEuzDIk8ag30_0\tperson\nEuzVaAXsy4o_0\tmotorcycle\nEu0nzh2HQNk_0\tperson\nEvDZK2cFYVE_0\tmotorcycle\nEvGoGf-YCA8_0\tbicycle\nEvKPt0vynKY_0\ttruck\nEvN8x67_EQ0_0\tperson\nEvZF9DagIoQ_4\thorse\nEvZF9DagIoQ_0\thorse\nEvZF9DagIoQ_1\thorse\nEvmcyDEPnoA_1\tskateboard\nEvvbUe6FBSM_0\tbird\nEvvij-hmE4A_0\tperson\nEwBKceBTBbo_0\tdog\nEwBwIUrHR3o_0\tperson\nEwBwIUrHR3o_1\tperson\nEwDyryqt94g_4\tairplane\nEwDyryqt94g_5\tairplane\nEwKIz0qAvKQ_0\tperson\nEwSJeylFWsY_0\tperson\nEwUGFtWeyMA_0\tperson\nEwUeAvO5mrE_0\tcow\nEwU8puKxN8Y_0\tperson\nEwU8puKxN8Y_1\tperson\nEwWCc9whfDI_0\tcow\nEwYNowdS57c_0\tperson\nEwet2EA1xX8_1\telephant\nEwet2EA1xX8_2\telephant\nEwet2EA1xX8_0\telephant\nEwozH_35SDg_0\tperson\nEwq-V9jATzg_0\tperson\nEw8lEc8Ufi8_1\tbus\nExCPGilpuMM_0\tperson\nExCPGilpuMM_1\tperson\nExCjkt_zXuw_0\tperson\nExCjkt_zXuw_1\tperson\nExJjWM_rAnI_3\tairplane\nExJjWM_rAnI_1\tairplane\nExPBVcERfwY_1\tperson\nExPBVcERfwY_3\tperson\nExPBVcERfwY_4\tperson\nExT3xg9phtQ_0\tperson\nExVHmko3jfY_0\thorse\nExW1ju88BW8_0\tcat\nExb1TjMi76I_0\tboat\nExc3W9o5-04_1\thorse\nExe2EizU9VQ_0\tcow\nExe2EizU9VQ_1\tcow\nExfZl3DY8JM_0\tperson\nExfZl3DY8JM_1\tperson\nExl9alp64lE_1\tperson\nExqpcHBGBlw_1\tperson\nExvcP05yrS0_0\tperson\nExvcP05yrS0_1\tperson\nExxZODpPkQQ_1\ttrain\nExz2WL2-kR0_0\tgiraffe\nEx4__JMKkqI_0\tperson\nEyMzZV5iTEA_0\thorse\nEyP_0uEuXVs_1\tbear\nEybT7tq6XGk_0\tperson\nEymmgPoUyuM_0\tperson\nEymmgPoUyuM_1\tperson\nEymmgPoUyuM_2\tperson\nEyn7IfnWm4o_0\tairplane\nEyn7IfnWm4o_3\tairplane\nEyn7IfnWm4o_1\tairplane\nEyn7IfnWm4o_2\tairplane\nEyp8nornJW0_0\tbear\nEyp8nornJW0_1\tbear\nEyrfi9lGdoo_1\tairplane\nEyuKu6qMB6g_0\tperson\nEywYZ3Gjwuc_0\tperson\nEywnxH68jDU_0\tcow\nEyzwbz1ZxmU_0\tcat\nEy2TgrQ30Z0_1\tbicycle\nEy2TgrQ30Z0_2\tbicycle\nEy36TlCS4rQ_0\tperson\nEy4BLGQL2Bg_0\tbear\nEy7eosaz0zU_0\tperson\nEy7us0SSVAs_0\tairplane\nEy7us0SSVAs_2\tairplane\nEy7wIzCkFU4_0\tperson\nEy7wIzCkFU4_1\tperson\nEzC0tuKaVGA_0\tperson\nEzEX4OUEHHQ_1\tskateboard\nEzGa4SSPsbI_0\tbicycle\nEzYjRjhff20_0\tperson\nEzZEWp1cluc_0\tperson\nEzeDITt3y5I_0\tperson\nEzeDITt3y5I_1\tperson\nEzlyx_EudUQ_0\tperson\nEzlyx_EudUQ_1\tperson\nEzuizVcVbSA_0\tperson\nEz6I4TpzC5I_0\tperson\nE0K5Ll7wHUw_0\tbird\nE0YZDyUoHTM_0\tknife\nE00cOMpNw3o_0\tmotorcycle\nE01EgIBFxRk_0\tperson\nE038teDC3EM_0\tperson\nE0-Z0KM1UB4_1\tperson\nE1AwHXQ00ns_0\tperson\nE1MTmF3FAN0_0\tbicycle\nE1NfSTmGCRE_0\tknife\nE1ZhuBRYvKY_0\tcow\nE1bNSKg9iv8_0\thorse\nE1oEO09-bAw_0\tdog\nE1pmsS_ufrs_0\tperson\nE1xPwEvYymk_1\tperson\nE1xPwEvYymk_2\tperson\nE1xPwE
vYymk_0\tperson\nE1zxNG3Fglo_0\tbird\nE17S76lXHfI_0\tperson\nE1_ETAQHwcM_0\tperson\nE2O5Y6VAhIc_0\tperson\nE2O5Y6VAhIc_1\tperson\nE2Pobz5qoAE_0\tperson\nE2Pobz5qoAE_1\tperson\nE2Vqlq1BQYs_0\tairplane\nE2WWQOKGeb4_0\tskateboard\nE2aiCls-clY_0\tperson\nE2lj1iRVceA_0\tskateboard\nE22IW-PgLfU_0\tperson\nE28Cad7vBrw_0\tperson\nE28Cad7vBrw_1\tperson\nE29-bZY3lEo_0\tairplane\nE3NmlH6taDs_0\ttruck\nE3SKOBDl6u0_0\tperson\nE3enDSeq6P0_0\tperson\nE3tmvYSpQSQ_0\tperson\nE35M5UWMXeE_0\thorse\nE35M5UWMXeE_2\thorse\nE4Bl9c7JbYs_0\tperson\nE4DFW1SxJfY_0\tdog\nE4DFW1SxJfY_2\tdog\nE4TfSUdVt8U_1\ttruck\nE4pulnGY9X8_1\tperson\nE43SZ65LnfY_0\tcow\nE45LqepDuqg_1\tperson\nE5BtXla2lCQ_0\tbicycle\nE5CQkNJct6Q_0\tmotorcycle\nE5HB-EDNtE8_0\tperson\ndlZZzrMO6yY_0\tperson\ndlbAWAuByWk_0\tperson\ndlcovhFKigE_0\tperson\ndlh5RGS5Bzw_0\tbird\ndlkVXsIhcZg_1\tperson\ndlo83yH621I_1\tcow\ndl2g71ftw9A_3\ttrain\ndl2g71ftw9A_4\ttrain\ndl2g71ftw9A_5\ttrain\ndl6ogvuxF78_0\tperson\ndl_fuQYhAP8_0\tperson\ndmDdRd6wULk_2\tdog\ndmJ1DuWiAdM_0\tperson\ndmMz5FhGOCc_1\tperson\ndmVAi4WMi3M_0\tperson\ndmVAi4WMi3M_1\tmotorcycle\ndmVAi4WMi3M_3\tmotorcycle\ndmW77KHtuCQ_0\thorse\ndmYSNG-7VCg_0\telephant\ndmfX7DsSS1k_0\tbicycle\ndmuWxnAfMn4_0\telephant\ndm4rFNN7FZQ_0\ttruck\ndm-lOmiP2d8_0\tcow\ndnAQ7q60f_g_0\telephant\ndnB0we4_DrY_0\tcow\ndnB6auv8PBk_0\tperson\ndnFZkG7_E1w_0\tperson\ndnNh07bnI_s_0\tcat\ndnUXo5nstys_0\tperson\ndnVV1s-LcAY_0\tperson\ndnY-4hOzYts_1\tperson\ndncQtuB_6qA_0\tmotorcycle\ndncxd1B2sLk_0\tgiraffe\ndnwqVE3lPyY_1\ttrain\ndnwqVE3lPyY_2\ttrain\ndn_r7u_5apk_0\tskateboard\ndoHOuG6wqXY_0\tmotorcycle\ndoSDuIGLFXY_0\tcat\ndoTj5H8Uf1I_0\tcow\ndoUwj_z1x5o_0\telephant\ndoX3oiADm_s_1\tperson\ndomu9ia2Vo8_0\tperson\ndorx67yK7WU_0\tbird\ndovn1QHCR7o_0\tperson\ndowbL0CZ5do_0\tbicycle\ndo1QIWrYeW8_0\tperson\ndo5o5Dw0vPc_1\telephant\ndo5o5Dw0vPc_4\telephant\ndo7abiC5aZk_1\tcar\ndo82ENX9cOc_0\tperson\ndo-LmSJTPj4_0\tskateboard\ndpDG64ULlUg_0\tboat\ndpGCSoTITrw_2\telephant\ndpJWbIaQYoI_0\tperson\ndpJWbIaQYoI_1\tperson\ndpQP5r61_GQ_0\tperson\ndpUorqkSYZE_0\tdog\ndpYYMgh5TS0_0\ttruck\ndpcwUs5srlc_0\thorse\ndpi0u6pfCTM_0\tperson\ndpjLyHb9AyI_0\tperson\ndpkF3SwOunc_0\tdog\ndpn6vUVXBuM_2\tumbrella\ndptZbHZQYPM_1\tdog\ndptZbHZQYPM_2\tdog\ndpxGzRQqAaU_0\tmotorcycle\ndpxGzRQqAaU_1\tmotorcycle\ndpxGzRQqAaU_2\tmotorcycle\ndpxVPiv62SY_0\tperson\ndp2cUWhnP0A_0\tknife\ndp3Q_aTYeJ4_0\tperson\ndp_JQh45a50_0\tperson\ndp_1VrEUWbU_0\tperson\ndqCFYWRf9g8_1\telephant\ndqDLl7BlAAA_0\tskateboard\ndqFRS9o1CSU_1\tperson\ndqOoL5LiXc8_0\tboat\ndqQPbKE4UhQ_0\tperson\ndqTlCZzLk6A_1\tcow\ndqWEwvhVNiI_0\tperson\ndqavRiIA-38_0\ttruck\ndqj-msAUvnc_0\tcat\ndqzc4W6f-x4_1\tperson\ndrAhAL_F38Y_0\tperson\ndrAh2lmjDs4_0\tskateboard\ndrJGoPHMunk_0\tperson\ndreDU-1isrI_0\tperson\ndre_PgfS8yw_0\telephant\ndrf5ijiEkUo_0\tperson\ndrm2oJ3X1HM_0\tperson\ndrqFwF60pgE_0\tairplane\ndrqFwF60pgE_1\tairplane\ndrqFwF60pgE_2\tairplane\ndrqe2hP0PKI_0\tperson\ndr3TumG_tlI_1\tcow\ndr4dU5UDF-Q_0\tperson\ndr8s5VC9Fxg_1\tperson\ndsLbM2wZHrc_0\tdog\ndsPwJ3J1ZKA_0\tperson\ndsTR1vv9XLE_0\tperson\ndsTR1vv9XLE_1\tperson\ndsUuAVsJSi4_1\tmotorcycle\ndstcI7MYsZ0_0\tperson\ndsyBSejpe-k_0\tperson\nds1BJMsasQI_0\tperson\nds6FmQYwgYw_0\tskateboard\ndtDGbuCwBuY_0\tbicycle\ndtHgnX0NtxE_0\tperson\ndtMbzXL9wO4_2\tbear\ndtOFqz41TJ0_0\tbird\ndtR2UeJbIvg_0\tperson\ndtWfbusf4Es_0\thorse\ndtYdUj-d8fA_0\ttrain\ndtZrB9iDzgQ_0\thorse\ndtlUL4D7_NM_0\tbird\ndtvZaXxNgKQ_0\tperson\ndtwUG12h74g_0\tperson\ndt8Tngmse50_0\tbicycle\nduOX3z4IJSY_0\tperson\nduTvmDpj0sI_3\tboat\nduTvmDpj0sI_2\tboat\nduV82Wn9rXk_0\tcar\nduZYUVeDXEM_0\tca
t\nduaO7S-EH1A_1\tperson\nducdg4KXQsg_0\tperson\nduoFWPZbeNc_0\tperson\ndupnmzaPsWA_6\telephant\ndutp3txJPTY_0\tperson\nduvuNqufLjs_0\tcow\ndu5hbB5w3UU_0\thorse\ndu96VR7vtOk_0\tbird\ndvKKmu56UkE_0\tperson\ndvS2DSYGOGg_0\tperson\ndvbVbBosw38_1\tperson\ndvgf3R9k0uY_0\tbird\ndvur4MZD_yc_0\tperson\ndvur4MZD_yc_1\tperson\ndv0ptUC-DIE_0\tperson\ndv6ymk8duso_0\tbird\ndv_KURooPDU_0\tperson\ndwbRsYPV7Ag_0\tperson\ndwpopXTeeGc_0\tperson\ndwrYJ92znpw_0\tcat\ndwy7k_gtEco_1\tboat\ndw8kejnR7L4_0\tperson\ndw-2_KqGeYY_1\tbird\ndxFrLHoW9jI_0\tmotorcycle\ndxGlDl4IukI_0\thorse\ndxGlDl4IukI_1\thorse\ndxViI6VXh6Y_0\tdog\ndxn8VDPNvJM_0\tperson\ndxq9r-qrJ2A_0\tperson\ndxsQn1MuZRA_0\tbus\ndx0z7DYxGSw_0\tperson\ndx4rtOOz7tA_1\tumbrella\ndx6ucdpKZP0_0\tperson\ndx8nEHWD1xc_0\tperson\ndyAC2ey1DQU_4\tbird\ndyJ83t1zgkU_0\tskateboard\ndyPMbIsTtFs_0\tperson\ndyPt3VKGZPo_1\tperson\ndyR4vnjF5do_0\ttruck\ndyZixtbxEE4_1\tperson\ndym-lDsiSTM_0\tboat\ndyt8LtUqIMU_0\tboat\ndyy3oxsiErU_19\ttruck\ndyy3oxsiErU_14\ttruck\ndy2J0aeX5eQ_0\ttruck\ndy3nkqKOjbk_0\tperson\ndy6zETD5NFo_0\tcow\ndzEKq7fsVnQ_0\ttrain\ndzNRDfnNbeE_1\tperson\ndzS2ClyakEg_0\ttruck\ndzXv_YFLPqg_0\tperson\ndzahMuEcbCM_0\tcow\ndzeNnQOePGs_0\tperson\ndzhSVb26d7Q_0\tumbrella\ndzoQb8C3vxE_0\tperson\ndzsHYOJpBbY_0\tbear\ndzv-u3s_YtI_0\tperson\ndzyVndvBofo_1\thorse\ndz3SP1rd9zE_0\tcar\ndz_ATSJBx6k_0\tairplane\ndz_ATSJBx6k_1\tairplane\nd0J7uodSxF8_2\tmotorcycle\nd0NY8eqs19s_0\tmotorcycle\nd0NtMMBjQp0_1\ttruck\nd0NtMMBjQp0_2\ttruck\nd0NtMMBjQp0_0\ttruck\nd0RIwZfoGNg_0\tperson\nd0ZEYzyD9Vg_0\tperson\nd0b8-K_6D68_0\tumbrella\nd0hJditcWj4_0\tperson\nd0hQQC2i1Y0_0\tperson\nd0hQQC2i1Y0_1\tperson\nd0hdtlKidzs_0\tperson\nd0h9QWelhII_0\tboat\nd0lVKBOzOQ0_0\tskateboard\nd0qGN1A7XJA_0\tperson\nd0vHpkvShqg_0\tgiraffe\nd0vUARlHvjc_1\tcow\nd0v47QFRyvg_0\tperson\nd0v47QFRyvg_1\tperson\nd00UKAQHK2A_0\tperson\nd02xOzIVP-s_0\tperson\nd04Dr38addQ_0\tairplane\nd09H7U6x-Fc_0\tcat\nE5OHeMbBp9s_1\tgiraffe\nE5RbbN1bPN8_0\tperson\nE5YibOn90Co_0\tskateboard\nE5b9Yug5vbk_0\tbicycle\nE5b9Yug5vbk_4\tbicycle\nE5b9Yug5vbk_1\tbicycle\nE5b9Yug5vbk_2\tbicycle\nE5b9Yug5vbk_3\tbicycle\nE5dBaFyBYX0_0\tairplane\nE5me_giHEOE_1\tperson\nE5trQkGM3Wk_0\tcat\nE5wZ4pk5X0I_1\ttrain\nE59OnpOGBLU_0\tskateboard\nE6Am4hIuXvk_0\tperson\nE6Avey2AVRM_1\tperson\nE6A8vfHTdOQ_0\tperson\nE6EtoMfo384_0\ttrain\nE6EtoMfo384_1\ttrain\nE6GvpwdOQrw_2\ttrain\nE6GvpwdOQrw_3\ttrain\nE6GvpwdOQrw_8\ttrain\nE6JLxU918TE_0\tbicycle\nE6XGO0hx4N8_0\tperson\nE6Y2QsetU0M_1\tperson\nE6s0XT5G7Eo_0\tbird\nE6s0XT5G7Eo_1\tbird\nE6uGh-cPDjI_0\tbicycle\nE62w4NFSm5E_1\tdog\nE64d0EH39M4_0\ttrain\nE67ceZopcqQ_0\tperson\nE67ceZopcqQ_1\tperson\nE68IhhK04s0_1\tgiraffe\nE68IhhK04s0_2\tgiraffe\nE7BIM8cnCrc_2\ttrain\nE7BIM8cnCrc_0\ttrain\nE7F0Gt3Rea4_1\tperson\nE7F0Gt3Rea4_0\tperson\nE7LY2yKO0Jg_0\tknife\nE7MvCesCxNk_0\tknife\nE7dG4qPI_QY_1\tknife\nE7eYGQjaVYs_0\tbird\nE7hXPqOOiqo_1\tboat\nE7hXPqOOiqo_0\tboat\nE7qoCZ2e-vQ_0\tdog\nE7rhwzBxMqY_0\tperson\nE7zwjNToyao_0\tperson\nE70FO7I2AQ0_1\tperson\nE70FO7I2AQ0_0\tperson\nE76moy2SQhA_0\tperson\nE8JYTxKfqmQ_0\tboat\nE8OzYJ2gVAs_1\tbicycle\nE8OzYJ2gVAs_2\tbicycle\nE8OzYJ2gVAs_3\tbicycle\nE8RSSepY8tk_0\tperson\nE8R5lzlo5qw_0\tmotorcycle\nE8Xxr8SUaEY_0\thorse\nE8h4YnZbJg4_1\tperson\nE8n_eTUwyhc_0\tperson\nE8pbsHhMGOw_0\tperson\nE842T5CgJfk_0\tperson\nE854nPMWssI_0\thorse\nE8-Z9saoTjk_0\tperson\nE8_NjWtQtgI_1\tcar\nE9J2Brm4LSg_0\ttruck\nE9J2Brm4LSg_1\ttruck\nE9N59GTZ8uE_0\tperson\nE9R_qLxcZdY_0\tbird\nE9S5Tk5r2wU_0\tperson\nE9ZjM9SY__o_1\tperson\nE9ZjM9SY__o_0\tperson\nE9sCn_XaSHw_1\tbicycle\nE9sHGoiMmXc_0\tperson\nE
9zmtafFrCo_0\tdog\nE9-1FSPKZ7k_0\tperson\nE-DE7HZ04WY_1\tperson\nE-OdBMMpwlo_0\tumbrella\nE-VRMpgKXIE_0\telephant\nE-VRMpgKXIE_7\telephant\nE-VRMpgKXIE_1\telephant\nE-VRMpgKXIE_2\telephant\nE-VRMpgKXIE_4\telephant\nE-YDPyDXtR8_0\tcow\nE-h1XNBlqsE_0\tperson\nE-pnZZeRFyQ_0\tperson\nE-q9j7xipsA_0\tcat\nE-seUZ3B-Ts_0\tmotorcycle\nE-zFmY_9LWk_0\thorse\nE-0FMMDuLw8_0\tperson\nE-3jsRP7KHc_0\telephant\nE_En6n1IyBw_0\telephant\nE_GC0IeKtu4_0\tperson\nE_K6zdkr0mo_1\tperson\nE_Xi5uEIiec_2\tbicycle\nE_e6E8T7on0_0\tdog\nE_02tA9RLyw_0\tumbrella\nE_7qbAkVDYE_0\telephant\nFAKE4Rfwdik_0\tperson\nFATjlgllzBU_0\tperson\nFATjlgllzBU_1\tperson\nFAdlwBJZk78_0\telephant\nFAeK9y98GL8_0\tzebra\nFAiIhoJh5uQ_0\tcat\nFAm6HgSzPTA_0\tcow\nFAn11rZ-gsU_0\tperson\nFAqiar6B2U8_2\tbird\nFAu0yvyjW-Q_4\tboat\nFAu0yvyjW-Q_9\tboat\nFAu0yvyjW-Q_1\tboat\nFAu0yvyjW-Q_5\tboat\nFAx0CsAigS4_0\tmotorcycle\nFA_K15dKk6k_0\tbear\nFBAcUphtxR4_0\tperson\nFBIVWWIbq-8_1\tcow\nFBIawPqElJ8_0\tbus\nFBKIUCHqUQk_0\tskateboard\nFBKIUCHqUQk_1\tskateboard\nFBNFSYoMCNM_0\tbird\nFBOWbksU5pI_0\tperson\nFBOWbksU5pI_1\tperson\nFBjp-C_Sbug_2\tbicycle\nFBjp-C_Sbug_11\tbicycle\nFBnFn5mY2R0_0\tperson\nFBwWw9c4KdY_0\tperson\nFBz0aAYDBFI_1\tperson\nFB8F1ku1XkU_0\tperson\nFCBsCwjCPWU_0\tperson\nFCQB6p_GcDY_0\tperson\nFCRAvY0glAI_0\tairplane\nFCd1d_7Hfpg_0\tumbrella\nFCkT11nk468_0\tairplane\nFClLRpdDi9A_0\tperson\nFCnE02wQQk4_0\tbird\nFCp7AKKYViY_0\tperson\nFC-ONjCL7tM_0\tperson\nFC_gwQU4yrs_0\thorse\nFDJyHtHix-0_0\ttrain\nFDYS2AyPJhc_0\tperson\nFDZBIlbFrk0_0\tperson\nFDej1TTCjP0_0\tumbrella\nFDfaLuM3y5A_0\tperson\nFDkiv1x0OGQ_0\tcar\nFDq3yKNo4Qs_1\tperson\nFDvTPzckQKc_0\tmotorcycle\nFD3pT-lj2tc_0\tcat\nFEM7OGFO_BI_0\telephant\nFEN0F0V1nhg_0\tdog\nFEOAvRWKb-k_0\tairplane\nFEU4yHFzkZs_1\tperson\nFEU4yHFzkZs_0\tperson\nFEWZolQuMv0_1\tperson\nFEfYdrS3kFc_0\tbird\nFEjcdYO4xPo_0\tperson\nFEoFDmI0pxI_0\tairplane\nFEzBza78J4w_0\tperson\nFEzBza78J4w_1\tperson\nFE0DpZ9GXoM_2\tperson\nFE0DpZ9GXoM_0\tperson\nFE0DpZ9GXoM_1\tperson\nFE0Q5phKq3c_0\tperson\nFE0Q5phKq3c_1\tperson\nFE4gj8EYF9k_0\tperson\nFE51Dml-nZY_0\tperson\nFE7iv_llNT4_2\tbicycle\nFE-JTPLk3fI_0\ttruck\nFFCtm1GZH_s_1\tbird\nFFHJUeZ_KKE_1\ttruck\nFFHJUeZ_KKE_2\ttruck\nFFLxkwDj1b0_2\tbus\nFFME8B_6LNA_1\tmotorcycle\nFFQl2DLyjdk_0\tcat\nFFQl2DLyjdk_1\tcat\nFFantnd2gLY_0\tperson\nFFantnd2gLY_1\tperson\nFFd_4DPNyRI_0\tcar\nFFijp_s0YwA_0\tdog\nFFi3nSvA0WY_0\tperson\nFFjqbw4R9l0_0\tcow\nFFm26XU-R7c_2\tperson\nFFm26XU-R7c_0\tperson\nFFm26XU-R7c_1\tperson\nFFndlV1rKas_0\tairplane\nFFpyQ_5PU7M_0\tbus\nFF9eHa3K8fM_1\tbird\nFGO6y3WssIg_0\tperson\nFGQCxd5EAx0_2\tairplane\nFGQCxd5EAx0_3\tairplane\nFGQCxd5EAx0_1\tairplane\nFGcS28ri5uY_0\tperson\nFGdEufjjhtg_0\tperson\nFGicL13npRI_0\tperson\nFGkNC4hzcfM_0\tperson\nFGkx6qk4oDk_0\tperson\nFGmjmDC1RoU_1\tskateboard\nFGmjmDC1RoU_0\tskateboard\nFGoutavzP5Y_0\tperson\nFGqrkJ3h0DA_0\tskateboard\nFG0PrdHReB0_2\tperson\nFG5l2wX8ccA_0\thorse\nFHAj71IwE7E_0\tskateboard\nFHA6nVCnv28_0\tperson\nFHB5eraeYEw_1\tknife\nFHJupOaUmtQ_1\ttrain\nFHOLOunv9Ec_0\thorse\nFHTc_V_05W0_1\tbird\nFHT1DAZpJVY_0\tcow\nFHZ-3pbJQrY_0\tbird\nFHgO4zu5RGA_0\tperson\nFHu50D73Fzo_0\tperson\nFIA67WzAuNs_0\tbear\nFIA67WzAuNs_1\tbear\nFIB12MYkANg_1\tbear\nFIDI0sZMPVU_0\tperson\nFIGhnuJWX5M_0\tperson\nFIGhnuJWX5M_2\tperson\nFIHYnB8Jrh4_0\tperson\nFIMbYQASgkk_0\thorse\nFIQ1iL3jVkM_0\tgiraffe\nFIV4OFmfS_s_0\tperson\nFIV4OFmfS_s_1\tperson\nFInOWVIV_go_0\tmotorcycle\nd1N4NJqa_8E_0\tperson\nd1PqtOyYTY0_0\ttrain\nd1PqtOyYTY0_1\ttrain\nd1PqtOyYTY0_2\ttrain\nd1Quy8k5O88_0\tcow\nd1UWs3bPTsc_0\tperson\nd1UWs3bPTsc_1\tperson\nd1YY
giXq3tw_0\tperson\nd1YYgiXq3tw_1\tperson\nd1bzn92PO0c_0\tperson\nd1eo2OWc45Q_0\tcow\nd1tf08A41eo_0\tperson\nd1ukwE8h4f8_0\thorse\nd1wbMXvcgNc_0\tperson\nd1wlubAM1-k_0\tperson\nd10K79pdybE_3\ttrain\nd14rOFFvTg4_0\tperson\nd14rOFFvTg4_2\tperson\nd14rOFFvTg4_1\tperson\nd165nDy63o8_0\tbicycle\nd165nDy63o8_1\tbicycle\nd17kaiZ5Ztc_0\tperson\nd2DRRd9l3TI_0\tperson\nd2RD5tyZt6c_0\tperson\nd2TxcbWHoBM_0\tcat\nd2WfBDEMf40_0\ttruck\nd2ZGi2fOtPY_0\tperson\nd2cDVorBK8s_0\tairplane\nd2cDVorBK8s_1\tairplane\nd2e49A9MnF4_0\tperson\nd2lSueNvuG4_0\thorse\nd2ns5iCGj78_0\telephant\nd2sn_b1z1Vw_0\tperson\nd2wHwCwQymw_0\tperson\nd2zgNRFDpSw_1\tbird\nd203fSHLzv8_0\ttrain\nd21TfucuHss_0\tumbrella\nd217pENbZVs_0\tperson\nd28DHw2okF8_0\tperson\nd3F_Gm514J0_2\telephant\nd3G8COtsJco_0\tperson\nd3MN8Sm5tiY_0\tperson\nd3MVAijPTjY_0\tmotorcycle\nd3P2bH2t8IQ_0\tperson\nd3Wdg9MPgLA_1\tskateboard\nd3Wdg9MPgLA_0\tskateboard\nd3duKA35FEI_0\tperson\nd3jP_YP-6EQ_0\tperson\nd3ro5gubiaQ_0\tperson\nd3ro5gubiaQ_1\tperson\nd3rzFaWiWwA_5\ttruck\nd3sHFgbvhIU_0\tcar\nd33yoN6QyYg_0\tbus\nd36tDEgs-IA_0\tperson\nd4A2uUrnVWI_0\tperson\nd4Cumy6qZPY_0\ttruck\nd4DbIWORtjY_0\tperson\nd4DbIWORtjY_1\tperson\nd4GvMFc_Vqg_0\tknife\nd4Le0GuzhaY_0\tskateboard\nd4QkJdQwkCo_0\tmotorcycle\nd4VJot5IZek_0\tperson\nd4VJot5IZek_1\tperson\nd4WRTfC57h0_0\thorse\nd4b9-LX5V1s_1\tcow\nd4hB6abJCs8_0\tperson\nd4mhHPSo7C8_0\tskateboard\nd4q-0AcOs78_0\tperson\nd4vhL4dar5s_0\tgiraffe\nd4vhL4dar5s_1\tgiraffe\nd45YTUkd_9M_0\tperson\nd47DPSbvftI_0\tperson\nd484zxSSkJM_1\tperson\nd4_lDGwny4k_0\tskateboard\nd5Ao3JBz7WM_0\tperson\nd5B0EMjLeZE_0\tperson\nd5PBtpn_6JQ_0\tperson\nd5gDBPwofbs_0\tperson\nd5gDqlNLGmw_0\tperson\nd5hj8eaC5fQ_0\tperson\nd5jIlHa1Y6o_0\tcow\nd5m8giMORSk_0\tperson\nd53_McJDtt4_0\tperson\nd55FAEl6kfM_0\tcat\nd55rz05ynyg_0\tairplane\nd6AkvjKCaE0_0\tperson\nd6AkvjKCaE0_1\tperson\nd6TWHVESLa8_6\tcow\nd6TWHVESLa8_5\tcow\nd6VCXnnHXGQ_0\tperson\nd6VCXnnHXGQ_1\tperson\nd6YTAD3T2i8_0\tperson\nd6a2EN1cB-4_0\tperson\nd6cgbxc35Ms_0\tperson\nd6mM21E4x-4_0\tumbrella\nd6m3DUG5E7Y_0\tperson\nd6uLbEhrIvw_0\tairplane\nd65wDJoMyA8_0\tperson\nd67YXl13SSo_0\tperson\nd6-bn34gHFc_0\tperson\nd7H5qLPNFz0_1\telephant\nd7cwZ3G7xSU_0\tbird\nd7kWNGqyvRk_0\tperson\nd7mQdSSoZ2E_0\tperson\nd7m0BF65qro_1\tperson\nd7m0BF65qro_2\tperson\nd7n5m9UuhP4_0\tperson\nd7n5m9UuhP4_1\tperson\nd7yxmt8AvOM_0\tperson\nd7yxmt8AvOM_1\tperson\nd71rdGKeKkE_0\tperson\nd74EhPMCxb0_0\tperson\nd7-3m4Nz8fk_0\thorse\nd8CJ5urtRlk_0\ttrain\nd8HIJN0pULI_0\tperson\nd8XcNMVXCD8_0\tbicycle\nd8b-SN3JEvk_0\tperson\nd8dPRbquLuM_1\tperson\nd8dPRbquLuM_0\tperson\nd8t8y3kLzgc_0\tperson\nd84iekZaJHc_4\tknife\nd9JyT5Kko5c_0\tperson\nd9LvxSh5P-Q_0\tperson\nd9OaiymMq0w_0\tperson\nd9PCSJzZTy8_0\tperson\nd9Pj3WrvXXc_1\tperson\nd9S0dKjWhNU_0\tperson\nd9S0dKjWhNU_1\tperson\nd9S0dKjWhNU_2\tperson\nd9YlucRFs0U_0\tperson\nd9cSZXEb_5E_1\tperson\nd9dysX9rdmA_0\tskateboard\nd9hh6urZ5FU_0\ttrain\nd9kzobAaimY_0\tmotorcycle\nd9lIw5maa3M_0\tperson\nd9qijNyVVmU_0\tperson\nd95k-74VSVE_0\tcow\nd-JD-mAXyIA_0\tdog\nd-Mnc38YAmw_8\ttruck\nd-OQw6tKhuM_0\tknife\nd-S3AmiMI1s_0\tcar\nd-e8mKtYWjk_0\tperson\nd-e8mKtYWjk_2\tperson\nd-e8mKtYWjk_1\tperson\nd-fv8fmGSlY_0\tperson\nd-hMPjLP2WE_0\tbicycle\nd-hMPjLP2WE_1\tbicycle\nd-hgDDQ3kwg_0\tperson\nd-h6ncywZ58_1\tperson\nd-h6ncywZ58_0\tperson\nd-oFe9Z0Obs_0\tdog\nd-oFe9Z0Obs_1\tdog\nd-rpsQgR8sw_0\tperson\nd-22m5Sq5OU_0\telephant\nd-5xdAZSjX8_2\tskateboard\nd--9RMf5LCA_1\tboat\nd_AudyfCYzg_0\tcar\nd_EP2nM4YMw_0\tbus\nd_ElAbuvxGQ_0\tdog\nd_ElAbuvxGQ_8\tdog\nd_SB-LVXyi0_1\thorse\nd_SmnRMWLD8_0\tdog\nd_S0JC
KcFCg_0\tcow\nd_hsQ2L-klo_0\tperson\nd_nTA-SKHNM_2\tknife\nd_nTA-SKHNM_6\tknife\nd_ocJQiPpn0_1\tskateboard\nd_vnePeLmwI_0\tperson\nd_2HhXHP8fg_1\tcow\nd__UUbvo2t4_0\tperson\neADPEBi8wWs_0\tcar\neADPEBi8wWs_1\tcar\neADqJI9JKq8_0\tperson\neAFdLVF01GU_0\tbicycle\neANH6WnEpPs_1\tperson\neAPcJi7CaBw_2\thorse\neAPcJi7CaBw_1\thorse\neARl2H_FaEU_0\tcat\neAXN0KAt66I_0\tperson\neAXN0KAt66I_1\tperson\neAYoRncVO74_1\tperson\neAZbke5Perk_0\tcar\neAfmOFI5jUM_0\thorse\neAsHKktPNSo_1\thorse\neAvDt4p-AvA_1\tknife\neA3lmhfjTuM_0\tcow\neA5hiUXY2_Q_4\tairplane\neA5hiUXY2_Q_6\tairplane\neA7FV9uQbYw_0\tbus\neA8fIAfGi5k_0\tperson\neBB5vRA9JPE_0\tknife\neBHEKUkaBcI_0\tbird\neBLisw9b8i8_0\tcow\neBLisw9b8i8_1\tcow\neBLisw9b8i8_2\tcow\neBMqhmQr7vI_0\tbicycle\neBRcZ5KDeEA_0\tknife\neBgLKDW3lH4_0\tperson\neBmdALv9WEE_0\tperson\neBwWJ_geg4Q_0\tknife\neBy554vRg9M_0\tperson\neB83_xIotrw_0\tbicycle\neB83_xIotrw_3\tbicycle\neB_ZHbAvx-c_1\tperson\neCInOWr32gc_0\tdog\neCNG8qj36vs_0\tcow\neCSzfVb87kI_0\tperson\neCUuH2vPeDI_0\tperson\neCWhtTVetLA_0\tumbrella\neCeVtq40bcM_4\tbus\neCf8h359-j0_0\tbus\neClBvJnyYa4_0\ttruck\neCmgHa6ThE4_1\tperson\neC3Fwv7Uows_0\tperson\neC-5SEhAGvo_0\tcow\neC_fRVwxsiI_0\tperson\neDJamx945Ao_0\telephant\neDJamx945Ao_1\telephant\neDSAGlcfwKA_0\tperson\neDSmePW-Vrg_0\tperson\neDXqzj7vKFI_0\tmotorcycle\neDX2HUt9ttU_0\tperson\neDX2HUt9ttU_1\tperson\neDuzDDESzU0_0\tperson\neDwjZL3IGqM_0\tperson\nFIrviDrZriY_0\tbus\nFI2T176uKi4_0\tperson\nFI4oF175yHo_0\tcow\nFJCE3uzu0i4_0\tdog\nFJL5lb3wBKI_0\tairplane\nFJPRJ0A8BII_0\tboat\nFJVcRzA_pdI_0\tairplane\nFJdcStnbgU0_0\tperson\nFJl_FwYbg8s_1\tknife\nFJmyu27Omwk_0\tperson\nFJsMdQrRgFs_0\ttrain\nFJvHbRGgbXM_0\tgiraffe\nFJvSXVq8PPk_0\tbicycle\nFJxbfz8q8Qw_0\tperson\nFJzU4eC5GiI_0\tperson\nFJ5jeLsVXys_0\telephant\nFJ5jeLsVXys_1\telephant\nFJ7oeGn4dBM_0\tcat\nFKGFVLnchKE_0\tskateboard\nFKGFVLnchKE_2\tskateboard\nFKGFVLnchKE_4\tskateboard\nFKGFVLnchKE_5\tskateboard\nFKKoXDLhFjo_0\tperson\nFKMCYA2_RMs_0\thorse\nFKMCYA2_RMs_2\thorse\nFKMsbMSiqrQ_0\tperson\nFKTETXdoJjk_0\tperson\nFKVxjU1kTMM_0\tperson\nFKWzB37H8-E_0\tcow\nFKdcZ0D4-K8_4\thorse\nFKdcZ0D4-K8_1\thorse\nFKhBf2FcrKE_1\tperson\nFKhBf2FcrKE_0\tperson\nFKnj73Wv84c_0\tumbrella\nFKsZiccYt_g_0\tperson\nFKwKsWjLhiI_0\tperson\nFKzvgRVfOjM_1\thorse\nFKzvgRVfOjM_5\thorse\nFK0ezSvbg7o_0\tdog\nFK37T3KvNUU_0\tcow\nFK8OxK802HI_0\tperson\nFLF92L3WRrs_0\tperson\nFLF92L3WRrs_1\tperson\nFLQzeGFBo2I_1\tbird\nFLQzeGFBo2I_2\tbird\nFLTewjXG6Wc_1\tperson\nFLTewjXG6Wc_2\tperson\nFLTewjXG6Wc_0\tperson\nFLWAw0tGOo8_2\tbicycle\nFLWAw0tGOo8_3\tbicycle\nFLZeutEdtzU_1\thorse\nFLqZVv798FE_1\tperson\nFLqZVv798FE_0\tperson\nFLq3zU7UtgQ_0\tskateboard\nFLr23Hv4LfE_0\ttrain\nFLr23Hv4LfE_2\ttrain\nFLskMa3WD7M_0\tperson\nFLyV4pkEHUg_0\tperson\nFL1q74zVLvo_1\ttruck\nFL8ulwhcOho_1\tcar\nFL-QttmKDc0_0\tairplane\nFL-73OGqifE_0\tcat\nFL_DeYOGkaU_2\thorse\nFL_DeYOGkaU_0\thorse\nFMHc-oH_rOE_0\tperson\nFMTZga_deFY_0\tdog\nFMig7WOUQyU_1\tbear\nFMig7WOUQyU_2\tbear\nFMv3NfETfq4_0\tbicycle\nFMv3NfETfq4_1\tbicycle\nFNCMx4Aum_M_1\tmotorcycle\nFNJmejn3KNQ_0\ttruck\nFNJmejn3KNQ_3\ttruck\nFNJmejn3KNQ_5\ttruck\nFNKJAi0Xbz0_0\tperson\nFNNdAL0qtWM_0\thorse\nFNSpSfZSQfE_0\tperson\nFNbjJJgHt6c_1\tperson\nFNgfcu9JUHA_0\tcow\nFNjDy-du_gs_0\ttruck\nFNv5k4sCs5k_0\tperson\nFNxfPhr1AZk_0\tperson\nFN1B1veyxCQ_0\tcow\nFOAmP97Gboo_0\telephant\nFOAmP97Gboo_2\telephant\nFOL80Pq_HSs_0\tcat\nFOXwGm4ddCk_1\tperson\nFOacAsl9vUM_1\tbird\nFOnRpTgHAdI_0\tperson\nFOyA2uyFS0s_0\tcar\nFO-yhRhInHQ_0\tmotorcycle\nFO_sYJabdgQ_1\tbird\nFPBkLbjkE0I_1\tperson\nFPC9a1ebnRk_0\tperson\nFPFEZjz68RM_0\
tperson\nFPHxPqZ9of4_0\telephant\nFPIVRAQI9Ao_1\tairplane\nFPS-rWu8sfw_0\ttruck\nFPdj2aDA2Is_0\tperson\nFPd8NgysFbw_0\tperson\nFPhiHYzZrc8_2\tbird\nFPmbKUp9Apc_0\tperson\nFPoBK2S6-kE_0\telephant\nFPpdaMeuTPM_0\tperson\nFP-joReSPjM_1\ttrain\nFP-joReSPjM_4\ttrain\nFQBe4ewvq3k_0\tbus\nFQDYCsUTzLU_0\tperson\nFQIKRtrwRJU_0\tperson\nFQKMItJWON8_4\tbicycle\nFQNa7v1nuHs_0\tbird\nFQNa7v1nuHs_1\tdog\nFQPeEa0PIhY_0\tperson\nFQQ5mFLQS_8_0\tairplane\nFQTA_Rs2r4k_0\tairplane\nFQa2-poPUOQ_0\tperson\nFQiI3CA-HsU_2\tperson\nFQiI3CA-HsU_0\tperson\nFQiI3CA-HsU_1\tperson\nFQnnRHyzLcE_0\tboat\nFQyvUPmvsSo_0\tbus\nFQ0G5VjpRO8_0\tcat\nFQ09pTeRKXM_0\tperson\nFQ8nNpJodyM_0\tperson\nFQ_PnAPHimg_0\ttrain\nFQ_YvOmwGng_1\tskateboard\nFQ_YvOmwGng_2\tskateboard\nFQ_YvOmwGng_0\tskateboard\nFRBmAObAjLg_0\tumbrella\nFRCsksZQW0g_0\tmotorcycle\nFRFZtNbUMfU_0\tperson\nFRFZtNbUMfU_1\tperson\nFRKbwt_HIJY_0\tcat\nFRUF5D_Bg4I_0\tboat\nFRZeTLb7R70_0\tperson\nFRcpw1KTh4w_0\tskateboard\nFRh68K9peM8_0\tknife\nFRs6gVga80M_2\tairplane\nFR0IeE_jWVE_1\tperson\nFSCpm1kxTIE_0\tumbrella\nFSJSVNwlHck_0\tperson\nFSSrkLtKRBk_0\tperson\nFSchPfgxMmk_0\tperson\nFSmTDuGYKRo_0\tperson\nFSrvVBrHdIY_0\tperson\nFSrvVBrHdIY_1\tperson\nFSs-_cK-4DE_1\tbird\nFS8ZnDA42Xg_0\tzebra\nFTHxfldxSrg_0\tperson\nFTlLAXuBE2M_1\tperson\nFTlLAXuBE2M_2\tperson\nFTlLAXuBE2M_0\tperson\nFTr8b641J_g_2\tzebra\nFTr_sg-tAYA_0\tperson\nFTr_sg-tAYA_1\tperson\nFT7LfULOrmU_0\tperson\nFUNI1-oxWb0_0\tperson\nFUPer2xPyRM_0\tperson\nFUQokq7Dm_0_0\tbird\nFUWPXNKt90g_0\tskateboard\nFUcLObUwigo_1\tperson\nFUcQGevNVQs_0\tperson\nFUp8cy7p6kc_0\tperson\nFUt-f-8QJmk_0\tboat\nFUzb9oSwhq4_2\thorse\nFU63gEB5T14_0\tperson\nFU63gEB5T14_1\tperson\nFU-Gyo-nX8w_0\tperson\nFU-Gyo-nX8w_1\tperson\nFVGYeJ_eKRY_0\tperson\nFVGYeJ_eKRY_1\tperson\nFVSihamjW0c_0\tperson\nFVcaEg-4Saw_0\tairplane\nFVm133076uE_0\tperson\nFVxqyMXxbTg_0\tperson\nFVxqyMXxbTg_1\tperson\nFVyZRq7FJUM_2\tperson\nFVyZRq7FJUM_0\tperson\nFVyZRq7FJUM_1\tperson\nFWAdovzWBpk_0\tperson\nFWCxpF5CAAo_0\tperson\nFWH6qzGM4Ko_0\tcat\nFWTx-_C46YA_0\tdog\nFWVW97tTSiI_2\tskateboard\nFWZANVS2JwI_0\tbird\nFWbVfjbC570_0\ttrain\nFWd_KJNB1hY_0\tperson\nFWeJwZsAuq4_3\tknife\nFWiwkCVxsvU_0\tairplane\nFWpcgznz11Q_0\tknife\nFWqFrwl7d-g_0\tairplane\nFWqFrwl7d-g_2\tairplane\nFWuSKVVP9Gw_0\tairplane\nFXPnVqm98h8_2\tcar\nFXbqlcQOm4U_1\tcar\nFXcjcGBH8uA_0\tairplane\nFXdP8V2Fyag_0\tbus\nFXdevKY06to_0\tbus\nFXjUPTGnrIk_1\tperson\nFXjUPTGnrIk_0\tperson\nFXrzFKXFtUE_0\tskateboard\nFXvqDQa0_pw_0\tbus\nFXz3PiouB_s_0\ttruck\nFX7DATABx3o_0\tperson\nFYPRZ3A5Wug_1\thorse\nFYQxEw6enVw_0\tknife\nFYR_8E37mhY_1\tboat\nFZJlwJ_5CIY_0\tperson\nFZJ0L36775Q_0\tbear\nFZOwW_igs2Q_0\tperson\nFZUo3m0w40U_1\tboat\nFZXz9ivLbZE_1\tperson\nFZfD0ASOr-0_0\tperson\neD5a0lOEA4c_0\tperson\neD5_C8Rnll0_1\tcow\neD9mxZpbjpo_3\tknife\neEBoNITml_U_2\tairplane\neEBoNITml_U_5\tairplane\neEKY2ZIJ7cw_0\tperson\neEKY2ZIJ7cw_1\tperson\neEUzIzmFpmg_0\tdog\neEZirBqUuUc_0\tcow\neErb9l8tm9Q_1\tperson\neEwALO20qQs_0\tmotorcycle\neEzaprIjPOA_1\thorse\neE7zgmIkklg_0\tperson\neE_bJ6JguBg_0\tperson\neE_bJ6JguBg_1\tperson\neFDTDuBtPdg_1\telephant\neFIUN94eOFY_0\tskateboard\neFKWB3vWXzM_0\tperson\neFNnJotKCuE_0\tdog\neFQAqsrxJIk_1\tcow\neFQAqsrxJIk_0\tcow\neFYXRQfFBFk_0\tperson\neFYi8GYHOwc_0\tbus\neFYi8GYHOwc_2\tbus\neFYi8GYHOwc_1\tbus\neFbHzEjDjsQ_0\tperson\neFbHzEjDjsQ_1\tperson\neFbOmylKLps_0\tbicycle\neFbOmylKLps_1\tbicycle\neFbOmylKLps_2\tbicycle\neFbOmylKLps_3\tbicycle\neFbOmylKLps_5\tbicycle\neFbmkhM4yvA_1\tskateboard\neFeLxXgEWb4_9\tairplane\neFeLxXgEWb4_10\tairplane\neFeLxXgEWb4_19\tairplane\neFkMiDqxNNg_0
\tperson\neFn7qz_Ik-g_1\tbicycle\neFsEtWFKOCE_0\tperson\neFsEtWFKOCE_1\tperson\neFsJVO58dOk_0\tmotorcycle\neFsJVO58dOk_1\tperson\neFtXO4KQyP0_0\tperson\neF6vo2K3X7Y_1\thorse\neGANqnJQvcA_0\tperson\neGEeIkSKn9I_0\tperson\neGFxLRdHt9o_0\tperson\neGIMcDTDuZI_2\tgiraffe\neGKe_SHbpew_0\tdog\neGLaqISw-ZU_0\tcow\neGXX9n0KkAw_0\ttrain\neGavpqx_a-Y_1\tperson\neGeSgNqD64Q_0\tcat\neGp90l6AeQM_3\thorse\neGp90l6AeQM_4\thorse\neGp90l6AeQM_6\thorse\neGp90l6AeQM_7\thorse\neGp90l6AeQM_1\thorse\neGp90l6AeQM_2\thorse\neGsO1ybeNmw_0\tperson\neGulNc3Hz6E_1\tperson\neGw-BT7HLw0_0\tperson\neGx11vRzfMI_0\tperson\neG420j0UncU_0\tcat\neG9ay7ouawQ_0\tboat\neG_gCk-NdFc_0\tbicycle\neHFxA8eOkKo_1\tdog\neHJOSAF8Ksc_0\tboat\neHMokGJS_8k_0\tbird\neHPZiFRZgH8_0\tperson\neHS3e7Drwlw_0\thorse\neHYl5vL9urI_0\tperson\neHYl5vL9urI_1\tperson\neHZGFVBiNbM_0\tperson\neHhu8cP6sYY_1\ttruck\neHlKAc_jO3w_0\thorse\neHlKAc_jO3w_1\thorse\neHmn6jMH470_0\tbicycle\neHo7GgOz-4M_0\tbicycle\neHo7GgOz-4M_1\tbicycle\neHpMDoo4x9o_0\tperson\neHpMDoo4x9o_1\tperson\neHrYu8_xQuI_3\tairplane\neHuFhF5mn60_2\tdog\neHuHorwvDFE_0\tperson\neH-lfDuzZRU_0\tperson\neIbRJYX77lM_1\tperson\neIceWO1K4hg_1\tknife\neIlLo4L0TBY_0\tperson\neIm2mZqCQIU_0\tperson\neItSvz_9tc8_1\thorse\neI5A6Q8wsk8_0\tperson\neJGswWs5a_U_0\tperson\neJJBtIMsces_0\tcat\neJNeGPvJZBs_0\tperson\neJN7jtqxGc0_1\tperson\neJO3ahTuQlg_2\tknife\neJTzEdYt2KA_0\tperson\neJTzEdYt2KA_1\tmotorcycle\neJZyuG0FB0M_1\tperson\neJg7Dq1HzW8_1\tperson\neJi66YisQnM_0\tcat\neJnTGfqwSKw_0\tperson\neJntPRQdD6A_0\tcow\neJntPRQdD6A_3\tcow\neJxFV3LV_-o_1\telephant\neJzkkZWgmiM_0\tperson\neJ2omVOUJv4_0\tperson\neJ4AprAxRh4_0\tairplane\neJ4AprAxRh4_7\tairplane\neJ4AprAxRh4_5\tairplane\neJ9q5sR4oiE_1\ttrain\neJ9q5sR4oiE_3\ttrain\neKBgCy3izjg_0\tperson\neKCONra70xU_1\tperson\neKGFKx5vbJw_1\tbird\neKGFKx5vbJw_2\tbird\neKJMggclbAI_0\ttruck\neKYCRb3cMSc_0\tcat\neKcN648xBxg_0\tcow\neKdNbqJsxIY_1\tcar\neKirxEVv1N4_1\tgiraffe\neKpHpiZZSOY_0\tmotorcycle\neKsu0SXh0Cg_0\tgiraffe\neK5wkhSqhQg_0\tperson\neLAIclbgwtw_1\tmotorcycle\neLAIclbgwtw_2\tmotorcycle\neLCZ9U490do_0\tperson\neLK_O-E6TXY_0\tcow\neLLFV2_GBOs_1\tcow\neLLFV2_GBOs_4\tcow\neLLFV2_GBOs_5\tcow\neLLFV2_GBOs_0\tcow\neLLFV2_GBOs_3\tcow\neLRLhwJpaKE_0\tperson\neLXWvZhL6g4_0\tcat\neLfUxNIWQn8_0\tcat\neLsJ-MoKt-c_0\tmotorcycle\neLzEA8IlB5E_0\tcat\neL2OKu4DhkM_1\tbear\neL-v_R-bG30_0\tskateboard\neMJ8eEFu7lo_1\tcar\neMJ8eEFu7lo_3\tcar\neMN980Fn4Kc_1\thorse\neMQEyMimXFU_0\tcat\neMWM---NOF0_0\tperson\neMcgmNHMY_g_0\tperson\neMdVb5oIUWc_0\tperson\neMgUOtsKC0w_0\ttrain\neMsSwXfIf7o_0\tperson\neMv2h_s0LpQ_1\tskateboard\neMwSfQmonxM_0\tbird\neM5e2PBO5hY_0\tgiraffe\neM-1RwyzQpI_1\ttruck\neM-1RwyzQpI_4\ttruck\neM-1RwyzQpI_5\ttruck\neNDHGq_Vm3A_0\tperson\neNEaC09BQF8_0\tperson\neNG3je3HCHI_0\tperson\neNG3je3HCHI_1\tperson\neNIXfUjWW10_0\tbus\neNSkFxbG_L0_0\tskateboard\neNTeTVBDq8U_0\tperson\neNVGmOIKNII_0\tskateboard\neNVGmOIKNII_2\tskateboard\neNYeXwUr7rY_0\tskateboard\neNbwp7DEy6A_0\tdog\neNbwp7DEy6A_1\tdog\neNlXrdcWYPA_0\tperson\neNllsU_utBs_0\tgiraffe\neN0ufEmLTDM_0\tperson\neN3a3uFzNxw_0\tperson\neOJorgJNcl4_1\tcar\neOMSAOLQMc0_0\tperson\neOMro57lp5o_0\tbicycle\neON5oS1ddkA_2\tknife\neOXMKiuur7c_0\tperson\neOZ2mMo0l60_0\tperson\neOe9DskHw1g_4\tairplane\neOe9DskHw1g_3\tairplane\neOhLZkf2gyQ_0\tperson\neOj2KctQDKQ_1\tbear\neO0M1RCeWaA_0\tdog\neO9s3APOXdI_0\tbear\nePDBmIR0Mnk_1\tbear\nePEoVXrSERQ_0\tperson\nePPnXOa8FII_0\tmotorcycle\nePWPPUSuctk_0\thorse\nePWPPUSuctk_2\thorse\nePWPPUSuctk_3\thorse\nePaqZZz_gtY_1\thorse\nePgL4a_1DcI_0\tperson\nePgqzaxKKo8_0\tperson\nePhchRaBs-k_1\tair
plane\nePhchRaBs-k_2\tairplane\nePjAF53eBSA_0\tperson\nePkzyffCJhs_0\tperson\nePli_zXbgF4_5\tbear\nePli_zXbgF4_1\tbear\nePli_zXbgF4_2\tbear\nePli_zXbgF4_3\tbear\nePli_zXbgF4_4\tbear\nePoC0Pj8xLA_2\tperson\nePo6J3guHBw_0\tperson\neQA0KwcbJlQ_0\tperson\neQI72zFfl34_0\tcow\neQI72zFfl34_2\tcow\neQMmOyBJUaA_0\tperson\neQOqA8LeUOU_1\ttruck\neQOqA8LeUOU_2\ttruck\neQOqA8LeUOU_8\ttruck\neQS3V0HV61g_0\tperson\neQTlUSSbOyY_0\tperson\neQWRQaVSPT8_0\tskateboard\neQXSsw2MJGk_0\thorse\neQZEFoxVGuY_2\tperson\neQZOAGlSYBc_0\tperson\neQcocP3auyk_0\tcar\neQfbBM_c96I_0\tknife\neQfbBM_c96I_1\tknife\neQi8AZ4DQO4_0\tairplane\neQjFi5iBL-c_0\tskateboard\neQl0Q82jNOY_0\tcat\neQmSzg2ZEpw_0\tperson\neQmSzg2ZEpw_1\tperson\neQoRdZR8_q8_0\tperson\neQpbjnMSNLE_1\tbus\neQ1R5EruVgo_0\tbird\neQ1R5EruVgo_1\tbird\neQ2eWzgVggo_0\tperson\neRAZ8LnDRN4_0\tperson\neRBc8OmROx4_0\tcat\neRCMzS-dM8o_0\tperson\neREzhoz4UA8_0\tbicycle\nFZieBxFsZO4_4\tbird\nFZieBxFsZO4_7\tbird\nFZieBxFsZO4_8\tbird\nFZieBxFsZO4_11\tbird\nFZsDQUdCBiE_0\tperson\nFaINra3PYko_0\tbus\nFaINra3PYko_2\tbus\nFaINra3PYko_1\tbus\nFanmFyCIvSc_1\tskateboard\nFaxr0F1n4lk_0\tperson\nFa8JS9CCs60_0\tperson\nFbC6M7cRN1k_0\tperson\nFbLE0CqDZ_I_0\tperson\nFbN-_RdBAoA_0\tperson\nFbRfH2tJCZg_0\ttrain\nFbUasHXeVXg_1\tperson\nFbVrmfwHLD8_1\tcar\nFba1mHso_c8_0\tperson\nFbcl3O89qPI_0\tperson\nFbryy4ItyRo_0\tmotorcycle\nFbsxP5HIH-w_0\tperson\nFbtbQbo3w6A_0\tperson\nFbtbQbo3w6A_1\tperson\nFbtbQbo3w6A_2\tperson\nFbzdX2M1spw_0\tperson\nFb9GVgZUQkk_1\tbird\nFb-bT-5HFvo_1\tperson\nFb-bT-5HFvo_0\tperson\nFcAKq2q6WuI_0\tperson\nFcGoc7P1MnA_0\tairplane\nFcHZFDzsW6U_0\tperson\nFcI2xE1s0tE_0\tperson\nFcJofbjqKR0_0\tperson\nFcNTnULQ914_0\ttrain\nFcPxUMks1f8_0\tairplane\nFcQ9ypCnsnM_3\telephant\nFcdE5l-9Cl4_0\tperson\nFcfkxe_EegE_3\tskateboard\nFckxSGw75TA_8\telephant\nFckxSGw75TA_1\telephant\nFckxSGw75TA_3\telephant\nFckxSGw75TA_4\telephant\nFckxSGw75TA_6\telephant\nFcmq6FVlPrs_2\thorse\nFcyT7NFOtOU_1\ttruck\nFczLlZB8PPQ_0\thorse\nFdBcdDQa2Yc_0\tperson\nFdG3QrZtdYo_0\tperson\nFdM1BVOZnpc_0\tperson\nFdM1BVOZnpc_1\tperson\nFdYZH48B1gQ_0\tgiraffe\nFdYpikKc6Rk_0\tperson\nFdcxQx4sFow_0\tperson\nFdgWx-kasEQ_0\tcar\nFdgw87Au0kg_0\tperson\nFdp1t1Kk42s_0\tperson\nFdvgBe0Ix0A_0\tperson\nFdvgBe0Ix0A_1\tperson\nFdviMb1gxkI_0\telephant\nFdyA9CQ40Xo_0\tcat\nFd1Rn6HvibQ_0\tbus\nFd1ZmuLPSNA_2\ttruck\nFd1ZmuLPSNA_0\ttruck\nFd1ZmuLPSNA_1\ttruck\nFd1ySlMqOEk_0\thorse\nFd6kpMD00LI_0\tperson\nFeAmji-BcLE_0\tskateboard\nFeHGwC6UYlQ_1\tperson\nFeHGwC6UYlQ_0\tperson\nFefqZU-M3NQ_0\tdog\nFeioRbELmKY_0\tcat\nFel-MqoIa98_0\tperson\nFenJI9gPekk_0\tperson\nFevOpclGxX8_0\tperson\nFe0XVxKTD10_0\tperson\nFe1ne3adKqs_0\tperson\nFe1o0fdRyjk_0\tperson\nFe1o0fdRyjk_1\tperson\nFe1o0fdRyjk_2\tperson\nFe_r1BcuOm8_0\tairplane\nFfCfKve9svg_0\telephant\nFfGzM6IRg6I_0\tperson\nFfTyXxo_JLY_0\thorse\nFfWtRI5MlvQ_1\tperson\nFfWtRI5MlvQ_0\tperson\nFfddIx2fdDE_0\tperson\nFfkcxMLN90Q_1\tperson\nFfkcxMLN90Q_0\tperson\nFfpScNxcfaE_0\tperson\nFfpuED53W2w_0\tperson\nFf3kCsp4dss_0\thorse\nFf37VadXulw_0\tperson\nFf37VadXulw_1\tperson\nFf-s3k4nzl0_0\tcow\nFgAW1wm55t4_0\tumbrella\nFgBAfHhZDtY_0\tcow\nFgCkJ9L956k_2\thorse\nFgHkoen3Fbs_0\tperson\nFgHkoen3Fbs_1\tperson\nFgHkoen3Fbs_2\tperson\nFgK205YdiNI_0\tzebra\nFgaH6B8Im-s_1\tperson\nFgh-oweWR10_1\ttruck\nFgh-oweWR10_2\ttruck\nFgh-oweWR10_5\ttruck\nFgkgjnYWuvc_0\tmotorcycle\nFglWoBFeCGs_0\tboat\nFgqe5FVDM7w_1\tbus\nFgqe5FVDM7w_3\tbus\nFgtxhgrL-1s_0\tbicycle\nFhAkQ-D6j7M_1\tperson\nFhNe0p3NvAk_0\tperson\nFhS2OrbfOqA_0\thorse\nFhTIUIB4MQk_0\tperson\nFhdb7UXlKgw_0\tperson\nFhhQQi3XBRs_0\tperson\nFhim9z
q_3dc_0\tdog\nFhtl-JSkWvY_1\tskateboard\nFh1QSbERb_I_0\tperson\nFh1jlYGKYy8_0\tcow\nFh2wm1SuBlM_0\tperson\nFh5hapK4iY0_0\thorse\nFh-e1BaovqE_0\tperson\nFiAj5FRP_QI_0\tbear\nFiAj5FRP_QI_1\tbear\nFiAj5FRP_QI_2\tbear\nFiGZEZ8BFeg_0\tperson\nFiLeL7fMtKI_0\tperson\nFiMl9o33Uaw_0\tperson\nFiQbZpev_LA_0\tperson\nFim4ZNdANXI_0\thorse\nFipIgAA0lFk_0\tbicycle\nFirrKl6H41c_0\tperson\nFirrKl6H41c_1\tperson\nFivrGIBKDvo_1\telephant\nFiz1rnLi2OM_0\tperson\nFi4kJfnwDFc_1\tbicycle\nFi4kJfnwDFc_0\tbicycle\nFi7LPQxqu14_0\tperson\nFi9uLLmtWaQ_0\tperson\nFi_IAiAUqaU_1\thorse\nFi_IAiAUqaU_0\thorse\nFjBRf4S85bg_0\telephant\nFjBRf4S85bg_1\telephant\nFjCz86a5wp4_0\tperson\nFjF5nRRKjKc_0\tperson\nFjMslXNPmHo_0\tairplane\nFjRDB5KtmZk_0\tcow\nFjUvDc65QJo_0\tperson\nFjZltjNG2NU_0\tskateboard\nFjfP5wdsmM0_0\tcat\nFjo3Q6r1Unc_0\tcow\nFjsVcnD_MIg_0\tmotorcycle\nFjvoIjZBqfU_1\tperson\nFjvoIjZBqfU_0\tperson\nFj98ZrblH1g_0\tumbrella\nFkAQLLdAAbk_0\telephant\nFkAQLLdAAbk_1\telephant\nFkFAVoUYxPc_1\tskateboard\nFkOkAlvY34U_0\tcow\nFkSrQgrkwxM_0\tperson\nFkSrQgrkwxM_1\tperson\nFkZy3LGoN9I_0\tdog\nFkkUslZGIbg_0\tbear\nFkvcJknwKuY_0\tperson\nFkzewHxki8o_0\tskateboard\nFk4XzK5XI6A_0\tbus\nFk4XzK5XI6A_1\tbus\nFlD1RAiVpek_0\tperson\nFlD1RAiVpek_2\tperson\nFlD1RAiVpek_1\tperson\nFlEhS-F3ygQ_0\tumbrella\nFlGO6UYJUzE_0\thorse\nFlNEteNmUhc_0\tperson\nFlR1fAhH2Xo_0\tdog\nFlYY0RaMPNY_0\tperson\nFlgN1oA45yM_0\tbear\nFl2yqFTps4E_0\tperson\nFl6OhW0-1w0_0\tperson\nFl9EhNo7Keg_0\tperson\nFmDFcSMFeno_0\tperson\nFmDFcSMFeno_1\tperson\nFmDOHRJspxI_0\tperson\nFmMYoani5Vg_0\tperson\nFmOLwdbHDxQ_0\tperson\nFmOfXWRFoXQ_2\tbird\nFmUhkvEy_7s_0\tperson\nFmUhkvEy_7s_1\tperson\nFmVDxGIS5zk_5\ttrain\nFmVDxGIS5zk_7\ttrain\nFmVDxGIS5zk_8\ttrain\nFmVDxGIS5zk_9\ttrain\nFmVDxGIS5zk_10\ttrain\nFmVDxGIS5zk_1\ttrain\nFmVDxGIS5zk_2\ttrain\nFmc6udEpldU_0\tcat\nFme4Abd5nUA_2\tbird\nFme4Abd5nUA_1\tbird\nFmoAxj0I_HE_0\tperson\nFmqOvCWa7zg_0\tperson\nFmrozJZpKR8_0\ttrain\nFmsAY671mqQ_7\tknife\nFmuPNtoqS2E_0\telephant\nFm1Depfmi_k_1\tperson\nFm5EMiek6AE_0\tperson\nFm6Hq8f2Qxk_1\tairplane\nFm6Hq8f2Qxk_2\tairplane\nFnEnQ8PP_eE_0\tskateboard\nFnEnQ8PP_eE_1\tskateboard\nFnGScEGhwDA_0\tperson\nFnKvuj-emb4_0\tperson\nFnKvuj-emb4_1\tperson\nFnMl1BAE_jc_0\tbear\nFnMl1BAE_jc_4\tbear\nFnNceIdqZ3w_0\tperson\nFnNceIdqZ3w_1\tperson\nFnTofG0IZf0_0\tperson\nFnb6xihA7ck_0\tperson\nFncXKaqIxJo_0\tperson\nFncXKaqIxJo_3\tperson\nFniMTwzxRZQ_0\tperson\nFnv6GlZeZ98_2\tairplane\nFnwZm6-uVkU_0\tperson\nFn6j8CspFw4_5\thorse\nFn6j8CspFw4_2\thorse\nFn6j8CspFw4_3\thorse\nFn6j8CspFw4_4\thorse\nFn7CPx1Df1I_0\tdog\neRGlFEYZ74g_0\tperson\neRQQ8fY6DVA_0\tperson\neRToPN2xDdI_1\thorse\neRToPN2xDdI_2\thorse\neRVbBhT_bcs_0\tperson\neRXcoQINrwY_0\tcow\neRa3aIGemkw_0\tperson\neRiOVczmKs0_0\tperson\neRk0k7ru0C0_0\tperson\neRlVo64o3EE_0\thorse\neRn_VZZAhDc_0\tbird\neRpQzm5PYXw_0\tperson\neRvRu0q-GoE_0\tdog\neR2L8Yeikhc_0\tperson\neR2s4XgNo7o_2\tdog\neR6IwGLaa1M_0\tbicycle\neR7y-Ei3DLg_0\tperson\neSGBtfzFobI_0\ttrain\neSId-3VXvKk_3\tdog\neSIwAUMyFgU_0\tperson\neSKH9cYOKk8_0\thorse\neSPrJOSU8AM_0\ttrain\neSa1vsOaz1c_0\tknife\neSiLV8rS59E_0\tperson\neSiLV8rS59E_1\tperson\neSljhVPS-Ik_2\tperson\neSljhVPS-Ik_0\tperson\neSpAsKZSmiA_0\tairplane\neTDKrXMMrQ0_0\tcow\neTKPoRwNChU_0\tperson\neTKPoRwNChU_1\tperson\neTKSWSWvAyw_1\tperson\neTNf-Cqbbro_1\tperson\neTQF3UDg8qc_0\ttruck\neTTKvmF97nI_0\telephant\neTUWLCcJU2k_2\tbus\neTU8LeMW9qA_0\tperson\neTc1z6mbb50_0\ttruck\neTdIp3O6Gdc_0\tbear\neTkYJ5e2d6g_0\tperson\neTkbZ2QtHvw_0\ttrain\neTkbZ2QtHvw_1\ttrain\neTpyN9lx8_4_0\thorse\neTsE0jLxU3w_0\ttruck\neTsE0jLxU3w_2\ttruck\
neT3B8Dicg34_1\tperson\neT5K9fPU-0g_0\tperson\neUGoobFpS4s_0\tperson\neUKe6XaWIfA_0\tmotorcycle\neUQjLdCSTbY_0\tperson\neUQ4P2JG1yg_0\tbus\neURPg0TbtFI_0\tperson\neUU0KJ-w2bc_0\tperson\neUVgOxQT_-8_0\tcow\neUbEHnOzRA8_0\tperson\neUbEHnOzRA8_1\tperson\neUbEHnOzRA8_2\tperson\neUe_Rayk8X8_0\tperson\neUyzGl0--ms_1\tperson\neU6G8jITD_Y_0\tairplane\neVJOOrHqc34_1\tskateboard\neVL1UQ_nteE_0\tcar\neVNGBAn5Oxc_0\tcat\neVPABDrI9js_0\tbird\neVYydWvg5Go_1\tperson\neVcLRosJZew_0\tperson\neVhB8QJJogM_1\tknife\neVn8akHyS64_0\tairplane\neVn8akHyS64_2\tairplane\neVn8akHyS64_3\tairplane\neVn8akHyS64_6\tairplane\neVuy4uctm28_0\tperson\neVu1gME4-Qs_0\telephant\neVu1gME4-Qs_1\telephant\neVywFyCLwko_0\tperson\neVzfhyg8qFU_0\tperson\neV2KIbTSnH4_1\ttrain\neV4pA62ABv8_1\ttrain\neV6nRsgY8PQ_0\tperson\neV64Qw4Zebk_0\tperson\neV-VIypuuNY_1\tbird\neWHnCpVoKhw_0\ttruck\neWbvhqFVvXk_0\tboat\neWlQOgHQT7g_0\tairplane\neWpIepmfRus_0\tperson\neWpIepmfRus_1\tperson\neWsle8FxRvY_0\tperson\neWyDiulNMGo_0\tmotorcycle\neW6l7xJBq-Q_1\tboat\neW6o2X8qAtQ_0\tcar\neXDegroOl34_0\tperson\neXECAC_iXPc_0\tperson\neXLLe0Z-fJk_0\tperson\neXUIt5B2NQc_0\tperson\neXYniqUW4z8_0\tbicycle\neXYniqUW4z8_2\tbicycle\neXaCA1qL7uY_0\tperson\neXeifN6Jv8c_0\telephant\neXeifN6Jv8c_1\telephant\neXeifN6Jv8c_3\telephant\neXeifN6Jv8c_4\telephant\neXeifN6Jv8c_7\telephant\neXfkthdw2L4_0\tperson\neXixQXmPyYw_0\telephant\neXoF6xS_5u4_3\tknife\neXuelMqu_1M_0\tknife\neXveKyc2TQg_0\thorse\neXxAlPRFiqs_0\tperson\neXxAlPRFiqs_1\tperson\neXxAlPRFiqs_2\tperson\neX3bd4kHxuc_9\tairplane\neYDpQFJpz7k_0\tperson\neYJe2k1E0XQ_0\tbus\neYY-Mz3L_Ac_1\telephant\neYeHu-IftM0_0\tperson\neYnlQEvgHVc_0\tcat\neYqlHj6MSc0_7\tbicycle\neYyGqoW9Q3c_0\tbus\neYyri5GAJDE_0\tperson\neZEN_5rnTLM_0\tperson\neZL3Ew4O7YI_0\tperson\neZXS_3nTpdo_1\tmotorcycle\neZXS_3nTpdo_2\tmotorcycle\neZZb5rnc1iA_0\tbus\neZf-Rsr1aNs_1\ttrain\neZgo_XfmmO0_0\tperson\neZgo_XfmmO0_1\tperson\neZl_FRsZx3o_0\tperson\neZym_LkJnpY_1\tknife\neZ2Y_Qtg0VU_0\thorse\neZ2Y_Qtg0VU_1\thorse\neZ4N2Y737ss_0\tperson\neZ_peGgPSDE_0\tperson\neaHXGY8ImzY_0\tperson\neaOqHSeEVG0_0\telephant\neaR-dFaZRGc_2\tgiraffe\neaTX3J2X23g_1\tperson\neaTX3J2X23g_0\tperson\neaalMrdHsQ0_0\thorse\nearUgdES0lk_0\tperson\neaxPmkwGK5U_0\tbird\nea1EeKBBjxk_0\tumbrella\nea1YcZPjbxU_2\ttruck\nea4saeRZ0_M_0\tperson\nea8mbQn2kv0_2\tdog\nea8mbQn2kv0_1\tdog\nebFgEyNciRc_0\tcow\nebMZJ-lUhbw_2\tbicycle\nebMZJ-lUhbw_3\tbicycle\nebMZJ-lUhbw_1\tbicycle\nebOubiwIUC0_0\tperson\nebV9mcxICDs_0\tdog\nebY5nNOPdN0_0\tperson\nebY52fJyTPs_1\tperson\nebY52fJyTPs_0\tperson\nebagV2pOV20_0\tboat\nebhnTUXh7Pc_0\tcat\nebh7xOXlO7Y_1\tperson\neboXP28MlOE_0\tairplane\nebt0_AWnuyM_3\tbear\nebyMEAOqPhQ_0\tskateboard\nebz4umtEYag_1\tmotorcycle\neb0UO8Y5r5A_0\tcar\neb1-qD5D7Us_0\tperson\neb5d4XIDSqs_0\tcar\necDEmZdWz8Q_0\tperson\necGOS5ZO0Tw_0\tskateboard\necGOS5ZO0Tw_1\tskateboard\necJIf9dcDHk_0\tperson\necKMZLATsNg_0\tperson\necKst7suEZo_1\tmotorcycle\necPynengjhg_0\tperson\necUmR_974l4_0\tbear\neccbjuLjCr0_0\tbicycle\necex13DrS00_1\tbus\necgqb4spDo0_0\tcow\neclnV3fwFVg_0\tcar\necndV9N-b9M_0\tboat\necrgwn6gB7c_2\tperson\necrgwn6gB7c_0\tperson\necrgwn6gB7c_1\tperson\nec0L5W9HzYQ_0\tperson\nec0zPF4t8jM_0\tperson\nec10-YUa1PE_0\tperson\nec4Mjwm2hyQ_0\tperson\nec4ya7ogbFU_0\tperson\nec59VG2krTI_0\tknife\nec7hzm4ZgOM_0\tbus\nec8daVdUMW8_0\telephant\nedErePLiFl4_0\ttruck\nedFb7FxjVPc_1\tperson\nedOvHaEGfM0_0\ttrain\nedO7Q7znUJA_0\tcat\nedPmPMqUt4c_1\tperson\nedPmPMqUt4c_2\tperson\nedS79MnRXwE_1\tperson\nedYcGdD4UGI_0\tperson\nedd8R4oDMdg_0\tperson\nedlAlkitTfg_0\tbird\nedlAlkitTfg_1\tbird\ned
q1Zw1FWGY_0\tperson\nedrtSs6UdCI_0\tboat\nedtqJ_N0258_0\tperson\ned0O35MjM6Q_0\tcow\ned5jfyH6JyI_0\tperson\neeEjRmROBZs_0\ttrain\neeEjRmROBZs_1\ttrain\neeJDVUC0bio_0\tbird\neeV0a3p0uz8_1\tdog\neeYr-ujfh4Y_1\tperson\neeYtwUSuQzY_1\tairplane\neeYtwUSuQzY_2\tairplane\neeYtwUSuQzY_0\tairplane\neeZyIsjtgj0_0\ttrain\neeahFaPbx5M_0\tskateboard\neea6uRdJLL4_1\tbird\neee-1I8uLeU_0\tcow\neefTfPIGkq4_1\tperson\neef-qkyU0jY_0\tboat\neepn_UxMI5o_0\tskateboard\nFoFA-VOPhV8_0\tzebra\nFoIc9MjzbBk_0\tperson\nFoSynLz7aJ8_0\thorse\nFoSynLz7aJ8_1\thorse\nFoUqmWxXlNU_0\tperson\nFoUqmWxXlNU_1\tperson\nFobAHnW_q6s_0\tperson\nFog-McdMlO0_0\tperson\nFomH9b8uRKs_2\tknife\nFot4m5WU4Aw_1\tperson\nFot4m5WU4Aw_0\tperson\nFouVJvkYyPs_0\tperson\nFpCdNHknwMQ_3\tcar\nFpCdNHknwMQ_5\tcar\nFpEzn8x46OE_0\tbird\nFpGO4RTCIuk_6\tbicycle\nFpGO4RTCIuk_0\tbicycle\nFpGO4RTCIuk_2\tbicycle\nFpGyjKY-NIk_0\tmotorcycle\nFpGzMvzCvKo_0\tperson\nFpI0Do5LaU8_0\tperson\nFpTdRnuOS8M_0\tperson\nFpTdRnuOS8M_1\tperson\nFpaob2f1sqE_1\tperson\nFpaob2f1sqE_0\tperson\nFpev0w7vGO4_0\tperson\nFprxIVYXUL4_0\thorse\nFpzpuYeDf6M_0\tbus\nFp1vbL5guA0_0\tperson\nFp2HgWZlr2k_0\tperson\nFp7RJqXwz6c_0\tperson\nFp-TG2XDrC4_4\tcar\nFp-TG2XDrC4_0\tcar\nFp-TG2XDrC4_1\tcar\nFp-TG2XDrC4_2\tcar\nFp-TG2XDrC4_3\tcar\nFp_5yBxyvR4_0\tumbrella\nFqFhpogmR2s_0\tcat\nFqHStgmNnKA_0\tbicycle\nFqHStgmNnKA_1\tbicycle\nFqTHQ5KBbaY_0\telephant\nFqjhuAhttZw_2\ttrain\nFqjhuAhttZw_1\ttrain\nFquAMi_ikSA_0\ttruck\nFqxWiT-6dLM_0\tperson\nFqxZmvVkHIA_0\tgiraffe\nFqxZmvVkHIA_2\tgiraffe\nFqx-wOpqzZo_0\tairplane\nFqzYUW3X9pc_0\tperson\nFqzYUW3X9pc_1\tperson\nFq_esHSu_sk_0\tperson\nFrC2HuRBsYA_0\tperson\nFrC-Gp1GmVw_0\tcow\nFrC-Gp1GmVw_1\tcow\nFrIO6gNGeao_0\tperson\nFrUCytgm6sM_2\thorse\nFrViqM6fVR0_0\tdog\nFrVxG6x7tj0_0\tknife\nFrVxG6x7tj0_2\tknife\nFrgvokGeeds_0\tperson\nFrk0tcM1o_w_0\tperson\nFrm5N8YRz_E_0\tperson\nFrpsbU7nO00_0\tperson\nFrxIGKawDiA_0\tperson\nFrzgyfVukw4_0\tperson\nFrz8huGrR4M_2\tmotorcycle\nFr0K__Q_Kv4_1\tbird\nFr2qdnHURF4_0\tboat\nFsHjWJUILr4_0\tperson\nFsScYp1HNk0_0\tperson\nFsXYM3nf7O4_0\thorse\nFsZyoaRLGfw_1\tperson\nFskWl7cTGUU_4\tmotorcycle\nFslFjbzL4rY_0\ttrain\nFsuA_2-7e1w_0\telephant\nFsuA_2-7e1w_1\telephant\nFsvwyL1hLDU_0\tbus\nFswGt3qhUXE_1\thorse\nFs6Lk0xDsWk_0\tdog\nFs6Vua80iU4_1\tbus\nFs-DmOC6Ksw_0\tperson\nFtAgz58w2vs_0\tperson\nFtC0Y3Dca60_0\tdog\nFtD8uBgTi3E_0\tcat\nFtJ8y0gIpKg_0\tdog\nFtJ8y0gIpKg_3\tdog\nFtJ8y0gIpKg_5\tdog\nFtJ8y0gIpKg_1\tdog\nFtJ8y0gIpKg_4\tdog\nFtMshKheG8Q_0\telephant\nFtet3EW_gR0_0\tskateboard\nFtet3EW_gR0_1\tskateboard\nFtj_1qTEwE8_0\tcow\nFtqLCjhRQgQ_1\tperson\nFtqLCjhRQgQ_0\tperson\nFtwMaVMlLbM_0\tperson\nFtwZasadNWo_1\tperson\nFtwZasadNWo_0\tperson\nFt3Xr78g1jg_0\tdog\nFt4RUB75d64_0\thorse\nFt5ZV3L5LV4_0\tperson\nFt8VPp_VNJs_0\tknife\nFuCuNV5vL-8_0\tperson\nFuIIvsD7qyY_1\tperson\nFuMf00RPDmg_0\tbear\nFuNvDTe7cAM_0\tbird\nFuR3p7f2R30_0\tperson\nFuTf8iiIHWI_0\tmotorcycle\nFuVQuZfX71w_1\telephant\nFuc49AUfyaA_1\ttrain\nFufG8eRehvk_0\tperson\nFuoKMOMcl0I_0\tbus\nFu3A7S4V26Q_0\tperson\nFu4p4U9AqY4_0\ttrain\nFu5TDXXdHyc_3\ttrain\nFu5TDXXdHyc_0\ttrain\nFu5TDXXdHyc_1\ttrain\nFu5TDXXdHyc_2\ttrain\nFvB0FA24g0c_0\tmotorcycle\nFvD-5pXN6B4_0\tperson\nFvF8CGSAVBw_0\thorse\nFvIqBpjD4A4_0\tperson\nFvKJQTsxS6o_0\tbus\nFvNiWF5wWJA_0\ttruck\nFvN6HD0c3I8_1\tbicycle\nFvQ8wYSFAhA_0\tbird\nFvZ_lMA5MYE_0\tperson\nFvZ_lMA5MYE_2\tperson\nFvZ_lMA5MYE_1\tperson\nFvcxD9PJ1-g_0\tbear\nFvcxD9PJ1-g_1\tbear\nFviKCn2JGbY_0\tperson\nFvksDxENves_0\tbird\nFvksDxENves_1\tbird\nFvmW4A9wN1c_0\tperson\nFvslrkU6Ii8_1\tskateboard\nFvslrkU6Ii8_5\tskateboard\nFvslrkU6Ii8_4\ts
kateboard\nFvuJoToFsZ0_0\tskateboard\nFv2LjW2C5SU_0\tknife\nFv2LjW2C5SU_2\tknife\nFv2SAN8CNlg_0\thorse\nFv6OQz_y5V0_0\tperson\nFv80QjBLyXw_3\ttrain\nFv80QjBLyXw_4\ttrain\nFwBCZ90I_aw_0\tcat\nFwIN5LlmnSA_0\tperson\nFwMy9UR3xJA_0\tperson\nFwMy9UR3xJA_1\tperson\nFwNHDlUxkVE_0\tperson\nFwSQA6A_bWE_0\tperson\nFwZzzptQg0s_0\tperson\nFwZzzptQg0s_1\tperson\nFwf5SGfOguQ_0\tbird\nFwf_1L-RQB4_0\tcow\nFwhmGtqpt5s_1\tskateboard\nFwrkNuHACuE_0\tperson\nFwtyj6Ut62E_2\tdog\nFw8NHywJSJw_7\tairplane\nFw8NHywJSJw_8\tairplane\nFxHZCFGlLk8_0\ttruck\nFxI0-u_zPQQ_1\tskateboard\nFxI0-u_zPQQ_0\tskateboard\nFxJg66y6Vj4_0\tperson\nFxJ0douRc4s_0\tperson\nFxMnA-aNvVI_0\tknife\nFxXVgnAjOCs_0\tperson\nFxXVgnAjOCs_1\tperson\nFxitbyLzBbw_0\tperson\nFxmfshFrhyg_0\tperson\nFxmfshFrhyg_1\tperson\nFxp_EDLEylo_1\tbear\nFxxuVRsJiCQ_7\tbird\nFxxuVRsJiCQ_9\tbird\nFxxuVRsJiCQ_11\tbird\nFx74SXbZiUI_0\tboat\nFx-8EgSEaDg_0\tperson\nFyFea2NifCo_0\telephant\nFyKB3iEKNlg_0\tperson\nFyO1UliwWNQ_0\tskateboard\nFyQulDaVp8I_0\tperson\nFyTFrxalrzY_2\tbicycle\nFyb5_PxuzrI_1\tairplane\nFyjgIZnRT0A_0\tperson\nFylDI9Ssx18_0\tdog\nFyqooE73pSs_0\ttrain\nFyuLo6pvAxk_0\tperson\nFyuLo6pvAxk_1\tperson\nFy6UODQTxBw_0\tdog\nFy8cULzM424_0\tperson\nFzJOOqEWb48_0\tperson\nFzP8vDH_ynM_0\tbicycle\nFzV_56qru4c_1\tperson\nFzV_56qru4c_0\tperson\nFzaaAJ_dGjI_1\tdog\nFzc4L1eWvQ0_0\tknife\nFzeiG746wec_0\tperson\nFzoJlCfL5bc_0\tperson\nFzpV3zrU7w0_0\tcat\nFzufL9SIDZ4_2\tperson\nFzvLoCiUbCU_0\tperson\nFz4RMW4ONrQ_0\tskateboard\nF0Ekv-HAlnk_0\tairplane\nF0G64yaBMBM_0\tperson\nF0G64yaBMBM_1\tperson\nF0I59IAm-vo_0\tperson\nF0Qk5fG3X-M_0\tperson\nF0Q9zBIa4vg_0\tknife\nF0Q9zBIa4vg_1\tknife\nF0Q_-7qxWws_3\telephant\nF0UBtRxGNhA_5\tbird\nF0XjqeFLlgU_1\tbird\nF0ZAshDVPxg_0\tperson\nF0c4qnJQtDU_0\tbear\nF0gFV3Zl1ew_0\ttrain\nF0gFV3Zl1ew_2\ttrain\nF0hx5kgZ3go_0\telephant\nF0mBUyvb90Y_0\tperson\nF0qXU9y4p-Q_0\tperson\nF0z1cmfnPsQ_1\tbicycle\nF1B_Y1twDK0_0\tcow\nF1CZ2DPXJ9M_4\tbicycle\nF1CZ2DPXJ9M_1\tbicycle\nF1KHVI6XeVo_0\tperson\nF1eNAhwM5Pc_0\thorse\nF1jGg9828BI_0\tperson\nF1j27LEBSpI_1\tcar\nF1qXLHQywDc_1\telephant\nF1sQlUVWZLM_0\tperson\nF15XLgp6ED4_0\tskateboard\nF2Bb2pFQRyU_1\tperson\nF2EV6W4vdT8_0\tbus\nF2GhztG-3ZM_0\tcat\nF2HupbPd4Rc_0\tperson\nF2JDbaIJXuM_0\tperson\nF2JeBrL43Kg_0\tperson\nF2JnnpLll3c_1\thorse\nF2Kd_wTgfHc_0\tbird\nF2N-fmDDyCs_0\ttrain\nF2an_w-D4WM_0\tdog\nF2bbT3y10lk_0\tperson\nF2dx02YK1MY_0\tcat\nF2kBHcrY7Ck_0\tperson\nF2nvlBMOvGc_0\tboat\nF2nvlBMOvGc_2\tboat\nF2nvlBMOvGc_3\tboat\nF2nvlBMOvGc_4\tboat\nF2yvXHbr1Us_6\tbird\nF2yvXHbr1Us_7\tbird\nF20W1m4x2Ys_0\tperson\nF20_Ihwr_1Y_0\telephant\nF21R2kQ-je4_0\tperson\nF2244CO9Fuo_0\tairplane\nF250PqK5Gb4_1\tairplane\nF3AMItpIJlI_0\tdog\nF3AMItpIJlI_4\tdog\nF3FUBdTgY7c_0\tcar\nF3FUBdTgY7c_1\tcar\nF3Lz3rnQ-7A_0\tperson\nF3Lz3rnQ-7A_1\tperson\nF3NneLgyZiU_0\tperson\nF3RkQzIQjeU_2\tbicycle\nF3XFJeSjPDU_0\tbird\nF3XFJeSjPDU_3\tbird\nF3gY7oCc-j8_0\tcat\nF3j318NP2P0_0\tperson\nF3j318NP2P0_1\tperson\nF3oP1Se_HdQ_0\tmotorcycle\nF35JtGCIiCo_0\tdog\nF377W3trtdg_2\tdog\nF4DJmxH-fuw_0\tskateboard\nF4FXVb3DdJE_0\tperson\nF4FXVb3DdJE_1\tperson\nF4HgVMHEiVQ_1\tbird\nF4Ja9TDp5eg_0\tperson\nF4R1rt0I4Ik_1\tperson\nF4R1rt0I4Ik_0\tperson\nF4WWEXEO6Cw_0\tairplane\nF4hUo05eI2s_0\tperson\nF4hVb1AsJ9M_0\tumbrella\nF4hVb1AsJ9M_1\tumbrella\nF4hp-2UBFcI_0\tperson\nF4l8U4NGPMU_1\telephant\nF4rQJlBkGa8_0\tperson\nF4tzOjT91r0_2\telephant\nF41NWCYabpM_0\tperson\nF44j0JHVdfU_2\tbicycle\nF44z7XXoIZk_0\tcow\nF4-R6x6hSno_0\tairplane\nF4-R6x6hSno_3\tairplane\nF5IEcbmSBiU_0\tperson\nF5UiBt9FiQ4_1\ttruck\nF5brWxznDYA_0\tbicycle\nF5drV0qDFvU_0\tp
erson\nF5pSgana5Ds_0\tperson\nF5pwABHMaZM_1\tskateboard\nF5y_lQCCiYk_0\tperson\nF51aHL_AuQ8_2\tperson\nF51aHL_AuQ8_0\tperson\nF51aHL_AuQ8_1\tperson\nF54NzXjey4Q_1\tperson\nF6AkwJu9acQ_0\tperson\nF6BUhbvKAY0_3\tbear\nF6I3hGIdHBM_0\tairplane\nF6L1DckOdFs_0\tperson\nF6L1DckOdFs_1\tperson\nF6L1DckOdFs_2\tperson\nF6UTU1zVfY0_0\tperson\nF6X-PDReV8U_0\tskateboard\nF6uVxnnSkQg_0\tcat\nF63FWqs6n6A_1\tperson\nF63OB46zw20_0\tperson\nF66U-dCKTVs_5\telephant\nF67kQb83GEo_0\tperson\nF7Aw74QT7I8_0\tmotorcycle\nF7D1ccHfWQM_0\ttrain\nF7GYFMuRxr8_0\tperson\nF7MruF3gqRk_0\tperson\nF7MruF3gqRk_1\tperson\nF7M2n9Irv10_0\tperson\nF7adrDrejOI_0\tbicycle\nF7adrDrejOI_3\tbicycle\nF7adrDrejOI_7\tbicycle\nF7iFGXShjIg_1\tknife\nF7lmwAhsTVE_1\tcat\nF7lmwAhsTVE_0\tcat\nF7wyUoc1ELM_1\tperson\nF7wyUoc1ELM_0\tperson\nF72e40LPG8g_2\tairplane\nF72e40LPG8g_3\tairplane\nF72yH9hRoS0_0\tperson\nF77I6mkMOmM_0\tperson\nF77I6mkMOmM_1\tperson\nF77WzfDD-Ac_0\tperson\nF77WzfDD-Ac_1\tperson\nF8VZcw3-DMg_0\tperson\nF8XbiaxQYFA_0\tcar\nF8kTGPYH29o_0\tairplane\nF8sVrU5FfZw_0\tperson\nF8vyo42LQM0_0\tairplane\nF9KIXBo3lNI_0\tbird\nF9KIXBo3lNI_1\tbird\nF9WnfUhb8A4_0\tboat\nF9hhOJk3fdY_0\tperson\nF9jiY40SX4g_0\tperson\nF9kDOaogdPA_0\ttrain\nF9kDOaogdPA_1\ttrain\nF9nirQJj4wc_0\tmotorcycle\nF9qYvrO4nMM_1\tperson\nF9qYvrO4nMM_0\tperson\nF942FTRne2Q_0\tperson\nF95fIsG0A7U_0\thorse\nF98XVAomn1s_0\tperson\nF-AROt5V1zQ_0\tairplane\nF-L2byRMMEI_0\ttruck\nF-QpXlvCAdw_0\tgiraffe\nF-RVugkjZ1k_0\tperson\nF-RVugkjZ1k_1\tperson\nF-dxzMmjOT0_0\tperson\nF-dxzMmjOT0_3\tperson\nF-dxzMmjOT0_1\tperson\nF-poowwxrxU_0\tperson\nF-3G1FhnsdY_2\tcow\nF-3G1FhnsdY_3\tcow\nF-7EAK7rTI8_0\tbird\nF_AoZsBu8j8_0\tperson\nF_AoZsBu8j8_1\tperson\nF_BBB0J-9tQ_0\tmotorcycle\nF_CsG_jIxC8_1\ttruck\nF_I4rwh1mtE_0\tperson\nF_JJmqKJBnY_0\tperson\nF_Kw8qyfgjU_0\tperson\nF_WtOi2ZeSE_0\tumbrella\nF_oxJfyCUrw_0\tperson\nF_wVAS7hR9E_0\tcat\nF_5NdFCcCrQ_0\tairplane\nF_59LD9YnAU_2\tperson\nF_8qVC7MHM0_0\tperson\nGABXImD8qwM_3\tdog\nGADBGhd7Hbc_0\thorse\nGAF3BbJqKos_0\tperson\nGAF3BbJqKos_1\tperson\nGAGFuwQyn2A_1\tperson\nGAVdXzEftIU_1\tperson\nGAaPJd_iVeU_0\ttrain\nGAb6ZqG64o4_0\tperson\nGAb9NG_JnoU_0\tcow\nGAe7SnwoPQQ_1\tairplane\nGAg-aVsz7AI_1\tperson\nGAinaDnPPO0_0\telephant\nGAnYrNhN90c_1\tperson\nGAoDRtFNSeQ_0\tbird\nGAoaBt8kfHQ_0\ttrain\nGApyoyRTlPk_0\tperson\nGArUrBTpgzk_4\tairplane\nGArUrBTpgzk_1\tairplane\nGArUrBTpgzk_3\tairplane\nGAzsUwyCRAI_0\tcow\nGBF7wVda328_0\tdog\nGBLwQswYGpQ_0\tdog\nGBUiAfFHr8o_0\tperson\nGBYAc4swbr8_0\tperson\nGBYFzcFWKtI_0\tskateboard\nGBYeOSgHxaw_1\tperson\nGBhV-vm_cDs_0\tmotorcycle\nGBhV-vm_cDs_1\tmotorcycle\nGBhV-vm_cDs_2\tmotorcycle\nGBhV-vm_cDs_3\tmotorcycle\nGBjWoHEvi24_0\ttruck\nGBnf-AAsQts_0\tperson\nGBvWcmiB_zQ_0\tperson\nGBv60Rpf6hA_0\tperson\nGBwqR6gIUJk_0\tperson\nGBwqR6gIUJk_1\tperson\nGB0RUQ72TDU_1\tmotorcycle\nGB0RUQ72TDU_2\tmotorcycle\nGB0RUQ72TDU_4\tmotorcycle\nGB1A1gXLxF8_1\tumbrella\nGB1A1gXLxF8_0\tumbrella\nGB2Z9Zd9kCM_0\tcow\nGB3M7jlJvZo_0\tumbrella\nGB3dD_Sz5yA_0\tcow\nGCECUCM275I_0\ttruck\nGCECUCM275I_3\ttruck\nGCECUCM275I_4\ttruck\nGCECUCM275I_1\ttruck\nGCECUCM275I_2\ttruck\nGCHyhn505e4_0\tperson\nGCL5aSCyDAQ_1\thorse\nGCR8piyI8to_0\tperson\nGCdYlCKelqg_2\tbird\nGCf79ImcoV4_0\ttruck\nGCiR2DBKEUo_3\tumbrella\nGCiR2DBKEUo_0\tumbrella\nGCyZCLCX4jI_1\tbus\nGC5X3-Zi5fo_0\tbear\nGC_4PRhWwy0_1\tperson\nGDBvvswiioY_0\thorse\nGDErDO6sQxg_0\tperson\nGDHukw9i8AE_0\tbear\nGDPBufHJ6pE_0\tperson\nGDVxjq335kg_0\tperson\nGDVxjq335kg_1\tperson\nGDW_ebhUmXg_0\tperson\nGDeoeNk-jj8_1\ttrain\nGDgRHR5rt5g_0\tdog\nGDhVskUd-i0_0\ttruck\nGDkTfXax1EI_1\tperson\
nGDr1CfMsWCo_0\tknife\nGDyR3j6e9uU_0\tbear\nGD0qZhFYMtE_1\tbear\nGD5H2vUIQUM_0\tbird\nGD7nVz18opA_0\tcow\nGEC16HE9LPs_0\tskateboard\nGEK0W7Soe5I_0\tperson\nGEOILdSs_m4_0\tperson\nGEXtPkuLXV4_0\tperson\nGElPgxFGsYM_0\tperson\nGEmM96O2bm0_0\tperson\nGEoAqEILC5I_0\tbicycle\nee4MHg5K9xo_0\tperson\nee4MHg5K9xo_1\tperson\nefANTTg0s7E_0\tperson\nefD7irKhsjg_1\tzebra\nefFDVTrJnI0_0\tperson\nefQ-zUFNN-U_2\tairplane\nefQ-zUFNN-U_3\tairplane\nefQ-zUFNN-U_0\tairplane\nefQ-zUFNN-U_1\tairplane\nefUVmXxR3pI_0\tperson\nefXikRhGmrs_0\tperson\nefdHHLZ3g1Q_0\tmotorcycle\neffHbT0DhsY_1\thorse\neffHbT0DhsY_2\thorse\neffHbT0DhsY_3\thorse\nefj0ZypW97U_0\tperson\nefl9qpSfN9o_0\tskateboard\nefo_cgnnucQ_4\tknife\nefqCl5PWA5Y_2\tbear\nef6fQWU1KdY_0\tperson\nef9zPCUJ5uQ_0\tboat\negByT16s_54_0\tperson\negByT16s_54_1\tperson\negHnmalt3d8_0\thorse\negQiifLgKHE_0\tperson\negVsaW3pIR8_0\tbus\negotrU2sxIs_1\tcow\negotrU2sxIs_0\tcow\negymuz3YUjw_0\tperson\neg0xHA2KO2M_0\tcar\neg0xHA2KO2M_1\tcar\nehAg6V-5Puk_0\tairplane\nehB-VoBE8As_0\tperson\nehFoBFIrRho_0\tperson\nehFvz7g6tcc_1\tperson\nehFvz7g6tcc_0\tperson\nehF--LpGjPU_0\tperson\nehI3hX4P2gg_0\tbus\nehSU0TuduDM_9\tboat\nehSU0TuduDM_0\tboat\nehSU0TuduDM_3\tboat\nehSU0TuduDM_7\tboat\nehSU0TuduDM_8\tboat\nehTOHuz8De4_0\thorse\nehhoOXi21uc_0\tperson\nehhzn87_kyY_0\tknife\nehpsJCYWhMo_0\tdog\neh0-hoyeQv4_0\tperson\neh383O3j2o8_0\ttrain\neh8ClQx55Pk_0\telephant\neh8ClQx55Pk_3\telephant\neh8ClQx55Pk_1\telephant\neh-Hpgj7SPM_0\tbird\neiIxHOvvvog_0\tperson\neiKfZPTeN-M_0\tperson\neiMVAVfFk50_1\tgiraffe\neiNlPbSqaQM_2\tbear\neiOC7H2_I7E_0\tmotorcycle\neiYV7UFe9_4_0\tperson\neiZm5CglnLc_0\tperson\neiirsESzuHs_0\tbicycle\neim8NPBqZXg_1\tperson\neis2vlxPtf4_1\tperson\neivFKGFBySc_0\tperson\neivMnaQyUKU_0\tperson\nei0PFx0qNIQ_1\tperson\nei0PFx0qNIQ_0\tperson\nei4Yn0KXnAM_0\tperson\nejDpzIUHAMk_0\tperson\nejD4KjqrkFo_0\tcat\nejIMw0_a1Zo_0\tperson\nejIMw0_a1Zo_1\tperson\nejVKT8cDDTY_2\tmotorcycle\nejoDQZqi4DU_0\tperson\nejsflVtvinE_0\tdog\nejzqfqBU2XY_2\thorse\nejzqfqBU2XY_0\thorse\nejzqfqBU2XY_1\thorse\nej5D22-gpzY_0\tperson\nekBhYo1n09M_0\tperson\nekGn7Al_5S0_0\tperson\nekOQkNLi9gA_0\tperson\nekPQmhXqsJs_0\tcow\nekQPPxQDQrA_0\tbird\nekYErFjRBcY_0\tperson\nekaQzIhIz6U_0\tperson\nekhId7QWajE_0\tperson\nekw22HGT0TY_0\tperson\nek6F1Yy6r4g_1\tperson\nek6F1Yy6r4g_0\tperson\nek9m3wFRD78_0\tmotorcycle\nelAJmgZ3uV8_1\tperson\nelIopJ6sLS8_0\tmotorcycle\nelS7CV83kDQ_0\tcat\nelbH9USSXbU_1\tperson\nele_x5If5RM_0\tcat\nelfDIDNaxO8_0\tbicycle\nelfDIDNaxO8_1\tbicycle\nelk9Eg_zAzA_0\thorse\nelwOqTHVPb4_0\tcar\nel_1tnvsCAY_0\telephant\nemAlGe0D2Ro_0\tcar\nemBk5WfF9MA_0\tperson\nemFvwwYH0Dk_0\tperson\nemLp02HobE4_0\tperson\nemO2DsNKmTw_0\ttrain\nemVjapACNME_0\tperson\nemWHcaPL5H0_0\tperson\nemXkTzHEyT4_0\tboat\nemhCPyXIbNk_0\tperson\nemqrQO4JZsU_1\tskateboard\nemxIavKneZw_0\tperson\nemzfRpng4hM_0\tbicycle\nem3XyVBpKCc_0\ttrain\nenA3HVeW4MM_1\tperson\nenCpXewY40c_0\ttruck\nenCpXewY40c_1\ttruck\nenR0OQhVBwE_0\tperson\nenWAeU6n9LQ_0\tperson\nenXS9AGUoow_0\tmotorcycle\nenY96p1ZALE_0\tknife\nenfPrTim6AU_0\tcow\nengcDIwacLg_1\tperson\nengcDIwacLg_0\tperson\nen06DIx0cz0_1\tperson\nen06DIx0cz0_0\tperson\nen6AOaqCY1s_0\ttruck\nen9gUgAJoek_0\tperson\neoFFf1yMhOg_0\tperson\neoauVNDdle8_0\tperson\neodvToXk2OQ_1\tcow\neodvToXk2OQ_0\tcow\neohpHQHPoXo_0\tdog\neovUEztTVZ4_0\tperson\neoyj6UfwM1c_0\tairplane\nepIcFi7yUZg_3\tcow\nepK_YUgNzQI_0\tcat\nepUTWEmTW1o_0\tbus\nepXYWAgJeJM_0\tperson\nepZSAxAzWRs_0\tperson\nepeLK68bI3k_0\tperson\nepeLK68bI3k_1\tperson\neph8ACa_bv4_0\tperson\neph8ACa_bv4_2\tperson\nepis0oQPudE_1\tper
son\nepu8oDLyhBw_0\tcow\nepxbwMupoU0_0\ttruck\nepxxfkiUpVQ_0\tperson\nep15pnX1AxU_0\ttruck\nep4od2aZYv8_0\tdog\neqAMk_GzwUg_0\ttruck\neqMRouLMQI0_0\tperson\neqPXFnE2SxE_0\tperson\neqTdm4-YomY_0\ttrain\neqWb0eTMl98_0\tcow\neqiPG6XAei8_1\tperson\neqiVR6aa8XA_0\tperson\neqnF1_Lwa94_0\tmotorcycle\neqswu7XtVeE_0\tboat\neqswu7XtVeE_1\tboat\neqvu61eQ-D0_0\tperson\neqwZeHPEjT0_0\tbus\neq2VUeTEEGM_0\telephant\neq2VUeTEEGM_1\telephant\neq2-yJIiWyA_0\tskateboard\neq7fzAhOZEo_0\tperson\neq8-99wqpC4_0\tmotorcycle\neq-XVpUOFlQ_0\tcow\nerDb15O0GYM_0\tperson\nerIMuEor6gc_0\tperson\nerJzcEpQ-sA_0\tperson\nerKEWcCPgjU_0\tperson\nerKRZXMcCzQ_0\tbus\nerLW6pBgIrE_0\tperson\nerLW6pBgIrE_1\tperson\nerWerfoGejo_1\tdog\nerZ0-WmkPj8_0\tperson\nerfJrdfPp8M_0\ttruck\neri-jOmjJ5U_0\tperson\nerprzr0GCa0_0\tperson\nerrX-c_luf8_0\thorse\nerwHbfRwbDc_0\ttrain\neryYeuoNAdw_0\tperson\nesEKixC0bi0_0\tmotorcycle\nesFUx8MS7FU_0\tperson\nesFUx8MS7FU_1\tperson\nesHEHZv3XAw_0\tperson\nesdMTvdz7G8_0\tperson\nesd9prHEDmY_0\tcat\nesnr6cTpfQI_0\tskateboard\nesnr6cTpfQI_1\tskateboard\nesrkVh27SSg_0\tgiraffe\nesr3dKZtZ9I_1\tperson\nestRADheTso_0\tperson\nesxEV1BYf8g_0\tdog\nes0lurDiGrM_0\ttruck\netCrz_vcvJI_0\tzebra\netFtHhL2hac_6\tbicycle\netHjccaFHjw_0\tperson\netZXvy6wqZM_0\tcat\netZjkcz1NXE_1\tperson\netfOefeQ0NA_2\tknife\netgjVXNON5k_0\tperson\nethiyhktDW0_0\ttrain\netrQY3yeg8M_1\tperson\netrQY3yeg8M_0\tperson\netu6chaT_o0_0\tmotorcycle\netu6chaT_o0_1\tmotorcycle\netu6chaT_o0_2\tmotorcycle\neuNO4mGjpL4_0\tperson\neuS2rEsG-jA_0\tperson\neuaiFpmh6SU_1\tperson\nGEuy-JvOFBM_0\thorse\nGEwLV10zHSM_0\tperson\nGEwYE_QVNHE_0\tboat\nGE061if8j60_0\thorse\nGE8D0jEjasg_1\tbird\nGFCN_4akSi4_0\tperson\nGFMwf7Ly_Sc_2\tperson\nGFMwf7Ly_Sc_0\tperson\nGFN08ryY-U0_2\tknife\nGFTwQgse_Lk_4\tknife\nGFXh14V5BN0_0\tcow\nGFkCQFowcfs_0\tperson\nGFkCQFowcfs_1\tperson\nGFlTNatYs1E_2\thorse\nGFlTNatYs1E_0\thorse\nGFmBVLxS0W4_0\tperson\nGFsVA4Rxqv0_0\tcow\nGFtZEmPze30_0\tperson\nGFytNaOS7eE_0\tboat\nGF28RuK9Mio_0\tperson\nGF28RuK9Mio_1\tperson\nGF29WU5hVFU_1\tumbrella\nGF29WU5hVFU_2\tumbrella\nGF4b86WLzWE_0\tperson\nGF-zdmzb4zY_0\tbus\nGGBhXIkXN-U_0\tdog\nGGCSOyr8iNg_0\tcat\nGGNkUcwxgU0_1\tairplane\nGGX2r0RT9h4_0\tbird\nGGY5BDDn5LE_0\tperson\nGGtf7t-SVb0_0\tperson\nGGytoCC23B4_0\tdog\nGG2kiaUm9pg_0\tperson\nGG_CxOFs69U_0\tbicycle\nGHAR-041e4w_0\tperson\nGHF_00q4fw0_0\tperson\nGHN9eBe1Bp8_0\tknife\nGHWPuquucrM_0\tcow\nGHZjWHKMwyw_1\ttruck\nGHqedSEAQ9k_1\tperson\nGHqmzbJnjVg_0\tperson\nGHu-Q-Jbh6E_0\tumbrella\nGH_-l0dCs1A_0\ttruck\nGINmKyxk55E_0\tperson\nGIOByl4-GaE_0\tperson\nGIQcZHeI0rA_1\tknife\nGIRWosek2kk_0\tperson\nGIesL1NmKrU_0\tairplane\nGIiKoRSDN-Q_0\tskateboard\nGIiKoRSDN-Q_1\tskateboard\nGItE5rGj_-g_0\tperson\nGI0iwCtSgJY_0\tperson\nGI7YeWGyVRM_0\thorse\nGJAe8ctAWb0_0\tperson\nGJHbNDEY178_1\tperson\nGJHbNDEY178_0\tperson\nGJIPOsnsWAg_0\tperson\nGJIPOsnsWAg_1\tperson\nGJL8p4_PeKo_0\tperson\nGJMk0Meedm0_0\tperson\nGJbtzWK_dYk_0\tperson\nGJpkQJ1A6Gw_1\tcow\nGJy5Zhvk6lE_0\tperson\nGJ1O_aGTN94_0\tmotorcycle\nGJ4kWS7SklQ_0\tperson\nGJ7mp6eUiPg_0\tcar\nGJ9641JuJGs_1\tperson\nGJ9641JuJGs_0\tperson\nGKCr5DPt-O4_0\tcar\nGKC9zObtOMM_0\tperson\nGKEhy910De4_0\ttrain\nGKWJ0lgaDCg_0\tumbrella\nGKWJ0lgaDCg_2\tumbrella\nGKewJtAM0mQ_1\tperson\nGKewJtAM0mQ_0\tperson\nGKhEkZ-cdNQ_0\ttrain\nGKlP0uncbyg_0\tperson\nGKlP0uncbyg_1\tperson\nGKlP0uncbyg_2\tperson\nGKlP0uncbyg_4\tperson\nGKmEvD6kEV0_0\tbicycle\nGKn-IcumftE_0\tperson\nGKpcLh6EzTI_0\ttruck\nGKs6SswOMow_0\tskateboard\nGKyR_cV3NzE_0\tbird\nGK1HKUicpqc_0\tperson\nGK7khWET2AA_0\tperson\nGLBHzmRhRXw_0\tperson\nGLCLinUtVW
M_0\tperson\nGLJJdMPYSaY_0\tperson\nGLLgtpj5VIc_2\telephant\nGLLkz3ew2Cw_0\tperson\nGLN48vyNNE8_0\tperson\nGLOfyCC7cpg_1\tperson\nGLOfyCC7cpg_0\tperson\nGLTbuhg3c9c_0\tcow\nGLTcmtEP3PQ_6\tperson\nGLTcmtEP3PQ_0\tperson\nGLTcmtEP3PQ_1\tperson\nGLTcmtEP3PQ_2\tperson\nGLTcmtEP3PQ_4\tperson\nGLT0qdbJFmE_0\tperson\nGLYc7lsUKvQ_0\tcow\nGLemLQ7Taz4_0\tdog\nGLiiNf5XBGw_1\tperson\nGLnBX7vZMds_0\tcar\nGLncyVpSovs_0\tperson\nGLonpYW6Yi8_0\tperson\nGLsxpYW-07A_0\tperson\nGLy3RuBdLZ4_0\tgiraffe\nGL2K160VZnM_0\tairplane\nGL5i6mrfwJQ_0\tperson\nGL6eTReYh8E_0\tgiraffe\nGL7g579uon4_0\tbus\nGL_EwiiBm1A_1\tperson\nGL_EwiiBm1A_0\tperson\nGMCQFxoF1UE_0\tbear\nGMJi6djWGYg_0\telephant\nGMLP7F_Da2w_0\tperson\nGMVqWicQ2d4_0\tmotorcycle\nGMeN9Z1A9X4_0\tcar\nGMj9b1A2R98_0\tbus\nGM3BiiUS2Xw_0\tcat\nGM31sVP8NMA_0\telephant\nGNJ088XwXpI_2\tskateboard\nGNLzZ4OPnHc_0\tboat\nGNLzZ4OPnHc_1\tboat\nGNN-BevC79g_0\tknife\nGNRZ4AjoiSE_0\tairplane\nGNawMpiTEFs_0\tperson\nGNnrNuC9zGU_2\tperson\nGNnrNuC9zGU_1\tperson\nGNqCvE7d9mE_0\tperson\nGNr1nF-F-40_2\tboat\nGNvEs3KBgRw_0\tperson\nGN97F0ERx8k_1\tperson\nGN97F0ERx8k_0\tperson\nGOE3QOj97xk_0\tperson\nGOLZ7CWDXjk_1\tperson\nGON778LYTqk_0\tperson\nGOQICMUoGL8_2\tperson\nGOWRiwkZo2U_0\tperson\nGOW84-_w-LQ_0\tbicycle\nGOZwEuPDmzc_0\tperson\nGOb0e4ojb3c_0\tairplane\nGOkeNGfFi8Q_0\tperson\nGOkeNGfFi8Q_1\tperson\nGOpAs6aca30_1\tperson\nGOpAs6aca30_2\tperson\nGOrO-A4yd5c_0\tperson\nGO0RyAWdVQA_0\tperson\nGO1tmJmOjZU_0\tcow\nGO9YRVC_2SA_3\telephant\nGO9YRVC_2SA_4\telephant\nGO98cqZbP2o_0\tcar\nGO98cqZbP2o_1\tcar\nGPABD8HFpQU_0\tskateboard\nGPCArlk4udc_0\tbird\nGPHwY1J1u04_1\tcat\nGPLKI0foxxc_0\tperson\nGPUUqd1IyNA_0\tdog\nGPUdCDtaGOQ_2\tboat\nGPViSMkz1ds_1\thorse\nGPViSMkz1ds_0\thorse\nGPZznxc87vA_0\tcow\nGPlHiCxNeIU_0\tperson\nGPnO7jt_-JI_0\tperson\nGPn2JSguaBI_4\tumbrella\nGPn2JSguaBI_0\tumbrella\nGPn2JSguaBI_1\tumbrella\nGPtN0Kb9qZs_0\ttrain\nGPzwYc908OM_0\tbicycle\nGP2YaQXsf0s_0\tumbrella\nGQJu2FlmC0A_0\tknife\nGQRDl6gw-n8_2\tbear\nGQRDl6gw-n8_3\tbear\nGQV1QfplpXU_0\tperson\nGQ6mrqpELDs_0\tperson\nGQ99sfZjwTo_0\tperson\nGRMv9irLuQw_0\tmotorcycle\nGRQUwn0jA8Q_0\tperson\nGRRXv9O7hNk_0\tmotorcycle\nGRRullNXQUY_3\tskateboard\nGRTcBPmHWPU_0\tmotorcycle\nGRjf8G-WDvc_0\tperson\nGRk94EZiwO8_0\tskateboard\nGRo9Bmi4ghA_0\tcat\nGRwCcOF0NyI_0\ttrain\nGRwCcOF0NyI_3\ttrain\nGRwCcOF0NyI_1\ttrain\nGRwCcOF0NyI_2\ttrain\nGRwvd8Xl-l0_0\tbird\nGR5qTAjCnB4_0\tcow\nGSD3hdUWKNg_0\tperson\nGSD_Asi3tsA_0\telephant\nGSD_Asi3tsA_6\telephant\nGSIFRlloCGA_0\tcow\nGSMYNBUuI74_1\tmotorcycle\nGSb8ilGRCd8_0\tumbrella\nGSkpDZZFQd4_0\tboat\nGSmR-G7zCN0_0\tairplane\nGSqatXKKzUU_1\tboat\nGS1El_XLryU_3\tbird\nGTaW87cQCZk_0\tbird\nGTegSO4BiDY_0\tperson\nGTgztSxvdzw_0\thorse\nGTg35QGB0bQ_1\tperson\nGTg35QGB0bQ_0\tperson\nGTjqtTiUFFA_0\tperson\nGTkZ7eZIV5I_0\tskateboard\nGTpF9CW8Kyo_2\tcow\nGTpF9CW8Kyo_3\tcow\nGTpF9CW8Kyo_0\tcow\nGTpF9CW8Kyo_1\tcow\nGTt9sqczKqg_0\tperson\nGTuP3gwjf70_0\tperson\nGT4askC-EmE_0\tskateboard\nGT4askC-EmE_2\tskateboard\nGT6Ta63CfGc_0\tbus\nGT7pB1SoSWQ_0\thorse\nGUA64cJx_1s_0\tperson\nGUG7toTLyt4_0\tbear\nGURTVjQ25hM_0\tairplane\neufhHTT-6cc_0\tperson\neujtr13Kbtg_0\tcow\neutsycO_2Zw_0\tumbrella\neu0WWqOzPNI_1\tboat\neu07YiPAVxk_0\ttruck\neu6zY6HpY1M_0\tperson\nevA7SzcjAkU_2\tknife\nevA7SzcjAkU_3\tknife\nevA7SzcjAkU_0\tknife\nevDr0RJRRV8_0\thorse\nevMMyqn2S94_0\tperson\nevRaMSC7xlI_0\ttrain\nevVOgDU7DsE_6\ttruck\nevcE8ru07G8_0\tumbrella\nevcWn6cN50A_0\tumbrella\nevhP2M5P0rM_1\tperson\nevksM4sehcQ_0\tcat\nevtk4IiqjkM_1\tperson\nevw-tqTTtQ8_0\thorse\nev1ATOeJPxY_0\tperson\nev1ATOeJPxY_1\tperson\nev53NA
Ljp3I_0\tperson\nev7a6Z-ZOv4_0\tperson\nev-fVsUuvfA_0\tperson\newB46nb-ZFI_0\tbird\newFZmQCCZm0_0\ttruck\newFZmQCCZm0_2\ttruck\newOgoCimrdA_0\telephant\newUWpmdjLHA_0\tbicycle\newUWpmdjLHA_2\tbicycle\newgdEY7GtsQ_1\tairplane\newkBRzmoZzo_1\ttrain\newkBRzmoZzo_2\ttrain\newkeB8zzSVE_2\tdog\newkeB8zzSVE_3\tdog\newkeB8zzSVE_1\tdog\newoUjWEEJS4_0\tdog\new9rbdv73TA_0\tumbrella\nexR3lT_G3Yk_0\tknife\nexZF88kJoP8_0\tperson\nexjWaQ0ssbM_3\tairplane\nexjWaQ0ssbM_0\tairplane\nexjWaQ0ssbM_1\tairplane\nexn-_MfEP6Q_0\tperson\nexoNfV0vU_Q_1\tperson\nexoNfV0vU_Q_0\tperson\nexw_qJh1qp8_0\tcat\nex6Il_1Ielw_0\tmotorcycle\nex7mPB9cYwc_0\tperson\nex7mPB9cYwc_1\tperson\nex-yo1W_s34_0\tskateboard\neyAxkbxVdHA_0\tperson\neyAxkbxVdHA_1\tperson\neyNJXyldIhM_0\tperson\neySeJsY8tZU_0\thorse\neyZeTi4-udw_0\tboat\neycvZhhuzOI_0\tperson\neyd3cO1cRyw_0\tperson\neyg_dFAAJ_c_0\tumbrella\neyi_kSPelbM_0\tperson\neyo2iTfyALs_0\tcat\ney49lNbkqdQ_0\tperson\ney7evH7qmFA_1\tperson\ney9CIllx21w_2\ttruck\ney9CIllx21w_5\ttruck\ney9CIllx21w_8\ttruck\nezOxb6H18Dk_0\tperson\nezX_8NsARn4_1\tperson\nezYCeDV1Aew_0\tbicycle\nezam_iANUkY_0\tmotorcycle\nezdehi1wmW4_0\tcow\nezktd-PtOQo_2\thorse\nezktd-PtOQo_3\thorse\nezrNhnjWp-s_0\tperson\nezrNhnjWp-s_1\tperson\nezu6OcJjjLk_1\tperson\nezvAmpvi364_1\tperson\nezyLlrEVZRU_1\ttrain\nez4u6-2yh8U_1\tperson\nez7mJtg4aoU_0\tcow\ne0Al-yQwL8w_1\tbear\ne0C174hEUpI_0\tperson\ne0HCj6FnKMo_0\tperson\ne0HrgDMAL5c_0\tboat\ne0K-Wc2SGSk_0\tperson\ne0V--elE2Dc_3\tboat\ne0V--elE2Dc_0\tboat\ne0XejLvBbTw_0\tmotorcycle\ne0dXS2okSxo_0\ttrain\ne0jUh6hQykw_0\tperson\ne0jUh6hQykw_2\tperson\ne0kJTvItoXc_1\tperson\ne0kJTvItoXc_0\tperson\ne0qJxStHuGA_1\tskateboard\ne0rXPv5Q8ac_0\tperson\ne1KQ3rXcBVg_0\tairplane\ne1KQ3rXcBVg_2\tairplane\ne1KQ3rXcBVg_1\tairplane\ne1S7tY6zlBs_0\tbus\ne1ZNGYPt280_0\tcow\ne1a0tLtZdm8_0\tperson\ne1dAdTW0-s8_0\tperson\ne1guDr5Lq88_0\tperson\ne1iYijyYnIc_0\tperson\ne1iYijyYnIc_1\tperson\ne1v5-Vy3ikU_0\tmotorcycle\ne11u2SRsMQk_0\tumbrella\ne110Ssoc3rc_0\thorse\ne2Biqc_Y8fI_0\tboat\ne2Biqc_Y8fI_1\tboat\ne2C6vpxx1BQ_1\tperson\ne2C6vpxx1BQ_0\tperson\ne2DeceLJ4QU_1\telephant\ne2DeceLJ4QU_0\telephant\ne2DmJ2nN-bM_0\tperson\ne2DmJ2nN-bM_1\tperson\ne2IXk3LUK0k_1\ttruck\ne2Jc499uBac_0\tbus\ne2MbvKCUxBQ_0\tskateboard\ne2oWEimFUeM_0\tboat\ne2oWEimFUeM_6\tboat\ne26M0NUTUcs_0\tperson\ne29Si0sk8Vs_0\tperson\ne3Ep8F-TVbQ_1\tbicycle\ne3Ep8F-TVbQ_0\tbicycle\ne3MrKt1yh3E_0\tairplane\ne3ezeG4Gm80_1\tknife\ne3fz03vzrmQ_0\tperson\ne3pGW6uqeQA_0\tcat\ne3tP581aZ0Q_0\tperson\ne34jQApS9Bw_0\tperson\ne3_zIH1Jrf0_0\tperson\ne4R8Aj-X5iA_1\thorse\ne4ZrrwoRRXc_0\tbear\ne4c8OdRhAyA_0\tknife\ne4c8OdRhAyA_3\tknife\ne4iZ27N3agg_0\tperson\ne4rO9AJXQzY_1\tperson\ne4yT58KhTcs_1\tairplane\ne4yT58KhTcs_2\tairplane\ne4zdJYlc4z8_0\tperson\ne47QRGUx_Hs_0\ttruck\ne47QRGUx_Hs_1\ttruck\ne48A0CBQct8_0\tperson\ne5CFfGS4B1s_0\tperson\ne5DZWu7GqG4_3\tbicycle\ne5MbNYLt7wU_0\tperson\ne5MbNYLt7wU_1\tperson\ne5RlRpaBXnE_0\tdog\ne5UjJAZHaBc_0\tperson\ne5VUEXqXFTM_0\tumbrella\ne5kfPy-MIGw_0\telephant\ne5lFDgi4EIs_0\tcow\ne5-Pz_Q8VUA_0\tperson\ne6F88LQJoLc_0\tperson\ne6G0gHixPGE_0\tboat\ne6IQ-jfygns_0\tperson\ne6IQ-jfygns_1\tperson\ne6T5hbKQwAs_0\tperson\ne6aWxOF189s_0\tperson\ne6hz-jEGxsg_0\tperson\ne6muu75RFmg_0\tbus\ne6s13mZyuYY_0\tskateboard\ne6s13mZyuYY_2\tskateboard\ne6s13mZyuYY_3\tskateboard\ne6xT3S6wuwE_0\tperson\ne64lVlYKNYs_0\thorse\ne7IeNjbA7ms_0\tmotorcycle\ne7JZ2C-e9_w_1\tskateboard\ne7Q3z9gbUw8_0\tskateboard\ne7TKWwysO8Q_0\telephant\ne7W79Xp4qxI_0\tperson\ne7aF0fG2O2U_0\tbear\ne7aF0fG2O2U_1\tbear\ne7eZQb8WjmQ_0\tperson\ne7xAzZCvd_Y_0\ttru
ck\ne70XtlB-Au8_0\ttruck\ne70XtlB-Au8_1\ttruck\ne70XtlB-Au8_2\ttruck\ne70XtlB-Au8_3\ttruck\ne70XtlB-Au8_7\ttruck\ne70jqVThihE_3\tknife\ne70jqVThihE_1\tknife\ne72VJJ7jkoI_2\tairplane\ne76gr0pJMLg_0\tboat\ne8BQbcBgcjc_0\tperson\ne8VeeESy9Xc_0\thorse\ne8XzpXJnucs_0\tmotorcycle\ne8XzpXJnucs_1\tmotorcycle\ne8XzpXJnucs_2\tmotorcycle\ne8Y4hXyFPDY_0\tperson\ne8ZFu6n4mg8_0\tperson\ne8b7eo56B5Y_1\tperson\ne8b7eo56B5Y_0\tperson\ne8mSJe1G9U4_0\thorse\ne8mSJe1G9U4_1\thorse\ne8mSJe1G9U4_3\thorse\ne8mSJe1G9U4_4\thorse\ne804z6ehgWE_0\ttrain\ne836XbTclWA_0\tperson\ne86xkdgTdTA_0\tperson\ne873uWjeaPU_0\tperson\ne88X3OKvqTI_0\tcow\ne9Ceg407V2o_1\tbird\ne9GSzFiQj8I_0\tperson\ne9GoxfmycMQ_0\tperson\ne9MugXot7JI_0\telephant\ne9MugXot7JI_2\telephant\ne9MugXot7JI_1\telephant\ne9Y8BHEdYpg_1\tperson\ne9Y8BHEdYpg_0\tperson\ne9Z237Wup_E_0\tboat\ne9aADbJBMmQ_1\tboat\nGUY72Rg_9g4_3\tairplane\nGUY72Rg_9g4_0\tairplane\nGUY72Rg_9g4_1\tairplane\nGUY72Rg_9g4_2\tairplane\nGUcZWh6tol4_0\tcow\nGUq5xrqphew_0\tcow\nGVCJZzVnGUQ_2\tperson\nGVCJZzVnGUQ_0\tperson\nGVCJZzVnGUQ_1\tperson\nGVG_dHMt7eA_0\ttruck\nGVRLfBtpGgA_0\tperson\nGVeNt6hXwK4_0\tperson\nGWCwYIRE8YU_0\tperson\nGWIAU4GsgZM_0\tperson\nGWQD6FxWwpk_0\tboat\nGWckuI3sTHA_0\tbear\nGWmOpSmpGmg_0\tcar\nGWmOpSmpGmg_1\tcar\nGWmOpSmpGmg_2\tcar\nGWsXKIAM9yY_1\tcat\nGWsXKIAM9yY_0\tcat\nGWygvbszdUs_1\ttrain\nGXS6axKBr7A_0\tperson\nGXX1pJeR1HE_0\telephant\nGXX1pJeR1HE_1\telephant\nGXZ3IXi7YXk_0\tperson\nGXcbgDsx_Zc_0\tperson\nGXfsYdVEMeA_10\telephant\nGXfsYdVEMeA_0\telephant\nGXfsYdVEMeA_5\telephant\nGXfsYdVEMeA_6\telephant\nGXfsYdVEMeA_8\telephant\nGXgoAnrkdVg_0\tperson\nGXiDQ52vcoY_0\tperson\nGXoA1zfvnOA_0\tcar\nGXrzW-OHh_Q_0\tcow\nGXtA9dxzvII_0\tperson\nGXyeuhOYX2k_0\ttruck\nGXyeuhOYX2k_1\ttruck\nGX1v3ymtHtc_0\tperson\nGX-3aTTy4lM_0\tperson\nGX-3aTTy4lM_1\tperson\nGX-3aTTy4lM_2\tperson\nGYA-3PblNaU_0\tperson\nGYHWtVM2x6c_0\tperson\nGYTD79P3b8w_1\tperson\nGYT5Cq1tl2Q_0\tcat\nGYWNYnWPaeE_0\tperson\nGYY-ElZl7ZM_0\tdog\nGYldHkVSD_A_3\tairplane\nGYmeM7epDjY_0\tperson\nGYmeM7epDjY_1\tperson\nGYoXwAkvJns_0\tperson\nGYsx_49_O1U_0\ttruck\nGYuIsHEGV6o_0\tperson\nGYuMuXQgLPI_0\tperson\nGY0HVEiAPvo_0\tperson\nGY3D9bb9kLY_0\tairplane\nGY65ShkktrM_1\tperson\nGY9iCFFBA20_0\tperson\nGY-carc6vxw_2\thorse\nGY-carc6vxw_3\thorse\nGY-carc6vxw_4\thorse\nGY-dmOLQNH4_0\ttruck\nGZIpKCyb0bU_0\tairplane\nGZLsv-Y_aRw_0\tperson\nGZM5nvvMeNo_1\tairplane\nGZOUGcF_xaM_2\ttrain\nGZThnpa-8Ak_0\ttrain\nGZUk3BlrK7k_0\tperson\nGZWH1bUqm9U_0\tperson\nGZYSkuRZwGE_2\tskateboard\nGZb9G8sVRz4_0\tperson\nGZb9G8sVRz4_1\tperson\nGZgL3ZQI9nM_0\tcow\nGZhuCclpFuk_0\telephant\nGZq8tIKR9b4_5\tbus\nGZsP_n7aFMo_0\tperson\nGZxvpxqvHFs_1\tairplane\nGZ0bYvVD_us_1\tbird\nGZ1aL_iE5a8_1\tperson\nGZ6PRvVVeZk_0\tperson\nGaAL3IYDUgM_0\tskateboard\nGaD4QsNCcik_0\tperson\nGaF_t9Af1hg_3\tumbrella\nGaJvFxg_lFY_0\tperson\nGaJ7Bu5UrgQ_1\tbus\nGaJ7Bu5UrgQ_2\tbus\nGaVmURUD-i8_0\tperson\nGaYAyNs2FDI_1\tperson\nGad1St-JBls_0\tdog\nGaeWhfSP3EA_2\tknife\nGagCDetg0dg_0\tbicycle\nGai7qgVSFc8_1\tcat\nGangZBQawtQ_0\tperson\nGax9nZtMs7M_0\tperson\nGayl2EVJTkw_0\tdog\nGa3YHyqOqYY_1\tperson\nGa3YHyqOqYY_0\tperson\nGa_Oju23T9s_0\tperson\nGbBl5CcJgeE_14\telephant\nGbBl5CcJgeE_6\telephant\nGbBl5CcJgeE_8\telephant\nGbBl5CcJgeE_9\telephant\nGbBl5CcJgeE_10\telephant\nGbC0DAAn-XU_3\tbear\nGbC0DAAn-XU_12\tbear\nGbC0DAAn-XU_14\tbear\nGbE-oXaNVBA_0\telephant\nGbE-oXaNVBA_3\telephant\nGbE-oXaNVBA_5\telephant\nGbE-oXaNVBA_6\telephant\nGbE-oXaNVBA_7\telephant\nGbE-oXaNVBA_8\telephant\nGbE-oXaNVBA_9\telephant\nGbE-oXaNVBA_12\telephant\nGbGEC5pQ9f8_1\tcow\nGbHLET097K8_0\tbo
at\nGbN_zMz1D6o_0\tperson\nGbOK07Tq7mA_0\tboat\nGbVDftpuPMo_1\tperson\nGbW-55xLUnQ_0\tairplane\nGbY3uHcC3ys_0\ttruck\nGbbhlv2Obsc_0\tperson\nGbbhlv2Obsc_1\tperson\nGbd1-rm9Oyw_0\ttruck\nGbmEMxbMtCI_0\tbicycle\nGbs4s3pX3H0_5\tknife\nGbs4s3pX3H0_0\tknife\nGbs4s3pX3H0_1\tknife\nGbs4s3pX3H0_2\tknife\nGbs4s3pX3H0_3\tknife\nGbulfCx1hwo_0\tperson\nGb_YkJHLgns_0\ttrain\nGb_YkJHLgns_1\ttrain\nGcCQF52Ok14_5\tperson\nGcCQF52Ok14_1\tperson\nGcCQF52Ok14_3\tperson\nGcCQF52Ok14_4\tperson\nGcEgsdqMiBg_1\tperson\nGcEsDxUkr00_5\telephant\nGcEsDxUkr00_1\telephant\nGcRRhnk4ynk_0\tperson\nGcnVDv6bIAk_0\tperson\nGctFFbsebBs_0\tperson\nGcwS7IyeG5Y_0\tmotorcycle\nGc0lgXRlxGE_1\tperson\nGc0lgXRlxGE_0\tperson\nGc3iNFz3s-o_0\tcow\nGc5OyOM0VxI_1\tperson\nGc5OyOM0VxI_0\tperson\nGdI2CnryrFQ_2\tcar\nGdNJ-VDNc3k_1\tperson\nGdQuxx_RXvs_2\tbear\nGdbphRsxpKU_5\thorse\nGdbphRsxpKU_3\thorse\nGdfyxcmHHOQ_0\tperson\nGdiGBeJ9m_k_0\tperson\nGdiGBeJ9m_k_1\tperson\nGdsJ0QHb83w_1\tperson\nGdsJ0QHb83w_2\tperson\nGduwjeptozQ_0\tperson\nGd5qUjEeqZ4_0\tmotorcycle\nGeHV-tf-ZGA_0\tbus\nGeUECF6hDkg_0\tairplane\nGeb74PkjTYY_1\tperson\nGehgPYVYwDs_0\tperson\nGek3IJfBaU0_0\ttrain\nGeuYAXldbbg_4\tairplane\nGeuYAXldbbg_1\tairplane\nGeuYAXldbbg_2\tairplane\nGeuYAXldbbg_3\tairplane\nGewTJtB97l8_2\tknife\nGe2suMLyOTY_0\tcow\nGe4SjOnEYWs_1\tperson\nGe4SjOnEYWs_0\tperson\nGe8RWLzmrE0_0\tperson\nGe8RWLzmrE0_2\thorse\nGe9uJatNWuw_0\tperson\nGe9uJatNWuw_1\tperson\nGe-VfDpriPY_1\tperson\nGe-VfDpriPY_0\tperson\nGfCjURNr9T4_0\tperson\nGfLxzlZxHic_0\tperson\nGfbcHsH3DKI_0\tperson\nGfeXUZVyvL4_0\tperson\nGfefENTSQOI_0\tperson\nGfkX7I9bclY_0\tcow\nGfqA0SZPeXU_2\thorse\nGfqA0SZPeXU_3\thorse\nGfxwasnA0Ao_0\tbird\nGfxwasnA0Ao_3\tbird\nGfyBiJNU7bY_0\tcar\nGf50aWojLhk_1\tairplane\nGgV4eSmNyaA_1\telephant\nGgV4eSmNyaA_0\telephant\nGgcoCmlTlbc_0\tperson\nGgfESlKFIkU_0\tdog\nGgkncqtrgPI_0\tperson\nGgsFohIKlpw_0\tdog\nGgyOGY2q9xE_0\tskateboard\nGg9uDi7KjJ0_0\tperson\nGhBPvHC15BE_0\tperson\nGhHPtGuUtRY_0\tperson\nGhI4uqxOQpc_0\thorse\nGhLdswZDYMs_0\tbicycle\nGhLdswZDYMs_1\tbicycle\nGhMC34aeHnU_2\tperson\nGhMC34aeHnU_0\tperson\nGhMC34aeHnU_1\tperson\nGhQRZOseJfY_0\ttruck\nGhbtO__NASs_0\tperson\nGhbtO__NASs_1\tperson\nGhbt5lVT3dk_0\ttruck\nGhiVm-6oFyg_0\ttrain\nGhwtPgHjLvg_0\tdog\nGhxWr3HvvXA_1\tperson\nGiRzA3Fe1-s_0\tperson\nGijruln92tk_0\ttruck\nGik59IGJFLo_0\tbird\nGioAI9XlGGg_0\tbird\nGioEMsI07Jw_0\tperson\ne9ihaIQuVMU_0\tknife\ne9ihaIQuVMU_2\tknife\ne9iolRKSwBw_0\tperson\ne9mOqKDBOVg_0\tperson\ne9nH--aGWDM_0\tperson\ne90GV6rl3NE_0\tperson\ne9-w67QSEBs_0\tperson\ne9-w67QSEBs_1\tperson\ne9_LqDqVkGs_0\tperson\ne9_LqDqVkGs_1\tperson\ne9_LqDqVkGs_2\tperson\ne-PcZyfAPZ4_0\tperson\ne-R-FxrDQao_0\tperson\ne-dVHSE1qXI_0\tperson\ne-gU8I2kZyY_1\tbicycle\ne-n0pRU6uSk_0\tbus\ne-n0pRU6uSk_1\tbus\ne-qbVMLqnEw_0\tperson\ne-siUblegSA_0\tdog\ne-siUblegSA_1\tdog\ne-v2yWUGKiU_1\tboat\ne-zbkYroVUk_0\tperson\ne-43rdp3psc_0\tperson\ne--Qr92yhBo_2\thorse\ne--vN-5QX-E_0\tperson\ne-_nLPye6sc_0\tperson\ne_APlM8VSiw_1\tperson\ne_APlM8VSiw_0\tperson\ne_FyX6iUBZk_1\tperson\ne_GD2rN9Jcg_0\tperson\ne_SYVD0TY14_0\tairplane\ne_UwPkRMD74_0\tperson\ne_aHtRh2PpI_0\tcat\ne_b_4zlKmdo_0\tgiraffe\ne_qdDAeerKQ_1\tbird\ne_-SOM0hufo_0\ttruck\nfAHFZWyNZQ4_0\tbird\nfAHFZWyNZQ4_2\tbird\nfAJAQb5tzFA_0\tdog\nfAJ939SI_YI_0\tperson\nfAKXvHREf8E_0\tbird\nfAMkbedQ0GI_1\tperson\nfAQoNDLgds4_0\tbear\nfAUG8-TdflE_0\tperson\nfAjj5137yKM_0\tbicycle\nfAm_6grpTOI_0\tperson\nfAyBUKM7898_0\tperson\nfAz2ecihxEU_0\tperson\nfA5ArJS7ScI_0\tcar\nfA6XfSl7pqY_0\tperson\nfA_OWAI_8kc_0\tperson\nfBH6rLEukMU_0\tperson\nfBIh-CAYfy0_0\tper
son\nfBLrr2zYnRw_1\tperson\nfBLvIU3Q7Rw_0\thorse\nfBPjBSdwz1o_0\telephant\nfBPjBSdwz1o_1\telephant\nfBP3dZYp3sM_0\tperson\nfBT1cNog4Lw_0\tperson\nfBkDTXhVYCs_0\tgiraffe\nfBmp8URVoB4_0\tcar\nfBsQegHOF8Y_0\tperson\nfBtfkn4uDKE_0\tcow\nfBvAf66603Q_0\tperson\nfBwrgO05rqo_0\ttruck\nfByljFegqK4_0\tperson\nfCADagfWgSU_1\telephant\nfCK_OirKTO4_0\tperson\nfCMJnkyFS5c_0\tperson\nfCMJnkyFS5c_1\tperson\nfCPVsi1S2jM_0\tcat\nfCTNp-hiUkQ_0\tperson\nfCTNp-hiUkQ_1\tperson\nfCT0UeuTcQk_0\tperson\nfCUZclkgF-c_3\tcar\nfCUZclkgF-c_4\tcar\nfCUZclkgF-c_5\tcar\nfCVoLETgca4_0\tbicycle\nfCW56GByDs0_1\tperson\nfCW56GByDs0_0\tperson\nfCX_8Q_OAos_1\tdog\nfCZXrHFimHM_0\tperson\nfCbvdNQUcRE_0\tcat\nfCdlrWXZ7kY_0\tperson\nfCiWi1Dk-yE_1\tperson\nfCkgtao7rJk_0\tmotorcycle\nfCmwPCLYVXE_0\tskateboard\nfCmwPCLYVXE_1\tskateboard\nfCm-8YmQfoY_1\tgiraffe\nfCoXLMBzqTc_0\tcat\nfCohGx6PWyM_0\tperson\nfCr-fmsVVWE_0\tperson\nfCsSoErwvfw_2\tskateboard\nfCsSoErwvfw_0\tskateboard\nfCsSoErwvfw_1\tskateboard\nfCtyUxRaSdQ_0\tskateboard\nfCwicNYDKmo_0\tperson\nfCzWVcZvGuk_1\tmotorcycle\nfC6O_2ljm_c_1\tperson\nfC6O_2ljm_c_2\tperson\nfC6O_2ljm_c_0\tperson\nfC8FUnipL3M_0\tbird\nfDBgRd9yK8Q_5\tairplane\nfDBgRd9yK8Q_1\tairplane\nfDBgRd9yK8Q_4\tairplane\nfDCK-s1gX18_0\tskateboard\nfDCadv28EEo_1\tperson\nfDCadv28EEo_0\tperson\nfDFpsal4hHo_0\tperson\nfDIVkvMCQ9I_1\tcow\nfDJjIhw4XBI_2\tperson\nfDJjIhw4XBI_1\tperson\nfDLBxom0wgI_1\tcat\nfDVesIz_ON0_1\tperson\nfDe30IPiQ0Y_1\thorse\nfDuiW9_sHcQ_1\tperson\nfDyXAhF761Q_0\tperson\nfD89z8ycv7U_0\tperson\nfD89z8ycv7U_1\tperson\nfD89z8ycv7U_2\tperson\nfEDj20Gce80_0\tboat\nfEK6hdzjG5E_0\tcow\nfESV3o1vc1A_1\tbird\nfES_1kR2d8o_0\tperson\nfEVLKYBuE7k_0\ttruck\nfEXq69B6L0s_0\tgiraffe\nfEZ5cqJWg0A_0\tbicycle\nfEdlpwoza6o_0\tperson\nfEdlpwoza6o_1\tperson\nfEdlpwoza6o_2\tperson\nfEgqRE0XOMM_0\tperson\nfEh5hyz4LCU_0\tskateboard\nfEiWI60P4XI_0\tbicycle\nfElOryAiN0s_0\tperson\nfEmh4mfGsCA_0\tperson\nfEupHSTMXLk_0\tknife\nfE0raHY_nY8_0\tcat\nfE_sSvVFvZU_0\tdog\nfFBkKrJlobs_0\tcow\nfFEDu-fiUUM_0\tperson\nfFGmvl4E9QI_0\tbird\nfFImZECw1c0_0\tskateboard\nfFImZECw1c0_1\tskateboard\nfFOTZMvg0n0_0\thorse\nfFRp0dBucFA_0\tbus\nfFTJuANVr2I_0\tperson\nfFWU4PNTKDo_0\tperson\nfFWU4PNTKDo_1\tperson\nfFaJ5epORzQ_0\tperson\nfFd91uPKDVA_0\tperson\nfFksYDaR-NI_1\telephant\nfFmCHQgzMRc_1\tperson\nfFmCHQgzMRc_2\tperson\nfFmhW2ygNKw_0\tperson\nfFncU3kR5qw_0\tcar\nfFogpyIr-Ic_0\tperson\nfFq0hnzgGSw_2\tbicycle\nfF0RlMrKBFo_0\tbicycle\nfF1S-952IOU_0\thorse\nfF3WOuwnvrA_3\telephant\nfF3WOuwnvrA_5\telephant\nfF3pBoS7xFg_1\tperson\nfF3pBoS7xFg_0\tperson\nfF34g3sNiHo_0\tperson\nfF7snD5S5Q4_0\tcar\nfF_BanWRtKo_1\tskateboard\nfF_BanWRtKo_0\tskateboard\nfGGJnSDPzUI_0\tperson\nfGI6_U9U_zc_1\tperson\nfGPsR0YiVaE_0\ttrain\nfGgJ0VACAo4_0\tumbrella\nfGlnCmVPzIs_0\tperson\nfGrC6VCXVL4_0\tperson\nfG1NOqIRoLA_0\tperson\nfG6uSVeocMo_0\tperson\nfG-4n3Gy1fk_0\tperson\nfHO3g6Q_bNE_0\tperson\nfHUjlWalvJQ_0\tperson\nfHVJzD_AvV8_0\tperson\nfHepRAiQQ04_0\tcow\nfHlfVMMfXNg_0\tperson\nfHm5WgSYk2Y_0\tbus\nfHoBjwC8H50_0\tdog\nfHoBjwC8H50_3\tdog\nfHsaxiTw0dI_0\tmotorcycle\nfHzSK8AEv5U_0\tperson\nfHzzixV1xyg_1\tcow\nfH5U2jXbkEg_1\tknife\nfH8PS8Fjvbg_1\tcow\nfH8PS8Fjvbg_2\tcow\nfIABVBcluZ0_0\tskateboard\nfIABVBcluZ0_1\tskateboard\nfIFMCt78hmI_0\ttruck\nfILyoB3Pgrg_1\tdog\nfIM7jmsq_FE_0\tperson\nfIN8z4lkdyA_0\tcar\nfIN8z4lkdyA_2\tcar\nfIN8z4lkdyA_3\tcar\nfIPXE6MOZp0_0\tairplane\nfIT1bTlW3UQ_0\tperson\nfIVT3rTMptI_1\ttruck\nfIXFrPFEL0w_0\tgiraffe\nfIlXSJxnKD8_0\tperson\nfInEVgREyyY_0\tdog\nfInYB8sD7tM_0\tperson\nfIrb5Y93wjw_0\ttrain\nfIvUwaa2ziY_0\tperson\nfIyrHecb8SQ_0\telep
hant\nfI0VoDDN2lE_2\tperson\nfI0VoDDN2lE_0\tperson\nfI0VoDDN2lE_1\tperson\nfI5fnVs_kWg_0\tmotorcycle\nfI8DySScPWU_0\tskateboard\nfJGPTgv8EUs_0\tperson\nfJJBGybbnH4_1\tknife\nfJJX9D4siG4_0\tcat\nfJTeqi3aqRc_0\tcar\nfJYGkMT9c6U_0\ttruck\nfJY5zGaYs8s_0\tperson\nfJdWgbIMXZ0_5\ttrain\nfJdWgbIMXZ0_0\ttrain\nfJdWgbIMXZ0_2\ttrain\nfJpRqXhL3wE_0\tskateboard\nfJp4DAu46Yg_1\tperson\nfJxbRDMY46o_0\tperson\nfJyBgU7rZvE_0\tperson\nfJ71o3Q-oVE_1\tcat\nfKDRpRcSnrw_0\tcat\nfKHs2FNZk6M_0\tperson\nfKLJqhEdsTY_0\tcow\nfKLJqhEdsTY_1\tcow\nfKLS0DAexvw_1\tboat\nfKLS0DAexvw_2\tboat\nfKLS0DAexvw_3\tboat\nfKRZ4PPWgg8_1\tperson\nfKcOtlmf6r0_3\tboat\nfKcOtlmf6r0_2\tboat\nfKgpRiyDlvc_0\tperson\nfKhENDvpnmA_0\tboat\nfKhe37bCgeA_1\thorse\nfKp-Lvw2bUM_2\telephant\nfKp-Lvw2bUM_3\telephant\nfKp-Lvw2bUM_4\telephant\nfKrxRvMxZqM_0\tperson\nfKxBpYS29uM_0\tdog\nfKyPRwF5y6s_0\tperson\nfKzFEc6hR-c_2\tperson\nfK89Z2AwlCg_3\tbus\nGiuUBGsdiqI_0\tperson\nGizeLrnWRmk_1\tperson\nGizeLrnWRmk_0\tperson\nGi--TM8Xz3I_0\tperson\nGjCs_s2EnpE_0\tperson\nGjFr4qO_LX4_0\tdog\nGjJFQButa0w_0\tbear\nGjJk6U2crcw_0\tskateboard\nGjJp-yqt7xk_0\tairplane\nGjZDPTKpIdE_0\tperson\nGjZP-buSAG8_0\tperson\nGjdyi0kf79Y_0\ttruck\nGjfhgZMeHAA_0\tperson\nGjgu3OFbWKI_0\tbear\nGjkrI0adkJk_0\tperson\nGjmNPrYyCwg_0\tperson\nGj87GZKvhdo_0\thorse\nGkCXvg93pAA_0\tcow\nGkGG1F5by14_0\tperson\nGkddmkbGSAc_0\tcat\nGkfp-yV9e94_0\tperson\nGklwzbjOzYQ_0\tperson\nGkmRFBuktnQ_0\tperson\nGkxkfi_wHeA_1\tmotorcycle\nGkxkfi_wHeA_0\tmotorcycle\nGk6IzYQADXg_1\tskateboard\nGk6IzYQADXg_0\tskateboard\nGk9v8ABOPNw_1\telephant\nGlLzIn-6ouU_1\tbicycle\nGlLzIn-6ouU_2\tbicycle\nGlPdixjfu44_0\tcat\nGletqIQ8irw_0\tmotorcycle\nGlsMcq1cM2c_1\tbird\nGlxEVs7z_7Y_0\tperson\nGl7S2JNezLg_0\tboat\nGl7S2JNezLg_3\tboat\nGl9cy66E4FQ_2\tknife\nGl_UMssuTWU_0\tperson\nGmI47tbiNQ0_0\tperson\nGmKT2rhDILU_1\tknife\nGmQX3sIhhqo_0\tcow\nGmS0yrU3Hcw_0\tperson\nGmUFocQWPTo_1\tboat\nGmdxq1glmKY_1\tdog\nGmeGRg8XZ5M_0\tperson\nGmvKmbIHKHM_1\tperson\nGmvKmbIHKHM_0\tperson\nGmww9V50JtU_0\tdog\nGm9BnQSZlxk_1\tperson\nGm9kb3zHsLA_0\tcat\nGnFoElm_rrw_0\tdog\nGnGd8Q_cSHU_0\tperson\nGnGd8Q_cSHU_1\tperson\nGnO2sxJNWjk_0\telephant\nGnRp7QHoAr4_0\ttrain\nGnkSrEpnmRo_1\tperson\nGnmgLr5p-r8_0\tbus\nGno0JyFsjGk_5\tknife\nGn0av9LV5FU_0\telephant\nGn3AqY6vUyU_0\telephant\nGn7B_MiLuhA_0\tskateboard\nGoEBr-GbeCk_0\telephant\nGoEcYxqxcZ8_1\tbus\nGoEy1J3s8Xs_0\tcow\nGoRGaOgttBU_0\thorse\nGoUjZ5wJ2do_0\tcar\nGoWyqQorqOY_0\tcat\nGoXlqK766lk_0\tperson\nGolDzhH16vg_0\ttrain\nGorfZ7y-Jw8_0\tskateboard\nGosFitiV7as_0\tperson\nGotzQ9ecvkM_0\tperson\nGoubTEJzKUI_0\tperson\nGo16BKYvDSs_0\thorse\nGo5M-oyC28A_0\telephant\nGo8BM-B0ML4_0\tskateboard\nGpCjTjkSw3k_0\ttrain\nGpCjTjkSw3k_5\ttrain\nGpCjTjkSw3k_3\ttrain\nGpCjTjkSw3k_4\ttrain\nGpCjTjkSw3k_2\ttrain\nGpDilZGSveI_0\tperson\nGpJmJforKzo_0\tperson\nGpPbMduP_3Y_0\tcow\nGpProJiVxa4_0\tbear\nGpTPDl3MzZw_0\tcat\nGpVy_gD1slw_0\tdog\nGpY4Nw8LLy4_0\tbird\nGpkftB3rq5g_0\tdog\nGpn_kF1lXuc_0\tbicycle\nGpn_kF1lXuc_8\tbicycle\nGpn_kF1lXuc_13\tbicycle\nGpn_kF1lXuc_14\tbicycle\nGpzE4RQTM1Y_0\tairplane\nGp3g6UYBBzw_0\tperson\nGp3g6UYBBzw_1\tperson\nGp70TnjZRfU_1\ttrain\nGp70TnjZRfU_2\ttrain\nGp70TnjZRfU_0\ttrain\nGqZeX-EEEL8_0\tperson\nGqc_LkQvKak_2\thorse\nGqjVd_dRiB8_0\tperson\nGqjVd_dRiB8_1\tperson\nGqjoBpwsgUc_0\tperson\nGqjoBpwsgUc_1\tperson\nGqntj1GoicU_0\tbus\nGqzN0dyl5p4_4\ttruck\nGq-mMFeLCyo_0\tperson\nGrG-ipHg_4w_0\tperson\nGrK4qEJjeKE_0\tairplane\nGrNDwiO4kdI_0\tairplane\nGrQ0zJbkeXE_0\tperson\nGrXOOtPiIGw_0\tzebra\nGrYsw9-Skqg_0\tperson\nGrZvWtxffXE_0\tperson\nGrpvM1_CRqI_0\ttrain\nGruxXrzWzjk_0\t
airplane\nGruxXrzWzjk_2\tairplane\nGruxXrzWzjk_3\tairplane\nGruxXrzWzjk_5\tairplane\nGrzyUDtV-Ug_0\tperson\nGr6be_D6d9Q_2\tskateboard\nGsFDHyoPppk_0\tperson\nGsGHB19iuE4_0\tperson\nGsKJMkVSeV4_2\tairplane\nGsL7VYYWhu0_0\tperson\nGsOgw9XtlWc_0\tairplane\nGsOgw9XtlWc_1\tairplane\nGsTlT_7Zb1Y_0\ttrain\nGsVvc55IHn0_0\tskateboard\nGshXL9V-lrM_1\tperson\nGsj4aXqBPHM_0\ttruck\nGsn06D15nmk_0\tmotorcycle\nGsrSyK5ymQo_0\tboat\nGsrenPacLW0_1\tperson\nGs67R7prarI_1\tmotorcycle\nGs7J9Yo-uF0_0\tcow\nGs7J9Yo-uF0_1\tcow\nGs79ZsyWm74_0\tperson\nGtAKWYvc9kY_0\telephant\nGtCbEqqQgqY_0\tperson\nGtCbEqqQgqY_1\tperson\nGtD2m1EXxjc_1\tbicycle\nGtKaIcQJZcc_1\tperson\nGtLYNeredOY_0\tboat\nGtVrmoeEcMM_0\tknife\nGtZPw5ftw88_0\tperson\nGtZSRodviU8_0\tperson\nGta1hcIAAE0_0\telephant\nGtiiYqVQ2Kw_0\tperson\nGtmp8y8APfQ_1\tskateboard\nGtnqm4SnEXo_0\thorse\nGtnqm4SnEXo_1\thorse\nGtnqm4SnEXo_2\thorse\nGtnqm4SnEXo_3\thorse\nGtnqm4SnEXo_4\thorse\nGtqcx01NTTw_0\tknife\nGtsvc9lA7hs_0\tairplane\nGt33VfmFDWw_0\tperson\nGt6q9b3QUvE_0\tbicycle\nGt6q9b3QUvE_2\tbicycle\nGt7thmVY6aQ_0\tperson\nGuQvGMFuhu4_1\tcar\nGuQvGMFuhu4_3\tcar\nGuXelRN3wMo_4\tbear\nGuaD24NfCe0_0\tperson\nGuawwNMbfBI_0\tperson\nGue43DvNTGc_1\ttrain\nGuf15LHosg8_0\tperson\nGugU0nZdPJU_0\tbus\nGuhfGduN9v0_0\tperson\nGulmsZq-VsU_6\tboat\nGulmsZq-VsU_0\tboat\nGulmsZq-VsU_3\tboat\nGulmsZq-VsU_4\tboat\nGulmsZq-VsU_5\tboat\nGusEs8RA4_o_0\tmotorcycle\nGuwTG6RtcFI_0\tperson\nGu4MWCc2Wws_0\tbicycle\nGu-vFv_w9Vo_0\tperson\nGvFmkdxnKyI_0\thorse\nGvIj2sMkJwM_0\tperson\nGvNhgCGtUOQ_0\ttruck\nGvQvyfTNykM_0\ttruck\nGvRM_UnjJoE_2\thorse\nGvdMRPX4KR4_0\ttrain\nGvdMRPX4KR4_1\ttrain\nGvdMRPX4KR4_5\ttrain\nGvoIcT-hFek_0\tperson\nGv9mTaerVLc_0\tperson\nGwFrSa-YwfI_0\tbear\nGwFrSa-YwfI_1\tbear\nGwIn1NaaEwE_0\tbus\nGwbpMG2B14Y_0\ttruck\nGwgaNLd1f7s_0\ttruck\nGwlNXPuUvXM_0\tperson\nGwnBP9a07RE_0\tperson\nGwnBP9a07RE_3\tperson\nGwnBP9a07RE_4\tperson\nGwnBP9a07RE_1\tperson\nGwnBP9a07RE_2\tperson\nGwx1ad4lW1Q_2\tperson\nGwyl7djxZkg_0\tcow\nGwy4ODXAAU8_0\tperson\nGw5YyHT1Nt8_0\tperson\nGw9Vi_Io9DM_0\tperson\nGw_Tiv72jms_1\thorse\nGxANCkxq7Ng_0\tmotorcycle\nfLCd0DDhfBk_0\tperson\nfLEUT0rTkv0_0\tbird\nfLJniCJFPTg_3\telephant\nfLPHwVvk6K4_0\tperson\nfLPHwVvk6K4_1\tperson\nfLWW1YWO26Y_0\tbird\nfLdMmSIfseM_2\tperson\nfLdMmSIfseM_0\tperson\nfLe279fKywo_0\tdog\nfLsDTJxlsW8_0\tperson\nfLwrxElzLZs_0\tperson\nfLyNbq9v6kg_0\tperson\nfL1w15qwbqE_0\tperson\nfMOnb4P7tww_1\tperson\nfMOnb4P7tww_0\tperson\nfMO1J7ojQqk_0\tdog\nfMTosfHKy2I_0\tdog\nfMi6lVyCOHw_0\tboat\nfMwCpOTv9RY_0\tbus\nfM-puV4uyzs_0\tperson\nfNAZ9IDLZy0_0\tperson\nfND_OguW0MM_1\telephant\nfNIdPhAsjiM_0\tcat\nfNJSPU5r3sc_0\tperson\nfNO_o1D0kvY_0\tperson\nfNdRm3HWQmo_1\tmotorcycle\nfNgr2EBEDCQ_0\tcar\nfNgr2EBEDCQ_1\tcar\nfNg3y0FHjgg_0\tperson\nfNhDT1fwzKM_0\tperson\nfNhDT1fwzKM_1\tperson\nfNh54BNEJBQ_0\tcat\nfNw9dDcM4ms_0\tbear\nfN-FYknWOSk_1\tperson\nfN-FYknWOSk_2\tperson\nfN-43XPvLwg_0\tmotorcycle\nfOLR2dvBtqo_0\tcow\nfOO1pHvrPWQ_0\tperson\nfOatLQK_AyQ_3\tbicycle\nfOcPVX4sAxg_0\thorse\nfOjKgQf86dk_0\thorse\nfOkrLuGKDvk_0\tperson\nfOkrLuGKDvk_1\tperson\nfOkrLuGKDvk_2\tperson\nfOsd2aWzfBo_0\tcow\nfOtnatCU7_Q_0\tperson\nfOuV2101nEo_0\tbear\nfOv8ocd2xhA_2\tknife\nfO30fgQYdT4_0\tbus\nfO8Do_0RQXU_0\tperson\nfO9GgD7GqE0_2\tbus\nfPBIIZV6fuU_0\tperson\nfPMNtuJztSA_0\tperson\nfPVn9Wxf_HQ_0\tperson\nfPVn9Wxf_HQ_1\tperson\nfPrhiYslRjA_0\tperson\nfPzDDdztZNk_0\thorse\nfPzQyo7caqU_0\tperson\nfPzqpL90owQ_6\tbear\nfP5AyxuGIS8_0\tperson\nfP8x_x2_k5g_0\tperson\nfP-DMm3u5n4_0\tcat\nfQEGEb4W3IE_0\tperson\nfQNyLEXwnn0_0\tperson\nfQOjoYB5hPQ_0\tperson\nfQOjoYB5hP
Q_1\tperson\nfQOymYsdTtU_0\tperson\nfQdA_-549Dk_0\tdog\nfQh5RtZzYzo_0\tbicycle\nfQlChBB42M0_0\tperson\nfQoJWcmQmsU_1\tperson\nfQo0G2i1QjY_0\tperson\nfQt3g_9u1RQ_0\tairplane\nfQyE_yIAu_0_1\tskateboard\nfQ26oO2Y5NM_0\tbicycle\nfQ4H6UmTepU_5\tgiraffe\nfREDiuJlBf8_0\tperson\nfREDiuJlBf8_1\tperson\nfRFF0xtrWhI_0\telephant\nfROdeQpu88o_1\tknife\nfRS5rhYP7LM_0\tperson\nfRXDSh8gr0c_1\tperson\nfRZ7Wze7ATs_3\tknife\nfRcegyxH0Is_0\tcar\nfRhNtVu6anA_0\tdog\nfRjCbO3MyU8_0\tperson\nfRmnBvuwZlU_0\tdog\nfRmnBvuwZlU_1\tdog\nfRrLguORoeU_1\tumbrella\nfRrLguORoeU_2\tumbrella\nfRrd-Z2R-Gs_0\tperson\nfRtzYh_gGgI_1\tcow\nfRwzMPH6Kvw_0\tperson\nfR1zDIeBHFg_0\tperson\nfR6FrFNXUxY_0\tperson\nfR-JNy5hccc_0\tumbrella\nfSA7T5svJ-o_0\tbus\nfSBe_a8ZkZU_0\tcat\nfSey4VJgLM0_0\tperson\nfSfKYTVt7V8_2\tbird\nfSfX4Z6SR2U_0\thorse\nfSj-h8lAhWw_0\tcat\nfSoqM6oq2AA_0\ttrain\nfSoqM6oq2AA_2\ttrain\nfS0098HnnhM_0\tperson\nfS3KL3nj7FY_0\tperson\nfS73PiHaNi8_0\tperson\nfS8_byjM-1M_3\tzebra\nfS8_byjM-1M_0\tzebra\nfS_6fgFOiPU_3\ttrain\nfTFLfGUcgMs_0\telephant\nfTFLfGUcgMs_3\telephant\nfTFVwPKxUHE_2\telephant\nfTP9YgSJZg8_2\tknife\nfTVb5uxWnsI_0\tperson\nfTVb5uxWnsI_1\tperson\nfTgirzB_QLU_0\tperson\nfThV1JtaTJg_0\tperson\nfTkIm1nb6qg_1\tbird\nfTkIm1nb6qg_2\tbird\nfTnnG_WcLYY_3\tknife\nfTnnG_WcLYY_4\tknife\nfTwiavhNzxs_0\tperson\nfUB-cH8rjW4_1\tperson\nfUB-cH8rjW4_0\tperson\nfUF__EdDFVs_0\tskateboard\nfUISEtXSRYM_0\tperson\nfUU4R6RP4ek_0\tmotorcycle\nfUXpqgf4jUA_0\tbus\nfUd8LjmonBM_0\tperson\nfUetaCH3tZk_0\tperson\nfUg6JULdTnU_0\tperson\nfUonzpmV18o_3\tbird\nfUqVKgWVVNY_1\tperson\nfUqVKgWVVNY_2\tperson\nfUwzXH9i0yQ_0\tperson\nfUx60fl9UkU_0\tperson\nfUzsVWD48bA_0\tperson\nfU3o6Frqdww_0\ttruck\nfU4DzirdCVE_1\tairplane\nfVAmI93Yb6E_0\tcat\nfVAsOuag4vY_1\tgiraffe\nfVHZEHosow0_2\tperson\nfVH3n0aghP4_1\tperson\nfVH3n0aghP4_0\tperson\nfVH7PpDqlPE_0\tboat\nfVIVas1R1tk_0\tcow\nfVOy449KQlY_0\tperson\nfVX7qR-o-9I_0\tcat\nfVZfWzDBb-c_0\tperson\nfVZ_9hWIGpA_2\ttruck\nfVdrMKHN9WY_1\tcow\nfVq7Of0Tr-s_0\tperson\nfVr3XVUzJaA_0\ttrain\nfVv5EqFYsAY_0\tperson\nfV80H_L3AN8_1\tmotorcycle\nfWLqbV7Z7Go_1\tperson\nfWLqbV7Z7Go_0\tperson\nfWb_-8hhubg_0\tperson\nfWmJ9tUUCwg_0\tperson\nfWpdcmgr5r4_0\thorse\nfWxgjNDC4OQ_0\tcar\nfWxgjNDC4OQ_1\tcar\nfWxsOgW3P6U_0\tperson\nfW1Z_Mx1RaA_0\tperson\nfW4fh_WBiMY_0\ttrain\nfW7yPljMFRc_0\tperson\nfW7yPljMFRc_1\tperson\nfW_HPaNBsDE_0\tcat\nfXCFktk2xdc_0\tperson\nfXLB02IH0G4_0\tperson\nfXLB02IH0G4_1\tperson\nfXOdZ0uKuBc_1\tdog\nfXWqvRfBWto_0\tperson\nfXX7K6CQfBw_0\tairplane\nfXYn01Cgmqs_0\tdog\nfXY7h0cc6tw_0\tcow\nfXbnEKMaIoM_1\tboat\nfXbnEKMaIoM_0\tboat\nfXka5y708fI_1\tperson\nfXowuJDXhhU_0\tperson\nfXyBm7_EDVc_0\tskateboard\nfXzIQASqygY_0\tbird\nfX-kSrf_K8w_0\thorse\nfYDgPdRtmjU_0\ttrain\nfYLtnvuW_VI_0\tmotorcycle\nfYMA0fLN8sI_0\thorse\nfYN5ZIicl_k_0\tcar\nfYmfHE2mONE_1\tperson\nfYnsIFGQfT8_0\tperson\nfYql4FiApLQ_0\thorse\nfYtm_pGBWkU_0\tperson\nfYu5ChRgapY_0\tmotorcycle\nfYw5KVCsg_4_0\tperson\nfYyI8x0tNAA_1\tbear\nfY4-6vsjmD8_0\tperson\nfY82KLfOpbk_0\tperson\nfY82KLfOpbk_1\tperson\nfZCdkf9VQzU_2\tcow\nfZEFEAYBlGE_0\tcat\nfZFYdgZbSBg_0\tperson\nfZFYdgZbSBg_1\tperson\nfZJOS8BlA-w_0\tperson\nfZOtury_J_w_0\tperson\nfZTIKbSjOhk_0\tairplane\nfZTJH_9Pqvg_0\tperson\nfZTJH_9Pqvg_1\tperson\nfZWP75nltcM_0\tbird\nfZXzEYFmZ_8_0\tperson\nfZXzEYFmZ_8_1\tperson\nfZiiYH3WfD8_0\tskateboard\nfZnbOFaSEQc_0\tperson\nfZnbOFaSEQc_1\tperson\nfZp_UgW_xZU_1\tmotorcycle\nfZp_UgW_xZU_0\tperson\nfZu7wEVEuX8_0\tperson\nGxHmm60dKvc_0\tskateboard\nGxLI4BFLrps_0\tperson\nGxPYf4SAQvE_0\tperson\nGxPYf4SAQvE_1\tperson\nGxWuAfBV300_0\tperson\nGxg0Pt_9bIE_0\tperso
n\nGxwwTXW-DdQ_2\ttrain\nGx1zPI3b2oc_0\tperson\nGx3xtKPwlz0_1\thorse\nGx4ryd6AGl4_1\ttrain\nGx4ryd6AGl4_2\ttrain\nGx4ryd6AGl4_3\ttrain\nGx4ryd6AGl4_0\ttrain\nGyGdlCtDdJc_0\tperson\nGyIKdb5KDHk_1\ttrain\nGyPRnKI78iA_0\tperson\nGyU8x9urAxE_0\tmotorcycle\nGyVDsnuS5jU_0\tperson\nGyXlgRxQ1jo_0\ttrain\nGyXlgRxQ1jo_1\ttrain\nGyZHiIEOBos_0\tcat\nGya_TrOGXpo_0\tperson\nGyhjyC5aJ8U_0\tbus\nGyjb_P1W7TA_2\tbus\nGyn_wSuRB3w_1\ttruck\nGyzaf_gaIYY_0\tmotorcycle\nGy9JueTT4XU_0\tperson\nGy_XuBCvbUc_1\tdog\nGy_XuBCvbUc_2\tdog\nGzB9OTV44PA_0\tperson\nGzHy2xjKB_8_0\tperson\nGzLmftr6tl8_0\tperson\nGzRkvFxVlx0_0\tperson\nGzTDLPCsgSM_0\tperson\nGzVj8bI0bSk_0\tskateboard\nGzVj8bI0bSk_1\tskateboard\nGzcgYGEqOlY_1\thorse\nGzesZ0laH2w_0\tmotorcycle\nGzizYdL25ZY_0\tperson\nGzjkTrnmEnU_0\tairplane\nGzjkTrnmEnU_1\tairplane\nGznFDBDT2c0_0\ttruck\nGznFDBDT2c0_2\ttruck\nGzrgq_nWH_Q_0\thorse\nGzujCDTak_4_0\thorse\nGzujCDTak_4_2\thorse\nGzy_PnFtEpM_0\tperson\nGz3Np50b9q4_0\ttruck\nG0DQ6VdMp-U_7\tcar\nG0DQ6VdMp-U_0\tcar\nG0DQ6VdMp-U_1\tcar\nG0DQ6VdMp-U_2\tcar\nG0DQ6VdMp-U_4\tcar\nG0DQ6VdMp-U_5\tcar\nG0DQ6VdMp-U_6\tcar\nG0FSe53KN-w_0\tperson\nG0WsFATo9RQ_0\tperson\nG0dXxEbeJnM_1\tperson\nG0d44YoKXX4_0\tperson\nG0kDhLojiI4_0\tgiraffe\nG0leBoTgEx4_0\tperson\nG0rwWyFSsYE_0\ttrain\nG0r2tR6EcF8_1\tperson\nG0urH-9ytbc_0\thorse\nG01Xi8VMxgQ_0\tperson\nG03JTuHY_RM_0\tknife\nG1AIHF-KITc_0\tperson\nG1AtN7CvCXw_0\tperson\nG1EnmuHlxig_0\tperson\nG1P_XnEL4dc_1\tperson\nG1P_XnEL4dc_0\tperson\nG1TS-PvdREA_0\tperson\nG1TS-PvdREA_1\tperson\nG1ThERK4a8E_4\tairplane\nG1ThERK4a8E_0\tairplane\nG1UoN56m5DM_0\tperson\nG1YNrrT9-z8_0\tbird\nG1YNrrT9-z8_1\tbird\nG1cY71JK5_E_0\tmotorcycle\nG1c0-CTyZ3I_0\tperson\nG1dKhZZARDk_0\tairplane\nG1z6RMtKkbM_0\tbird\nG1z6RMtKkbM_1\tbird\nG11cHAnx17E_0\thorse\nG13ARgckI9w_0\tperson\nG17Kpx1bgXM_0\thorse\nG1_R_EJpLZU_0\tcow\nG2FXcVDezv4_0\ttruck\nG2HOmWxj5gg_0\tperson\nG2LNQIwbLHE_0\tperson\nG2S4rwP6qJY_0\tbicycle\nG2V6wliL2AA_0\tknife\nG2g4Z-Syzi8_1\tdog\nG2lFYYEolz4_0\ttrain\nG2lFYYEolz4_2\ttrain\nG2x5gACWSwA_0\tcow\nG2z7yjdCUuI_0\tairplane\nG23Q_C35Uqs_0\tbear\nG24yJOgl9t0_1\tperson\nG25iisvOYhA_0\tcat\nG2-v9IBlnTs_0\tperson\nG3AuCS7s68w_0\tbird\nG3IID08lWos_0\tperson\nG3P-Vvra2GU_0\thorse\nG3SowFCFa0g_0\tperson\nG3VeVH6pbdE_1\tperson\nG3a0EYtnqHA_0\tperson\nG3cazaory7w_0\tperson\nG3f8bIoGGZ0_0\tdog\nG3kNB0zhHQc_0\tperson\nG3pT4MJrpDI_5\tumbrella\nG3pT4MJrpDI_6\tumbrella\nG3pT4MJrpDI_4\tumbrella\nG3vP7_U6yXU_1\tcow\nG37Dm4oy794_0\tbicycle\nG38EbyEOITE_0\thorse\nG38SrxcVYWs_1\tperson\nG39ryVtNnhQ_3\telephant\nG39ryVtNnhQ_8\telephant\nG39ryVtNnhQ_9\telephant\nG39ryVtNnhQ_11\telephant\nG4PD_RAK48Y_0\tperson\nG4VPBDOgq54_1\tskateboard\nG4VpcUuXgRs_0\tperson\nG4VpcUuXgRs_1\tperson\nG4ckSGXUGts_0\tperson\nG4fbkcKiZVg_0\tperson\nG4nRZ4PHvC4_0\tdog\nG4rJejZ9FIM_0\tcar\nG4r0UJvtDXs_0\tcow\nG4xFWKKoN0M_0\tmotorcycle\nG47wnMA6RVE_0\tbus\nG4_xR7lZIPo_3\tbear\nG5D1cAo2D6s_1\tperson\nG5JwolS0D1M_5\telephant\nG5QgL60_yfc_0\tknife\nG5SlrQeATlc_0\tbus\nG5SlrQeATlc_2\tbus\nG5hG8j0KxBI_0\tperson\nG5ixkqq66VA_0\tperson\nG5rBbx_kODY_0\tperson\nG5ztukDN_Qg_0\tzebra\nG51fdi_hG_0_0\ttrain\nG52uuPWcC3M_0\tumbrella\nG553b8ZAd3Q_0\tperson\nG58FuwBYL-0_0\tskateboard\nG5_UJ1wEKh4_0\tperson\nG6OttGznP9E_0\tperson\nG6OttGznP9E_1\tperson\nG6QMME1QbK8_2\tcar\nG6Qmm4T-cd0_0\tbus\nG6WiR4W4WWk_0\tperson\nG6b9lySVCCY_0\tperson\nG6eAvUHoDkc_0\tperson\nG6fvYSH13nI_2\ttrain\nG6iVTjyPM04_1\thorse\nG6sFOs8MgGU_0\tbird\nG6sFOs8MgGU_3\tbird\nG6sFOs8MgGU_6\tbird\nG66e5ltBFoI_0\tperson\nG7DhRPK7pwc_1\tbicycle\nG7F-ufxEXPY_0\tknife\nG7H7fQ_Q1Ec_0\tperson\nG7
H7fQ_Q1Ec_1\tperson\nG7ID9RdMSkE_0\tperson\nG7MvPG8Qv84_0\tgiraffe\nG7TezoE9Cmo_0\tperson\nG7WblvVQPF0_0\tperson\nG7Z01jmMzlI_0\tbird\nG7krBQa_KLc_0\tperson\nG7p90FBQk_0_0\ttruck\nG7slUshqPvY_0\telephant\nG74HXSqYO-A_0\tmotorcycle\nG75uQAEuUkE_0\tperson\nG766vinfuBw_5\tbicycle\nG766vinfuBw_9\tbicycle\nG77KKnCpwWY_3\tskateboard\nG8EC6svgwKU_0\tperson\nG8NIqmq7YdE_2\tbear\nG8V2UsTc1Ik_0\tcat\nG8V33bTVNII_14\tbicycle\nG8V33bTVNII_1\tbicycle\nG8V33bTVNII_2\tbicycle\nG8V33bTVNII_6\tbicycle\nG8V33bTVNII_9\tbicycle\nG8XX8bkx6Ek_0\tperson\nG8hStuDYwH0_2\tairplane\nG8kDZAPbUe8_0\tperson\nG8kDZAPbUe8_1\tperson\nG8k84FwnW2k_0\tmotorcycle\nG8lDrK3u3r0_2\telephant\nG8lfwRN3Iew_12\tboat\nG8lfwRN3Iew_0\tboat\nG8lfwRN3Iew_8\tboat\nG8lfwRN3Iew_9\tboat\nG8lfwRN3Iew_11\tboat\nG8sDCWad2Bg_0\tcat\nG8s2n3jAKW8_0\tcow\nG8tbj2R0iso_0\tperson\nG80DOuBBH_Y_3\tairplane\nG8--2JpJa6g_0\tperson\nG9DdsOO1mZo_0\thorse\nG9FQJdIxjsk_0\tbird\nG9YPEOrV5UU_0\tperson\nG9YPEOrV5UU_1\tperson\nG9YPEOrV5UU_2\tperson\nG9ZKH_DS9DU_0\tperson\nG9gsnqhd_Sw_0\tcat\nG9hPaEx7Ci0_1\tknife\nG9i66tUOspc_0\tdog\nG9juxPad3zY_0\tperson\nG9nlPUwJQB0_0\tperson\nG9nvXjuig6s_0\tperson\nG9qCl1NZelo_0\tcow\nG9rxIfeUWVo_0\tairplane\nG9vDsElCKAY_0\tdog\nG9zd0G8dIt0_0\tperson\nG93PAKTtVpM_0\thorse\nG97UC0qtVDw_0\tperson\nG97YtHMd2hw_0\tperson\nG99rEXOdlC8_0\thorse\nG9_TgGWQQi8_0\tperson\nG-Sr-qmWZNo_0\tcow\nG-YYtvCU7qY_0\tdog\nG-d6o3nTBFA_0\tzebra\nG-nFiFb0Xos_1\tknife\nG-nbiqZuFdc_2\thorse\nG-qCe2DK3Tk_0\tmotorcycle\nG-u_ThqhoJE_0\ttrain\nG-yCRlVSs6w_0\tperson\nG-3kOsn1fPY_1\tperson\nG_ADLUKVq8Y_0\tboat\nG_LtPKO6be4_0\thorse\nfZ1GVGZmTRA_0\tperson\nfaJuqm4umTQ_0\tperson\nfaSv8ijeKeE_0\tperson\nfaVBgge6xkE_0\tperson\nfaW2tWwuCMg_1\tperson\nfaW2tWwuCMg_0\tperson\nfahs60oGhLU_0\ttrain\nfatTPMeG5Pc_1\tbear\nfa-rHhFEloA_1\ttruck\nfa--elcQpd4_0\telephant\nfbDYKST2P-I_0\tmotorcycle\nfbFVM0UM5V0_0\tperson\nfbM5MhIve5s_0\tdog\nfbM5MhIve5s_1\tdog\nfbiXTCkCkqY_0\tskateboard\nfbmZZXaRkak_5\thorse\nfbmZZXaRkak_6\thorse\nfbmnWcE_64U_0\tskateboard\nfbsyvHQPZZk_1\tdog\nfb3Iq9yQ1VY_0\tperson\nfb3WxEfe8l8_0\tmotorcycle\nfcCb2W4HMLk_0\tperson\nfcD6n99azfw_0\tperson\nfcGNPf6n7Ws_0\tbear\nfcWegrm8wCE_0\tperson\nfcbcnvGoWLs_0\tcar\nfchtQi7-OD4_0\thorse\nfclxNO1L-rY_0\tcow\nfcpGNeDgpDI_0\tperson\nfc1qNL5u2wg_0\tperson\nfdCTLMd6wEY_0\tcat\nfdQaoSZKA_s_0\tperson\nfdRULl8YSnU_0\tcow\nfdYvCuft5zQ_4\telephant\nfdYvCuft5zQ_5\telephant\nfdYvCuft5zQ_1\telephant\nfdYvCuft5zQ_2\telephant\nfdZBeWyKON0_0\tperson\nfdbvWvUoFW8_1\tbird\nfdbvWvUoFW8_2\tbird\nfdbvWvUoFW8_3\tbird\nfdkrZ9uL854_0\tperson\nfdlDkbbDniw_1\telephant\nfdmV18YEDKM_0\tcat\nfdnBDcIwPBA_0\tperson\nfd3ea86gmJI_0\tmotorcycle\nfd3ea86gmJI_1\tmotorcycle\nfd8Ba2cZgxI_2\tbear\nfeAexE1IYq8_0\tperson\nfePU3BlF4Zc_0\tperson\nfePU3BlF4Zc_1\tperson\nfeQX_1dqh9g_9\tbicycle\nfeQX_1dqh9g_1\tbicycle\nfeQX_1dqh9g_3\tbicycle\nfeZfxIunWHo_0\tperson\nfeZoXB7I6wE_0\tperson\nfedmeW-WImw_0\ttrain\nfegJtwcNo5c_0\tbicycle\nfeh4XVzjQdI_0\tcat\nfelt48AIbIs_1\tperson\nfenYF-k-y4c_0\tskateboard\nfeqLG8n4nDE_1\tperson\nfe05wKXl2cI_0\tperson\nfe05wKXl2cI_1\tskateboard\nfe5_49oxMwc_0\tperson\nffIQZZ_P3ck_0\tcat\nffOeGlw8_C8_1\tcow\nffZoY75S_-k_1\tbird\nffZoY75S_-k_0\tbird\nffbSaNikNF4_1\telephant\nffeYBfcgF3s_0\tperson\nfftSD6UfvEA_1\tperson\nffttXyArNGc_1\tknife\nffvXiSjPp6c_0\thorse\nffwk_8ycQiA_0\tperson\nff1PHzfARZk_0\tperson\nff5MH6QQuJk_6\tknife\nff5MH6QQuJk_2\tknife\nff5SaJnQg5M_0\tperson\nfgEpQHGYIjc_0\tperson\nfgFy8l-b1iI_0\tmotorcycle\nfgJJxPEHVZQ_0\tperson\nfgPShysxuQM_0\tcat\nfgQE-9shdmQ_0\telephant\nfgUjCKe_e_Y_0\tperson\nfgWtwTKCtMQ_0\tp
erson\nfgfizI4AnVs_0\tperson\nfggT4HM2Uy4_0\tperson\nfgsaC375d38_1\tbird\nfgvUj1mCqio_0\ttrain\nfg1ISXcyb10_1\tdog\nfg5mCaScLE4_10\tumbrella\nfg5mCaScLE4_0\tumbrella\nfg5mCaScLE4_3\tumbrella\nfg5mCaScLE4_4\tumbrella\nfg5mCaScLE4_6\tumbrella\nfg5mCaScLE4_7\tumbrella\nfhHLCLuQAdE_0\tbird\nfhHLCLuQAdE_3\tbird\nfhHLCLuQAdE_4\tbird\nfhHLCLuQAdE_1\tbird\nfhHLCLuQAdE_2\tbird\nfhQN_vhNmgo_0\tcow\nfhan95LbdqQ_1\tknife\nfhmsHcZfBC4_0\tperson\nfhutr5rLQN0_0\tperson\nfh5lB6U-7Wk_0\tperson\nfiGa0nIEYbw_0\tperson\nfiKecNhAgFU_0\tmotorcycle\nfiS0pY80kkU_0\tdog\nfiWtkuDUFvM_0\telephant\nfiZAhg2twZs_0\tperson\nfigjWJDEn1c_0\tperson\nfijO0rB1rfY_0\tairplane\nfinRU64JVRU_1\tbus\nfi2s2k_aamk_0\tperson\nfi46OpYa89I_3\tbicycle\nfi46OpYa89I_10\tbicycle\nfi46OpYa89I_2\tbicycle\nfi6gdEVUAUc_0\tcat\nfi8YGUm_6x0_0\tperson\nfi9GleMDHIc_0\tperson\nfjF31Mh-tNQ_0\tperson\nfjKXALm76kI_0\tbus\nfjXufPzimEQ_0\tperson\nfjZ4J-BZX2U_0\tperson\nfjaHYcaE7-w_0\tperson\nfjaHYcaE7-w_1\tperson\nfjnR81fSTeI_0\tumbrella\nfjnxqBnMZzs_0\tperson\nfjtn0lRVX_4_0\ttruck\nfjwgdNBSCFc_0\tperson\nfjwgdNBSCFc_1\tperson\nfj29rB34ea8_0\tperson\nfkERi_ma2UE_0\tperson\nfkERi_ma2UE_1\tperson\nfkHiDyuUaWA_0\tperson\nfkIfLHGu_CQ_0\tperson\nfkQEEtG6Tbg_0\tperson\nfkSf5a3q6oY_0\tboat\nfkSf5a3q6oY_3\tboat\nfkUDB0V3UXc_0\thorse\nfkUDB0V3UXc_1\thorse\nfkVSILZPyXg_0\tbear\nfkaKyYrWPpQ_0\tperson\nfkfnbZ2MSXk_4\tbicycle\nfkfnbZ2MSXk_0\tbicycle\nfkfnbZ2MSXk_6\tbicycle\nfkx0e2gvPYA_0\ttruck\nfkyM4LNUCck_0\tperson\nfk0v7vZDpgU_0\tperson\nfk10mtIF_Hs_0\thorse\nfk8yMMO1gRA_0\tperson\nfk8yMMO1gRA_1\tperson\nflADy--Uwx8_0\ttruck\nflERyzHjhzQ_0\tskateboard\nflMijcdhRAU_0\tperson\nflgTyT4DB7E_0\tbear\nflgaLcoSjb4_0\tbear\nfluEronPyZk_0\tcow\nfl6-NRwVy10_0\tperson\nfl7Q9yxFoOs_2\tperson\nfl95IAyDN-s_0\tskateboard\nfmERtylbqN4_0\tperson\nfmGJj0qYc6g_1\tperson\nfmGJj0qYc6g_2\tperson\nfmLKgz4DQhQ_0\tairplane\nfmL66yeOiI8_0\tperson\nfmRfUvIIvT8_0\tperson\nfmYELQL9Cs0_0\tbus\nfmbEAdugI3Q_0\tperson\nfmbb6SQ6qiI_0\tperson\nfmbb6SQ6qiI_1\tperson\nfmbu89zGN4Y_0\tperson\nfmdem4Z9BHI_0\tbird\nfmfg5yyhjkA_1\tperson\nfmiq_EhaURY_1\tperson\nfmiq_EhaURY_0\tperson\nfmtIa6nxUd4_0\ttrain\nfmuzrZHZYis_0\tskateboard\nfmwC1khd3BU_2\tperson\nfm3zFVlJw4k_1\tperson\nfm-ScTLdSL8_1\tbus\nfm_bcsJYhu4_0\tdog\nfnAGderLxPg_0\telephant\nfnAGderLxPg_3\telephant\nfnDP4B5jpSY_0\tperson\nfnFMQ2VFlEc_0\tperson\nfnOL3ZL61u0_0\tperson\nfnOkwsmzdaI_0\thorse\nfnRq5X91IV0_0\tperson\nfnZR6FD_eZ8_0\tboat\nfnZR6FD_eZ8_1\tboat\nfnbSgwO8v0c_1\tboat\nfnbsAmTQJOs_0\tbicycle\nfnbsAmTQJOs_1\tbicycle\nfniJ36z0_Pc_0\tcow\nfnj1YtAaztU_0\tperson\nfnkHdQf9H3w_0\tknife\nfnmuFbydHek_0\tperson\nfnpjkwiPkSY_0\tskateboard\nfntRlkYDiD0_1\tperson\nfntZVzkwhz4_1\tperson\nfnvst-Sk4MU_0\tumbrella\nfnvst-Sk4MU_1\tumbrella\nfnz6gTPuInQ_0\tdog\nfnz6gTPuInQ_1\tdog\nfoAoOCF4rE4_0\tcar\nfoI1jEbg9uA_0\ttrain\nfoJs0wXX1O8_0\ttruck\nfoaFgrzsPOY_0\tperson\nfobJTCY7ifQ_0\tbus\nfodsoLtLzqI_1\tcat\nfojRgMUsu3c_0\tperson\nG_RgJ0t0Cbo_0\tperson\nG_aU-_2ZiSw_0\tdog\nG_lOQAV6xWs_0\tcat\nG_poofS7HD0_1\tperson\nG_poofS7HD0_0\tperson\nG__VTazZtp0_0\telephant\nHARRnedV05U_0\tcar\nHAVUursfTOI_1\tzebra\nHAtu6frOH1k_0\tperson\nHA1TDbNot8E_0\tperson\nHA-iE7bcfT0_0\tcar\nHA-iE7bcfT0_1\tcar\nHBI13CpuAmI_0\tknife\nHBLJbCs1mSg_0\ttruck\nHBMah_r3E1g_0\tperson\nHBOqQBe7rhE_0\tperson\nHBO6G57uhXA_0\tperson\nHBY4_6b_sRY_0\tcat\nHBiSuZWtb4E_0\tboat\nHBmaJJ0nTAo_0\tperson\nHBwjWdXrpPA_0\tdog\nHBzYVphfmRQ_0\tperson\nHCA4jkg9HTY_1\tperson\nHCA4jkg9HTY_0\tperson\nHCEjNJewxbw_0\tperson\nHCJ1EYfF8qg_0\telephant\nHCKZ7kihdaM_2\tairplane\nHCMBgpQ2z18_0\tcow\nHCSbzHGXxmA_0\tc
at\nHCczjWUmlW0_1\ttruck\nHCczjWUmlW0_0\ttruck\nHCg0k7LnfkY_1\tcow\nHCg0k7LnfkY_0\tcow\nHCiRQdh20qg_0\tdog\nHCm-B3JjzhY_0\tcow\nHCpxRBja8lE_0\tperson\nHCp6gYC9NFE_0\tcow\nHC72_Yrigik_0\tperson\nHDN4DqO_KLg_0\tdog\nHDQEWwETuU4_0\tperson\nHDRKiYaoEnA_0\tperson\nHDSw0KM8cSs_0\tperson\nHDkI156rPRA_0\tperson\nHDmK6y86kYM_0\tperson\nHDmK6y86kYM_1\tperson\nHDnYEdh7xG8_0\tperson\nHDqUvaFm_R0_0\tskateboard\nHDr5if6Mb_4_0\tperson\nHDziFGwpXmg_1\tcar\nHDziFGwpXmg_2\tcar\nHDziFGwpXmg_3\tcar\nHDziFGwpXmg_7\tcar\nHD1tKnKT1Dc_0\tmotorcycle\nHD7QKzuFNas_1\tperson\nHD7QKzuFNas_0\tperson\nHD_alEnCVhM_0\ttruck\nHD_alEnCVhM_1\ttruck\nHD_wYO2_O8k_0\tperson\nHD_4ZJr68p8_1\thorse\nHEIjtOJze90_0\tperson\nHEfIJ3wMKRI_1\tperson\nHEmv-biWoEA_0\tairplane\nHErkHysJd-M_0\tperson\nHEr_leMW1zE_0\tbear\nHEr_leMW1zE_3\tbear\nHEr_leMW1zE_1\tbear\nHEyY4zEX-no_0\tperson\nHE-4YEdBwuw_0\tdog\nHE-4YEdBwuw_1\tdog\nHFDK_y7kibQ_0\tknife\nHFE9ujNILoA_0\tcat\nHFQFlm1jWiE_0\tperson\nHFQFlm1jWiE_1\tperson\nHFRCZSouOn4_0\tbird\nHFWQl2JJfic_2\tperson\nHFa18pRSsXU_0\ttrain\nHFlanXHBGHg_0\tperson\nHFuw8C2bQ6g_0\tperson\nHF07qDRPgrw_0\thorse\nHF1xhyTtWLk_0\tmotorcycle\nHF3Nn3KqXOk_0\tperson\nHF3Nn3KqXOk_1\tperson\nHF4PefI86r0_0\tperson\nHGFcsJmjWHs_0\telephant\nHGFcsJmjWHs_9\telephant\nHGFcsJmjWHs_4\telephant\nHGFcsJmjWHs_5\telephant\nHGFcsJmjWHs_7\telephant\nHGLC_YFRxPY_0\tskateboard\nHGLLnmQiCU0_0\tperson\nHGLLnmQiCU0_2\tperson\nHGLLnmQiCU0_1\tperson\nHGLdrgf2e2c_0\tperson\nHGVNoha70iA_0\ttruck\nHGZDROOjAY4_1\tperson\nHGZDROOjAY4_0\tperson\nHGeCBN48g9o_0\tperson\nHGm4OftDlT8_2\thorse\nHGnIxotAPOU_0\tperson\nHGnegc2CRTM_0\tperson\nHGvXva6SUvE_0\tperson\nHGw4URr4QUs_0\tperson\nHG1zQzSX2rU_0\tperson\nHG8oY2Ac4-M_0\tperson\nHG_JAnXBzJQ_0\tskateboard\nHHGq5gd6w1g_0\tskateboard\nHHPW65GVeoA_0\tperson\nHHRUnCEVnAo_0\tcat\nHHc5mD1TxGQ_1\tknife\nHHe9m9BOi3A_0\tperson\nHHgC0pkNiIA_0\tperson\nHHgC0pkNiIA_1\tperson\nHHi26rWtC38_0\tperson\nHHx5E8VfnkY_0\tperson\nHH0OILx6PKY_0\tperson\nHH1JApHMx2I_0\tdog\nHH148v63a5o_0\tperson\nHH9wMNMJ2sE_0\telephant\nHIBd79qG-XQ_0\tperson\nHICJGOFvwoc_2\tbird\nHIHX1rpDx_I_0\tcat\nHIIQ917jPqg_0\ttrain\nHIJGcmgyEcg_0\tknife\nHIJGcmgyEcg_1\tknife\nHIKyhRtWQ4c_2\thorse\nHIK-Z8wXFug_0\tperson\nHISWMgqg80E_0\tskateboard\nHITf8extnnk_0\tperson\nHIXuU8Z0N9o_1\tmotorcycle\nHIgiF2bkOys_0\tperson\nHIgiF2bkOys_1\tperson\nHIiu2EVu5H8_0\tperson\nHIqhXDkhHsc_0\tperson\nHIqr0-BB8Xo_1\tknife\nHIrcAjP1fDs_2\tbird\nHIz27dqnl20_0\tbus\nHI3L38NCy0A_1\tboat\nHI3L38NCy0A_0\tboat\nHI_h7HfFDVw_0\tboat\nHJGPBeom3y4_1\tumbrella\nHJSiTzkFpHk_0\tperson\nHJVpMFJT2LU_0\tperson\nHJVpMFJT2LU_1\tperson\nHJg7wtoy2vk_0\tperson\nHJhZhn0zf1s_0\tperson\nHJi1L5HxuLo_0\tskateboard\nHJi1L5HxuLo_1\tskateboard\nHJi1L5HxuLo_2\tskateboard\nHJq4kVvdeRg_1\tskateboard\nHJrd3kpvjh0_0\tperson\nHJr5BOgO9XY_0\tperson\nHJ6BZjeSHTY_0\tboat\nHKFJzdCsRfA_0\tperson\nHKGK0FLN9vA_2\tzebra\nHKGK0FLN9vA_3\tzebra\nHKIwynmyQp4_0\tperson\nHKWELXwIVvI_0\tperson\nHKqHmDjxF6Y_1\tperson\nHKsVn1IWaas_0\tperson\nHK28Vb__IfY_0\tperson\nHLAEqFEcR90_4\thorse\nHLAEqFEcR90_0\thorse\nHLAEqFEcR90_2\thorse\nHLAEqFEcR90_3\thorse\nHLBgSJD-3lg_0\tbicycle\nHLL_j-CQKqQ_0\tumbrella\nHLaiRkL4gFA_0\tmotorcycle\nHLhbGKVR4mE_3\tdog\nHLy3UUDhaJY_4\tgiraffe\nHL06bx_HNg0_0\tcat\nHL6dNcrAEoM_0\tperson\nHL8fh6O6iUA_1\ttrain\nHL9F68y-0kY_0\thorse\nHL9F68y-0kY_1\tperson\nHL9o2Vs9d8s_1\tperson\nHMF0KrAf0iI_0\tperson\nHMIGIwIcNq8_0\tperson\nHMJerOjZn4I_0\tperson\nHMQQrRvzwiM_0\tboat\nHMUBbUP6Ko8_2\tboat\nHMV7H81wz84_0\ttrain\nHMb-pPTMZ5I_0\tumbrella\nHMxMledcSVE_0\tperson\nHMyUpcpZGdM_1\tbird\nHM4hJE0Db2Q_0\tperso
n\nHM4zY3uzwOQ_0\tperson\nHM7sD8YClkI_0\tperson\nHM_3ck6yooo_0\tperson\nHNGh3Rvn6Sw_2\tknife\nHNGh3Rvn6Sw_3\tknife\nHNRwM8zXMTM_0\tperson\nHNXQ_dkhX-Y_0\ttruck\nHNdRITK9TGE_0\tperson\nHNeVOXPyunw_2\tperson\nfo9SmkQa35Y_0\tmotorcycle\nfo9SmkQa35Y_1\tmotorcycle\nfpM1eiK3iok_0\ttruck\nfpNLFTgOciY_1\tumbrella\nfpRq9BsaPzs_1\thorse\nfpRq9BsaPzs_2\thorse\nfpVZYKlsFsU_0\tboat\nfpdUwZ8Gnd8_1\tcow\nfpeYfCUzvDY_0\tcat\nfpkxYBJDTtI_0\tperson\nfpkxYBJDTtI_1\tperson\nfpmtNez1u0o_0\tbus\nfpnTZF4bvk8_0\tperson\nfpomSxrdTyE_0\tperson\nfpo2kf1idyo_0\tperson\nfpp_41AxRNI_5\tgiraffe\nfpp_41AxRNI_1\tgiraffe\nfpp_41AxRNI_4\tgiraffe\nfqQL3QPq-lo_0\ttrain\nfqXvzEGxSak_0\tbus\nfqcie5yyOxA_0\tcat\nfqfHWT5hjkY_0\tcat\nfqkVB4qZbgw_0\tperson\nfqlWb2OJg3Y_0\tbus\nfqnioIm10xY_1\ttrain\nfqpMhE5qOKk_1\tperson\nfqxGN6r9oIY_0\tzebra\nfq5Zh2Lo9GQ_0\telephant\nfq959dAMasM_0\ttruck\nfrFSlwby-0k_0\ttrain\nfrFrggXiJZY_1\tperson\nfrItg4I9oEQ_0\tperson\nfrItg4I9oEQ_1\tperson\nfrJtciauQQw_0\tperson\nfrRHj0FPzVQ_1\tperson\nfrW5BpQ3-Fw_0\tperson\nfrXxZevI11c_0\tperson\nfrXxZevI11c_1\tperson\nfrY6tIPR-Co_0\tbicycle\nfreW9Vk3GhU_1\tperson\nfrfLZ70XIXI_1\tdog\nfrgCmAtYao4_1\tboat\nfrh4LMyWaQw_0\tperson\nfrn-rfqmGVs_0\tperson\nfrx5Uv7-1zw_0\tperson\nfr3S3gEtDS0_1\tperson\nfr616yExbeg_0\tknife\nfsD7pYdfrpg_0\tperson\nfsE0DlVODpY_1\tperson\nfsFtKjirvM4_1\tperson\nfsFtKjirvM4_0\tperson\nfsOoFz6I_js_1\tperson\nfsOoFz6I_js_0\tperson\nfsVlTdh13Lk_0\tperson\nfsXVGaRpUNg_0\tperson\nfsd-DhcH5gE_0\tperson\nfsd-DhcH5gE_1\tperson\nfsh-wcyuPM0_0\tperson\nfs3oXXx75XA_0\tperson\nfs6L5bmf4pQ_1\tperson\nfs6Rgfl4CtI_0\tboat\nfs6p-qaLswQ_0\tcow\nfs7RdtNY3Ck_0\telephant\nfs9uDpde9ig_1\telephant\nftG2YflDq_E_0\tknife\nftH3_awR5ZA_0\tperson\nftIp5PyaGNc_1\tknife\nftNSK_rSs98_1\tairplane\nftSUBEOhdck_0\tcat\nftX9ErOmiAE_0\tcar\nftX9ErOmiAE_1\tcar\nftcnCvd4yeU_0\tperson\nftlmGO0CnHk_0\ttruck\nfuHAM8D3ros_3\tbicycle\nfuO2QMXiDMU_0\tmotorcycle\nfuPtCtdvowQ_0\tperson\nfuSxdcdxe70_1\tperson\nfuSxdcdxe70_0\tperson\nfuh4-mC5fvg_0\tcar\nfuklviv_MRE_0\ttruck\nfunKReksXEQ_4\thorse\nfur41mRCURs_0\tcow\nfutBuKCP9zw_0\tumbrella\nfu5d7x7pORY_0\thorse\nfu_f4n_bYPU_0\tperson\nfvAislzoQVU_0\tperson\nfvDUF-aukF4_0\tperson\nfvH1bolPY2U_0\tperson\nfvKg6ReEigA_14\tbicycle\nfvKg6ReEigA_2\tbicycle\nfvKg6ReEigA_3\tbicycle\nfvKg6ReEigA_4\tbicycle\nfvKg6ReEigA_5\tbicycle\nfvKg6ReEigA_8\tbicycle\nfvKg6ReEigA_11\tbicycle\nfvKg6ReEigA_15\tbicycle\nfvKg6ReEigA_16\tbicycle\nfvKg6ReEigA_17\tbicycle\nfvKg6ReEigA_19\tbicycle\nfvLauezWx5g_1\tskateboard\nfvLkNgA4N0k_1\tperson\nfvZYmQ6SJrQ_0\tperson\nfvcIpyJFuQA_0\tperson\nfvdoipKMj4g_0\tperson\nfvfb_kQCs-I_0\thorse\nfvhVuqonUHg_0\tperson\nfvhVuqonUHg_1\tperson\nfvlGWjjirUQ_0\tperson\nfvqWMyJJqog_0\tperson\nfvqWMyJJqog_1\tperson\nfvtTggVCkFk_0\tperson\nfvzbC9c98ik_0\tdog\nfv42-nzlEsY_0\ttrain\nfv8F7gjL7Js_0\tairplane\nfwCUjUa0cHQ_0\tperson\nfwG8C9CEISw_0\tperson\nfwLL8mlHf0I_0\tbicycle\nfwL9zu2j3rk_0\tperson\nfwQMFtFdERs_0\thorse\nfwQMFtFdERs_1\thorse\nfwTB5tDP4cU_0\tperson\nfwT-VIjQCa8_0\tperson\nfwop4msktdA_0\tcow\nfwv2gGVEi6g_0\tperson\nfwwOICMutXc_0\tdog\nfxFzCD192K4_1\tbird\nfxHZn2FXRGk_0\thorse\nfxHZn2FXRGk_1\thorse\nfxQYhMoNR9I_0\tperson\nfxQY5tnybxQ_0\tskateboard\nfxWwYiT8yXk_0\tperson\nfxWyDyUmxuY_0\thorse\nfxbNI1vTtq0_0\ttrain\nfxbjh88g3Vw_0\tperson\nfxcDLsblNhs_1\tbird\nfxdVSYuYJOE_0\tperson\nfxhuSOpUuGs_0\tperson\nfxr4HpTRNS0_0\tdog\nfxxjK3mjCF0_1\tperson\nfxyg5GQk8H8_0\tairplane\nfxyg5GQk8H8_2\tairplane\nfxyg5GQk8H8_3\tairplane\nfxyg5GQk8H8_4\tairplane\nfx07mGL1WQY_1\ttrain\nfx2_nahpAfE_0\tperson\nfx4HT1nuEg4_1\tperson\nfx4HT1nuEg4_0
\tperson\nfx9TwmuIYCY_0\tskateboard\nfx9fckiExps_0\tperson\nfx_zN3FWeJ0_1\tbus\nfx_zN3FWeJ0_3\tbus\nfx_zN3FWeJ0_0\tbus\nfyE4_usnxHc_0\tperson\nfyE4_usnxHc_2\tperson\nfyOZZ_u9Jm0_0\tperson\nfyOxr6iISdI_0\telephant\nfyRO8_b4wJU_0\tperson\nfyTzI2wuC0M_0\tperson\nfybHaZZmAzE_1\ttrain\nfydZoAN9JpI_0\tperson\nfydZoAN9JpI_1\tperson\nfydZoAN9JpI_3\tperson\nfyhSoeveW3I_0\ttrain\nfyyLjISjzvM_0\tperson\nfyztN8okJkU_0\tperson\nfyztN8okJkU_1\tperson\nfy5GdRFHsLs_0\tcat\nfzFR54WdDEU_0\tperson\nfzV_Z79golE_1\ttruck\nfzaNjkWQtW0_1\tskateboard\nfze3woUbt0w_0\tdog\nfzh-lO5lQhQ_1\tbird\nfzoZsW3AMTU_0\tbird\nfzp3cT3c5Wg_0\tperson\nfzp3cT3c5Wg_1\tperson\nfzp3cT3c5Wg_2\tperson\nfzqX7N7ICQw_1\tperson\nfzqX7N7ICQw_0\tperson\nfzrGdIi_J9k_0\tperson\nfzr9mWLJM6E_1\tperson\nfzr9mWLJM6E_0\tperson\nfzvrWQX908c_0\tperson\nfz1PTzziIcg_0\tperson\nfz1kPSLo_p8_1\ttrain\nfz8emqnbleQ_1\tboat\nf0BJ56Dn3D0_0\tcat\nf0E5mPnVSSU_1\tperson\nf0JOvKbLwTQ_0\tperson\nf0LbneUbWUk_0\tcow\nf0TYLMAZLpA_0\tperson\nf0XZTHcpmZY_4\telephant\nf0XZTHcpmZY_2\telephant\nf0XpDJO5Tw0_0\tperson\nf0XpDJO5Tw0_1\tperson\nf0Z8cmobjWs_0\ttruck\nf0Z8cmobjWs_4\ttruck\nf0Z8cmobjWs_7\ttruck\nf0Z8cmobjWs_8\ttruck\nf0mYYISWwxo_1\tperson\nf0mYYISWwxo_0\tperson\nf0o0SmB2JAE_1\tcow\nf0o0SmB2JAE_0\tcow\nf03_N__tWuI_2\telephant\nf1ASjw4-yL8_0\tperson\nf1Da4qa1SIw_1\tperson\nf1EKnOQEf5g_0\tboat\nf1GkfW2mOlE_0\tperson\nf1G2DlbJqyI_0\tperson\nf1HKyLr8nL0_0\tperson\nf1JCS5F-LuU_0\tperson\nf1KEvGLqqwI_1\tumbrella\nf1O6FYMq5zk_0\tperson\nf1XB0uA4Dvo_0\tbus\nf1Z1HedJzos_0\tskateboard\nf1fEuZwBkDQ_0\tperson\nf1nxCdtYwdQ_0\thorse\nf1sTzp9ahWM_1\tperson\nf1sTzp9ahWM_0\tperson\nf1uaPSveXCI_0\tperson\nf2ADBeQ0Vys_0\tperson\nf2ADBeQ0Vys_1\tperson\nf2EbBSZ8osI_0\tzebra\nf2EbBSZ8osI_1\tzebra\nf2HKs4L6fwE_0\tperson\nf2HKs4L6fwE_2\tperson\nf2HKs4L6fwE_1\tperson\nf2MDAAk-Euo_1\tperson\nf2ULSb7lIAo_0\tcow\nf2ULSb7lIAo_1\tcow\nf2ULSb7lIAo_3\tcow\nf2hfKAL0ZoA_0\tumbrella\nf2hfKAL0ZoA_4\tumbrella\nf2hhMTSObNY_0\tskateboard\nf2p2YcmHn8c_1\tbicycle\nf2s4nNZ_qew_0\tboat\nf2ypHkP1WUg_0\tperson\nf3EOdxK13SU_0\tgiraffe\nf3HU85Jx7m0_0\tcow\nf3JkzQkcdVM_0\thorse\nf3Kxw7yBcW0_2\tperson\nf3Kxw7yBcW0_1\tperson\nf3Np8rGlxOE_1\tperson\nf3VJKfFdBW0_1\ttruck\nf3aufQBTMME_0\tboat\nf3bk60UZpqE_0\ttruck\nf3bk60UZpqE_5\ttruck\nf3bk60UZpqE_9\ttruck\nf3kQ_6EG8cM_0\tperson\nf3spBT1AGyw_0\tperson\nf31ePv3WlNc_0\tperson\nf33OpHIFMWA_1\telephant\nf33OpHIFMWA_3\telephant\nf33OpHIFMWA_0\telephant\nf33OpHIFMWA_2\telephant\nf35syqOsqSo_0\tboat\nf38P7AlhP5g_0\tperson\nf39rc-7_QQc_0\tperson\nHNr7Ed0_pQY_1\tbus\nHNtUUtLCSDY_0\tgiraffe\nHNtojLNWnKQ_0\tperson\nHN6XGq0aRx4_0\tperson\nHN84N_vu_hw_1\tperson\nHOAbQ4r1tzM_1\tknife\nHOA47mRJ9B8_0\tperson\nHOOwNsMTi9g_0\tperson\nHOSMm-4fUVM_0\tdog\nHOZcbA0OPF0_0\tperson\nHOkS1ljUX4s_0\tbear\nHOmzECHFah4_0\tdog\nHOxzSXuj0O0_0\telephant\nHOxzSXuj0O0_3\telephant\nHO6yeFgs7Hs_1\tbicycle\nHO7Uf5Enr1U_1\tperson\nHPAa3KI1Z30_1\tdog\nHPDws9wJu40_0\ttrain\nHPIdRNu7STU_0\tdog\nHPIxVE3OLG4_0\tperson\nHPPTr0Mpe0A_0\tbicycle\nHPRp9F-4ts4_0\tdog\nHPSJZXcOiEc_0\tperson\nHPjcp8hS6vs_0\tperson\nHPjcp8hS6vs_1\tperson\nHP0RUfuvfx4_0\tperson\nHP4O8FbEpEg_0\tbus\nHP6ROW7ahtU_0\tperson\nHP6YRIGqiI4_0\thorse\nHP62suxiDNw_0\tbicycle\nHP62suxiDNw_2\tbicycle\nHP62suxiDNw_3\tbicycle\nHP62suxiDNw_1\tbicycle\nHP9u4FmRvbw_1\tbear\nHQBhagraDwo_0\tcat\nHQIxUlu7xSY_0\tperson\nHQKVBNWD_ls_0\tperson\nHQM9aDN7Tf0_0\tperson\nHQZVUknJ0lw_1\tperson\nHQZVUknJ0lw_0\tperson\nHQePQ1mfzKw_0\tperson\nHQePQ1mfzKw_1\tperson\nHQhnj0h9OyA_0\tperson\nHQhnj0h9OyA_1\tperson\nHQjXFK_0sFo_0\tperson\nHQxihmm6sSs_0\tperson\nHQz_At1F0
Yk_2\tbicycle\nHQ4ZWia0f1E_2\tcow\nHQ9gmrJ6Bm4_3\tairplane\nHQ9gmrJ6Bm4_4\tairplane\nHQ9gmrJ6Bm4_5\tairplane\nHQ9gmrJ6Bm4_1\tairplane\nHRCOvhALHv0_0\ttrain\nHRUX75Ve2aQ_0\tperson\nHRVMd5SmF8Y_0\tumbrella\nHRl1VhUfhok_0\tperson\nHR1wffFOaEw_0\telephant\nHR4ExP8Ompc_0\thorse\nHSKpu2UmvBo_0\tperson\nHSKpu2UmvBo_1\tperson\nHSN6tO3rh-c_0\tperson\nHSVWpwFagLg_1\tperson\nHSdyrMzM64w_0\tcow\nHS3WVWEFHm8_1\tperson\nHS3WVWEFHm8_0\tperson\nHTAnAeW5Bhs_0\tbird\nHTS20hgMcFQ_0\tbicycle\nHTTz78R4i0c_0\tperson\nHTehrgCQAPo_0\tperson\nHTgldgqci04_0\tperson\nHUFGafskCjw_0\tperson\nHULASsoz03U_0\tperson\nHULASsoz03U_1\tperson\nHUPxNiCgjn0_0\tknife\nHUfwe7j7IBE_0\tperson\nHUgX2V1AkVw_0\tperson\nHUiMyxUEC_A_0\tperson\nHUv2tT_n5Bo_0\tperson\nHUy4cHFX-04_0\tperson\nHUz7znJTRNg_1\tumbrella\nHU_HuNQ4TDw_0\tcow\nHU_HuNQ4TDw_1\tcow\nHU_HuNQ4TDw_2\tcow\nHVEmUm86PBo_0\tmotorcycle\nHVI1w93kCfo_0\tperson\nHVOWKezX_bo_0\thorse\nHVOWKezX_bo_2\thorse\nHVYf36PFglw_0\tdog\nHVY9hWgMujc_1\ttruck\nHVeqzrLyVtk_0\tperson\nHVkFV2q27S0_1\tperson\nHVkQkPaQbrw_0\tperson\nHWAW-J3ZpIs_0\tcow\nHWA45moBwMo_0\thorse\nHWEI24n2tHY_0\tperson\nHWItJuo6DSM_0\tbus\nHWXgDvYdlHE_1\tperson\nHWZSmtWVH54_0\tperson\nHWZenKFJqkY_0\tperson\nHWZenKFJqkY_1\tperson\nHWZenKFJqkY_2\tperson\nHWfpkRSnZp8_0\ttrain\nHWfpkRSnZp8_2\ttrain\nHWjaeLf99dU_0\tbear\nHWr9Kqi0B2A_0\tperson\nHWsTMfZok5E_0\tperson\nHWtKIjJacjk_0\tperson\nHWtyII4CMWg_0\tcar\nHWtyII4CMWg_3\tcar\nHW7FTNqTKhs_0\ttrain\nHW7yQK_j65g_0\thorse\nHXARJhNURSs_0\tperson\nHXH_F5SX6FU_0\ttruck\nHXH_F5SX6FU_3\ttruck\nHXH_F5SX6FU_1\ttruck\nHXKnqbEGfVw_0\tbird\nHXKnqbEGfVw_6\tbird\nHXKnqbEGfVw_1\tbird\nHXKnqbEGfVw_2\tbird\nHXKnqbEGfVw_3\tbird\nHXLA3nbxgh4_0\tperson\nHXWoqdza4oA_0\tdog\nHXaAJtjX1mE_0\tbicycle\nHXaAJtjX1mE_2\tbicycle\nHXaAJtjX1mE_1\tbicycle\nHXa-0NlFTP4_0\tperson\nHXcSrTLsF9c_0\ttrain\nHXhYYfE4uN8_0\tperson\nHXvgiezvrYI_0\ttruck\nHXx4tRTfGRM_1\tdog\nHX0kjr3XYHI_1\tbear\nHX7P1ipPByA_0\tdog\nHX-gTvdUaOE_2\tmotorcycle\nHYLAdzbqvC0_0\tperson\nHYWEWmMMrsU_0\tcat\nHYW3dAv02gE_0\tcow\nHYW6VucwAEg_0\tperson\nHYXFGMzivds_10\ttruck\nHYbuNzqXmyY_0\tperson\nHYiN6skKjfY_0\tknife\nHYoonHvZXCc_0\tmotorcycle\nHY1aAYxxlQo_0\tperson\nHZC5bba_V4Y_0\tknife\nHZJ-JQkt590_1\tbicycle\nHZKExvpKLQ8_1\tperson\nHZLdGfto2mI_0\tcar\nHZSPPN3TMx8_0\tbird\nHZZadt4SIl0_0\tdog\nHZceU_BV2GM_0\tperson\nHZceU_BV2GM_1\tperson\nHZceU_BV2GM_2\tperson\nHZd4rCCsNMs_0\tskateboard\nHZd4rCCsNMs_1\tskateboard\nHZd4rCCsNMs_2\tskateboard\nHZkmrVeoUV4_0\tperson\nHZscUISrdww_0\tperson\nHZ-tGW__JOI_0\tcat\nHaE1N8Q1b7s_1\ttrain\nHaMpIMApSi8_0\tperson\nHaO3z-4gcBs_2\ttrain\nHaRliuOtm7s_1\tperson\nHaiLotzzEXk_1\telephant\nHaiLotzzEXk_2\telephant\nHaiLotzzEXk_0\telephant\nHarW34izH-M_1\tperson\nHauA239AM7I_0\tdog\nHavxbX8tng0_0\tperson\nHayoEz1x5Ks_0\tperson\nHayoEz1x5Ks_1\tperson\nHay4Nx9S5-k_4\tbicycle\nHay4Nx9S5-k_1\tbicycle\nHa8XGRvxQxs_0\tperson\nHa_OuYxLXIs_0\tperson\nHa_w-xJsHAY_0\tzebra\nHbBCtCXKIEE_0\tperson\nHbH7DpR0WUw_0\tperson\nHbJufGCjdSE_1\tperson\nHbKh31cncOI_0\tbird\nHbLoxqqdYsQ_0\tcow\nHbQu1mfGg4c_2\telephant\nHbQu1mfGg4c_3\telephant\nHbQu1mfGg4c_0\telephant\nHbQu1mfGg4c_1\telephant\nHbcyjRGbMBY_0\tdog\nHbcyjRGbMBY_1\tdog\nHbhmBauZqxE_0\thorse\nHbq35QImz2w_0\tperson\nHbq35QImz2w_1\tperson\nHbuCy2fsJk8_2\tknife\nHbyKQdGpxhA_0\tboat\nHb3INTcuOVk_0\tperson\nHb5zCzD4J_E_1\ttrain\nHb5zCzD4J_E_2\ttrain\nHb5zCzD4J_E_3\ttrain\nHb5zCzD4J_E_6\ttrain\nHb5zCzD4J_E_7\ttrain\nHb5zCzD4J_E_8\ttrain\nHb5zCzD4J_E_11\ttrain\nHcBQXS22BDs_0\tperson\nHcJTaK6Q9P8_0\tperson\nHcXN4Pwnaeg_0\thorse\nHcfxwdbwk8c_0\tperson\nHchet3FQwII_0\tperson\nHcxL3_INS_0
_0\tperson\nHc5ZM6UWTbY_0\tperson\nf4OI46BYh08_0\tskateboard\nf4Oj9uMeFdI_0\telephant\nf4PgAt4YpfE_0\tcat\nf4P-R7h_gTU_0\tperson\nf4QyVWC6yrw_0\tperson\nf4XkIcezAd8_0\tperson\nf4XkIcezAd8_1\tperson\nf4Y2tjwOV2k_0\tcow\nf4Y2tjwOV2k_1\tcow\nf4bys9o_Z2M_2\tbird\nf4s0cImpNBM_1\tcow\nf4xLPprxm30_4\tknife\nf49BXPlU-iI_0\tknife\nf4_Mfc9Ccg8_0\ttruck\nf5BIXG_nLok_0\tbus\nf5HsrI3Codk_0\tbird\nf5J7yrE24eY_0\tperson\nf5LuupUslCU_0\tperson\nf5Q2iD7VUx8_0\tskateboard\nf5W37dv91tU_0\tperson\nf5apNjAecEc_0\tperson\nf5bVoAXze0Q_0\tmotorcycle\nf5d1IXK1Tz0_0\tbird\nf5rzpIRd4wA_0\ttrain\nf5wHsLucnf8_0\tperson\nf5zEWaDr1jg_0\tcat\nf50eMXA_-bM_1\tperson\nf50eMXA_-bM_0\tperson\nf53Jmsa7Jkc_0\tperson\nf6AcbdJ77A4_1\ttrain\nf6E2ODGGF28_1\tperson\nf6Px5vjTeRI_1\telephant\nf6Px5vjTeRI_0\telephant\nf6Px5vjTeRI_3\telephant\nf6Px5vjTeRI_5\telephant\nf6UBVcEIt3I_1\tperson\nf6cXiuO-MvQ_1\ttruck\nf6dVANLzPTY_0\tperson\nf6dVANLzPTY_1\tperson\nf6o6ukW_Qog_2\tbear\nf65c6sEDtkE_0\tperson\nf7A6AOC8fOg_0\tperson\nf7A6AOC8fOg_1\tperson\nf7ExsvPto-E_1\tmotorcycle\nf7Fs7-jGglk_1\tbear\nf7GJgMh9xt4_0\tperson\nf7WvltLziTI_0\tboat\nf7cI-B4pJso_0\tcow\nf7kLnCuNTQo_0\tcow\nf7lmZQGcfBA_3\telephant\nf7lmZQGcfBA_4\telephant\nf7lmZQGcfBA_0\telephant\nf7lmZQGcfBA_1\telephant\nf7oBEoL94vw_0\tperson\nf7pnt1rB9kI_0\tperson\nf7x074oihas_0\tperson\nf73BEqi2_DM_0\tperson\nf7-S_iQAyKU_0\tcar\nf7-htlH5qd4_0\tbird\nf7-htlH5qd4_2\tbird\nf8A1o9Nbs64_0\tskateboard\nf8BXIJnggCI_1\tboat\nf8BXIJnggCI_3\tboat\nf8BXIJnggCI_4\tboat\nf8Dp8Yvyr_0_0\tperson\nf8PVrlhAIV4_0\tperson\nf8T4DHNu6MY_1\ttruck\nf8ZxXHSqC_8_0\tboat\nf8cW6kw6240_0\tperson\nf8mzzGhPBaw_1\tcar\nf8q3fKwf5PY_0\tknife\nf8yFyIwDCQ4_4\tgiraffe\nf8zLCa1oGOE_0\thorse\nf8z83D9vGPo_2\tknife\nf80hjE6vabs_0\tperson\nf80hjE6vabs_1\tperson\nf84ypk41ULc_0\telephant\nf9H0LrBLc9Y_0\tperson\nf9H0LrBLc9Y_1\tperson\nf9H0LrBLc9Y_3\tperson\nf9H1bUagACA_0\thorse\nf9H6UaPUITk_0\tcat\nf9LOlCLfsJs_0\tperson\nf9N4Jxt-kUs_1\tknife\nf9N4Jxt-kUs_2\tknife\nf9TCFTluRIc_1\tbus\nf9e12AC1jXM_0\tbear\nf9oWC3kSP1M_0\tmotorcycle\nf9ovukmKaq4_1\tperson\nf9sPt8HIN0w_1\tskateboard\nf9sj-0ZFV6E_0\tperson\nf9sj-0ZFV6E_1\tperson\nf9v2ONFCiwQ_0\tperson\nf91XzUXz11U_0\tperson\nf96d9EwxAB4_0\tperson\nf9-IyW9tVLY_0\tperson\nf-FxqFk0TdM_0\tperson\nf-JXaNm7TBw_0\tperson\nf-J7SQBHRN4_0\ttruck\nf-Yei4idfG8_0\tairplane\nf-dhfS-geuI_1\telephant\nf-dhfS-geuI_2\telephant\nf-h9L-PN1ZM_1\tbird\nf-iLJUDdrD8_0\tperson\nf-niuVrgiIc_1\tperson\nf-rp_CghH-E_0\tskateboard\nf-s-4lM4qPA_0\ttruck\nf-w51BH60RQ_0\tperson\nf-1WVe76te0_0\tcow\nf-4EyKUawVo_0\tbear\nf-7ZEGsCz9U_0\tperson\nf_GKi-DGmzM_0\tperson\nf_Gf2hpt7y4_0\tgiraffe\nf_GudF8uST0_0\tperson\nf_NsA6enCZE_0\tperson\nf_OOyDOAAOU_7\telephant\nf_QhMhkyUSY_3\ttruck\nf_QhMhkyUSY_1\ttruck\nf_QhMhkyUSY_4\ttruck\nf_Us8TvJMUQ_0\tperson\nf_VwDCt9HTc_0\tdog\nf_WQIaZ5PjY_0\tboat\nf_bXOtZjzfo_0\tperson\nf_b0IaRqtbs_0\tperson\nf_jLGz53IpQ_0\tperson\nf_jLGz53IpQ_1\tperson\nf_mo54sXCc8_1\tperson\nf_mo54sXCc8_0\tperson\nf_rC1JIAMBU_0\tperson\nf_wk-NOqceY_0\thorse\nf_yMF9tkk70_1\tcar\nf_yvJuTzFHc_0\tmotorcycle\nf_yvJuTzFHc_2\tmotorcycle\nf_2I0S-EYu8_0\tdog\nf_3x9qJXCjA_0\tperson\nf_49EFLQ02I_0\tperson\nf_8S2hHC2rc_0\tbicycle\nf__fXHkVh5E_1\tcow\ngAKFUl9e_kg_0\tperson\ngAQ92hISW6g_0\tperson\ngARNWQDyaYM_0\tboat\ngAYbqApcfGs_0\tperson\ngAdIZN7_0SM_1\tairplane\ngAeHmfC6t5s_0\tcat\ngAetQXcftXM_2\tdog\ngAnOylz1kDY_0\tperson\ngAnmF0EFcB4_2\telephant\ngAorjWC_59o_0\tcat\ngAo9Rsd6xwg_0\tcow\ngA2FDYNulg8_1\tperson\ngA22uEcTAuY_1\tdog\ngA84cp5Keqk_0\thorse\ngA_a2Ajm7B8_1\thorse\ngBFsvbfVaLg_0\tperson\ngBJgWZcXu9o_0\tperson\ng
BK7NwUcSoY_1\tperson\ngBOpan7nm6M_0\thorse\ngBOpan7nm6M_1\thorse\ngBPipHCII3M_0\tbus\ngBRc8zqsL78_0\tdog\ngBUOzZPs_o4_2\tperson\ngBUOzZPs_o4_0\tperson\ngBYqrtFnN_Y_0\tperson\ngBYqrtFnN_Y_2\tperson\ngBeaBC0u9cQ_0\tperson\ngBhKhiEJUCM_0\thorse\ngBiq_BH15FM_0\tdog\ngBoebgAjbVw_0\tperson\ngBoebgAjbVw_1\tperson\ngBs3hPLJTGs_1\thorse\ngBwCej92lKg_1\tperson\ngB0wConR2VI_1\tskateboard\ngB2QHXkiiHs_2\telephant\ngCDBnQV_G3c_0\tcat\ngCDBnQV_G3c_1\tcat\ngCGtBmntCiI_1\tmotorcycle\ngCHegjuq0os_0\tperson\ngCHegjuq0os_1\tperson\ngCI1E3Hezdo_2\tcow\ngCI1E3Hezdo_1\tcow\ngCTp3CdMHCo_0\tperson\ngCT0VAdPm98_0\tcat\ngCuOoA6aZ5U_0\tcat\ngC7K3OeQFHo_3\tbird\ngC7XtkA9y_Y_0\tdog\ngC-xUbdM-tU_0\tperson\ngC-xUbdM-tU_1\tperson\ngDAPPFBC9Gw_0\ttrain\ngDEpD9ek-O8_0\tskateboard\ngDGLrPPl_PU_0\tcat\ngDMsKJ61KPo_1\tskateboard\ngDOGAHsBM_o_0\tperson\ngDTs0BOj8Fw_0\tcat\ngDU0hHsqtbU_3\tknife\ngDU0hHsqtbU_5\tknife\ngDU0hHsqtbU_0\tknife\ngDVGs8wTXCQ_0\tcat\ngDkDXOm8z5Q_1\tcow\ngDkDXOm8z5Q_0\tcow\ngDk-zDBsv7g_0\tdog\ngDnSIxaiPzk_0\tperson\ngDn3-DCSgNg_0\ttrain\ngDsBFuJE6D8_2\tdog\ngDvOoWXI3yg_0\tperson\ngD2GATPADlA_0\tperson\ngD5_x_Bz1z4_0\tperson\ngD5_x_Bz1z4_1\tperson\ngED4_ImWufA_0\ttruck\ngEE_GCrAqF0_0\tperson\ngEJi9Jawk2A_0\tperson\ngEOxDCDD97k_1\thorse\ngESEn7ZZELM_0\tperson\ngESEn7ZZELM_1\tperson\ngEai3uMvvFg_0\tairplane\ngEai3uMvvFg_3\tairplane\ngEai3uMvvFg_4\tairplane\ngEhLmQnM720_0\tcar\ngEu4mV0DWRQ_0\tperson\ngE0ZQD1rCy8_0\tperson\ngE0mBxOEwRI_1\tskateboard\ngE0mBxOEwRI_3\tskateboard\ngE8ErAnVuzY_0\tbird\ngE8ErAnVuzY_2\tbird\ngE-GVN9ErhI_0\tperson\ngFEnoylVci0_0\tperson\ngFac0jUOjCE_0\thorse\ngFcIMdm4qtI_0\ttrain\ngFdHQTLSmnc_0\tairplane\ngFfVZSPVYmY_0\tperson\ngFiSl9m-w0k_0\tperson\nHdBc9ySq76E_1\tbird\nHdCyMGZFJhM_0\tperson\nHdFYXjdN5_8_0\tperson\nHdO2lmXvENQ_0\thorse\nHdR6VoZEwAU_0\tcat\nHdSXU0fhHbM_0\tperson\nHdT_9pXdxuc_1\tperson\nHdbZzqJGLo8_1\tcow\nHdcXcqUlgI4_0\tskateboard\nHdhKF0UWx4g_0\tperson\nHdh3nOzwVW8_0\tperson\nHdjbDB8UvCY_0\tperson\nHdo3_NQiVKw_0\tknife\nHd85XlwoOMc_0\tperson\nHd-wT5OTZDE_0\tperson\nHd-wT5OTZDE_1\tperson\nHeIrGQnIMOE_0\tdog\nHeLNz5XJe08_0\tperson\nHeTGT7JfvB0_0\tperson\nHeUD1Hrzswg_0\tbird\nHeYNsU-PKJs_0\tcow\nHeYNsU-PKJs_1\tcow\nHedUVNznPK0_0\tcar\nHedUVNznPK0_1\tcar\nHeoyKd78htI_0\tperson\nHeoyKd78htI_1\tperson\nHewdFRJAXH4_0\tperson\nHe08dewEgbY_3\tmotorcycle\nHe08dewEgbY_0\tmotorcycle\nHe1OQxCPk_w_0\tperson\nHe5cucK-e48_0\tperson\nHe6bAMDkCss_0\telephant\nHfDHvE46LYU_1\tbird\nHfDzCPRQ2nw_1\telephant\nHfEXlJ0dOhU_0\tperson\nHfEZYvYqq_Y_0\tcow\nHfHNi93ZHoo_3\tcow\nHfHNi93ZHoo_1\tcow\nHfOcLeLWchM_0\tperson\nHfZ871F0xSo_0\tcat\nHfnnbr4CeTg_3\tbus\nHfqI5BIpp0s_0\tperson\nHfq3_YJ7BpY_0\tmotorcycle\nHfq9JFmquE4_0\tperson\nHfvJc2dxUR4_0\tboat\nHf1Iyyz2DMY_0\tperson\nHf1Iyyz2DMY_1\tperson\nHf8JWsbSYYk_0\tperson\nHf8-8h45g-g_1\telephant\nHf8-8h45g-g_0\telephant\nHf8-8h45g-g_2\telephant\nHgDimNCaxF0_1\tbear\nHgFCKM4ndEc_0\tcar\nHgMYuCtsOwc_0\tperson\nHgMYuCtsOwc_1\tperson\nHgO57Npp9Yg_0\ttrain\nHgexaoNeZJk_0\tperson\nHgiYmNrxUzg_1\tperson\nHgkeptGXNt4_0\tmotorcycle\nHglF9x-ORXU_0\tperson\nHgr5__oevds_0\tperson\nHg2vqnLAc8I_0\tdog\nHg4DJ-x85Dw_1\telephant\nHg-R_RMIEN8_0\tairplane\nHhASNiFpJlw_0\ttruck\nHhF6cAtp7Xs_0\tknife\nHhGGJNmwWHk_0\tperson\nHhVSLU0A-wk_0\tcar\nHhcMy4KZ9mY_0\tskateboard\nHhfSUB2LOTU_0\tperson\nHhiUVwHWmwM_1\tperson\nHhiUVwHWmwM_2\tperson\nHhiUVwHWmwM_0\tperson\nHhjGAeK-XWg_0\tperson\nHhoRf1Ovlf8_0\tperson\nHhvq-cwBJgo_0\tperson\nHhwzl9x_m34_3\tcow\nHhxV27YhiqI_0\tskateboard\nHh1xD0M0N8Q_0\tperson\nHh6x850teNQ_5\tairplane\nHh6x850teNQ_7\tairplane\nHh6x850teNQ_8\tairplane\
nHh6x850teNQ_9\tairplane\nHh6x850teNQ_10\tairplane\nHiBUWbOyqcQ_0\tperson\nHiGZ2EdJh2o_0\tperson\nHiMItbtVHcY_0\tcat\nHiMItbtVHcY_1\tcat\nHiNt0G1AIO4_0\tmotorcycle\nHiTE5nqzjBw_0\tzebra\nHiUz61ffgHA_0\tperson\nHiZDjdREbmc_0\tumbrella\nHim7gJ7sArU_0\tperson\nHim7gJ7sArU_1\tperson\nHinGUsliCKc_0\ttruck\nHirBTVnhNls_0\tcow\nHi4ITByGP0Q_0\tperson\nHi4mzrYdRBQ_0\thorse\nHi4mzrYdRBQ_2\thorse\nHi4mzrYdRBQ_3\thorse\nHi8Ey0o5mCQ_1\tperson\nHi-7ZtG_JWI_1\tperson\nHi_YHp3Jz48_0\tcow\nHjAtN_MbguE_0\tperson\nHjLLTWwaCB8_0\thorse\nHjNfykX021M_0\tperson\nHjNfykX021M_1\tperson\nHjgdNiVfO9M_0\tskateboard\nHjlX9nu9Vf4_0\tperson\nHjo13y8dFy4_0\tmotorcycle\nHjt_y0CW-dY_0\tperson\nHjt_y0CW-dY_1\tperson\nHjxd2cno65M_0\tskateboard\nHj0J8FVxBjg_2\tperson\nHj0J8FVxBjg_0\tperson\nHj0J8FVxBjg_1\tperson\nHkApyQz8MTY_1\thorse\nHkQ4tzUFCUU_0\ttruck\nHkW_wLkAKpg_0\tperson\nHke6h3Sv5bA_1\tbicycle\nHkzYNIDq0q4_0\ttrain\nHk45sdCRh9g_1\tbear\nHlEkgK08UfY_1\tperson\nHlTQbPXnzu8_0\tdog\nHlWsih27OmA_0\tbird\nHlaPVZM-53c_0\tperson\nHlfpirtC6oQ_0\tperson\nHlmuHGoCGAI_0\tcow\nHltyUzvtugM_1\tbicycle\nHlurUBv4bh0_1\tgiraffe\nHlurUBv4bh0_3\tgiraffe\nHlurUBv4bh0_4\tgiraffe\nHlwSaYwFLRE_0\thorse\nHl3qik9GRX4_0\tperson\nHl5MXwWiXWM_0\tperson\nHmDDLtJcD5g_0\tperson\nHmORePbYJkk_0\tskateboard\nHmPvsdwo_fY_0\tdog\nHmRm2phIiGo_1\tbird\nHmY8zwmIiac_0\tcow\nHmaGylwEFxw_0\tperson\nHmbTCfB3Vkg_0\tperson\nHmk4dZnPtRY_0\tbus\nHmn3xf-zqWI_0\tperson\nHmqV_7hAxdw_0\tperson\nHmr0jbygomI_0\tgiraffe\nHmwxDK0zo6U_0\tperson\nHmyj1zKgToA_0\tperson\nHm0kxS31F_U_0\tperson\nHm0kxS31F_U_1\tperson\nHnNJeASG0-M_3\tperson\nHnNJeASG0-M_4\tperson\nHnNJeASG0-M_2\tperson\nHnNzkYDhWks_1\tperson\nHnNzkYDhWks_2\tperson\nHnNzkYDhWks_0\tperson\nHnP7iXcgg8g_0\ttruck\nHnSHJ_iCdi4_3\ttruck\nHnSHJ_iCdi4_1\ttruck\nHnUrGKpAsOk_0\tcat\nHnbNOJpzYPE_0\tperson\nHnjhdtM8qSI_0\tskateboard\nHnptRKjBUF0_2\tboat\nHnwYRWj3fk4_2\tknife\nHnxaJbaAiUI_0\tperson\nHoH5exlgIxk_1\tskateboard\nHoLifxKZUpI_0\tperson\nHoLifxKZUpI_2\tperson\nHoLifxKZUpI_1\tperson\nHoNs_4V1pNs_1\tbear\nHoNs_4V1pNs_4\tbear\nHoP_nMgAxAk_0\tboat\nHoeeRkyNozc_0\tcow\nHon64st5_6g_0\ttrain\nHo2ixBE8dzE_0\tgiraffe\nHo5TcUOlb3Q_0\tmotorcycle\nHo5o7aBqNAc_0\tperson\nHo6N0OgD-1M_0\tperson\nHpBBda_pbf8_0\tmotorcycle\nHpGr16tW9dk_1\tperson\nHpQ90KkREGo_0\tperson\nHpUPD5_WMYI_0\ttrain\nHpZ3IzUfsGg_3\tbus\nHpbQsLdUHN4_0\tboat\nHpjyvLHus3Y_1\tskateboard\nHpkTeQdQ03Q_0\tskateboard\nHprw9lNWGGs_0\tperson\nHptcjVcfzgY_0\tcow\nHpwk73qvroU_1\telephant\nHpzTTAS6Qt8_0\tperson\nHp0SQy5w9Q4_0\tperson\nHp-eaTbVfLY_1\tbear\nHp-2Gb7Fwns_0\tcow\nHqxhhM71S2g_0\thorse\nHq1KLztJBrE_0\tperson\nHq6tGHLzg4Q_0\tperson\nHq814Tfrblw_1\tairplane\nHrHPBJOnFgg_1\ttrain\nHrHPBJOnFgg_0\ttrain\nHrHPBJOnFgg_4\ttrain\nHrHPBJOnFgg_6\ttrain\nHrdVu5J3rZQ_0\tperson\nHr-keYNRBhA_0\ttrain\nHsLZwGFHYUg_1\thorse\nHsNcZZ6iwHQ_0\tperson\nHsOiHc1moVk_0\tperson\nHsOkCwZLv_w_0\tbus\nHsOkCwZLv_w_3\tbus\nHsOkCwZLv_w_1\tbus\nHsOkCwZLv_w_2\tbus\nHsR2xk4I1as_0\tperson\nHsVKw_8AQtM_0\tperson\nHsZgeesgCZQ_0\tperson\nHsjVUPs3XB4_0\tboat\nHslbDMoiABY_0\tcar\nHslld67XdsY_0\tperson\nHswufOfUGyk_0\ttruck\nHsyscFWIPZs_0\tbus\nHs0HRqYcYqA_0\tcar\nHs6bVSOu98U_0\tdog\nHs_vQr20HdQ_0\tskateboard\nHs_vQr20HdQ_3\tskateboard\nHtErHV_tZqs_0\telephant\nHtIbfC8DDos_0\ttruck\nHtNaGNO6nnc_0\tperson\nHtRiNzzfakk_0\tperson\nHtUPhgHKN9c_1\tboat\nHth8t7jhKPs_4\thorse\nHth8t7jhKPs_7\thorse\nHth-I5KYVsI_0\tcat\nHt054jKgWfE_0\tperson\nHt9C8ABsxrg_0\tperson\nHt_bczKGV-0_0\tperson\nHuNIgJEUelo_0\tperson\nHuNIgJEUelo_1\tperson\nHuOzcY9ybpo_2\tdog\nHuVl7peYYF8_0\tperson\nHuVoecmBgpM_2\tbird\nHuVoecmBgpM_1\tbi
rd\nHuZPTuSe7Zw_0\tperson\nHue6Q5JKEKw_0\tcow\nHun4T6fv3cs_0\tperson\nHuqC6CX9uRA_1\tperson\nHuyd-7WlWWU_0\tperson\nHu3xpcZqwRg_0\tperson\nHu9DGxLcg2c_0\tperson\nHu-VYy60p64_0\tperson\nHvHJi-EkL8c_0\tskateboard\nHvIubGltpPY_0\tdog\nHvLq5xDKM6E_2\tbicycle\nHvP4rcOll6k_0\tperson\nHvQGnFuiwtg_0\tcat\nHvTvaPx2hXw_1\ttrain\nHvhkLhJ4YFQ_0\tperson\nHvuLPfhVT3s_0\tperson\nHvyIg5RMLbU_1\tperson\nHvyIg5RMLbU_0\tperson\nHvyzpBvy40o_0\tperson\nHv5sH0eTE_M_0\tdog\nHwSP55CmiCk_0\tperson\nHwS3weg4aQc_0\tdog\nHwS3weg4aQc_2\tdog\nHwY6kiQlICc_0\tperson\nHwdEYJ2bZkg_0\tairplane\nHwdyzravQpY_0\tcat\nHwfLycybCD0_0\tmotorcycle\nHwgmR0Qlm_I_0\tperson\nHwipRH29Hr0_0\tbus\nHwnqezsko-Q_0\tperson\nHwnqezsko-Q_1\tperson\nHwxnH--ot8o_0\tcar\nHw0JhQaRYcA_0\tcow\nHw2Bhz2SkUI_0\tperson\nHw2Bhz2SkUI_1\tperson\nHxMniz8r1x4_0\tperson\nHxP056QWsGY_0\tperson\nHxP056QWsGY_1\tperson\nHxaFZyog34E_0\tperson\nHxaFZyog34E_1\tperson\nHxgU1Dh8wMs_1\tperson\nHxiBpvG82Ys_0\tmotorcycle\nHxq1wNRv5Yg_0\tperson\nHxv6y6I4mvE_0\thorse\nHx19D3w4xGI_0\tgiraffe\nHx_Z9TOIV8U_0\tmotorcycle\nHyJgfYNotwk_0\ttruck\nHyUY7bqdm9Q_7\tdog\nHyUY7bqdm9Q_0\tdog\nHyVLne6RE-A_0\tperson\nHyXjUWAQ970_0\tskateboard\nHygs9OBUgg4_1\tperson\nHyuQCu-z558_0\tmotorcycle\nHywSTw3dtgs_0\tperson\nHywSTw3dtgs_1\tperson\nHy4E2NZEc34_1\ttrain\nHzAOQnmw_bo_1\telephant\nHzAOQnmw_bo_2\telephant\nHzCClfShiwM_0\tperson\nHzCClfShiwM_1\tperson\nHzDzb9xxc6o_0\tperson\nHzESeh3ZV4g_0\tperson\nHzHWWeZEU6E_1\tskateboard\nHzJgpBBIk1o_0\tcat\nHzLm3QfIx9w_0\tperson\nHzLm3QfIx9w_1\tperson\nHzXBY-SJECY_0\thorse\nHzYY4-iAvrk_0\tcow\nHzdSxrJ2oBw_0\tskateboard\nHzkmlCJwvqo_0\thorse\nHzlcc_lAGVo_2\tskateboard\nHzqIVSJNXAU_1\tperson\nHztbwJhPXyk_0\tperson\nHz6I6jLi4NA_0\tdog\nHz8qayZDGpU_0\tperson\nH0Adt_c6kJo_2\telephant\nH0EEB1bPOjE_0\tperson\nH0VjOJvg49Q_0\tbicycle\nH0Ym6NE2ny8_0\tcat\nH0gWl9KRbHo_0\tperson\nH0k2WZec6aA_1\ttrain\nH0k2WZec6aA_3\ttrain\nH0k2WZec6aA_4\ttrain\nH0k2WZec6aA_0\ttrain\nH0u061QsnHw_0\tcat\nH0yhw97jkkY_0\tperson\nH0z8VqDW-vg_1\tairplane\nH01F2fhFpr0_0\telephant\nH097WsXpask_0\tperson\nH097WsXpask_1\tperson\nH1C2ZZeeVs0_0\tcow\nH1Hd5Japfbc_3\ttrain\nH1Hd5Japfbc_0\ttrain\nH1Hd5Japfbc_1\ttrain\nH1Hd5Japfbc_2\ttrain\nH1JIvu1dbbk_0\tperson\nH1JIvu1dbbk_1\tperson\nH1MTfTrQrE0_1\tperson\nH1d68B_jDjI_0\tperson\nH1hg-0_AS9A_0\tcow\nH1xBJoYM7rE_4\ttruck\nH1xBJoYM7rE_5\ttruck\nH117IshzypA_0\tknife\nH144B0rpQh0_0\tperson\nH144B0rpQh0_1\tperson\nH1-_3CvKDzc_0\tbird\nH2Q-46IlKEc_5\ttruck\nH2Q-46IlKEc_6\ttruck\nH2RoEMwxEAk_1\tperson\nH2TqEPsubdM_0\tbear\nH2iTxNLOK1Q_2\tmotorcycle\nH2iTxNLOK1Q_0\tmotorcycle\nH2iTxNLOK1Q_3\tmotorcycle\nH2vkpfO2yqU_0\tperson\nH22P5Z4GfkE_0\tperson\nH29Xe5gG_-s_0\tperson\nH3A2DSw_xNU_1\telephant\nH3GcVWKTVd4_2\ttruck\nH3NrFrjQlfc_0\tperson\nH3exbzmmPQY_0\tperson\nH3jC0oToDjU_2\tperson\nH3jC0oToDjU_3\tperson\nH3jC0oToDjU_0\tperson\nH3o1VsopVFM_1\tbicycle\nH3o1VsopVFM_2\tbicycle\nH3pifBCagTI_0\tperson\nH30IPtBzf_s_5\tskateboard\nH30ifg3HO_I_3\tdog\nH33IRr1Z3-w_1\ttrain\nH36UOsilz4M_0\tperson\nH4Hp-UJYZ_g_0\tbicycle\nH4JiUp8EH3s_0\tzebra\nH4VZD26aqe8_0\tskateboard\nH4VZD26aqe8_1\tskateboard\nH4bN1hcXw9Q_1\tperson\nH4dTHFeYa30_0\tmotorcycle\nH4eE_LAeWXQ_0\tperson\nH4eE_LAeWXQ_1\tperson\nH4gxLA7vTo4_0\tperson\nH4lBmXOi3Uc_0\tdog\nH40G2dsVha4_1\ttrain\nH41XJMKpfFM_0\tbus\nH42hQSjU97o_0\tknife\nH5NqMNaMEiM_0\tbird\nH5YO56LD_dY_0\telephant\nH5YO56LD_dY_1\telephant\nH5iHzuWmtDw_1\tdog\nH5sijKl_Xi4_0\tcow\nH50EXfjT2O0_2\tairplane\nH50EXfjT2O0_0\tairplane\nH50EXfjT2O0_1\tairplane\nH50-_mqAU14_1\tcow\nH55Ru4hgats_2\telephant\nH55Ru4hgats_3\telephant\nH6OhYxXS1So_0\tc
at\nH6UwkC3sYic_0\tcat\nH6ZHYEOcjCI_0\tbicycle\nH6ZHYEOcjCI_1\tbicycle\nH6Z8sZ34ZGw_0\tmotorcycle\nH6dXJIZnH-k_2\ttrain\nH63oHdGMBAs_0\tbird\ngFunUi36tVM_0\thorse\ngFunUi36tVM_1\thorse\ngFvhLM1k-IY_2\ttruck\ngFwCuQBtZiU_1\tumbrella\ngF7IM-CiOdU_7\tbicycle\ngF7IM-CiOdU_0\tbicycle\ngGBEKYXUhbE_0\ttruck\ngGMxVO2zmP4_9\tbird\ngGMxVO2zmP4_1\tbird\ngGMxVO2zmP4_2\tbird\ngGMxVO2zmP4_5\tbird\ngGMxVO2zmP4_8\tbird\ngGSCGkm00jM_1\tbicycle\ngGYN2hnw1SQ_1\telephant\ngGdKtY4p1E0_0\tairplane\ngGt9CVOzJOI_3\tknife\ngGzaN_8PxZw_0\tskateboard\ngG8tfb-eSuo_0\ttrain\ngHC3HqRbW6g_0\telephant\ngHF9PM2MVuw_1\ttrain\ngHvzU7dfBU8_0\tgiraffe\ngHyK46CyQtA_0\tcow\ngH0LLPcn-H8_0\telephant\ngIBZr7Mh05k_0\tbird\ngIMq_fnjtSM_0\tcat\ngISy0wedyW4_0\tboat\ngInHAdlbB60_1\tskateboard\ngIsXFCo7Nt4_1\tdog\ngIxuS1GwPPo_0\ttrain\ngJV63DGM7Ew_1\tcar\ngJa0yNDBFio_3\tperson\ngJa0yNDBFio_0\tperson\ngJa0yNDBFio_2\tcow\ngJfD9eHnos4_1\telephant\ngJn5fXk7dCs_0\tairplane\ngJuZGVWuQQ8_2\tbicycle\ngJ-k_oHkqYc_0\tcat\ngKHR68FmKE8_3\tairplane\ngKHR68FmKE8_0\tairplane\ngKHR68FmKE8_4\tairplane\ngKmF78OWCUc_0\tmotorcycle\ngKqUwiPYSh8_0\tmotorcycle\ngK7dud30V7k_0\tgiraffe\ngK_K33gm3SA_1\tmotorcycle\ngLQWgnWqQ1Y_0\tbicycle\ngLRU7lXCgNw_1\tdog\ngLRexWYaW_Q_0\tskateboard\ngLbADp0AlZU_0\tbird\ngLtnBhTBpkA_1\tboat\ngL3uBv5NWJU_1\tbus\ngL7JySv9H4I_0\tbicycle\ngMAW4Am5_pc_0\tcow\ngMBTewi9VZg_0\tcow\ngMCCgBzug_U_0\tknife\ngMFgEtqbTXs_0\tboat\ngMJuszEOURk_0\tcat\ngMMJH4UYboM_3\tbus\ngMXt8X-xC_g_0\tdog\ngMlNev_l4Yg_0\tbus\ngMlhd1gczF4_0\tairplane\ngMsGe7w79Hg_1\tcar\ngM9tFNvc1xw_0\tcow\ngNDSQ2l9FYg_1\telephant\ngNMkDmfkZ1E_0\tmotorcycle\ngNcGXjn7g9o_0\tskateboard\ngNwKVPIi010_1\tskateboard\ngN2aKPpTpzQ_1\tdog\ngN7-cLfUlt8_4\tgiraffe\ngN7-cLfUlt8_6\tgiraffe\ngOOB0RZmnUA_0\tcow\ngORdlzUa3nQ_1\tbird\ngO48FZrUm88_0\tskateboard\ngO-8RNI2Puc_1\tdog\ngPhcXlQLLRU_0\thorse\ngPrWvEE7yjw_0\tcat\ngPteWZyyJeo_0\tcow\ngP3SQErTTOg_1\tmotorcycle\ngQBW4py4GhY_0\tskateboard\ngQEGmIhhEQ4_0\ttrain\ngQEGmIhhEQ4_1\ttrain\ngQEGmIhhEQ4_2\ttrain\ngQFqppfDRRk_0\tumbrella\ngQLZ5H-n0Uk_4\tknife\ngQVlREJXkik_0\tknife\ngQWTTEHj5Hs_0\tcat\ngQeqE3dgZoM_3\tairplane\ngQe5gykuyi4_1\ttrain\ngQpWY94Fx5E_0\tmotorcycle\ngQpuEhphXHk_0\tcar\ngQpxfwrF7Sc_0\tbus\ngQ6AUvEXuaQ_0\tbicycle\ngQ9HhxeKI4A_0\tmotorcycle\ngQ_SF2MtsUc_0\telephant\ngRFcteFGpLM_0\tskateboard\ngRJGd_HzC-8_0\tknife\ngRJpf6JwJeU_1\tgiraffe\ngRNKgw2D_mE_0\tknife\ngRVrvJioWZ8_1\ttrain\ngRoGrhv1ebI_0\telephant\ngRsOR1tKh8U_0\ttruck\ngR3ihf3rch0_0\tcar\ngSXDTJjj1jk_0\ttrain\ngSi2fNTUsy8_0\thorse\ngSlT3ALqvTM_0\tskateboard\ngS0DTbVQ2x8_1\tknife\ngS25yLrNO98_0\tbear\ngS2-SAccVh0_0\tskateboard\ngS7U-6Z8M2g_1\tknife\ngS_9D3OWXAk_0\tairplane\ngTqgARR0BBQ_1\tboat\ngT27MQBhatA_0\tskateboard\ngUDoTzwZlso_0\tdog\ngUL0-NbHvuA_0\tmotorcycle\ngUMLascwbtU_0\ttrain\ngUNCDmbzxq8_0\ttrain\ngUbc_OUTnOs_0\tairplane\nH7ONEeAkBFo_3\tmotorcycle\nH7ONEeAkBFo_2\tmotorcycle\nH7YUH_GBWdQ_0\ttrain\nH8B-3STVp6E_0\tcat\nH8LitQV6pNM_0\tcat\nH8SccYIiPs8_0\tzebra\nH8coORJpR80_1\tskateboard\nH8k1E1i7AvQ_0\tknife\nH9AQUC0N1zI_0\thorse\nH9JfwPhdCjg_0\tboat\nH9KjlXZYxJU_0\ttrain\nH9KjlXZYxJU_8\ttrain\nH9TUml4LflE_0\tcow\nH9UTvMwaoRg_0\tcow\nH9bbSssKl2o_14\tumbrella\nH9eutGBn3zw_0\tmotorcycle\nH-C6EBylvh4_1\tcat\nH-IoiGsEU5Y_0\ttrain\nH-QKbNwtoH8_1\tcar\nH-gh485Om10_0\tbus\nH-gh485Om10_1\tbus\nH-kkRVEs3Bg_0\tmotorcycle\nH-uiufHSb3s_0\tknife\nH-uvqjsUCLc_0\tdog\nH-uvqjsUCLc_1\tdog\nH-5Ynjv0dQI_1\ttrain\nH-62b99sK_s_0\ttrain\nH-62b99sK_s_1\ttrain\nH_Ei1gRODpw_0\tdog\nH_KMZLSAxMw_0\ttrain\nH_iI201Iqws_1\ttruck\nH_iYHl4pFuQ_0\thorse\nH_mRfG30Gzo_0\tskateboard\nH_1O-OBZ3BA_0\
thorse\nH_6vxd3ckIY_0\tcat\nIADSsAb2KSo_1\tumbrella\nIADSsAb2KSo_2\tumbrella\nIAFApeJ5FvM_1\tmotorcycle\nIAOiNYVeqzE_0\tbird\nIAaINtcnO7A_0\tbicycle\nIAcbsZcN_pM_1\tmotorcycle\nIAkSntQ2Aso_0\thorse\nIAlz_evs7fU_3\tcar\nIApV0rfD9oQ_0\tdog\nIAsXYmK1baI_0\tmotorcycle\nIAwKojHnvtU_0\ttrain\nIBD9tJNb9_o_0\ttrain\nIBFp5y96q78_0\tmotorcycle\nIBFp5y96q78_2\tmotorcycle\nIBKLgBXZFzw_0\tmotorcycle\nIBYJQU6-nGg_2\tcow\nIBYg-hMbb04_0\tknife\nIBm1C4qJtTg_5\tumbrella\nIBm1C4qJtTg_8\tumbrella\nICQbVnaJL_0_0\tbus\nICZ4tinBQZg_1\tknife\nICZ4tinBQZg_2\tknife\nICZ4tinBQZg_3\tknife\nICg3W1-Prhk_0\telephant\nICnAWjPDzRw_0\tcow\nICtLhp-qveM_0\tboat\nIDCBO7W7xpo_0\tcow\nIDNvFEra8mc_5\thorse\nIDNvFEra8mc_1\thorse\nIDNvFEra8mc_2\thorse\nIDNvFEra8mc_3\thorse\nIDNvFEra8mc_4\thorse\nIDO6jw3u3_w_1\tairplane\nIDcxChwEqDs_2\thorse\nIDeGA2EV3WY_0\tairplane\nIDeimFOIbVc_0\ttrain\nIDmwsXLZKUs_0\tcow\nID1faW2L3rM_0\tcat\nIEOg-ZulFR0_1\tbird\nIEPYJyHfP2E_1\telephant\nIEYC-aYAQ40_0\tboat\nIE5qZDd7tWw_0\telephant\nIFGohfPURX4_0\tperson\nIFfS7hatV0s_0\ttruck\nIFkUMGE7bbc_1\telephant\nIFkUMGE7bbc_0\telephant\nIFrHlldbUdQ_0\tcow\nIFvO1O-6vqk_0\ttruck\nIHQvg9gYLjw_0\tdog\nIHSCfRs-J38_2\tskateboard\nIHY0eeHfBcY_4\ttruck\nIHjI35oW0T4_0\tcar\nIHxX0fKU9iM_1\tskateboard\nIH3E7RS6Hn8_0\tcat\nIH9BmEg26Cw_0\tperson\nIIBN7FGNNEs_1\ttrain\nIIBN7FGNNEs_2\ttrain\nIIBN7FGNNEs_3\ttrain\nIIBN7FGNNEs_4\ttrain\nIINTapIzzes_2\tskateboard\nIIw0KKAeBeQ_0\tskateboard\nII0JbbQq-Sg_1\tbird\nII61z65eDCY_2\tcow\nII61z65eDCY_0\tcow\nII94vSsb4Uc_0\tcar\nII_okDlDaO0_0\tcat\ngUt0vA8_1Ow_0\tairplane\ngUvZ3RC9tEU_0\tknife\ngU3SNUS1_ng_0\tbicycle\ngU4mBoB-b7k_1\ttrain\ngVAp7rt84ic_2\tbicycle\ngVCrRXledlU_1\tboat\ngVCrRXledlU_0\tboat\ngVV-5JdLuXk_3\tcar\ngVXzT_h1SFI_3\thorse\ngVXzT_h1SFI_4\thorse\ngVXzT_h1SFI_2\thorse\ngVaB7hwBhTA_0\tcat\ngVjL5txcFMI_0\tknife\ngVrTFXdPWJ8_0\telephant\ngVxqk8tLXL8_0\ttruck\ngV27xS9pqNQ_0\ttrain\ngV3Xmwy3RKo_6\ttrain\ngV3Xmwy3RKo_13\ttrain\ngV9A5NfFexQ_0\tcar\ngWcacGgcxYU_4\tbear\ngWlmYVY4kW4_1\tbicycle\ngWnhQi-zfEE_0\tskateboard\ngWpNWuo7vio_2\telephant\ngWpNWuo7vio_3\telephant\ngWsOR7UiwDs_0\tairplane\ngWz5ZMzC58s_0\tcar\ngXBIzdmmHbA_1\tbird\ngXEHUZgPCGg_4\tbear\ngXFmghAzaVg_1\tmotorcycle\ngXGvO4k4xQY_0\ttruck\ngXHsyuynhso_2\tknife\ngXW33K91X7c_0\tbicycle\ngXn0Y5X5MJE_1\tzebra\ngXn0Y5X5MJE_0\tzebra\ngXt0u16Y6ZY_0\tboat\ngY_Ey8Ps_ZE_0\tcow\ngZhsGXSn5bU_0\tmotorcycle\ngZqGyIMgMbs_0\tbicycle\ngZxcxQBlx0s_0\tcat\ngZzmloffFW4_0\tbus\ngZ8kZt451Ww_3\thorse\ngZ92ZDty9wI_0\tskateboard\ngaCEAVQd1-M_1\tbird\ngaS7x3F3gpk_0\tbicycle\ngaS7x3F3gpk_1\tbicycle\ngaS7x3F3gpk_2\tbicycle\ngaS7x3F3gpk_3\tbicycle\ngalykATgRC0_0\tcow\ngaqS-4IaQ5c_2\tbus\ngbA3ItatxL8_0\tskateboard\ngbE0vzWpHj0_1\tknife\ngbE0vzWpHj0_4\tknife\ngbGl_-TnPjk_0\tbird\ngbI95ZXEUz0_0\tknife\ngbTTJah5oMw_0\telephant\ngbTTJah5oMw_2\telephant\ngbgbqiiEKVs_0\tgiraffe\ngcBaPcA_1_0_0\ttrain\ngcExbr9FO94_0\tgiraffe\ngcJ7XqXHPwM_0\telephant\ngcT_dy3neEk_8\tbicycle\ngcXhYL06Acs_5\tbicycle\ngcYBNx0fUg8_0\ttruck\ngchz9HDvVDk_0\ttrain\ngc80cGOHyKM_0\tknife\ngdCpPYwBVlY_0\tknife\ngdEBkAYaDPw_1\telephant\ngdELg0NrkdA_0\tdog\ngdvUXfsBMIk_0\ttrain\ngdzzJI7xjBg_0\ttrain\ngdzzJI7xjBg_1\ttrain\ngd2O-Z5dOIk_0\tairplane\ngd4r5aA8jeg_0\tbird\ngd4r5aA8jeg_1\tbird\ngeBwGOC-lX4_0\ttrain\ngeBwGOC-lX4_1\ttrain\ngeBwGOC-lX4_2\ttrain\ngeBwGOC-lX4_3\ttrain\ngeQCe6Cq5MU_1\telephant\ngeQCe6Cq5MU_2\telephant\ngeWChvEotKU_0\ttrain\ngefGPLN-abw_0\tperson\ngfGsOzQ7gto_0\tbear\ngfS7FJH6Vkk_0\tbear\ngfUC20NWtjU_0\tmotorcycle\ngfVlQhN0BBU_0\tbicycle\ngfuVNdXffSs_0\tairplane\ngf1mvdt9kbI_0\thorse\nggIyqAThI1g_0\tbird\ng
gPHtWoCcKs_3\tumbrella\nggTFLaNIJck_0\ttrain\nggVLptkmsys_0\ttruck\nggpz03j1REI_0\tbus\ngg3sG7O2P-g_0\tbus\nghEfyxUaVGs_1\tcat\nghIGC_DOfuk_0\thorse\nghqqgJWnVEU_0\tknife\nghyp-SKVuC8_0\tmotorcycle\ngiVGzMF1Yo4_0\tskateboard\ngiVGzMF1Yo4_1\tskateboard\ngipHWMPB-W4_3\tbear\ngipHWMPB-W4_1\tbear\ngitOEvGnoYk_0\tairplane\ngi9bnW7uLkE_0\tcat\ngjGlUXCT9A4_1\tknife\ngjK5A6cIEnw_0\tdog\ngjRhqzTAkWw_0\tcow\nIJFaomtLVDE_0\tcat\nIJNUwvacbKY_0\tcow\nIJVUMGoBSQs_4\tcow\nIJXVtb2GeJ4_0\ttrain\nIJdYiBYP31A_0\tmotorcycle\nIJlBmhH72m4_1\tcow\nIJ6g4ZRBksE_0\tcat\nIKLj0LJIMKs_4\tairplane\nIKLj0LJIMKs_5\tairplane\nIKLj0LJIMKs_2\tairplane\nIKftyV_zwkE_0\tskateboard\nIKqmWAu3GF0_0\tdog\nIK7Mnvty4VY_0\tperson\nIK8IJWsxg3M_5\tairplane\nIK8IJWsxg3M_6\tairplane\nILAGhYr9yts_1\tmotorcycle\nILLYlwlFTzA_0\telephant\nILmTjHZqkCo_1\ttruck\nILqxie6aqXg_0\tbicycle\nILqxie6aqXg_1\tbicycle\nILqxie6aqXg_2\tbicycle\nIL1HokSKOyY_0\tcat\nIL9r35lU8So_0\tskateboard\nIMD3U_DzO3E_0\tmotorcycle\nIMD3U_DzO3E_1\tmotorcycle\nIMde-053G78_0\thorse\nIMulJdQXZvM_0\ttrain\nIM7vwh5qua4_0\tcow\nIM8dlwNTjXU_0\tcow\nIM8v82x7ovA_2\ttrain\nIM8v82x7ovA_1\ttrain\nINFs2lfikXE_1\tknife\nINULdzdrdys_0\thorse\nINXkuJ9WvIU_0\ttrain\nINZhGblywrk_0\tbus\nINkhg9y4asY_0\tbear\nINtj4nfjRA0_1\tbear\nIN2TGHJrQEg_2\tskateboard\nIN2TGHJrQEg_0\tskateboard\nIN2TGHJrQEg_1\tskateboard\nIOPYEZzmeqg_0\tcar\nIOPYEZzmeqg_1\tcar\nIOQuWawPM3k_0\tbird\nIOfUvlEkN7g_0\tbus\nIOiqrNof90k_1\tknife\nIO3Z-ebx_f8_5\tbus\nIPI2_GXx1tI_0\tbird\nIPWixEFBDOY_0\thorse\nIPfYf-nFKic_0\tairplane\nIPfYf-nFKic_1\tairplane\nIP1CH8MMir0_0\tknife\nIQOfCy4FW8w_0\tskateboard\nIQXAYnslAnc_0\tcar\nIQoVuUTZILY_0\tairplane\nIQsV_hTCyMA_1\tbicycle\nIQwk7Ge6Apk_0\ttruck\nIRK6-ixyaVI_0\telephant\nIRSbjN-mnJI_0\tskateboard\nIRZBnQJoKiU_0\tskateboard\nIRztQZ4bigY_0\tcar\nIR9A3u83crI_4\telephant\nIR-PGdIPgcE_0\tskateboard\nISAnMprDgCk_0\tskateboard\nISJW4GuahWg_2\tdog\nISSTEs8xDWk_0\tumbrella\nISYwpUKxHJU_1\telephant\nISYwpUKxHJU_2\telephant\nISYwpUKxHJU_0\telephant\nISud5E9hZxU_0\ttrain\nISud5E9hZxU_1\ttrain\nIS9s3kJzTcA_0\tairplane\nITCcMWC_RW8_0\tumbrella\nITbwhPVxFv0_0\tumbrella\nITrisbHlaJw_1\ttruck\nITzBy7T7_fI_1\tumbrella\nIT6TArZww6A_0\tcat\nIT8VqGbdH_A_0\thorse\nIT_zQ44PPOo_0\tdog\nIUH4PYmObvU_0\tdog\nIUO1sDZgGHs_0\tbird\nIUdyfRMOyX8_0\telephant\nIUdyfRMOyX8_8\telephant\nIUdyfRMOyX8_1\telephant\nIUdyfRMOyX8_2\telephant\nIUdyfRMOyX8_3\telephant\nIUdyfRMOyX8_4\telephant\nIUdyfRMOyX8_5\telephant\nIUdyfRMOyX8_6\telephant\nIUdyfRMOyX8_7\telephant\nIUf7a2WuoBw_0\ttrain\nIUgkMOA3siY_1\tbus\nIUlDlS2KD-k_0\tbicycle\nIUlDlS2KD-k_1\tbicycle\nIUzpvnXep7M_0\tbear\nIU7x7I53cng_0\telephant\nIVFq204Rr9c_0\tairplane\nIVHx3I13xdQ_0\tboat\nIVSJSu0PlsI_0\ttrain\nIVVFeaTw6IE_0\tbicycle\nIVjCZS2Fo7k_0\tbird\nIVpmCnL5cE8_1\tgiraffe\nIVrBPzhFMi8_1\tmotorcycle\nIVrBPzhFMi8_2\tmotorcycle\nIVzxeeJEtiY_1\tbear\nIV6EMw4XYco_0\tskateboard\nIV6EMw4XYco_1\tskateboard\nIWCZ1PDW99k_0\tmotorcycle\nIWVIIKxipc8_0\tmotorcycle\nIWVIIKxipc8_1\tmotorcycle\nIWn16DCfLbc_1\tknife\nIWumeAEXWVo_1\tboat\nIWu47p4l06Y_5\tumbrella\nIWu47p4l06Y_6\tumbrella\nIWu47p4l06Y_3\tumbrella\nIWu47p4l06Y_4\tumbrella\nIW1cFMDjPUk_0\tbear\nIW2mFJ8iw6Y_0\tbird\nIW4ZnmQeNtA_1\telephant\nIW4g0kfA3GE_0\ttruck\nIW5Vgh3SE-I_4\telephant\nIW7TwQ-hY7I_0\tmotorcycle\nIW7TwQ-hY7I_1\tmotorcycle\nIXTgztKfRQU_0\tskateboard\nIXVCCLG3_cw_0\tbird\nIXyV2vpIEA8_0\tdog\nIXyV2vpIEA8_2\tdog\ngja4H3sGrqQ_0\tcar\ngjdlZhmnGbk_0\tairplane\ngjfdI7hO92E_0\tbird\ngjquLAxFRWw_2\tumbrella\ngjx4xu1TyWU_1\tcow\ngj7W2zjQApw_3\tknife\ngkEoTLpAw7g_0\tairplane\ngkLRnt1OCH4_7\thorse\ngkRqNmGQbPI_0\t
skateboard\ngkXKCuc0Moc_0\tskateboard\ngkXKCuc0Moc_1\tskateboard\ngkb4Ya5QW9M_0\tbird\ngkb4Ya5QW9M_1\tbird\ngkf0Bcsuhlc_1\tcar\ngkf0Bcsuhlc_3\tcar\ngkf0Bcsuhlc_4\tcar\ngkiUpdrObXo_1\telephant\ngkz49y5qcvc_0\thorse\ngkz-LCZcGtc_5\tbird\ngk1x_qYyDl4_0\tcat\nglNWqIolkq8_0\tskateboard\nglOskJOtnTU_0\tknife\nglOskJOtnTU_2\tknife\nglSdaND81E8_0\tperson\ngltHxIp_ma8_0\tbird\ngmCT9tUPTB4_1\tgiraffe\ngmdxOMQMgnw_0\tairplane\ngmnvPoB2cNY_0\tmotorcycle\ngm53_sbr85Q_1\tbird\ngm53_sbr85Q_2\tbird\ngm9M-m4mCZ4_0\tcar\ngm9M-m4mCZ4_2\tcar\ngnA9QVNkmTU_1\tknife\ngnD6mU9A2oo_0\telephant\ngnEttGTQqQ4_1\ttrain\ngnEttGTQqQ4_0\ttrain\ngnF9YJM1jaE_1\tcow\ngnGvXHS4UDs_0\tairplane\ngnM9SRiFh7M_0\ttruck\ngnM9SRiFh7M_1\ttruck\ngnPrHGB85WY_0\tbus\ngnTj3krZROI_4\tboat\ngnVo44q-XDI_0\tknife\ngnb1N_MLdcY_2\telephant\ngnwCzU63_YY_0\tperson\ngn2XuCFK-hE_0\ttruck\ngn2bME2rmGw_0\ttruck\ngoIfg0C9kmM_0\tdog\ngoOIZE0j6DM_0\tbicycle\ngoSyNORcJ00_0\tairplane\ngok9kHQ77dY_0\tskateboard\ngollBTymf8I_1\tbus\ngomnpeJd5zw_0\tboat\ngonzAOezSOQ_1\ttrain\ngonzAOezSOQ_2\ttrain\ngosq350N9dI_2\tskateboard\ngoyIWrU1Lbo_0\tcat\ngpBoXY6MM5E_0\tdog\ngpEiPRMcPwo_4\tbear\ngpY-o8xPA3w_0\tbicycle\ngpa4WfWCLa0_1\telephant\ngpa4WfWCLa0_0\telephant\ngpa9p4XNeKc_3\tbear\ngpbdiDEPd-s_0\tskateboard\ngpjqG97-SyQ_0\thorse\ngpmdLMUX53k_0\tbear\ngp2SDJHMADo_3\thorse\ngp2SDJHMADo_0\thorse\ngp2SDJHMADo_2\thorse\ngp9q0jvTKo0_0\tbird\ngqNgT7LxZSQ_1\tbus\ngqOfm9XTr6M_3\tairplane\ngqOfm9XTr6M_0\tairplane\ngqOfm9XTr6M_1\tairplane\ngqOfm9XTr6M_2\tairplane\ngqbDkeOx0mA_0\tmotorcycle\ngqgQpw4DWZA_0\tgiraffe\ngqhweewmNn8_0\tskateboard\ngqkLzCkKKtE_0\tskateboard\ngqucExXpPys_0\tcar\ngqxvRzuWcrI_0\tbird\ngrBVFo1wSjs_1\tbird\ngrFPTYaKb7Q_0\tbus\ngrI0uf6IwBw_0\tbear\ngrNkPqf-ySE_0\tdog\ngrWw42izM6M_1\ttrain\ngrWw42izM6M_2\ttrain\ngrWw42izM6M_0\ttrain\ngrbP7mKMX_A_5\tairplane\ngrbP7mKMX_A_1\tairplane\ngrbP7mKMX_A_4\tairplane\ngrdEE264TwM_0\tmotorcycle\ngrdIYaNewv0_0\tmotorcycle\ngrhIgcHgpOw_0\tbus\ngsCvhqZCWX0_0\tdog\ngsUrGSN-k00_0\thorse\ngsbJ13WiSvE_1\thorse\ngsfIYIQ1siA_0\tskateboard\ngsvn88OsH_8_3\tknife\ngsv7RJk7dtY_0\tdog\ngs_C12A8Wq4_1\tbicycle\ngtFIMtVrAGk_0\tbicycle\ngtNJSexRjxE_0\tcar\ngtNdVTTd0tg_0\tbicycle\ngtNdVTTd0tg_2\tbicycle\ngtOa6rSatLA_0\tcow\ngtQ_uFTKEck_1\thorse\ngtii5vwjSTY_1\tdog\ngtuj1cOmYSs_1\ttrain\ngtuj1cOmYSs_3\ttrain\ngtz5ClHTSVo_0\tcat\ngtz5ClHTSVo_1\tcat\ngt_WHCkauOA_1\tknife\nguVl_gp0sJE_0\tbus\ngugP5f2JRJ0_1\tbear\ngugP5f2JRJ0_0\tbear\nguh1OUkdIGE_0\thorse\nguktzkv1els_0\tboat\nguv5reh2NH4_0\tboat\nguxRXiegac0_4\tbird\ngvNxDnFriAI_0\tskateboard\ngvcioONBIcE_0\ttrain\ngviQTbs7dIk_1\tbird\ngvjcggbLXRo_0\telephant\ngvjcggbLXRo_1\telephant\ngvjcggbLXRo_2\telephant\ngvk0hzlYu9E_0\tumbrella\ngvraCN0RYko_0\tdog\ngvtY3fwbgdc_0\tcow\ngvuBfR3HXac_0\telephant\ngv4sQFTuJ-k_0\telephant\ngv7qY66lOhs_0\tgiraffe\ngv8pF9t1zYM_0\telephant\ngwKq56_M6Kc_0\thorse\ngwN_p_IRuoo_0\thorse\ngwP-6gOPn2c_0\tmotorcycle\ngwTc-69C_P4_0\tknife\ngwTyjJwBgRk_0\thorse\ngwy7eePYryM_1\tboat\ngw9MjutMhLs_1\tairplane\ngw9MjutMhLs_3\tairplane\ngw9MjutMhLs_0\tairplane\ngw9MjutMhLs_2\tairplane\ngxKnyBP8_cs_0\telephant\ngxejG9D0guY_1\tperson\ngxgZg6BU3ds_0\tdog\ngxgZg6BU3ds_1\tdog\nIX4HjI_9vLY_2\tdog\nIX4IwgbTdCk_0\tdog\nIYBF45M9nTc_0\tskateboard\nIYBzvotFEYo_0\tmotorcycle\nIYZZ-K_Ygpo_0\tbicycle\nIYdXz1cOCWc_0\tgiraffe\nIYukRQKxhFI_0\tperson\nIYukRQKxhFI_1\tmotorcycle\nIZESZPVT0zk_3\tbear\nIZGady38Nh8_0\tbird\nIZIPpBl_h0Q_5\ttruck\nIZIPpBl_h0Q_0\ttruck\nIZIPpBl_h0Q_6\ttruck\nIZJ1PO3Fkuw_0\tumbrella\nIZLMXYU4A-0_0\tairplane\nIZLMXYU4A-0_2\tairplane\nIZTfd31H0AI_0\tbicycle\nIZUO1x0QT1I
_1\telephant\nIZ2nFUgP-Pw_1\telephant\nIZ2nFUgP-Pw_5\telephant\nIZ2nFUgP-Pw_6\telephant\nIZ2nFUgP-Pw_3\telephant\nIZ2nFUgP-Pw_4\telephant\nIaAPZOFgclo_1\telephant\nIaG7siKVlak_0\tgiraffe\nIaxZJVx5ptw_0\ttruck\nIaxZJVx5ptw_1\ttruck\nIaxZJVx5ptw_2\ttruck\nIaxZJVx5ptw_3\ttruck\nIa0DjYXcBWc_8\telephant\nIa0DjYXcBWc_4\telephant\nIa0DjYXcBWc_5\telephant\nIa0DjYXcBWc_6\telephant\nIa0DjYXcBWc_9\telephant\nIa0DjYXcBWc_10\telephant\nIa0DjYXcBWc_11\telephant\nIa0DjYXcBWc_12\telephant\nIa0DjYXcBWc_13\telephant\nIa0DjYXcBWc_14\telephant\nIa0DjYXcBWc_16\telephant\nIa0DjYXcBWc_18\telephant\nIa0DjYXcBWc_19\telephant\nIbEpwiOUFEI_0\tdog\nIb15GlTvqTQ_2\tskateboard\nIb2u6u-j2vk_0\tskateboard\nIcEs4vbIcDM_0\tumbrella\nIcSumCpVOy0_0\tskateboard\nIcZ2D-MawSg_0\ttruck\nIciJuq7ZY6o_0\telephant\nIckUkdfRndY_1\tknife\nIc1cufihs-0_0\telephant\nIc1cufihs-0_1\telephant\nIdSlvHXTrmE_1\tskateboard\nIdXPNOQD97w_0\tmotorcycle\nIdabN3kTjSk_0\tskateboard\nIdrTVVio1U4_0\tdog\nIdvQme2elLk_1\ttruck\nIdvQme2elLk_2\ttruck\nIdvQme2elLk_3\ttruck\nId6HsaEvZ0k_0\tperson\nIeB4Nf3h7T4_0\tbus\nIeENvG3Qtk0_5\telephant\nIeFUkGY1b4Y_4\telephant\nIeXb8CHr4ms_0\ttrain\nIefPtlA5ebA_0\tmotorcycle\nIehTemq8EYc_27\tbicycle\nIehTemq8EYc_28\tbicycle\nIehTemq8EYc_0\tbicycle\nIehTemq8EYc_6\tbicycle\nIehTemq8EYc_11\tbicycle\nIehTemq8EYc_15\tbicycle\nIehTemq8EYc_17\tbicycle\nIehTemq8EYc_19\tbicycle\nIejh8w6egIA_0\tumbrella\nIek9nAfsymA_0\tbus\nIewJcdqOzCY_0\ttrain\nIewJcdqOzCY_1\ttrain\nIe4Ct_HRDNw_1\tdog\nIe5lfGQndBs_0\tairplane\nIe8dc7EO7VI_0\tbicycle\nIe8dc7EO7VI_1\tbicycle\nIe8dc7EO7VI_2\tbicycle\nIfBft2ltqqE_0\tskateboard\nIfBft2ltqqE_1\tskateboard\nIfFnkz6EUno_1\thorse\nIfGZXa16ZnQ_0\tknife\nIfTrYE-Ox50_0\tcat\nIfZDLHBP_qk_0\tbus\nIfpbe7xlKp4_0\ttruck\nIf4WPZY4LIY_0\telephant\nIf8EotoXQVQ_1\ttruck\nIgO9_kN8D5I_0\tcat\nIgRs6nmhv2w_0\tcat\nIge9Idj8fDw_3\tcow\nIge9Idj8fDw_2\tcow\nIg0Luv6UlkE_1\tbicycle\nIg1JdzucmLI_0\tboat\nIg9jZPM0n2A_0\tcar\nIhR6ePM1wRw_0\tbird\nIhXAXy3VAqA_0\tcat\nIhdGvFfk3Ks_0\tbird\nIhlRPxknT9E_1\tmotorcycle\nIhp3YZGcRjM_0\thorse\nIhp3YZGcRjM_1\thorse\nIhsr3gT-u00_0\tcow\nIiBzrow5m9w_0\tcat\nIiH0f7VOXTY_4\tairplane\nIiH0f7VOXTY_2\tairplane\nIiH0f7VOXTY_3\tairplane\nIie6uM_sdLE_2\ttruck\nIie6uM_sdLE_5\ttruck\nIiscR53FEz0_1\tairplane\nIiy_W2tIOWI_0\tboat\nIi9URMIXJjc_0\tdog\nIjHqTBt-tzY_0\thorse\nIjMLYR0bH6g_0\tcow\nIjf2ZMTxDUs_0\tcow\nIj57BoIbMws_0\ttrain\nIkOG3ZnCvY4_0\tcow\nIkVifrtYlcI_0\tskateboard\nIklc7ijgOtA_0\thorse\nIkl-nlqwUJA_2\ttrain\nIkqFTjEXf4g_0\tmotorcycle\nIkrKcORoFLI_0\tcat\nIk4UxtlIrw0_5\tairplane\nIk4UxtlIrw0_13\tairplane\nIlJT6oek8KQ_0\tdog\nIlNV-gFlp3Q_0\tumbrella\nIlshSY2CGU0_0\tcow\nIl1TKTSRPO4_0\ttrain\nIl7GtfxmBlQ_0\tskateboard\nImCV4d0kYxY_0\tskateboard\nImEKl15Aipo_2\tbear\nImOLHl6gwLE_0\tgiraffe\nImO7oG_YuSU_0\ttrain\ngyBWGyhFuWg_0\telephant\ngyBWGyhFuWg_1\telephant\ngyBWGyhFuWg_2\telephant\ngyBWGyhFuWg_4\telephant\ngyBWGyhFuWg_7\telephant\ngybSfaDRdVA_2\tairplane\ngy3zF39Y7B8_0\tairplane\ngzQrsNwx8MQ_1\ttruck\ngzTHA0tMocM_0\tbird\ngzUj7KfvRPY_0\ttrain\ngzVdw-5l3sY_5\tbear\ngzWwT4ufwFY_0\tbicycle\ngzwzd6nOPoI_3\tbear\ngz13nfjIblU_0\tskateboard\ng0Jq0uIY3i0_2\tknife\ng0LufqNJtss_1\telephant\ng0LufqNJtss_2\telephant\ng0SdZmm5Mm0_0\thorse\ng0W6U-p-T2c_0\thorse\ng0om0nrfC4w_5\tairplane\ng0om0nrfC4w_1\tairplane\ng0tXovGqqSE_0\tcow\ng0zcJWO1MbU_1\tairplane\ng02OQmAgfo4_0\ttrain\ng02OQmAgfo4_1\ttrain\ng04xUjb4z0w_0\tbicycle\ng05TJKB5TL0_0\telephant\ng05TJKB5TL0_1\telephant\ng05TJKB5TL0_2\telephant\ng1HtoWJ3NjA_0\tairplane\ng1UUBEfyzJ4_0\thorse\ng1UUBEfyzJ4_1\thorse\ng1ZtaoEqtjI_0\tbus\ng1j_9A4-PL4_0\tcow\ng
1n74kWqKFM_0\ttruck\ng1vq3JO3eH0_0\tskateboard\ng12DCVqfKjM_0\tairplane\ng13JzTyNCPY_0\ttruck\ng17hrSF1YN8_1\tbear\ng2Hh_97o7jY_0\tperson\ng2KeNy_WECo_0\tbus\ng2MUK80Ht8k_0\thorse\ng2MUK80Ht8k_1\thorse\ng2MUK80Ht8k_2\thorse\ng2MUK80Ht8k_3\thorse\ng2MUK80Ht8k_4\thorse\ng2cCr0rRIeo_0\tmotorcycle\ng2vRpfpQuNE_1\tmotorcycle\ng2-SNBvYdNc_3\tcar\ng2-SNBvYdNc_2\tcar\ng3DAFznLlXw_0\telephant\ng3e6vDSvpN4_0\tskateboard\ng3g7M2Xv3JY_0\tzebra\ng3ytRwjgoMI_2\thorse\ng3ytRwjgoMI_3\thorse\ng30xOR9j3_A_0\tskateboard\ng30xOR9j3_A_1\tskateboard\ng38MDXW9ndc_0\telephant\ng4BX8_C-NeQ_1\tdog\ng4KzjuhixSo_0\tmotorcycle\ng4KzjuhixSo_1\tmotorcycle\ng4R5jZXlnl4_0\ttruck\ng5OKbEXlegI_0\tcow\ng5SIvfoi7tE_2\tbird\ng5S-76eh6vs_0\tcar\ng5ztjA03q5k_0\thorse\ng55_MKVNAE8_0\tmotorcycle\ng57hZ17etp8_0\tskateboard\ng5-55T7AzUE_1\tskateboard\ng7Qk-cV3IFs_1\tcar\ng7YvJasRFj0_0\tskateboard\ng7fZhFRdYJs_3\tzebra\ng7oMLF6ZfT8_0\thorse\ng7oMLF6ZfT8_1\thorse\ng8aScpqmhVU_0\tumbrella\ng8iDSRkz_go_1\tboat\ng80ZYUNhRME_1\tdog\ng9Am-b3OqbI_0\ttruck\ng9RM9VSJPIY_0\tknife\ng9WrMIn5AkI_0\tskateboard\ng9sD-4RBa3Y_0\tmotorcycle\ng9uOEJm7wdw_0\telephant\ng9yESRreg5k_0\tboat\ng9zLmd4IZ78_0\tbus\ng91mK1sMiSI_1\telephant\ng9-6tclIBcc_0\tmotorcycle\ng-Dfzs3HQ8w_0\tboat\ng-EVS_QxLxA_0\thorse\ng-F4Eig_Rxc_0\tmotorcycle\ng-F4Eig_Rxc_2\tmotorcycle\ng-F4Eig_Rxc_1\tmotorcycle\ng-JTM0dCFFA_0\tcow\ng-SJXmYYHqI_0\ttruck\ng-SlOveVnAs_0\tcow\ng-Z7CA3qr1A_0\tskateboard\ng_B2r70EsjY_2\thorse\ng_XW0YLzND0_0\tmotorcycle\ng_XW0YLzND0_1\tmotorcycle\ng_XW0YLzND0_2\tmotorcycle\ng_dN59QhubM_0\tperson\ng_jq8Uy4P2s_0\ttruck\nhAHsYyTOJoI_0\tcow\nhAIBcR5MAVE_0\tboat\nhAUD4Cy2GiM_0\tcow\nhAUD4Cy2GiM_6\tcow\nhAUD4Cy2GiM_1\tcow\nhAUD4Cy2GiM_3\tcow\nhAUD4Cy2GiM_4\tcow\nhAUD4Cy2GiM_5\tcow\nhAVbFSsRfOY_0\tairplane\nhAcx9u12Rd0_1\tcat\nImZcOQCdJng_0\tskateboard\nImiKNikVSsM_0\thorse\nImqKWexMOEA_0\tbird\nImuhe4E1pxo_0\tcar\nImy4SpqoC4k_0\tcat\nIm3ooIguQHk_4\ttrain\nIm3ooIguQHk_5\ttrain\nIm3ooIguQHk_6\ttrain\nIm3ooIguQHk_0\ttrain\nInEZSi4Zz08_4\ttrain\nInEZSi4Zz08_1\ttrain\nInEZSi4Zz08_2\ttrain\nInTq6s23Ygc_0\tcat\nInn1lo0hbX0_0\ttrain\nInvv-JPzV-0_0\telephant\nIn_dBFPRoso_1\tairplane\nIobdPoAtEB0_0\tbear\nIoqRCAQzibw_1\tskateboard\nIoqRrAswOwY_5\tbear\nIoqRrAswOwY_4\tbear\nIo5wOOkpkdE_0\tskateboard\nIpOFHasyloc_0\tcat\nIpQQ9QabgiU_0\tbear\nIpVCKTRou10_1\ttruck\nIpVCKTRou10_3\ttruck\nIpVCKTRou10_4\ttruck\nIpYZmcVrqdQ_0\tcow\nIpnTUQCHioc_0\tgiraffe\nIp0c_3xCHRA_2\thorse\nIp-N_PYIqhA_0\tmotorcycle\nIqv9963BN8w_0\tcat\nIrAxUS0aBTQ_0\tbird\nIrDY9nE1V2I_1\tmotorcycle\nIr4CkmTmSXQ_0\tcow\nIsSCjgdAQiE_0\tdog\nIslqPZDUBHI_0\tmotorcycle\nIssSzh7Z-vo_0\tumbrella\nIsxRqs7KcbQ_0\tcat\nIs2E8gFNBWo_4\tbear\nIs2E8gFNBWo_1\tbear\nItL-C-szpU8_0\ttruck\nItiAXqRQm3A_1\tknife\nItkxwET4PNc_0\tdog\nItzhXkBVmEY_0\tcar\nItzlBA8cl3c_2\tairplane\nIt_dJluX63g_0\tcat\nIuN_risviek_0\tgiraffe\nIuZ6JD-k2nM_0\tdog\nIuk4W5KJbQ8_0\tbus\nIuwJz5d-8J4_0\tumbrella\nIuw0f-Y8t6I_0\tairplane\nIuw0f-Y8t6I_1\tairplane\nIu26NyEUoGY_2\tboat\nIu5GqI9oVnk_0\tmotorcycle\nIvJLhgaveaw_0\tskateboard\nIvMiQ2e-5hQ_0\tbus\nIvMiQ2e-5hQ_1\tbus\nIvX1MeQN-e0_0\tcat\nIvZSk33MtAc_0\tmotorcycle\nIvZqPTK9DEQ_0\tmotorcycle\nIvZqPTK9DEQ_4\tmotorcycle\nIvZqPTK9DEQ_3\tmotorcycle\nIvfWyYn_ifg_0\telephant\nIvjNeTpV6hs_0\thorse\nIvyftS2bPuo_10\tairplane\nIvyftS2bPuo_0\tairplane\nIvyftS2bPuo_2\tairplane\nIvyftS2bPuo_6\tairplane\nIvyftS2bPuo_7\tairplane\nIvyftS2bPuo_9\tairplane\nIwcC1J_ImAs_0\tcar\nIwd7i4kvS5c_0\tcar\nIwfpNUPSvpw_0\tmotorcycle\nIwgX5DfmIQo_0\tbicycle\nIwhf27USDD4_0\tmotorcycle\nIwmwVP_e5Ag_0\tbus\nIwxmbUX4fcg_0\tcow\nIw6-0LYvEmQ
_0\tbird\nIw6-0LYvEmQ_1\tbird\nIw7zBsW9W5Y_0\ttrain\nIxLbLqfxrhg_1\tboat\nIxNA0hdkWGg_0\tcow\nIxObyCZ6OfY_4\tgiraffe\nIx8eS24W75g_4\tairplane\nIx8eS24W75g_5\tairplane\nIx8eS24W75g_0\tairplane\nIx8eS24W75g_1\tairplane\nIx8eS24W75g_2\tairplane\nIx8eS24W75g_3\tairplane\nIyNmzxdv8-Q_0\tbird\nIyQlh0wdd9I_8\tboat\nIyQlh0wdd9I_7\tboat\nIyU3NizvZuM_0\thorse\nIyU3NizvZuM_5\thorse\nIyj9D6cwI5o_0\tbicycle\nIyk9-k1RP-M_0\tcar\nIyk9-k1RP-M_1\tcar\nIyk9-k1RP-M_2\tcar\nIys_rL0bPcc_4\tboat\nIy4SrujSLuQ_1\telephant\nIy4SrujSLuQ_5\telephant\nIy4SrujSLuQ_6\telephant\nIy4SrujSLuQ_3\telephant\nIzC8vjFriRE_0\thorse\nIzPS29ghTxo_0\tknife\nIzQjjBqimYw_5\telephant\nIzQjjBqimYw_10\telephant\nIzQjjBqimYw_11\telephant\nIzQjjBqimYw_0\telephant\nIzQjjBqimYw_1\telephant\nIzQjjBqimYw_2\telephant\nIzQjjBqimYw_3\telephant\nIzQjjBqimYw_4\telephant\nIzQjjBqimYw_6\telephant\nIzQjjBqimYw_7\telephant\nIzQjjBqimYw_9\telephant\nIz4_9EtiVXc_0\tmotorcycle\nIz8cco4VLow_0\tcow\nIz8gKIZcfqo_0\tbear\nIz8uzZuBiXs_0\tbird\nI0eY-kKi2FM_0\tumbrella\nI0iEaW1Qg_o_1\tbear\nI0oVkr613Rw_0\tskateboard\nI0voLEPKkG8_0\thorse\nI0voLEPKkG8_1\thorse\nI0voLEPKkG8_2\thorse\nI0voLEPKkG8_3\thorse\nI0voLEPKkG8_4\thorse\nhAplCSSZqAs_0\tairplane\nhAplCSSZqAs_1\tairplane\nhAteY2rkmVg_8\tbus\nhAteY2rkmVg_1\tbus\nhAuFEp75jVo_0\ttrain\nhAzefhyFMN4_0\ttruck\nhAzsdnh5Iq8_0\telephant\nhA_YzyjSVZM_0\tbicycle\nhBDc0K6CvHg_0\tbus\nhBDvdp2RCCw_1\tairplane\nhBDvdp2RCCw_2\tairplane\nhBDvdp2RCCw_3\tairplane\nhBDvdp2RCCw_5\tairplane\nhBDvdp2RCCw_7\tairplane\nhBKuHV_S8lM_0\tskateboard\nhBOhA_sljfE_0\tumbrella\nhBcYx5Uc-vw_0\tcar\nhBcZZeXsCaw_0\tbicycle\nhBgxILtRUIc_0\tcat\nhB23PCerELA_0\tskateboard\nhB-M9w3C_Tw_0\tboat\nhCB731pKdcg_1\ttrain\nhChqLLLAmF4_1\tbird\nhChqLLLAmF4_0\tbird\nhCkn4pJxSkk_0\tbear\nhCrrYhe3x9Q_0\ttrain\nhCsCAXkiQ4Y_2\ttrain\nhCynNRrrTKI_0\tcow\nhC5Wac-AzgM_1\telephant\nhC5Wac-AzgM_2\telephant\nhC5Wac-AzgM_3\telephant\nhC5augWtBcQ_1\tbicycle\nhDAv3aPvZjc_1\ttruck\nhDLAKS4hCfc_0\tcar\nhDM4sCvlRoA_1\tairplane\nhDVx_yYysaA_0\tcow\nhDXpSU7bq44_0\tbicycle\nhDYV-Vz3xwA_1\tdog\nhEAQZIsaIew_1\ttrain\nhERCXzHI2nA_1\telephant\nhERyFpl4aDk_0\tdog\nhEWJZ4dCcIY_2\tcow\nhEZt4InN7Eo_0\telephant\nhEZt4InN7Eo_1\telephant\nhEZt4InN7Eo_2\telephant\nhEdpC8HEa-A_0\tmotorcycle\nhE04tUrJzXo_0\ttruck\nhE7N0N5vik0_0\tbird\nhE7N0N5vik0_1\tbird\nhE-VIrAVcBA_2\tbus\nhFFrC0_rJYA_0\tairplane\nhFTTcrUxPeg_0\tcow\nhFTTcrUxPeg_3\tcow\nhFdi9yxVkys_0\tmotorcycle\nhFdi9yxVkys_1\tmotorcycle\nhFixbos35O4_0\ttruck\nhFnKIVp-Dcc_0\tcow\nhFnKIVp-Dcc_1\tcow\nhFzR4bgxihU_0\tbicycle\nhGH72iljdzU_0\tbear\nhGRdOlSIQRU_1\ttrain\nhGRdOlSIQRU_2\ttrain\nhGRdOlSIQRU_0\ttrain\nhGiCVP3Z8l0_0\tumbrella\nhG6vW_xUZgA_0\ttrain\nhG6vW_xUZgA_1\ttrain\nhG959XPTh_8_0\tbear\nhG-quo0MZM8_0\telephant\nhG-quo0MZM8_1\telephant\nhHEIEEdrXYE_0\tcow\nhHIyy4Vda6M_0\tcat\nhHjzciM78AA_0\tcow\nhHtOM5_wiWM_0\ttruck\nhHtqPiAg32Q_0\tumbrella\nhH_akvS98jo_0\tskateboard\nhIH6LuoXbpE_0\tcat\nhIXTbG6ho4E_0\tperson\nhIXTbG6ho4E_1\tperson\nhIXTbG6ho4E_2\tperson\nhIz3ONvP-Bo_0\tzebra\nhI3P4BxIr-o_0\tbear\nhI3eGFKYRuc_1\thorse\nhJP8qg-kSZA_0\tcow\nhJTl4NJ0qIs_0\tperson\nhJhBQsD0_hw_0\tbus\nhJkgoq_T4Pk_0\ttrain\nhJmxsYAKHdc_0\tumbrella\nhJtloiw4D-M_0\tcar\nhJ_uvoDrzkI_0\tgiraffe\nhKJQH8VbGk4_0\tairplane\nhKJQH8VbGk4_1\tairplane\nhKYJZqP-44M_0\tairplane\nhKYJZqP-44M_1\tairplane\nhKgtNPTirdc_2\telephant\nhKgtNPTirdc_3\telephant\nhKlKPyuUYps_0\tbus\nhKtHZYDaoXA_1\ttrain\nhKtHZYDaoXA_2\ttrain\nhKtHZYDaoXA_3\ttrain\nhKtHZYDaoXA_0\ttrain\nhK6w0B1cu-I_0\tcow\nhK7VoN3cI74_0\tcat\nhLGnjjoilbo_0\tskateboard\nhLHaPstpghQ_0\tmotorcycle\nhLKzDOp8XLc_
1\tzebra\nhLNcuJAwfDo_0\tcow\nhLVZsqfElxI_0\tdog\nhLX1LeVKgi8_0\tcat\nhLjDO37EQ60_2\tdog\nhLjDO37EQ60_1\tdog\nhLscdjfkeho_0\tcow\nhLte0Y4VWR0_0\tknife\nhL_QAgWBkJ4_0\tcow\nhL_noZA6D8E_0\ttruck\nhMGVdq71lME_1\thorse\nhMLkMrqUtA0_0\thorse\nhMRIDt-1dY4_0\ttrain\nhMgp2oyTB80_0\tcow\nhMjke9g_Ysw_0\thorse\nhMuO0MHPIOQ_0\telephant\nhMuO0MHPIOQ_1\telephant\nhMusKbJqZDY_0\tskateboard\nhNHGh8N1XGg_0\tknife\nhN_-56Oxma0_0\tdog\nhOJJ65CVNuM_0\tbird\nhOOwQSSrFVc_1\tcow\nhOid-qo2Ozw_0\tcow\nhOky3qIMxRY_0\tskateboard\nhOpJoO7UciM_1\tbicycle\nhOrAXl-jATo_0\tairplane\nhOxMkI1d3oc_1\tairplane\nhOxMkI1d3oc_3\tairplane\nhOxMkI1d3oc_4\tairplane\nhOxMkI1d3oc_6\tairplane\nhOxMkI1d3oc_7\tairplane\nhOxMkI1d3oc_9\tairplane\nhPEsz5u87CI_0\tbus\nhPIDFIwLI8c_0\tcar\nhPWhKQfDoXg_0\tairplane\nhPWhKQfDoXg_1\tairplane\nhPW2NpCU668_2\telephant\nhPW2NpCU668_0\telephant\nhPW2NpCU668_3\telephant\nhPW2NpCU668_5\telephant\nhPW2NpCU668_6\telephant\nhPW2NpCU668_7\telephant\nhPW2NpCU668_8\telephant\nhPa5hUze91s_0\telephant\nhPa5hUze91s_1\telephant\nhPb_Rq2yKRA_0\tcow\nhPo5Wd-otbY_0\tdog\nI0yz1LGLl08_0\telephant\nI1Ejpa2UWSk_1\tbird\nI1Pdo-p11tI_0\tmotorcycle\nI1Quuhyu2UI_1\tmotorcycle\nI1YfOiyQW_8_0\ttruck\nI1wfW86V8So_0\tdog\nI1wfW86V8So_2\tdog\nI14JWDgkllE_0\ttruck\nI14JWDgkllE_1\ttruck\nI19kQsgjFRA_0\tbicycle\nI2IfiPw2aKE_0\telephant\nI2OQlELjXvU_1\ttruck\nI2OQlELjXvU_2\ttruck\nI2OQlELjXvU_3\ttruck\nI2hmFe1pYes_0\thorse\nI2o_4OyrJlI_0\thorse\nI3DSZk-7nG8_0\ttrain\nI3JCCqGY3c8_0\ttruck\nI3KJj6GQ5QE_0\tcat\nI3OWw4AK0MI_0\tdog\nI330kG5lk5A_0\tknife\nI3-xBh-IrIo_0\tairplane\nI3_lU2I_AaU_0\tbicycle\nI4BMptNse7c_1\ttrain\nI4BMptNse7c_0\ttrain\nI4CMNv-VRDo_0\tbird\nI4Gi7kq5XAs_0\thorse\nI4HuQ8DDxoM_0\tskateboard\nI4WNAfBvm5E_1\tskateboard\nI4z-3IGHMW4_0\tdog\nI5KNdt1NT8g_0\tskateboard\nI5QNP3-QHLw_0\tcow\nI5SA8N1JKwM_0\tcat\nI5WNgPfoaZQ_2\tmotorcycle\nI5pU9zWz4Fg_0\tmotorcycle\nI6fJWB7DpAM_0\tbus\nI6oT6dLeq7A_0\tmotorcycle\nI6wEvIOC-Pk_0\ttrain\nI7GbkWE2A0M_0\tbus\nI7aUrrDieE4_0\tcow\nI7bKlZxD6Fs_0\tbicycle\nI7xOURJQUps_0\ttrain\nI7xOURJQUps_1\ttrain\nI7x_od8h4iw_0\tcow\nI7-iLB-NVGg_0\tdog\nI8FoWQrnHGY_0\tbird\nI8Ms0rXjfXU_0\tskateboard\nI8Ms0rXjfXU_1\tskateboard\nI8Ms0rXjfXU_2\tskateboard\nI8Qx-qd0eLg_0\tboat\nI8Qx-qd0eLg_1\tboat\nI8UlumMtAG8_0\thorse\nI8Vr0DzHV9U_0\tcow\nI8rww3UUjYI_0\tperson\nI9AGRokco_M_0\ttrain\nI9FPkgdc-5E_1\tcow\nI9XcFcBW-HM_0\tmotorcycle\nI9oAq_x5pqg_0\tbus\nI9yrFs_JpWc_1\tskateboard\nI94qZUJmKP8_1\tbicycle\nI94qZUJmKP8_2\tbicycle\nI-SRTsDkhLM_0\tcow\nI-TshjRdh74_1\tknife\nI-blRAakQjM_0\tboat\nI-h3cTJlsRc_0\tdog\nI-nb60BTO_g_0\ttrain\nI-raj-aLy8s_8\thorse\nI-ywD5MDZZ4_3\tcow\nI-ywD5MDZZ4_4\tcow\nI_LhSNsRHMs_0\telephant\nI_kI39ZHymk_0\thorse\nJAEzOCIew2Q_0\tairplane\nJAEzOCIew2Q_1\tairplane\nJAb3p7VYLzI_0\tbear\nJAb3p7VYLzI_1\tbear\nJAcHxxzG1vA_0\tmotorcycle\nJAf3nC1hYS4_0\tdog\nJAp2_UJfFao_0\tperson\nJAqAH7n-3lA_0\tbus\nJAzD-VzDxfc_2\tbicycle\nJAzD-VzDxfc_4\tbicycle\nJAzD-VzDxfc_5\tbicycle\nJAzD-VzDxfc_8\tbicycle\nJAzD-VzDxfc_11\tbicycle\nJAzD-VzDxfc_13\tbicycle\nJAzD-VzDxfc_17\tbicycle\nJAzD-VzDxfc_18\tbicycle\nJAzD-VzDxfc_19\tbicycle\nJA2PLZmRABc_1\tumbrella\nJBGewEMeWIs_1\tdog\nJBGewEMeWIs_5\tdog\nJBKG_tl08RU_0\tcow\nJBMhOrDLcho_0\tcat\nJBYr3VbJLoM_0\tperson\nJBkymGnh5mA_1\tbicycle\nJBkymGnh5mA_2\tbicycle\nJBkymGnh5mA_3\tbicycle\nJBkymGnh5mA_4\tbicycle\nJBlCFCV4sdw_0\thorse\nJBlCFCV4sdw_1\thorse\nJBxFgwl0To8_0\tcow\nJB0SELYSRXA_1\tbear\nJB-hzl-gILo_2\ttruck\nJCIJbwBevro_2\tbird\nJCSRBZQpYCw_1\tbear\nJCSRBZQpYCw_5\tbear\nJCTYAwT6ppk_0\tmotorcycle\nJCTYAwT6ppk_1\tmotorcycle\nJCTYAwT6ppk_2\tmotorcycle\nJCciDn0O
6X0_0\tairplane\nJChsfz-p2KI_0\tcat\nJCuE5X37xIE_3\tboat\nJCuE5X37xIE_4\tboat\nJDJWapHD_kM_0\tboat\nhP8Jfo1RaSk_0\telephant\nhP8Jfo1RaSk_1\telephant\nhP8Jfo1RaSk_2\telephant\nhQWcyTkfPeU_1\tdog\nhQZDg__nxQA_4\tbear\nhQZ5lNlAXBI_0\ttruck\nhQe3_1EvqIY_0\tcow\nhQfYabI9_ec_0\tbird\nhQkbXGwGwyg_0\tskateboard\nhQve0ugvy6s_0\tmotorcycle\nhRAbtgVJiWI_0\tbear\nhRJ0Qk_qdAY_0\tairplane\nhRS45wmOq9c_0\telephant\nhSR-ZVA-vMU_0\tdog\nhSWyYOzvh0g_0\tdog\nhSf3uEm8r9M_0\tbus\nhShwtMLieCc_0\tboat\nhSiozs1nz7o_1\tmotorcycle\nhSzgOCvRfq4_0\tbear\nhS-h8AUEibc_0\tcow\nhS-h8AUEibc_1\tcow\nhS-h8AUEibc_2\tcow\nhTHBMsKC5ZI_0\tcat\nhTZr7OF0VuY_5\tdog\nhUJMSp4rMrc_0\ttrain\nhU0EbblT2vQ_2\tairplane\nhU388mZGPGg_0\tcat\nhU9B31AVZNg_0\tbus\nhVJjOdU5-yQ_0\tcar\nhVNKN_qFEUA_0\tbicycle\nhVOImOLBY1g_0\tskateboard\nhVdb-Q3aJ9E_0\tdog\nhVhNOzZA40E_0\tcat\nhVnD8rlLRgM_0\tbird\nhVq6NOrBwlM_1\tmotorcycle\nhVsAAQqAHyI_1\tskateboard\nhWHUct-PLfY_1\tmotorcycle\nhWHUct-PLfY_0\tmotorcycle\nhWNyVxx4a94_0\tcat\nhWn0ddeHF0I_2\tzebra\nhXAQH1xVKB8_0\tcow\nhXWQ710-JZQ_0\tmotorcycle\nhXagj4A6N-s_1\telephant\nhXbMo03RQWk_0\ttrain\nhXflTk4WVAA_1\tbear\nhXf7dimd2bo_2\tcat\nhXhtGcCMf5Q_0\tairplane\nhXsCNMb3eTc_0\tbicycle\nhYD7HKMKa3k_0\telephant\nhYFW5XhMxyg_1\tknife\nhYIPy3eyC9k_0\tcat\nhYQBaiC8d6Y_0\thorse\nhYTIV5X87S4_0\thorse\nhYgzs0gDiiU_0\telephant\nhYkPL7spYMo_1\telephant\nhYlmhAuVVh8_0\tbird\nhYtFyx0799o_0\tboat\nhY0vkwEtjLM_1\tbear\nhZAOhuPJTho_0\thorse\nhZAXlQqCmCI_2\ttrain\nhZCGOP3PHOM_2\tknife\nhZHjTTvcQ88_2\tbicycle\nhZOhuOcxTP8_0\tskateboard\nhZPYHGzIYh0_0\tcow\nhZeekc0i_b8_0\tmotorcycle\nhZiXqP-WaQk_3\tbird\nhZiXqP-WaQk_0\tbird\nhZiXqP-WaQk_1\tbird\nhZiXqP-WaQk_2\tbird\nhZygBhv-nDg_0\tmotorcycle\nhaC0TZbvBEU_0\tcat\nhaMtzn-TnOQ_0\tboat\nhaTl-PeSssc_0\tdog\nhakWXvIYvzo_1\tdog\nhanKUxPHFbA_1\tcar\nhanKUxPHFbA_0\tcar\nhaxabA27SnU_0\thorse\nha3C2hPzaiw_0\tdog\nha8hX-68TqI_0\tbear\nha8hX-68TqI_2\tbear\nhbKjt5OBryI_0\ttruck\nhbKjt5OBryI_2\ttruck\nhbKjt5OBryI_1\ttruck\nhbfiyMHycSs_3\tknife\nhbvJ3t9lpUo_0\ttruck\nhcXtsyICD30_1\tskateboard\nhcpMT5qGQ0U_0\tbus\nhdZkNo0t6wg_0\tboat\nhdbKePdCemQ_0\tcow\nhdqiOcfXejc_1\tzebra\nhdwZF4C-vYs_0\tcow\nhd_yXL53Z9E_0\telephant\nheQRV9di86s_0\ttrain\nheTgOW6o1ho_0\tzebra\nhedgcDGNngs_0\tbicycle\nheucaATRtbI_0\tcat\nhe_j-GZdCNs_0\tperson\nhfCbKe627p0_0\tairplane\nhfEl_mnX9X4_0\tskateboard\nhfGEkaEADUw_0\tmotorcycle\nhfGEkaEADUw_1\tmotorcycle\nhfGEkaEADUw_2\tmotorcycle\nhfcKFLBuJ_g_1\tdog\nJDcAM9ieTp8_0\tbicycle\nJDe9ulv2Nmo_0\telephant\nJD_njBej6V0_0\ttruck\nJD_njBej6V0_2\ttruck\nJEU2rZzAxRU_0\tskateboard\nJEbIHUJTFsM_0\tairplane\nJEdl8GROiQM_0\ttruck\nJExlAUEYZwc_0\tcat\nJE8SV6FOlC0_0\ttruck\nJFH3n9kI6aA_0\tboat\nJFO_Qz1y8-s_4\telephant\nJFQ_GztsLs0_0\tcow\nJFQ_GztsLs0_3\tcow\nJFZG_ebR2mk_0\telephant\nJFZpmduYfv4_0\tmotorcycle\nJFfYNQ2FmHU_0\tcow\nJFk4Qyn58CY_0\ttrain\nJFvQ7wc6c0o_0\tairplane\nJGDf9kSc-v4_13\tdog\nJGDf9kSc-v4_15\tdog\nJGDf9kSc-v4_17\tdog\nJGDf9kSc-v4_19\tdog\nJGDf9kSc-v4_1\tdog\nJGDf9kSc-v4_2\tdog\nJGDf9kSc-v4_6\tdog\nJGGj1z6Kujc_0\tdog\nJGGj1z6Kujc_1\tdog\nJGMfEFj5PVM_1\ttruck\nJGWBjvjqVhw_4\tskateboard\nJGanm9yGTJk_0\ttruck\nJGanm9yGTJk_1\ttruck\nJGanm9yGTJk_2\ttruck\nJGmHpQtJzic_0\thorse\nJGn6Ifa5bWI_0\tbird\nJG0B4rV4KEI_0\tdog\nJG6H3R9rErg_8\tairplane\nJG6H3R9rErg_0\tairplane\nJG6H3R9rErg_1\tairplane\nJG6H3R9rErg_2\tairplane\nJG6H3R9rErg_3\tairplane\nJG6H3R9rErg_4\tairplane\nJG6H3R9rErg_5\tairplane\nJG6H3R9rErg_7\tairplane\nJG6sceNvlnI_3\tboat\nJG6sceNvlnI_2\tboat\nJG872iaucFc_0\tumbrella\nJHBhDpq4HNs_0\tcat\nJHBtawKoltc_0\tcar\nJHTt9PSzrhU_0\telephant\nJHb8IVsjgMs_1\tbus\nJHdc9jvf
4qA_0\tmotorcycle\nJHmG34eTWow_0\ttrain\nJHr57YE7IRs_1\tairplane\nJHy85i0So5U_1\tdog\nJH0Jzb0wOXw_3\telephant\nJH0Jzb0wOXw_4\telephant\nJH0Jzb0wOXw_2\telephant\nJISA50Bfj4U_0\tboat\nJIamGji7w9U_3\tbird\nJIiA0pG-MKk_0\tskateboard\nJI6MyG7aTvM_0\tbird\nJJSp2fu3lk8_4\tdog\nJJSp2fu3lk8_3\tdog\nJJq7YAYUado_0\tumbrella\nJJx7GdAuDQY_0\tskateboard\nJJyJR7TlQ7o_0\tmotorcycle\nJJ0Ja1ju2ec_1\thorse\nJJ0NBly53IU_0\tcat\nJJ8Vv2hiCCA_0\tcow\nJJ8Vv2hiCCA_1\tcow\nJKBJuICyV50_1\ttrain\nJKBJuICyV50_0\ttrain\nJKCFS8k_Qis_3\tbus\nJKGV5hbm5g8_0\tskateboard\nJKJQPHspLBs_0\tbird\nJKJQPHspLBs_1\tbird\nJKNRKGSvtsQ_0\telephant\nJKYPluJPL7c_0\tdog\nJKa7rPKrAwY_0\ttrain\nJKgPYc0K_hI_4\tcar\nJKgPYc0K_hI_1\tcar\nJKuhG9WLM2k_0\tairplane\nJK42K36SYLs_0\tbird\nJLE_jNuNoA0_0\tcow\nJLHP-3UxtMU_0\tboat\nJLb2dnuNhqs_0\tbus\nJLoS7DZH_ik_2\tairplane\nJLoS7DZH_ik_1\tairplane\nJLsEcZUU7FM_2\ttruck\nJL64rU6Jvmw_1\tgiraffe\nJL71b_9Cy9I_1\tumbrella\nJMDFSes_w0E_0\tcow\nJMMmrEdfRbk_0\tboat\nJMPKtdq9b0Y_0\ttrain\nJMR4IvE2sDo_0\tbus\nJMaahZTxRLk_6\tboat\nJMgbgNPBIJI_0\tbird\nJMnp6FLLbtw_0\thorse\nJMnp6FLLbtw_4\thorse\nJM1jSU4FEPw_2\tairplane\nJM4yr2pj-zg_0\tairplane\nJNDZBgXZBU8_0\tknife\nJNDZBgXZBU8_3\tknife\nJNDdt_ZPl1s_3\telephant\nJNNbk6jVfB4_0\tcat\nJNZDx8Ro_mM_0\ttruck\nJNe7ZednqQc_2\thorse\nJNkz_3Qtdfc_0\thorse\nJNnnm9ixKrM_3\tcar\nJNnnm9ixKrM_4\tcar\nJNnnm9ixKrM_5\tcar\nJNpuJeqVFxk_0\tmotorcycle\nJONF8-3gEoY_0\tgiraffe\nJONF8-3gEoY_1\tgiraffe\nJONF8-3gEoY_2\tgiraffe\nJObYghNlZas_6\ttrain\nJObYghNlZas_7\ttrain\nJOmeD6G33Dc_1\thorse\nJOoNVY1C6qI_0\ttrain\nJOoNVY1C6qI_2\ttrain\nJOqHfu-WVu8_2\thorse\nJOuB1UkVvKI_0\tairplane\nJOue8LphKc4_0\ttruck\nJOztmtwKz-k_0\tcow\nJPAjGBsi-rE_0\tbird\nJPMFXg-BXDE_2\tcar\nJPTFJk9f2nM_0\tdog\nJPevMGnX92M_0\tairplane\nJPiSmPAIpOI_0\tknife\nJPlZOEew4wg_0\telephant\nJPuDmwlAXzI_0\tskateboard\nJPwUpTvlZDA_0\tperson\nJPwUpTvlZDA_3\thorse\nJPw4R6t-0j4_1\tbird\nJQDX7gVR0qM_2\tknife\nJQDX7gVR0qM_0\tknife\nJQKDDMvCtt8_1\thorse\nJQNsIqNLn40_0\ttruck\nJQRxu6RVGMg_0\tcar\nhfwhbInEJAk_3\ttrain\nhfwhbInEJAk_2\ttrain\nhgFfz_RTcx4_0\ttruck\nhgFfz_RTcx4_1\ttruck\nhgxvhMjH_68_0\tmotorcycle\nhg6Z6JIwRMU_0\telephant\nhg6Z6JIwRMU_1\telephant\nhhM2TSF2GhA_1\thorse\nhhNlkY3SS6w_1\tbus\nhhYOJb0v5Yw_0\tcat\nhhlt4dfZmFE_0\thorse\nhhyzKC353Jo_1\tcar\nhiJ-OdPj_8c_0\tbird\nhiPDdAi1Qs8_0\tmotorcycle\nhiUH1zOfsfo_0\tcat\nhiZLv2E5zI8_0\telephant\nhjBLAHakI9c_0\tboat\nhjRlztwK-vg_2\tbicycle\nhjhbMbrRUWI_0\ttruck\nhj2P25O-nIk_0\tskateboard\nhkR10EU8YPI_0\ttrain\nhk0cDE4A_b0_0\tboat\nhk7M3PGcOhw_0\ttrain\nhk-IVoljyKE_0\telephant\nhk-IVoljyKE_1\telephant\nhlFPCpe8Akk_0\tairplane\nhlLrYrrOcY4_0\tdog\nhlNOQO4BIHg_0\ttrain\nhlnNVsSGjxA_3\tcar\nhlnNVsSGjxA_1\tcar\nhl4yLAJiWjQ_0\telephant\nhl7z1gnPPW0_0\tknife\nhl_YHwW5mrM_1\tbird\nhl_YHwW5mrM_0\tbird\nhmThCl2HK8E_0\tskateboard\nhmThCl2HK8E_1\tskateboard\nhmdH0Olcbx4_0\tbicycle\nhm98pilx9dE_5\thorse\nhm98pilx9dE_1\thorse\nhm98pilx9dE_2\thorse\nhm98pilx9dE_3\thorse\nhm98pilx9dE_4\thorse\nhnJ2wDmXD6w_1\tbicycle\nhnbZY12P-7g_1\telephant\nhne72NMSPuc_0\tbird\nhnffUBbBFoQ_1\thorse\nhnrSBT9miTE_1\tbird\nhnvbE27mWwI_2\ttrain\nhnvbE27mWwI_0\ttrain\nhn19XaR_wIs_0\tknife\nhn7ollCkAy4_5\tbicycle\nhn-1W1O8kZs_0\tboat\nhoLnPrkJ6sE_0\thorse\nhoLnPrkJ6sE_3\thorse\nhoNPAcq_5Ac_1\tbird\nhoNPAcq_5Ac_0\tbird\nhoYDTU50MTk_0\tcow\nhoe88GhFhq0_0\ttruck\nhomQXuwbe04_0\tcow\nhomx5sSuNr4_2\tbear\nhoozxxjd57c_1\tbus\nhotrXXenVAk_0\tcat\nho5YZstr1XE_1\tcow\nho7yo7nJk3o_1\telephant\nhpG2eG_hduA_0\tmotorcycle\nhpRxBuFhZ4M_0\ttrain\nhpRxBuFhZ4M_1\ttrain\nhpRxBuFhZ4M_2\ttrain\nhpRxBuFhZ4M_4\ttrain\nhpkXlhfYZfw_2\tmotorc
ycle\nhpkXlhfYZfw_1\tmotorcycle\nhpmC3OjLnZM_2\tboat\nhpmC3OjLnZM_0\tboat\nhpo-lwBTbFw_1\tdog\nhp3aTxzS9ms_0\tskateboard\nhqGhmP1u07Y_0\telephant\nhqoQm68UbGo_3\tairplane\nhqoQm68UbGo_2\tairplane\nhqsoIR9v8IY_0\tmotorcycle\nhq7f1_o4eFg_0\tairplane\nhrLkVz3_xGw_2\tbus\nhrW-pkK9osE_2\tbicycle\nhrW-pkK9osE_3\tbicycle\nhrgh69NXZqw_0\tcow\nhrj6I8n8nAc_0\tbicycle\nhrj6I8n8nAc_1\tbicycle\nhrrpTPwLZHA_0\tbird\nhrtiCeqnqLg_0\tcow\nhrziTee4b2c_0\tairplane\nhr5Q08OMeAU_0\ttrain\nhr7wUBMikww_0\tzebra\nhr7wUBMikww_1\tzebra\nhsMptx7tOLo_0\telephant\nhsMptx7tOLo_1\telephant\nhsMptx7tOLo_2\telephant\nhsM1eKbrqLs_0\tcat\nhsPK4wlNtI8_0\tcow\nhsYL355Fzio_0\ttruck\nhsfS5oT1y5M_2\tboat\nhskEM8GUmDE_2\ttrain\nhsmxUKxzapo_2\tskateboard\nhsmxUKxzapo_0\tskateboard\nhsyCfsJx7DI_2\tskateboard\nhsyCfsJx7DI_1\tskateboard\nhs2foQ_Xo8A_0\tskateboard\nhs-OEgnsLZs_0\ttrain\nhtDilkoPA-M_0\tairplane\nhtSBZwTBX98_0\thorse\nhteze9Fz1dc_0\tknife\nhtkybhLm0uk_0\tumbrella\nhtwBHgatd9c_2\thorse\nhtwBHgatd9c_3\thorse\nhtwBHgatd9c_0\thorse\nhuCxpuVT4GI_0\tdog\nhuDCqh-KRy4_8\tbicycle\nhuDCqh-KRy4_2\tbicycle\nhuDCqh-KRy4_3\tbicycle\nhuDCqh-KRy4_4\tbicycle\nJQnf7j7HpKY_0\tcow\nJQpJv-SOMS0_0\tdog\nJQ9LtiJVsd8_0\tcat\nJQ_dyIlBnGM_0\tcow\nJQ_6xcOuEfU_4\tcow\nJQ_6xcOuEfU_1\tcow\nJRA3LCwRGu0_0\tknife\nJRBLFsevgg0_0\ttrain\nJRJjI6mFa6s_1\tskateboard\nJRJnSf2qOXA_0\tairplane\nJRT0FH2KEsc_0\tcow\nJRcTFvzRC10_0\tbird\nJRcTFvzRC10_1\tbird\nJRsNcoTJJjE_0\tcat\nJRsn1likB7c_0\tboat\nJRyc_lxMJzs_0\tskateboard\nJR6JAx7xdGg_0\tcat\nJSA0JWvQbJg_2\ttrain\nJSdEdTcUHHI_0\tknife\nJSfXE4ExZ1U_0\tbird\nJSfXE4ExZ1U_2\tbird\nJSs6Sa8zR6c_0\thorse\nJS2cbpFwahY_0\tskateboard\nJTE0ABGzb30_1\tskateboard\nJTE0ABGzb30_2\tskateboard\nJTJgZcBM93k_1\tknife\nJTa9HkbXfSw_0\tcow\nJThBohLxRSc_0\tcow\nJTi4Oy6v9mM_1\thorse\nJTi4Oy6v9mM_2\thorse\nJTtjfwrK4Ls_0\tdog\nJT5zUQio3B0_0\tbus\nJUHMTmjUswE_0\tknife\nJUVHXeFTe3Q_0\thorse\nJUVHXeFTe3Q_3\thorse\nJUbPqBVbGQQ_1\ttruck\nJUpxTW6_BAI_0\tcow\nJUtd4FLjXio_0\thorse\nJU1N1nqXjII_0\ttrain\nJVKkxo7adX8_1\tknife\nJVQ6Gx2hGxs_0\tairplane\nJVTIzApj2UA_0\tgiraffe\nJVVtcOIACz0_0\tgiraffe\nJVg62b0T408_2\ttrain\nJVg62b0T408_0\ttrain\nJV2A3zWMRj8_0\tumbrella\nJV3Tbp30yp4_2\tmotorcycle\nJV3Tbp30yp4_1\tmotorcycle\nJV3Tbp30yp4_3\tmotorcycle\nJV-OfjEsQDs_0\tumbrella\nJWKZlCk_cts_0\ttrain\nJWXSXvHgoo4_0\tcar\nJXEyPb4Nzro_0\tskateboard\nJXP_CNg8grg_0\tcat\nJXi5KrVPz0M_1\tbird\nJXj_lj5QUp8_0\tperson\nJXmBBTT0YXQ_0\tcat\nJXobiO1_7Ts_0\ttrain\nJXwfPpl53Fs_0\tdog\nJYYAwimr2XQ_0\ttruck\nJYi7bWDL5os_0\tperson\nJYsWtLH_mjM_0\tbus\nJYsWtLH_mjM_1\tbus\nJYsWtLH_mjM_2\tbus\nJYvBo5FwjSg_0\telephant\nJYvBo5FwjSg_2\telephant\nJY2d1dohCDs_0\telephant\nJY3rSX-blgA_0\tcow\nJZBJ35lKlXw_0\ttruck\nJZOZuTiifHM_2\tboat\nJZXr-dGLkpU_0\tboat\nJZcy1T--d4M_0\tskateboard\nJZ_ri3awsso_0\tcat\nJaI9UR2n7ZE_0\thorse\nJaLswoS3xO8_0\tknife\nJaumrq8clZY_0\ttruck\nJa9rAQpB2_M_0\tcat\nJa_ofQ1ynAc_1\tairplane\nJa_ofQ1ynAc_2\tairplane\nJa_ofQ1ynAc_4\tairplane\nJbA11YWHpW0_1\tskateboard\nJbBxvvoOvpg_0\tbear\nJbK17NE3dvk_1\ttrain\nJbK17NE3dvk_0\ttrain\nJbK17NE3dvk_2\ttrain\nJbK17NE3dvk_3\ttrain\nJbPP4AwiNEc_0\tcat\nJbSkoHG6Vq4_0\tairplane\nJbfzd9wIyi4_0\tcat\nJbw0KUJqWpE_0\ttrain\nJb03yqEB5WI_1\tbus\nJb03yqEB5WI_4\tbus\nJb5lFDvKqXA_0\tbus\nJb6FIuynIuw_0\tbicycle\nJb-q7z_Mygg_0\ttruck\nJcJKjdDKuc4_0\ttrain\nJcRvhoBwgNg_0\tcow\nJcU-cdQmKV8_3\tbus\nJcU-cdQmKV8_1\tbus\nJcixSQRUnY4_1\telephant\nJcmTLrQZ7sE_1\tcow\nJcmTLrQZ7sE_0\tcow\nJcwl0kCsUTw_0\tumbrella\nJc5PS0Ejejw_1\telephant\nJc8eE1ayaX8_0\tcow\nJc9PdqC1rpg_0\ttrain\nJdUehtxAfys_1\tbicycle\nJdUehtxAfys_7\tbicycle\nJdwSAFvKg74_0\tcar\
nJeAykU3MiKg_2\tairplane\nJeET8zb_gPQ_4\tknife\nJeNu9WVQOHY_4\tbicycle\nJeNu9WVQOHY_1\tbicycle\nJeNu9WVQOHY_7\tbicycle\nJeYCd0VP5EY_0\thorse\nJeb4SSyyZD8_0\tdog\nJe_fuH6-34I_0\tskateboard\nhujF3CEgAXI_0\tskateboard\nhulFEZUNu10_0\ttrain\nhutTW7ORN8g_0\tbicycle\nhutTW7ORN8g_1\tbicycle\nhuy9NXPynro_0\tcat\nhu6nRmzUcAw_0\ttrain\nhvWHb1kiV5g_0\tdog\nhvWs1FhyQlw_0\tumbrella\nhvhWoRQZMUU_0\tcat\nhvjNVTle8bQ_6\tairplane\nhvjNVTle8bQ_0\tairplane\nhvjNVTle8bQ_1\tairplane\nhvjNVTle8bQ_2\tairplane\nhvjNVTle8bQ_3\tairplane\nhvjNVTle8bQ_4\tairplane\nhvjNVTle8bQ_5\tairplane\nhvkIo-dZUUY_1\tbird\nhvlXyPikLUY_0\tbus\nhv49V2RzgHw_0\thorse\nhv7b1I-cRvI_0\ttruck\nhwOL2G-Lo54_0\tumbrella\nhwPkgOB1mEU_0\tcow\nhwTVAkfjjCY_0\tcat\nhwikEC2Jc0c_1\thorse\nhxC7dFDqfXo_0\tcar\nhxUn2A7Ko2g_0\tcow\nhyMlfx_ZEeI_0\ttrain\nhyMlfx_ZEeI_1\ttrain\nhyX6rKHZcLs_0\tperson\nhyb_qBoKG9Y_0\ttrain\nhyjjdUcyanE_1\tdog\nhyj8BJ_PMgQ_2\telephant\nhyrBL1wMHts_1\ttruck\nhy9Ml-3zAtM_2\tknife\nhy9jrpamopE_0\tumbrella\nhzBqPVIC7IQ_0\ttrain\nhzUTA7mGyKE_0\tbicycle\nhzeHyMcUmO4_0\tmotorcycle\nhzeHyMcUmO4_1\tmotorcycle\nhzz9JBRYjFs_0\tbicycle\nhzz9JBRYjFs_1\tbicycle\nhz5anqtArdI_0\ttrain\nhz5anqtArdI_1\ttrain\nhz7PXI6R6DI_0\ttrain\nh0IiMbTwz1Q_0\ttruck\nh0IiMbTwz1Q_1\ttruck\nh0hIpf9O0Vg_0\tbus\nh1MxYGy1SBc_0\tdog\nh1XtVmXF7CQ_1\telephant\nh19z0Ap_5Pc_0\tbus\nh2R46pcCEVg_0\tcow\nh2SNrfK0yQQ_2\tbus\nh2X0to3hDA4_0\tbicycle\nh2b9t_pnnNA_0\tcow\nh22FyeO_lyE_0\tumbrella\nh23R8X1WKjU_1\thorse\nh24uuiI34yI_0\tskateboard\nh27DK_oMwYY_0\tdog\nh3FnAKBB9Xc_1\telephant\nh3Lz61ficjc_2\tmotorcycle\nh3aEao1bRIY_0\tcat\nh3aZGHTjBwc_0\telephant\nh3o5ZykGOxI_4\telephant\nh3o5ZykGOxI_2\telephant\nh3o5ZykGOxI_3\telephant\nh3qOwaRYAi8_1\tbear\nh3uPELFKoCc_3\tknife\nh3uR99WtOh4_4\tbear\nh3_cWsxi4Qw_1\tskateboard\nh4CySJb83XI_2\telephant\nh4KXG16xA_Y_0\tdog\nh4LE2YVwHL0_0\tmotorcycle\nh4jU8ZrDZd8_0\tskateboard\nh4kmvN6NmyA_3\ttrain\nh4kmvN6NmyA_2\ttrain\nh4wsDcj7kcE_0\tcow\nh45-zE2gKFA_2\tperson\nh45-zE2gKFA_3\telephant\nh47dExP6oXQ_0\telephant\nh5C2RKknWfg_3\tbicycle\nh5C2RKknWfg_5\tbicycle\nh5C2RKknWfg_6\tbicycle\nh5KSLdybLIE_5\tbicycle\nh5KSLdybLIE_1\tbicycle\nh5KSLdybLIE_3\tbicycle\nh5dsU3N4joc_0\tcow\nh5hkvWWp7Qg_0\tknife\nh55Exp2rpSM_0\tknife\nh6FtP-5VnYM_2\tcow\nh6FtP-5VnYM_1\tcow\nh6McnZDPX3I_12\telephant\nh6McnZDPX3I_1\telephant\nh6McnZDPX3I_2\telephant\nh6McnZDPX3I_6\telephant\nh6McnZDPX3I_7\telephant\nh6McnZDPX3I_9\telephant\nh6McnZDPX3I_10\telephant\nh6Mvzt5e_eE_0\thorse\nh6jGPQLkE48_0\tperson\nh6ztcoDHYaY_0\tcat\nh62bO9Mfl9Y_0\tcat\nh64dmoPNWw0_0\tcar\nh7OZUnDKWbA_0\ttruck\nh7cXxMNxlcY_0\thorse\nh7uwd7opKjI_0\tmotorcycle\nh7uwd7opKjI_1\tmotorcycle\nh8BDqFH8e_w_0\ttrain\nh8BDqFH8e_w_1\ttrain\nh8BDqFH8e_w_2\ttrain\nh8EHrA_OM7c_0\tperson\nh8LiHNo4904_4\tairplane\nh8LiHNo4904_5\tairplane\nh8LiHNo4904_6\tairplane\nJfb3XGdt6VE_0\tcat\nJfdoYsRxF5k_2\tknife\nJfnHVMyUT0E_4\tbicycle\nJfqHeWyD5DQ_0\tskateboard\nJgLXpgcnjAA_0\tcow\nJgQbvDmM2Nk_0\tbird\nJggJWWHhlc4_0\tumbrella\nJg8FXSKMvTQ_1\telephant\nJhDNC6XRVG8_0\tcow\nJhDNC6XRVG8_1\tcow\nJhFvJHfP_NY_0\tcar\nJhPLC0PS9I0_0\tknife\nJh87zKRgN68_2\tboat\nJiMyZFGmGgM_0\tdog\nJifa2spqYV8_0\tairplane\nJijtEhm-Dk8_0\tbus\nJikSLpJ2xKw_0\tcow\nJinIHVE4_MI_1\tbear\nJioS9DumyIM_1\tcar\nJixd9HKGzWA_0\ttrain\nJi6bpPIPScI_0\tumbrella\nJjIvWQ-198c_0\tknife\nJja500M50Yw_0\tcow\nJja500M50Yw_1\tcow\nJj4KvC3TXro_0\tcar\nJj4KvC3TXro_1\tcar\nJkC1Udoysk8_1\tcat\nJkC4nV8LcTE_1\tbicycle\nJkH8ZtuvzDQ_0\tdog\nJkpQkpiRpVI_0\tbird\nJkzNUiOu1GI_0\tbus\nJk28bpr063o_4\tairplane\nJk28bpr063o_0\tairplane\nJlJQlaoy3ec_0\tcat\nJlrPaJIAP9k_1\thorse\nJ
luvPpeI2DY_0\ttrain\nJluvPpeI2DY_1\ttrain\nJlzsUphxgIY_0\ttruck\nJl1bEdoRG9I_0\tcow\nJl6gTtZcQH0_3\thorse\nJl6gTtZcQH0_0\thorse\nJl6gTtZcQH0_2\thorse\nJmblo1iMURo_0\tmotorcycle\nJmdMhGsyZvk_0\tboat\nJmvNubLPYGo_0\tbird\nJmxixgKAKzc_0\ttruck\nJm0S-kE2yVc_0\ttruck\nJm3dtu8GTos_0\tdog\nJnAaSoaN3FI_4\tboat\nJnHUNCeHEDc_0\tbird\nJnMkFSGB6Vw_0\ttruck\nJnXmNI53DWE_0\tperson\nJnrrNu9udj0_0\tbear\nJnvIx5y-ijs_1\tumbrella\nJnysuevt_4A_0\ttrain\nJn1gvGhxU5U_0\tbear\nJocAgPv-ZJo_0\tskateboard\nJohmecnKktI_0\tboat\nJopGEGMo-DQ_0\tdog\nJo50LBwjHIk_0\tbicycle\nJo50LBwjHIk_2\tbicycle\nJpDOBaNBwkc_0\ttruck\nJpFiApmpoHA_0\tcow\nJpL4Mv-uFi4_1\tdog\nJpRMc6MtCH8_0\ttruck\nJpWh1yQThRo_0\ttrain\nJpZwF6hOCDg_1\ttruck\nJpjAxQ_vsZw_7\tbicycle\nJpjAxQ_vsZw_1\tbicycle\nJpsOsewgXAg_1\tbird\nJpuCWzsE35k_1\tbird\nJp0GKZ9vA0c_0\tairplane\nJp1tvS1y4eI_0\tboat\nJqCaTxH5Ovk_0\tmotorcycle\nJqC81ViWFeE_0\tbear\nJqPkaGRIz6c_2\telephant\nJqT_Bx4fd1Q_0\tcow\nJqauh1bsJy4_0\tbear\nJq2ml2xQkHg_0\tcat\nJq8D628IlV8_1\tskateboard\nJq8D628IlV8_2\tskateboard\nJq8OMvgG6wc_0\tcow\nJrAvVMnkKEo_3\tbear\nJrKxxhHGR7E_0\tgiraffe\nJrZTstVj2wg_0\thorse\nJrbrXXDuxnc_0\thorse\nJrmyPAW-ItI_0\tdog\nJsNQXxg1PvE_0\tperson\nJsPtP21j3f8_3\tbear\nJsPtP21j3f8_1\tbear\nJscnB4QfAhY_0\ttrain\nJsiSPt3nv1Y_0\tcow\nJsiSPt3nv1Y_2\tcow\nJs2ZDfWZWtc_0\tcat\nJs69iFgcic0_2\tbus\nJtMMD0aJnPI_0\ttrain\nJtMMD0aJnPI_1\ttrain\nJtQzeWNt8IA_0\tumbrella\nJtQzeWNt8IA_2\tumbrella\nJtfp49L4LHg_0\ttrain\nJt1zVsUQGhI_2\telephant\nJt1zVsUQGhI_3\telephant\nJt8ikZGW768_0\tbicycle\nJuGusvu6Z7o_0\tskateboard\nJuKJKHykMKM_0\thorse\nJuKgukJ63eM_4\tskateboard\nJuME8_jaVdE_2\tcar\nJuME8_jaVdE_3\tcar\nJuMNRsOc0nU_1\tcat\nJuMNRsOc0nU_0\tcat\nJuNubQtCvrU_0\tbird\nJuNubQtCvrU_1\tbird\nJuO7qvp2GBs_0\tknife\nJuXqLoCgK4o_0\tbear\nh8OcTR0Z4yo_1\tairplane\nh8OcTR0Z4yo_2\tairplane\nh8OiIYhIPTs_2\ttrain\nh8PJps4Sj1E_0\tairplane\nh8PmDAKiKVc_0\tdog\nh8oTFl4XWTc_0\tbus\nh8ysn_L9udY_0\ttrain\nh8ysn_L9udY_1\ttrain\nh9FtsOFR3p8_0\tcat\nh9veoEpzRH8_0\tcow\nh9w20ChZ_7Y_0\tbicycle\nh9w20ChZ_7Y_1\tbicycle\nh96rR-VkJZA_1\tbear\nh96rR-VkJZA_2\tbear\nh966cxQyjvc_1\tairplane\nh-PS5v6ZTBY_0\ttruck\nh-VSmS49g5M_0\tskateboard\nh-npKkPbHSA_0\tboat\nh-qRpUteJV4_0\tbird\nh-vGllteZnI_0\ttrain\nh-1NdCqoxdU_1\tbird\nh-2DBPzbKUM_0\tcow\nh-27oWBBirE_0\tdog\nh-9WCj8sB6o_7\tairplane\nh-9WCj8sB6o_8\tairplane\nh-9WCj8sB6o_10\tairplane\nh-9WCj8sB6o_11\tairplane\nh-9WCj8sB6o_12\tairplane\nh-9WCj8sB6o_0\tairplane\nh-9WCj8sB6o_1\tairplane\nh-9WCj8sB6o_3\tairplane\nh-9WCj8sB6o_5\tairplane\nh_DH9wUjJZA_0\tcow\nh_Ey7gQJCSc_0\tcow\nh_KKvY3cK4o_0\tcow\nh_KKvY3cK4o_1\tcow\nh_XHdrNdD98_0\tbus\nh_tQ-ZVYe1M_0\tbird\nh_6GMOpsIOk_0\tcat\niACKPRGNEOU_0\tbus\niADpOEGdwQI_3\tbird\niALubFRPBXQ_1\tknife\niAL5KD5BwGQ_0\thorse\niAuV09oxF_c_0\tbus\niAzvkn-2C9s_4\thorse\niA_tYzSGuVg_0\tdog\niBDVD9if3VA_1\tbear\niBDVD9if3VA_3\tbear\niBDVD9if3VA_4\tbear\niBF1Cfv7RpE_2\ttrain\niBF1Cfv7RpE_3\ttrain\niBO6oNBr4hM_2\ttrain\niBmHl4vB2p8_0\tboat\niBmHl4vB2p8_1\tboat\niB2e_0wI6Cs_1\tbird\niCA5LKIvUak_0\thorse\niCUmfkHj2MM_0\telephant\niCWBysiT4fE_0\tairplane\niCoklLBZGi0_0\ttruck\niC-r2odD6Ss_0\tdog\niDBWSSj3Yag_0\tbus\niDMMfw0zrvQ_0\tcow\niDy5BzJGt50_0\tskateboard\niD0ptJ7ucww_0\thorse\niD0ptJ7ucww_2\thorse\niECVUNZOPOM_0\tcow\niEIRSDANY7g_0\tbird\niEcsL-BdEp8_0\tskateboard\niEeZD9_-mw4_1\ttrain\niEe9Qed4A6w_0\telephant\niEfRHR6In04_1\tdog\niEnwhpHkWPA_0\tdog\niErN5WNQuZ8_1\tbear\niFLG6c3XcMw_1\tknife\niFgR4_OYpgU_0\tboat\niFk_jNFfItI_0\tcar\niFsAXsW8t-8_1\tbus\niFsAXsW8t-8_2\tbus\niGB1OkMGELk_1\telephant\niGE04YY7P68_0\tmotorcycle\niGE8oPBzavo_0\tairplane\ni
GKh6_bzEe8_9\tairplane\niGKh6_bzEe8_5\tairplane\niGWCy-zysHU_7\thorse\niGWCy-zysHU_0\thorse\niGWCy-zysHU_2\thorse\niGWCy-zysHU_5\thorse\niGf0rCvWhZE_1\tbird\niGivgJkDWVo_0\telephant\niGivgJkDWVo_4\telephant\niGivgJkDWVo_5\telephant\niGivgJkDWVo_1\telephant\niGivgJkDWVo_2\telephant\niGmHR-MYdts_2\tskateboard\niGtwAlGgpuQ_0\tmotorcycle\niG3IZAIpSos_0\tcat\niG4w2A16Qy0_3\tboat\niG4w2A16Qy0_0\tboat\niG7OG-yAmkg_1\tboat\niHNSjj9GO9k_0\thorse\niHZNqzCjd7k_0\ttrain\niHbirUiASog_0\tskateboard\niH0SvXt_QEE_0\tcow\niH9qrmQO5wg_3\thorse\niH9qrmQO5wg_1\thorse\niH_5naROy0I_0\tmotorcycle\niIYyUq4tPbc_0\tcow\niIZw5oU3kz4_0\tdog\niIa2i3Fyyp8_0\tcat\niIgi9EuB83A_0\ttrain\niIlu4DSMMwM_0\tskateboard\niIoEhVh0sac_0\tbird\niIoEhVh0sac_3\tbird\niIoEhVh0sac_1\tbird\niIwKnWnoXd0_0\tskateboard\niI66ySv1M1E_0\tbear\niJcYkYS6CgE_4\tairplane\niJcYkYS6CgE_0\tairplane\niJcYkYS6CgE_3\tairplane\niJqRpAI5q0M_0\tcow\niJ0Pe8-N6i4_0\tbus\niJ5fEZLxnPw_0\tknife\niJ5fEZLxnPw_2\tknife\niKLuvvisn6Y_0\tairplane\nJvHU5ncnmtc_1\tcow\nJvkp32eVZyc_0\tcat\nJvm2k8MgJ5k_0\tcat\nJv1ayezpka4_0\tbird\nJv6b9zItltw_3\tbird\nJv6b9zItltw_0\tbird\nJwNWcW7nUBE_0\telephant\nJwNWcW7nUBE_2\telephant\nJwaPyA7kWhc_0\tcow\nJwnMWPlx6KU_0\tcow\nJw_nc2U4pKs_0\tskateboard\nJxKJB-QdFUA_1\tumbrella\nJxRKwF7KNOA_0\tbird\nJxSYbvgXcT8_0\tcar\nJxVoSlh710g_2\tbird\nJxc3ArJpyuY_0\tmotorcycle\nJxc3ArJpyuY_3\tmotorcycle\nJxdIZhohCtg_0\tcow\nJxlB8wLncYc_0\telephant\nJxzCLy2VyJA_0\tskateboard\nJx03EEph0bw_1\ttruck\nJx2PgBxlrLY_3\tairplane\nJx6xyX5sPMk_0\tcat\nJyKJFochwIQ_0\ttruck\nJyLFLF4shyY_0\tairplane\nJyLqTlaGOww_0\tknife\nJyM0FDmoMyQ_0\tairplane\nJyePA4nzTx8_0\ttruck\nJyhAOfW608o_0\tcow\nJyliijVyyUc_0\telephant\nJyliijVyyUc_1\telephant\nJy1hmMPCNks_0\tdog\nJy1hmMPCNks_1\tdog\nJy37u1dt8Qc_0\tdog\nJy_3PqINBss_1\tbird\nJzGkRevP9mU_1\ttruck\nJzNvJYTN1Pw_1\tbus\nJzNvJYTN1Pw_0\tbus\nJzNvJYTN1Pw_2\tbus\nJzNvJYTN1Pw_4\tbus\nJzNvJYTN1Pw_7\tbus\nJzm0H_o-LyA_1\tbicycle\nJzwF2_O5qho_0\tcow\nJzwF2_O5qho_1\tcow\nJzwF2_O5qho_2\tcow\nJ0Gb34OfhGs_0\tairplane\nJ0m2haAO_Pg_0\ttruck\nJ0uOEHqVD0g_1\telephant\nJ01a05fNHz8_0\tairplane\nJ05eYTq5pFE_0\tcow\nJ1BVFlR3Pzc_2\tbicycle\nJ1VVax1uIGc_0\telephant\nJ1YSacTJR64_0\tbear\nJ1YqrkAsUIs_1\ttruck\nJ1YqrkAsUIs_2\ttruck\nJ1YqrkAsUIs_3\ttruck\nJ1rYOpOlNqs_0\tcat\nJ1reV7ZinzE_2\ttruck\nJ1sQZHaGRVY_0\tcow\nJ1uF4oCMmtU_0\tcar\nJ10PTSVhLnQ_0\tcar\nJ10PTSVhLnQ_1\tcar\nJ10PTSVhLnQ_2\tcar\nJ142X1ly-gY_0\tcow\nJ17uKo2HgxY_0\tbird\nJ2R5C_XNnek_0\ttrain\nJ2Sh2XKvWOA_2\thorse\nJ3EToJg72Es_0\thorse\nJ3d48McH1L0_0\telephant\nJ3gk0p9Hm0o_0\tknife\nJ3hgEqlUzpg_0\tbus\nJ3hva1l0CWM_1\thorse\nJ3jOAuADP44_0\tboat\nJ3sMC-99CWs_1\tcow\nJ3zIT2YwDdY_0\tbicycle\nJ315ju7gD8Q_2\ttrain\nJ4eK5nQv9E0_0\tmotorcycle\nJ4hu4X1Hr7k_0\tbear\nJ4ithFdbyKY_0\ttrain\nJ4mDzsuGR1M_2\tbear\nJ43AWiRkRAI_0\tskateboard\nJ46c4FEAjQ8_0\thorse\nJ46c4FEAjQ8_2\thorse\nJ5CA6t8d7uA_0\ttruck\nJ5JNgpMvPks_0\thorse\nJ5Ss-cEKg9o_0\tskateboard\nJ5TS-1YKlWE_0\telephant\nJ5TS-1YKlWE_1\telephant\nJ51qDcGqOV8_0\tairplane\nJ5-O6tDEZO0_0\thorse\nJ5_8xLaPuIU_0\tcat\nJ6AHeX1RqWk_0\tbus\nJ6nRLSf9kms_1\tdog\nJ61MSyhI5Xg_0\tbird\nJ68NptJ9oRE_0\tskateboard\nJ7h1DaonvHY_1\thorse\nJ7jTtirQ85g_0\tmotorcycle\nJ7vNGyyYQ30_0\tdog\nJ73WpGWHEuE_0\tgiraffe\nJ73WpGWHEuE_15\tgiraffe\nJ73WpGWHEuE_1\tgiraffe\nJ73WpGWHEuE_2\tgiraffe\nJ73WpGWHEuE_14\tgiraffe\nJ79qVoBV6TM_0\tcar\nJ8Akt0d4r_k_0\ttrain\nJ8Akt0d4r_k_1\ttrain\nJ8dIP05jqRw_2\ttruck\nJ8dIP05jqRw_5\ttruck\nJ9SzI8MQm6Y_0\tairplane\nJ9ZGJucbLiw_0\tairplane\nJ9mX4rrWQto_0\tknife\nJ9n9_-FSk4Y_0\tdog\nJ916-YD5Qms_0\telephant\nJ-sHEYA-20k_1\tgiraffe\niKjaiW6gHPQ_1\teleph
ant\niKjaiW6gHPQ_0\telephant\niKlCbkZsFzE_1\tcow\niLeUN6d8Aew_0\tgiraffe\niLeUN6d8Aew_1\tgiraffe\niLk3v-m1Z0U_0\thorse\niLvLOw8Jigg_0\tmotorcycle\niL0GMZ7iO3c_0\tdog\niL5OOut4Jek_3\tbus\niL9TAERxS4A_1\tbicycle\niL9hLZ_cXaI_0\tperson\niMfVd5_HBcE_0\tbus\niMqYyOcO4Fw_0\tumbrella\niMtt9-ROv_o_0\tdog\niMukpec9Vmo_0\tairplane\niMukpec9Vmo_2\tairplane\niMxzNRMiKMA_0\ttruck\niM3tOs60qxk_1\tairplane\niM8Lua_zTug_2\ttrain\niNQNSmu2BD8_0\tskateboard\niNWrFmCCfXw_1\tbear\niNa2jg_1Vyc_0\tcat\niNghTa86iWY_0\tcat\niN-bJwlR2i8_1\tbicycle\niOEuAB0dIs8_0\tdog\niOH00pYaMhY_0\tcow\niOJiYp298qc_3\tairplane\niOJiYp298qc_1\tairplane\niOd4NCiEBLw_4\tairplane\niOd4NCiEBLw_2\tairplane\niOgScMDTX_I_0\tskateboard\niOvWAp7U61k_0\tcow\niOzYv5IpFng_0\thorse\niO7wHeFO6Js_1\tcow\niO7wHeFO6Js_2\tcow\niPWL6FSzmS8_0\tumbrella\niPbg6G7tUVo_1\thorse\niP98M3c1PJw_0\telephant\niQB9bgZJCwA_0\tmotorcycle\niQPn_3iB6aU_0\tumbrella\niQYiakvHwnk_0\tbicycle\niQZ1QN-A3JQ_0\telephant\niQfs0MyXA-s_0\tairplane\niQxGihgbiM8_0\tcow\niQ_2xA5J-Zg_4\tbird\niQ_2xA5J-Zg_5\tbird\niQ_2xA5J-Zg_1\tbird\niQ_2xA5J-Zg_2\tbird\niRI3AkfYykI_0\tknife\niRLMFxqd6Vk_0\tbear\niRTTlG8M9FE_0\tcar\niRTTlG8M9FE_2\tcar\niRTTlG8M9FE_1\tcar\niRWWnw104cE_0\tbicycle\niRklgBUz8ME_0\tbus\niRk0aHyYWdM_0\tbird\niRlBKC_jfE0_1\thorse\niRlBKC_jfE0_2\thorse\niRlBKC_jfE0_4\thorse\niRmfa0b6jJk_0\tcar\niRpibBNFoiY_0\tknife\niRv5dyfU3ZQ_1\tcar\niRv5dyfU3ZQ_2\tcar\niRw-TCiikqw_0\thorse\niRw-TCiikqw_1\thorse\niR3sRTxVGtg_0\tairplane\niR4rImxKjK0_0\tcar\niR4rImxKjK0_1\tcar\niR5Zew8NcYU_0\ttruck\niR5Zew8NcYU_1\ttruck\niR5Zew8NcYU_2\ttruck\niR5Zew8NcYU_3\ttruck\niR5Zew8NcYU_4\ttruck\niR5Zew8NcYU_5\ttruck\niR5Zew8NcYU_6\ttruck\niR5Zew8NcYU_7\ttruck\niR5Zew8NcYU_8\ttruck\niR5Zew8NcYU_9\ttruck\niSCFoiWm7Xk_0\tbear\niSLNkNnHOXQ_0\tbicycle\niSYNvKIuAXc_0\tmotorcycle\niSbXpgu-7qA_0\tbicycle\niSeR1wQ4sl0_0\ttrain\niTF1bWOtrew_1\tbus\niTF1bWOtrew_2\tbus\niTWyYCJO0FI_2\ttruck\niTbEmIOM3Bg_2\tcar\niTbEmIOM3Bg_0\tcar\niTbEmIOM3Bg_1\tcar\niT3LIkn9wh4_0\tcar\niT5clmXCTEc_0\telephant\niUDGzAPkGLI_1\tairplane\niUEEnhAvRoY_0\tcow\niUSZKTFqatw_0\tairplane\niUX8ST-BSFg_1\tbus\niUZnCaGp148_0\tdog\niVH9ehKyau0_0\tgiraffe\niVRs9h04NcM_0\tcat\niVzRc0RW_Y4_0\tbird\niV4UGeMqQeY_0\tdog\niV8NpvUXve4_0\telephant\niV8NpvUXve4_1\telephant\niV9CFIQTmfs_2\tbicycle\niWP_wo9OSe4_0\tbird\niWo66ztRt0o_3\tboat\niWtj7if5cK8_1\tboat\niWv1rxdhH1E_0\tbear\niW1aIV39PQo_0\tmotorcycle\niW2g2j2VhbM_1\tskateboard\niW2g2j2VhbM_2\tskateboard\niXKQX0UfOqA_0\tcow\niXKQX0UfOqA_1\tcow\niXKQX0UfOqA_3\tcow\niXh4-KWp9S4_0\thorse\niXl114K8Y1E_0\tcar\niXxi1CQpbBk_2\tcow\niXzEoHyipJM_0\ttruck\niX7b9tWhoKg_0\tgiraffe\niYGSi3t8Do0_2\tcow\niYO5SD120r4_0\telephant\niYYdiX4oGjM_0\tskateboard\niYjiqdn7fVk_0\tbird\niYsgKLWI96c_2\tknife\niYtDe_tT_wo_1\ttrain\nJ-6KxfbaI6M_2\tcow\nJ_HdQVHBeco_0\tmotorcycle\nJ_l7W4IMhJo_0\tdog\nJ_n_3-KXet0_0\tdog\nKAGadYR0_LM_4\tbird\nKAGadYR0_LM_6\tbird\nKAGadYR0_LM_8\tbird\nKAKn8JmKESU_0\ttrain\nKAjM8ENV-F4_4\tskateboard\nKAxsc-ratJ4_0\thorse\nKA1A0hH1nVQ_0\ttrain\nKBIGw8UrUG8_0\tcow\nKBKaaEaIPRc_0\tcow\nKBNqKcj0xoc_0\ttrain\nKBP3moB3vz4_0\tbird\nKBRkCaaDjxU_3\tbus\nKBRkCaaDjxU_0\tbus\nKBe3_8RL_MI_0\tperson\nKBoY6Pa8f_M_0\tcow\nKCbzyGKBwC8_0\ttrain\nKCdR8nTa3p4_0\tskateboard\nKCipBL5_e5M_0\thorse\nKCy-RKy_KN0_0\tbicycle\nKC1md4Q_DlQ_0\tskateboard\nKDSxlGW6eRc_0\tumbrella\nKDZsS4MjllY_0\tmotorcycle\nKDaVTe3RbUY_0\thorse\nKDyYkCLIImM_0\tknife\nKD0Qm4z53a0_0\ttruck\nKD0Qm4z53a0_5\ttruck\nKD5LwDdfw0o_0\thorse\nKD9qqVSiPu0_0\ttrain\nKEGLFAbfrxs_0\tmotorcycle\nKERo3bKldwM_0\telephant\nKEW0fAHE_74_0\tbus\nKEW0fAHE_74_2\tbus
\nKEagowlFwzI_0\tcow\nKEll3gbyIsk_0\ttruck\nKEll3gbyIsk_1\ttruck\nKEll3gbyIsk_2\ttruck\nKExfLNe3IbY_0\tairplane\nKE2StZtSBfk_0\tairplane\nKE3O7h2RC-s_1\ttrain\nKE_UJpQulNU_0\thorse\nKFEorB8NRcE_0\tboat\nKFFTHBaYcbw_0\tbear\nKFJtVwXfusI_0\tboat\nKFRZOFB41Jk_0\ttrain\nKFk_7p6X-zI_6\tcar\nKFk_7p6X-zI_1\tcar\nKFk_7p6X-zI_2\tcar\nKFk_7p6X-zI_4\tcar\nKFk_7p6X-zI_5\tcar\nKFnvvsS8eIE_1\tknife\nKGYrelsyNbk_0\tairplane\nKGbYHbiOfd8_0\tgiraffe\nKGwEL4VozSA_0\tboat\nKG8zBA9Gudg_0\tknife\nKHBsJZVKzks_0\ttruck\nKHG1hZsfjwQ_0\ttrain\nKHHyhgm1jZ0_3\tskateboard\nKHSjivlhX30_1\tbear\nKHcEC33udEg_0\tcow\nKHgLQP4XH9Q_0\tskateboard\nKHsYYKcSCSI_1\tcow\nKH0F1sJXKss_3\telephant\nKH0k5jfUZGg_0\tbicycle\nKH8QlsYIT1M_1\tbear\nKIPptA8AzYg_0\thorse\nKIjf6QGqdsw_0\ttruck\nKIjf6QGqdsw_1\ttruck\nKIqePeskBSk_0\ttruck\nKIy2LK1jsQ8_0\tperson\nKI8Arf5-ekw_1\ttruck\nKI8Arf5-ekw_4\ttruck\nKJIBdy7_10k_1\tbus\nKJIBdy7_10k_2\tbus\nKJJBVXnnqIw_0\tzebra\nKJcXjJ5S9yA_1\tdog\nKJrPyuVxWRg_0\tairplane\nKJrPyuVxWRg_1\tairplane\nKJvAK-5ExwY_2\ttruck\nKJ30mU3h4f4_0\tbear\nKJ7PQiJAKRM_0\telephant\nKKKiTv_k23A_0\tgiraffe\nKKO1QGoVQYU_0\telephant\nKKpwJEMQYv8_0\tdog\nKKsKKMjHYGM_0\thorse\nKK06xbUhklk_1\tbus\nKLC8OgkQnNQ_0\tboat\nKLEKnTRMmo0_1\tcow\nKLGAT1GQYGA_2\tbird\nKLMz6_P5QmA_0\thorse\nKLNmQqyAs54_0\tcow\nKLUTy4pqLZ0_0\tbicycle\nKLVZqPfRuTg_2\tbear\nKLVZqPfRuTg_7\tbear\nKLlN4H-eGYI_1\tskateboard\nKL6-Iu09-C8_0\tcat\nKMNaWZZK2Os_0\tskateboard\nKMOOcO5yE9E_1\thorse\nKMXuGjMAt7k_5\tbicycle\nKMXuGjMAt7k_6\tbicycle\nKMXuGjMAt7k_3\tbicycle\nKMajGvVnol0_1\tairplane\nKMajGvVnol0_4\tairplane\nKMajGvVnol0_5\tairplane\nKMajGvVnol0_6\tairplane\nKMajGvVnol0_7\tairplane\nKMiZgk_f50g_0\tdog\nKMlZbzTdutw_1\tcar\nKMlZbzTdutw_2\tcar\nKMsL64iYfOA_0\tcar\nKMtu1xThH2k_2\telephant\nKMyoO6YYfZk_0\telephant\nKNaoNUMT7m0_1\tcar\nKNg4K_bbY5Q_0\ttrain\nKN5hxi96gW0_0\tcat\nKN-_uhPPfoE_0\tcow\nKOKdrC_foXo_0\tairplane\nKOOd5IO8seo_0\tboat\nKOSUWuFIQjQ_1\tairplane\nKOVZk2ixqc0_0\ttruck\nKOgmgqcT21Y_1\tbird\nKOl1EDiK2e8_0\tmotorcycle\nKO6T6QdloiM_0\tbus\nKO7Ncyx1-9c_0\ttrain\nKPJDHcE-qeQ_0\tbicycle\nKPYtlDJa43o_0\tskateboard\nKPfbBNvFcmA_0\tskateboard\nKPj_wrsubOE_2\tbear\nKPkzyHL7IPg_0\tcow\nKPmvpNEHsPk_0\tskateboard\nKPzWIuvRlr0_1\tskateboard\nKP4ApNQiIEI_0\tcat\nKQB-ZyriFmI_0\tboat\nKQg6eO2jr_Y_0\tumbrella\nKQ5mchVgTXo_0\ttruck\nKRCLiP-JUsc_0\ttruck\nKRCLiP-JUsc_2\ttruck\nKRCLiP-JUsc_1\ttruck\nKRW0HyqDLg8_0\tdog\nKRjN1nx8mcE_0\tairplane\nKSDxU99SF6g_0\tmotorcycle\nKSHVle4SAM4_0\telephant\nKSZ7nkMWOsU_0\tskateboard\nKSZ7nkMWOsU_1\tskateboard\nKSj7hZ7oO18_0\tcow\nKS1ge4vlv64_0\tbicycle\nKS4vsIYGaCM_4\ttruck\nKS4vsIYGaCM_0\ttruck\nKS8UAlyHoCg_0\tdog\nKS_fak2guWU_1\tdog\nKTAMaZKxpF8_2\ttrain\nKTDhNtr8XF4_0\tairplane\nKTDzrCvIVQs_0\tdog\nKTQQtbUbWbA_0\tairplane\nKTZ2Jsj6_ig_0\ttruck\nKTdzxOjJNgI_0\tcar\nKTsTGNqrFuE_0\tumbrella\nKT7YiBWXqNk_0\tairplane\nKUZxnRyU2e8_0\tcat\nKUbSnz1yWxc_0\tknife\nKUc8Kw30V1Q_2\ttruck\nKUc8Kw30V1Q_3\ttruck\nKUc8Kw30V1Q_4\ttruck\nKUgY_2bsBC0_1\tskateboard\nKUhzqYZoYCI_0\tcow\nKUkcrqulhqg_0\tcow\nKUlpA-cpCpM_0\thorse\nKUumLype4AE_0\telephant\nKVFlTVdKQVw_0\thorse\nKVJCkQzQbMs_0\tperson\nKVmS-yiYu2c_0\tbicycle\nKVzW5MPT25A_0\tairplane\nKV0o55FO4XA_0\tskateboard\nKV3jtdzXA9U_0\tdog\nKV__RQ75-vw_1\tcow\nKWJiCsomGTA_0\tcow\nKWLl4vVumIs_0\ttruck\nKWSDQebY3dA_0\tcat\nKWwbFKgHqW0_0\tcar\nKWxd8IQ9_a0_0\tcat\nKW10UlO19uo_0\tbus\nKW4ovUCg7uU_0\tbicycle\nKW4ovUCg7uU_1\tbicycle\nKW5S4gsTVaQ_0\tknife\nKW7gAr7kgow_0\tdog\nKW_6RyjLGPI_3\thorse\nKXCQuD9phb4_1\tbird\nKXENib5sk78_0\tcat\nKXLWiz5ZUh0_1\ttrain\nKXLWiz5ZUh0_2\ttrain\nKXdF5__0yVQ_0\tcow\nKXf6k7PrX
7E_1\telephant\nKXf6k7PrX7E_2\telephant\nKXrQkw1WPnk_0\tbird\nKXzu3MDaZn8_0\tcar\nKYK_Wg8JlTg_0\tskateboard\nKYK_Wg8JlTg_1\tskateboard\nKYTRCD2p-8Y_0\tmotorcycle\nKYZzKKYD7Yc_1\thorse\nKYaB_EEk344_0\tcat\nKYc__uUZkwc_3\tbicycle\nKYd6wCR0jVc_1\thorse\nKYd6wCR0jVc_0\thorse\nKYs4hm9X1Rg_1\tbicycle\nKYvXJXEbUMg_0\tbird\nKY0x7p41Q_A_0\tcat\nKY04L4VTsXc_1\tairplane\nKY04L4VTsXc_2\tairplane\nKY7D2Y5MQSo_0\thorse\nKZAf2uPS-us_1\thorse\nKZAf2uPS-us_0\thorse\nKZFniGi-fes_0\tdog\nKZJcgoY3r3U_0\tairplane\nKZSLQpdbGps_0\tmotorcycle\nKZYe6pqrLaQ_1\tdog\nKZhX7tDfYIA_0\tbus\nKZl_XArvSXk_0\thorse\nKZ4OuA1t3ZY_0\telephant\nKaUGkf-3N-4_0\thorse\nKaiX3d83DWA_0\tzebra\nKaj5B4nrWJU_0\tskateboard\nKapwOqVyzUk_0\tcat\nKaqToIfNxMY_1\tbicycle\nKauPg8P2kC4_1\tairplane\nKazepPKQz1M_1\tcow\nKazepPKQz1M_3\tcow\nKazepPKQz1M_4\tcow\nKa978At0k0Y_0\tairplane\nKa-4ZfE0GMQ_0\tmotorcycle\nKbA6UDJg1LE_0\ttrain\nKbA6UDJg1LE_1\ttrain\nKbGl5jqOQ7o_0\tcat\nKbRIbBeLBsM_3\tmotorcycle\nKbosOWR7ZSg_1\tboat\nKb3lxArGO8Y_0\tbicycle\nKb3lxArGO8Y_1\tbicycle\nKcDpzG8kKho_0\tcat\nKcL-zz1sb6I_0\tdog\nKceqMsKO-zc_0\tcat\nKcpGWNCD-uk_0\tcat\nKct9k6Q2YM8_0\tcar\nKcuEc9WwYSQ_0\tcow\nKcuEc9WwYSQ_1\tcow\nKcyLR4RxylE_0\tcow\nKcyMYgt62Go_0\thorse\niY5Sh73Lem0_0\tbird\niY6eEC8uY4E_2\ttrain\niY6eEC8uY4E_1\ttrain\niY9QlFmEBFY_0\tmotorcycle\niZsSK_iIOoA_0\thorse\niaGO2mTgoPo_1\tbicycle\niaGO2mTgoPo_3\tbicycle\niaWSU1ISWXQ_2\tairplane\niaWSU1ISWXQ_0\tairplane\niaflfMXT7QQ_0\tboat\niamGAsKNRhY_0\ttrain\niana0Lz1gs0_1\tmotorcycle\niasZRb9p3lg_0\tmotorcycle\nia1XmqAwn7M_0\tbus\nia6R3fqdlnE_0\tbear\nibcBDIGpMfo_1\tbus\nibd-Wxcr_x4_0\thorse\nibpj369yzbw_0\tumbrella\nibxmk7cGhTQ_3\thorse\nib5fWzJWV5A_0\tcow\nicDyRH3P-nM_0\tairplane\nicGjENlINL4_3\tskateboard\nich9rXZWjGY_0\tcar\nicic9NkCnf0_0\tcow\nicnuBKQZNBg_2\tbus\nicnuBKQZNBg_0\tbus\nicnuBKQZNBg_1\tbus\nicxOfJQ-l9I_0\tcar\nicxOfJQ-l9I_1\tcar\nicy3pC1Q0eA_0\tcat\nic7k8fkUDXs_0\tcow\nidnOwkwaCm4_0\thorse\nidnSzg_rV_k_3\tbicycle\nidoGYHCXGJs_0\telephant\nidq0Jqw8Oa0_2\telephant\nid1yzZ3HkTs_1\tknife\nieCL4lz7IJw_1\tboat\nieOpqoYhMOQ_0\ttruck\nieOpqoYhMOQ_1\ttruck\nieOpqoYhMOQ_2\ttruck\nieULzTIs9ls_0\tcow\niedgnWefCA0_0\tairplane\niedgnWefCA0_2\tairplane\niedgnWefCA0_3\tairplane\niewlg5CteEs_1\tairplane\nie8gkh6nQcA_0\ttrain\nifKKR-gCLSk_0\tcat\nifRQKBKIRSI_0\tdog\niff3KW8leKw_0\tairplane\niff3KW8leKw_1\tairplane\nifghH4Jo8D8_0\ttruck\nif31ci9xz_8_4\tbicycle\nif31ci9xz_8_1\tbicycle\nif31ci9xz_8_2\tbicycle\nigGtS-jZCQM_2\tcar\nigGtS-jZCQM_0\tcar\nigLVqNKw-yE_0\tbird\nigMWvnK1jEE_0\tgiraffe\nigMWvnK1jEE_3\tgiraffe\nigMWvnK1jEE_1\tgiraffe\nigQUACDrluw_0\thorse\nigU61tmxeE4_2\tskateboard\nigWsPt0nelg_1\tbus\nigcpSvypduQ_0\ttruck\nigcpSvypduQ_1\ttruck\nigdqmLfZ_cw_0\tairplane\nigjBIRwjlko_1\tdog\nigm6X4CZLmk_1\tbus\nignREcFRyaQ_7\tairplane\nignREcFRyaQ_8\tairplane\nigwghbZYjgg_0\tairplane\nihMDaxeTpZs_1\thorse\nihTjIMWOjuQ_1\tmotorcycle\nihUpF22zo4M_0\ttrain\nihUpF22zo4M_1\ttrain\nihWWle00xEE_0\tmotorcycle\nihh0J0AaWBs_0\ttrain\nihh0J0AaWBs_2\ttrain\niiA0hIRwwJA_0\ttrain\niiSWvRk3YfU_0\tbird\niiextKoe48U_0\tcat\niigPPpoo0W8_0\tknife\niiiOUcmwJPw_0\tcow\nii0PDMs-a0o_2\tcar\nii2ghwDAI3w_1\tairplane\nii_sG2SkeXM_0\tcat\nijB2Yh71VIg_2\tbear\nijJAWtORd2w_0\ttruck\nijJAWtORd2w_1\ttruck\nijVpcnt8HN8_0\tbus\nijXmwWOLvpM_2\thorse\nijXmwWOLvpM_1\thorse\nijdipMmraWc_0\ttruck\nijwhkKzyWE8_0\tairplane\nij0zLKtr0sA_0\tbird\nikGzd6ivk64_0\tmotorcycle\nikKFRS8Hivk_0\tbear\nikVu6XfZ3_A_1\tbicycle\nikafEc8p6rI_0\tbicycle\nikafEc8p6rI_5\tbicycle\nikafEc8p6rI_1\tbicycle\nikafEc8p6rI_3\tbicycle\nikafEc8p6rI_4\tbicycle\nikfmjumoUlM_2\ttr
ain\nik868nOtrZo_4\tbus\nik-jgdZW4Ek_0\thorse\nik__zZ1HZNg_1\tgiraffe\nilKErQ8ojz0_0\tumbrella\nilKErQ8ojz0_2\tumbrella\nilKErQ8ojz0_3\tumbrella\nilKW98Qvobg_0\tskateboard\nilvsheh1Cqs_0\tdog\nilxXSgvtFgw_0\tcow\nimEWC_Q-BSg_1\tcar\nimcRxs0K7H8_0\tbus\nimmhpBi8eWw_6\tskateboard\nim_FneG303c_0\tdog\ninEZ7ZLAS7s_5\tskateboard\ninJLKInP5kw_0\tdog\ninZmM8c-9NI_3\thorse\ninedUh-74-A_4\ttruck\ninodVLfFogA_0\ttrain\ninynAJrGhVU_0\tmotorcycle\nin061qZJjWI_0\tdog\nKc8WMzLKvvk_0\tcow\nKc-f3X7O-pw_0\tcat\nKc-x73DCumI_0\ttruck\nKdGgVhM0Ihg_0\tbird\nKdKlI0ZN6qo_0\tairplane\nKdQQqsAuU7o_1\tbicycle\nKdUSJz6UWLQ_0\tgiraffe\nKdXRnPKKeTU_0\tbird\nKddQJwFfv9s_2\tskateboard\nKdjMgSuON5w_5\tbear\nKdpUjVhfjG0_0\tperson\nKdyadP7Y1nU_0\tcar\nKd9Em2ABfN8_0\tcat\nKd-jTE5-2uE_1\tmotorcycle\nKeMITKdjHtk_0\tcat\nKenV2bIQf1o_0\tbicycle\nKevYmLAAigc_1\ttrain\nKe3R9FrGLcY_0\tdog\nKfJU66erPWo_2\tknife\nKfMO45jz-68_0\tboat\nKfS_UKkbQAA_0\tbird\nKfTV1TFY2b8_0\tbird\nKfaTw0euPQA_0\tmotorcycle\nKfjmKiZzSlY_0\tcow\nKfjmKiZzSlY_5\tcow\nKfkKe7q45KA_1\tmotorcycle\nKfkKe7q45KA_2\tmotorcycle\nKfkKe7q45KA_3\tmotorcycle\nKfpCncLoqOw_0\tcow\nKfwbVpPI0nU_1\tmotorcycle\nKgAFD_JvgrQ_0\tcow\nKgD3H0APDy0_0\tbear\nKgNS5HwFF_c_1\telephant\nKgVEQYicksA_0\tcow\nKgY5OrVnzv4_0\tcow\nKgo7SWtDdz4_1\tdog\nKg3xuyjNU7w_0\tumbrella\nKg7Qk4Gx9n0_0\tmotorcycle\nKhKZwdKiqms_0\tcow\nKhKcHaH_ALo_0\thorse\nKhPKq8O30VM_0\tbicycle\nKhPKq8O30VM_2\tbicycle\nKhPKq8O30VM_4\tbicycle\nKhuC9snWfpI_0\tcow\nKh7rAO7jCGc_0\tairplane\nKh_KwBHfGQ8_0\tcow\nKiHy8IMQ6zA_0\tairplane\nKiaUDlPLxzk_1\tbear\nKixl-Wmj3kg_0\tmotorcycle\nKjaag6B-MIQ_1\tskateboard\nKjca1u6P3NE_0\tcow\nKjiI2E3l3Mk_1\ttruck\nKjiI2E3l3Mk_2\ttruck\nKjqaJ25GUBI_0\tbus\nKj3dRtd4xQI_1\tcow\nKj3dRtd4xQI_0\tcow\nKkD23XYUG9c_0\tumbrella\nKkMNGzvNkg4_9\tbird\nKkNYBz9ZaVA_0\tbird\nKkNYBz9ZaVA_1\tbird\nKkPf9AB1HZo_1\telephant\nKkRq1ogJq-4_0\tskateboard\nKkXTT9C4xfc_0\tcow\nKkdSKHS7P50_1\tskateboard\nKks6eJqnZLQ_0\tdog\nKks6eJqnZLQ_2\tdog\nKks6eJqnZLQ_3\tdog\nKks6eJqnZLQ_4\tdog\nKks6eJqnZLQ_5\tdog\nKk6BgYl9OjA_7\tbicycle\nKlEK-vv3DVo_0\tbear\nKlENnLskuCU_0\tcat\nKlG0czACle4_1\tcow\nKlG0czACle4_0\tcow\nKlG0czACle4_2\tcow\nKlG0czACle4_3\tcow\nKlqbHICh4G4_0\ttrain\nKmJhshcviXA_0\tknife\nKmbMzgXFdKs_1\tairplane\nKmbMzgXFdKs_2\tairplane\nKmbMzgXFdKs_0\tairplane\nKmfmqwmQneM_0\tbird\nKmr5uVYVSDo_0\tcar\nKmuV8XfAjvw_0\thorse\nKm3GmgNJlL8_0\ttrain\nKm3GmgNJlL8_1\ttrain\nKm3GmgNJlL8_4\ttrain\nKm7w520V5vs_0\tairplane\nKnIxVxIho9w_1\tbird\nKnN2yDre-aM_0\tboat\nKnTu6keaGs0_2\telephant\nKnTu6keaGs0_0\telephant\nKnXPxa1RzmU_0\tcow\nKncYvkV6rwc_0\tboat\nKnql8E5Khc8_0\telephant\nKnuD87lrS8w_0\tskateboard\nKnvGRqLQ5iM_1\ttrain\nKoA6bPmALeA_0\tcat\nKoXgGmdVCBM_1\tbicycle\nKoXgGmdVCBM_10\tbicycle\nKoXgGmdVCBM_2\tbicycle\nKoXgGmdVCBM_3\tbicycle\nKoXgGmdVCBM_4\tbicycle\nKoXgGmdVCBM_5\tbicycle\nKoXgGmdVCBM_6\tbicycle\nKoXgGmdVCBM_7\tbicycle\nKoXgGmdVCBM_8\tbicycle\nKosi26dm76A_0\thorse\nKo5wlBGl200_0\thorse\nKo_Nx24OGxM_2\tairplane\nKpDzoM2xtwc_2\ttruck\nKpDzoM2xtwc_3\ttruck\nKpDzoM2xtwc_5\ttruck\nKpHFaYsgWrg_2\telephant\nKpHFaYsgWrg_1\telephant\nKpVflkpC7d4_3\tbus\nKpVflkpC7d4_5\tbus\nKpVflkpC7d4_0\tbus\nKpVflkpC7d4_2\tbus\nKpXxo2n6AYw_1\tmotorcycle\nKphl0WRacss_0\tknife\nKqAvXx4bN5k_0\tcat\nKqQgFUEAS-M_0\ttrain\nKqavxpR698k_6\tdog\nKqavxpR698k_0\tdog\nKqavxpR698k_1\tdog\nKqfo6_qcthc_0\tcar\nKqjhaIJMY5U_0\tcat\nKqnqyAczaqs_4\tbus\nKqqyldSpJh4_0\thorse\nKqqyldSpJh4_1\thorse\nKqzkADa-Lqw_1\ttrain\nKq1x16QvM1g_0\tdog\nKrGJjt0yq-s_1\tbus\nKriNb3dhqVQ_1\tskateboard\nin9LFcixPXo_0\tskateboard\nioEMtB2bP6o_0\tbird\nioESr4H79KY_0\tboat\nioGc_R
8NJow_0\tcow\nioKahF3aFWw_0\thorse\nioKahF3aFWw_1\thorse\nioOHxrHumIk_1\tairplane\niobYquCNk5k_0\tcow\niojaZ646ie8_0\tskateboard\nipLnwxta1Jc_0\tboat\nipOJVFLMLIk_2\tbird\nipOJVFLMLIk_0\tbird\nipgB9KXnzK8_0\thorse\nipg_y1T2OsM_0\tcow\nipg_y1T2OsM_1\tcow\nipqQlNsINy8_2\tairplane\nipt6gWgCgis_0\ttruck\nip5xVRJOpP8_0\tumbrella\nip8BFE94TKo_0\tairplane\nip8BFE94TKo_2\tairplane\niqDJJqLVBBk_1\telephant\niqExYW2fPfc_0\tbear\niqicuLBaF_g_0\ttruck\niqlKzflOl00_1\tbus\niq1FaWFylpI_0\tmotorcycle\niq6izTYp-DU_0\tmotorcycle\nirBsER6ITHw_2\tskateboard\nirDs_vWExnM_1\tbicycle\nirDs_vWExnM_2\tbicycle\nirU_BJXoU9I_1\tcow\nirWY8s-JuBs_3\tairplane\nirWY8s-JuBs_0\tairplane\nirWY8s-JuBs_1\tairplane\nirWY8s-JuBs_2\tairplane\niramP9ihj_w_1\tbird\nirgacv6LobE_0\tmotorcycle\niri1MtEgOjQ_0\tbear\nirs2O6YOB5I_3\telephant\nirs2O6YOB5I_5\telephant\nirs2O6YOB5I_1\telephant\niruY-BU0rpg_4\telephant\nirzcPf--6uQ_0\ttrain\nirzcPf--6uQ_4\ttrain\nirzcPf--6uQ_5\ttrain\nir4EYn7Fz5A_0\tdog\nir5E9O2Tonk_0\tboat\nir7Dq5dPxOQ_0\thorse\nisPplb7aotI_0\tboat\nisPplb7aotI_3\tboat\nisU4229ndXM_0\tcat\nisfwmnXNmeM_2\tcow\nislz_HxqOnI_0\tbird\nisvvRHvNuIw_4\tumbrella\nisynk11V9s8_3\tairplane\nisynk11V9s8_1\tairplane\nisypXPZMgns_2\tboat\nisypXPZMgns_3\tboat\nitKyPMv5z0Y_0\tumbrella\nitKyPMv5z0Y_2\tumbrella\nitc-A2zwSGM_0\tdog\nitrvgHryhIY_0\ttrain\nits4C4ty2oA_0\tskateboard\nittQcsrECUE_1\tbear\nittQcsrECUE_2\tbear\nit1EatlrBkg_0\tcat\nit3KS-r39EQ_1\tknife\nit3hCzfmyfs_0\tcow\nit6DtEGdhas_0\tcat\nit8Fid-mqRQ_0\ttruck\niuEbY8B4Qo4_0\tcow\niuEbY8B4Qo4_1\tcow\niuFmdispR2U_0\tbicycle\niuRmu4BN6bw_0\ttrain\niumTd9IGDho_0\ttrain\niusgUMlrYFA_0\tairplane\niutdZMWA8f0_0\tperson\niuumrgHW8zM_0\tumbrella\niu3sd1qnr8g_0\tcar\niu9Av4HCmiw_0\tknife\nivDeIaJYIlE_0\ttruck\nivT103z2bwc_0\tgiraffe\nivdfO5VqKo4_0\tcat\nivgTXhIqccY_0\tcat\nivi1frbFnGw_1\tgiraffe\niwFO7lcVjKc_2\tcow\niwFO7lcVjKc_0\tcow\niwFO7lcVjKc_1\tcow\niwX4cgfQn5s_0\tbird\niwczN64AC9Y_0\tbus\niwp5aVOXWaM_0\tairplane\nix8S6CRuUFg_3\tbear\niyAvqfMVOeA_0\tcat\niyLZZlL-B80_0\tcow\niyMbIICjtcg_0\tcow\niybJfH6iVdU_0\tbus\niygW3-Ovcic_0\tcow\niyn1OZFmvXE_2\tbird\niyz9Lq13Mcg_0\tcow\nizbTUTqkG7c_0\tcow\nizx70OqPYBc_0\tdog\niz9-Vl4e9po_0\ttrain\niz9-Vl4e9po_2\ttrain\niz9-Vl4e9po_3\ttrain\niz-BT0NAs6k_1\tknife\ni0Eg02B3JoM_0\telephant\ni0Ez1KT7sTo_0\thorse\ni0ZE0kXl5oU_1\tskateboard\ni0eMgZ0riHI_2\tbird\ni0gg-mJNKlU_0\tcow\ni05OPAsrmJI_3\telephant\ni05OPAsrmJI_1\telephant\ni05OPAsrmJI_2\telephant\ni09cuoC14q4_0\tbear\ni1DfyWe0Jh4_0\tcow\ni1DfyWe0Jh4_1\tcow\ni1NfFxZmBSA_0\tbus\nKrvsSuIgrJQ_4\thorse\nKrvsSuIgrJQ_1\thorse\nKsT2_VxPkb4_0\tknife\nKsXzFCpHMPU_0\tgiraffe\nKsyud0_i1zI_0\tbus\nKtINrfbQSXk_0\tknife\nKtV59qZg7BU_0\ttruck\nKtX4x9k3J2A_0\ttrain\nKthi3i2WM3s_1\tskateboard\nKtkN77asAj4_0\thorse\nKtplZx6_ecU_1\tknife\nKtqvSap6uig_0\tskateboard\nKtxb4OmaAjA_0\tcar\nKt3uQcxNltk_0\tzebra\nKt9neWWjkHM_2\tbear\nKuBa9tep8xk_0\tbear\nKuQgP71vfZ0_0\ttrain\nKuYBJ90zNYw_0\tumbrella\nKuYjBUvU-ws_0\tumbrella\nKuYrzelSfIw_0\tcar\nKulks153IS8_0\ttruck\nKulks153IS8_1\ttruck\nKu0XhH2YeG4_0\tbear\nKvH6JyHG3H8_0\tmotorcycle\nKvH6JyHG3H8_1\tmotorcycle\nKvLXxaGooPk_0\tcow\nKvPLPO4A5R8_0\tknife\nKvRsu4xefwo_0\tperson\nKvcxzJxNkkU_1\tbird\nKveRZ7dBNGU_0\tboat\nKvgupPBw5rc_0\tcat\nKvjDDIthDDM_0\tcow\nKvkOTtqxJlo_1\tcat\nKvsaKWirK7Y_0\tskateboard\nKv0ui3mEWGE_0\thorse\nKv0ui3mEWGE_4\thorse\nKv0ui3mEWGE_1\thorse\nKv0ui3mEWGE_2\thorse\nKwkcPYl8Lv4_0\tcow\nKw7t6l8h2Ns_0\tbear\nKw7t6l8h2Ns_1\tbear\nKw8037OwDjc_0\ttruck\nKxWI3M2FGOw_0\thorse\nKxZXot9AIY4_0\ttruck\nKxflrYttp20_0\tbird\nKxlTxdqDDzo_0\tcat\nKxuqb_htGwY_0\tgir
affe\nKxuqb_htGwY_2\tgiraffe\nKx40to29YnE_0\tskateboard\nKyDXCruNNj4_0\thorse\nKyUM64yfNCA_0\thorse\nKyWUn_bj5rM_0\tmotorcycle\nKyZWWIsQUbg_0\tskateboard\nKyZWWIsQUbg_1\tskateboard\nKyaKfhOfKhE_1\tbird\nKyt325n06oI_0\tcat\nKywHhzvsm3Y_0\tbird\nKyyS9PYJ9Zo_0\ttruck\nKzK3iwncxbY_0\tbicycle\nKzK3iwncxbY_1\tbicycle\nKzc17TzutkM_0\tskateboard\nKzc17TzutkM_1\tskateboard\nKzyD-e7N2D4_0\ttrain\nKzyD-e7N2D4_1\ttrain\nKz3zulHzEE4_1\ttrain\nK0CwoXVMp0M_0\tbicycle\nK0L3_2UquEY_0\tboat\nK0Zt-EcXkj8_1\tairplane\nK0cgwgX_8fo_2\tboat\nK0xs4bH65_Q_1\tmotorcycle\nK02fUURwCiY_2\tcar\nK02fUURwCiY_0\tcar\nK02fUURwCiY_1\tcar\nK1Qbgm__2iE_0\tcat\nK1ccfBgR_kg_0\ttruck\nK1-s4sk63R4_0\thorse\nK1_J3d_yH64_0\tmotorcycle\nK2F6TCgVfR0_0\tboat\nK2hV4KVruLc_0\tairplane\nK2my8qWjyn4_0\tcat\nK2yjgwFV15k_1\tmotorcycle\nK2yjgwFV15k_0\tmotorcycle\nK26jSjClwaQ_0\tskateboard\nK3Cgw_EFdbw_1\tmotorcycle\nK3DniaFnn9E_0\tcat\nK3KhxEuf8mY_0\thorse\nK3KhxEuf8mY_5\thorse\nK3Ov5rPJ2LE_1\thorse\nK3XsEMr7Qt4_0\tperson\nK3qgW4Y3yrk_0\tmotorcycle\nK30LSGFu6hs_0\tmotorcycle\nK4RE7AZWGv0_0\tcar\nK4U_AmqQFDY_0\tbear\nK4VnWy2-8xQ_3\tcar\nK4ec2MqDkPw_0\ttrain\nK4fCUNjbdf8_0\tmotorcycle\nK4wp52Zn5d4_0\thorse\nK5NooGgwD1E_0\thorse\nK5NooGgwD1E_1\thorse\nK5pBkPv_1sg_5\tcar\ni12y-zJl-nA_0\tcat\ni17EaDmRPCg_0\tumbrella\ni2Yjl6kF8iY_2\tairplane\ni2Yjl6kF8iY_0\tairplane\ni2cujNbMSKc_1\tskateboard\ni2diIHrCsbk_1\tbird\ni3AK_cujBxY_1\tmotorcycle\ni3BpSeFJdgo_0\tcat\ni3HeGqUyibM_4\tbicycle\ni3HeGqUyibM_9\tbicycle\ni3HeGqUyibM_12\tbicycle\ni3LFAemLFW0_0\thorse\ni3Z5pFF2dH0_0\tbird\ni3a4U770GtE_0\tperson\ni31nG3E36WE_0\tknife\ni32p4KoRD2o_0\ttrain\ni33S_D8TBc4_0\tdog\ni35wpbpl8qY_2\tboat\ni38dpYWvJN8_0\tumbrella\ni38dpYWvJN8_1\tumbrella\ni4CFI7MtlRs_0\tcat\ni4ExemfAEO8_0\tbicycle\ni4IpgDIqTrs_0\tboat\ni4RZtd1cCw8_0\tumbrella\ni4bRNqQ32MI_0\tcat\ni4clJpNvw4M_2\tbus\ni4hqN47R0oU_1\ttrain\ni45JoRzDdI0_0\tcow\ni46jok5cjyY_0\thorse\ni5GJ6mIp8zc_0\tboat\ni5G6RkcL4m0_0\tcat\ni5OdBE4QG6c_0\ttrain\ni5g87UeVkBU_0\thorse\ni5g87UeVkBU_1\thorse\ni5sT2ifoPyM_0\tknife\ni6MF-PGtJiE_1\ttrain\ni6WTNPwIjW8_0\tcat\ni6aJqhBh5wg_0\tskateboard\ni6j6P7ITxYg_0\tcow\ni6vwTWezXmU_1\tboat\ni66Gsq6zzqI_0\tmotorcycle\ni6-YQ6rSnDI_0\tcat\ni6_oBTD2-YA_5\tbird\ni7P2tq4TS_4_2\tbus\ni7UQGL5uxvw_1\tskateboard\ni7WeV3CfJV8_0\tknife\ni7a8sQcVRgE_0\ttruck\ni7umCLnxVXw_0\tcat\ni791If0qoBU_5\tknife\ni8KQCu2cMAc_2\tbicycle\ni8KQCu2cMAc_4\tbicycle\ni8bVI1667K4_1\ttruck\ni8hjK42sseE_0\tmotorcycle\ni8lG7Ux3wlc_0\tdog\ni8nbuADJjmE_0\tcar\ni8nbuADJjmE_1\tcar\ni8nbuADJjmE_2\tcar\ni9PUn4sF30g_0\tmotorcycle\ni9T-NwSBqPE_1\tknife\ni9VWkuQHBls_0\thorse\ni9nmvkDiFGc_0\tcow\ni9sP7mWuQ_8_2\tmotorcycle\ni9sP7mWuQ_8_1\tmotorcycle\ni9u4vsQUBTQ_0\thorse\ni90TDb7evCY_0\ttruck\ni9_FG4-2VIM_0\tskateboard\ni-CQVFq1JI8_1\tbicycle\ni-CQVFq1JI8_3\tbicycle\ni-T9Q2g8xbk_0\tairplane\ni-kodOT_ufM_0\tcow\ni-nP7aFTZb8_0\tbird\ni-xdWDN7Eys_2\tknife\ni-3aAuwOmxc_0\ttruck\ni-8W-K4y3nY_0\ttrain\ni_HHc85mP4Q_0\ttrain\ni_h0vOCrd_U_0\tairplane\ni_h0vOCrd_U_1\tairplane\ni_iXTMX4Vls_0\tcat\ni_nZ8ImBf18_1\tbicycle\ni_nwFUP7QJM_0\tknife\ni_4c71HPXOI_0\tgiraffe\ni_-PIEIGkQE_0\thorse\ni_-PIEIGkQE_1\thorse\njAH-80rHWKY_3\tbear\njAW8iLGAgdQ_1\tbear\njAW8iLGAgdQ_0\tbear\njAh4oBD0Bsw_0\ttrain\njAnV_6fFGnI_0\tcow\njAy3VhkJauE_2\tknife\njAy3VhkJauE_5\tknife\njA6aZl1f4Wg_0\tbicycle\njBMmFLPc7nA_6\tbus\njBMmFLPc7nA_0\tbus\njBMmFLPc7nA_3\tbus\njBMmFLPc7nA_5\tbus\njBTJgbVspOA_0\tairplane\njBl50J7bOEw_1\tairplane\njB1IT1aBj-Y_0\tdog\njCDFU72N7Mc_1\tskateboard\njCJGjjNBSk8_1\tairplane\njCJGjjNBSk8_0\tairplane\njCMWNtCzuqU_0\tknife\njCUnLxCoYMA_0\tmotor
cycle\njCY67ybfyqU_1\tcow\njCZx5dn_4KA_0\tbear\njCcW1MW6PTE_0\ttruck\njCcW1MW6PTE_1\ttruck\njCiwgfC1uN0_0\tdog\njCtFgJ1qhJE_0\tbird\njC5Px208OVY_4\thorse\njC5Px208OVY_5\thorse\njC5YGckTiIU_2\ttrain\njDFqxB4rC7M_0\tcat\njDJNC5fzvfA_1\tmotorcycle\njDYks7hSKbg_0\ttruck\njDbHjQZ5R70_0\tairplane\njDbHjQZ5R70_1\tairplane\njDdFavN2eWY_0\tdog\njDgpggXdBIc_1\tmotorcycle\njDgpggXdBIc_2\tmotorcycle\njD2RjyxG6ow_0\tmotorcycle\njD4621IQz3w_0\tdog\njD4621IQz3w_1\tdog\njEASZOuNSS0_3\tskateboard\njEASZOuNSS0_0\tskateboard\njEASZOuNSS0_2\tskateboard\njEEOkCjU9y0_0\tbear\njEJZ76_xhog_2\tbear\njEQDhb_Zewo_0\tcat\njEYG-qIv34o_1\tcat\njEYG-qIv34o_0\tcat\njEfwj-JzFXo_0\tperson\njE1Rq_Ot02M_0\tdog\njFAm4tikj6E_0\thorse\njFSIX_KuRK8_0\thorse\nK5p31PQkx3I_1\thorse\nK5q4FoXnLwI_0\ttrain\nK5sQWplX-D8_1\tskateboard\nK5sQWplX-D8_2\tskateboard\nK6JHTga6VU8_0\tairplane\nK6SFafS3Zv8_0\tcar\nK6SFafS3Zv8_2\tcar\nK6jf51to7dU_0\thorse\nK6jf51to7dU_1\thorse\nK6sKjN_MOsE_1\tbear\nK6srgkSvZdw_1\tskateboard\nK6srgkSvZdw_2\tskateboard\nK6vEY0vOlSg_1\ttrain\nK66dqG9OJuo_1\tdog\nK66dqG9OJuo_0\tdog\nK6_WEh-eizw_1\tairplane\nK6_WEh-eizw_2\tairplane\nK6_WEh-eizw_4\tairplane\nK7uSHqISah0_0\ttrain\nK702Tx5vkp4_0\thorse\nK78iEUHTTZc_1\tcat\nK8aa-7brUTs_0\tbear\nK8vGdEhh_jU_0\tbicycle\nK81vEhukX4U_0\tmotorcycle\nK9LhqtvfZ10_0\tdog\nK9LhqtvfZ10_3\tdog\nK9LhqtvfZ10_4\tdog\nK9LhqtvfZ10_5\tdog\nK9TPOifKCmU_0\tmotorcycle\nK9hTkmr_71A_2\tcar\nK9jCx7G3_Mw_0\tknife\nK9kNamc2c5Y_1\tdog\nK9kNamc2c5Y_0\tdog\nK9wE7VzJD00_0\ttrain\nK-Dz6gr96Lo_0\tdog\nK-s8RPMLRw4_0\tbird\nK-s8RPMLRw4_2\tbird\nK-x3x3kGGqg_0\tdog\nK_PGa9Eo6mo_1\tdog\nK_VS3tyB-Cc_0\tperson\nK_Z28TO4stg_0\tbird\nK_h1L3P_j1M_0\tbird\nK_pO-MBS7lI_0\tdog\nK_qFWKniImU_0\tskateboard\nLAKF499FHX0_0\ttrain\nLAKF499FHX0_4\ttrain\nLAKF499FHX0_1\ttrain\nLAKF499FHX0_2\ttrain\nLAKF499FHX0_3\ttrain\nLARRHwtW8fE_1\tdog\nLAZoyKF7lbQ_0\ttruck\nLAZoyKF7lbQ_2\ttruck\nLAZoyKF7lbQ_3\ttruck\nLBJEbJfzvW4_1\tskateboard\nLBOXDMZvtBY_1\ttrain\nLBnsLkuQ8kE_0\tperson\nLBwm49n5rKo_0\tmotorcycle\nLB6fi4oTKvQ_2\tdog\nLB8Wc8hU4Hc_0\tairplane\nLCGZmNGyPhM_0\tboat\nLCghaNtVeM0_1\tknife\nLCjQb5zLTCs_0\ttrain\nLCoIwiCBlW4_0\tdog\nLCxiwbrpEFI_2\tbus\nLC5Qly11BZs_0\ttrain\nLC5q2G2pxT0_0\tbus\nLDEju5sQWOU_1\tbear\nLDH_eiO0aFE_0\tboat\nLDJ9xB-n5Sg_0\tdog\nLDJ9xB-n5Sg_1\tdog\nLDQiOOCMhs4_0\ttruck\nLDQqhsLKyjs_0\ttrain\nLDYFndJjRGA_0\tskateboard\nLDgpZlJ_QYM_0\tboat\nLDh-8GoBSLw_0\tbear\nLDlR_gDbVFk_0\tairplane\nLDvN2rB8p44_0\ttrain\nLD-8yzPoOIQ_0\tcar\nLD-8yzPoOIQ_1\tcar\nLD-8yzPoOIQ_2\tcar\nLEH61oMv2So_1\ttrain\nLEIkLV_S5yA_0\tcat\nLEP6ZOl5iw0_0\thorse\nLEUCQjNIm9E_0\tknife\nLEYBNQUwruU_0\tdog\nLEiolk6i9RI_0\thorse\nLEmU61Tdqxs_1\tmotorcycle\nLEverFsHygc_1\tairplane\nLE2ks85I17U_0\tbird\nLFDqskJozig_1\tskateboard\nLFMUePhHPAk_1\tcar\nLFZYYpjP3FA_0\tknife\nLF4xVBfV5SI_1\tbird\nLGRkVRP-RTs_0\tcar\nLGgzD_ng3aA_1\tbear\nLGrMlBi0l6Y_1\tboat\nLGuSLUeKcTo_0\tbird\nLG0w1oTdXgY_0\tbird\nLHEuYW96FG0_0\tbear\nLHEuYW96FG0_4\tbear\nLHbVe_bjGp0_2\tdog\nLHbVe_bjGp0_0\tdog\nLHbVe_bjGp0_1\tdog\nLHmvAqv6kYE_0\tzebra\njFneoJr36o8_0\tcar\njGCw13fkf0Q_2\tmotorcycle\njGPtq4pO8Ug_0\tcar\njGTNsTUkNUw_0\tcat\njGTr1LSaGGw_1\tbicycle\njGTr1LSaGGw_2\tbicycle\njGTr1LSaGGw_0\tbicycle\njGlNsqDOz8Y_0\thorse\njGqRX9IwGI0_8\tbear\njHK3JYa_Ypg_0\tumbrella\njHM867g1K8k_1\thorse\njHM867g1K8k_0\tperson\njHy5deaCjQE_0\tdog\njH_YxkU_JwE_0\tmotorcycle\njINuUqU6sJI_0\tdog\njIP9FdmB0_E_0\ttrain\njIbmC5sed8I_1\tairplane\njIjEX8I5SHo_1\tbird\njIjEX8I5SHo_2\tbird\njInMbuzvtiQ_0\tumbrella\njInMbuzvtiQ_1\tumbrella\njI0xgoZ8QDA_0\tboat\njI1Swlwj_wc_0\thorse\njJMefDe4r9w_1\tsk
ateboard\njJR-emvmi9s_0\tbear\njJR-emvmi9s_1\tbear\njJf_N_p-Gjo_1\tskateboard\njJnz3tS1uME_0\tmotorcycle\njKBU4c1AdSQ_0\tcat\njKv6Q1RRxVM_1\tboat\njLBSOa5iDgE_0\thorse\njLR7LmbNekc_0\tmotorcycle\njLXuZdAveV0_2\tboat\njLXuZdAveV0_0\tboat\njMNaKigE1eI_0\ttruck\njMNaKigE1eI_1\ttruck\njMVeJ3RbcH4_0\tcar\njMaYIgpjxlk_0\tdog\njMmjaxXWaUk_1\tbus\njMo01X2mBq0_0\tbus\njM79QETqts8_1\thorse\njNCq29f3J8Y_0\tairplane\njNE_FcqbQN8_0\tmotorcycle\njNJJgAg79KA_1\tairplane\njNJJgAg79KA_0\tairplane\njNKO9msLe34_1\tairplane\njNKO9msLe34_0\tairplane\njNSTcIQwl_g_3\ttrain\njNSTcIQwl_g_1\ttrain\njNSTcIQwl_g_2\ttrain\njNllRQ66Re4_3\tdog\njNn7v2MFg_U_0\ttruck\njNsEePln1_U_0\tbird\njNsEePln1_U_1\tbird\njNt8Vn-WKRI_1\thorse\njN-BXoM15Qs_0\tcat\njOQ0W0Z_-Uo_0\tdog\njOl4m5QdOZQ_0\tbus\njPaVdR2IRu8_0\tairplane\njPiVFMGvHbM_0\ttrain\njPiVFMGvHbM_1\ttrain\njPrY_Xz0CDM_0\tknife\njP5RhcwO4E4_1\tdog\njP7mwBStU3w_0\tdog\njQBc1CqjGOk_0\tskateboard\njQCrA8Bjbp8_0\tbird\njQXYSlXk7_c_3\tbear\njQXYSlXk7_c_1\tbear\njRIy_wUojcs_0\tcar\njRR6sU59uTo_0\tairplane\njRTkny0bdY0_2\tmotorcycle\njRTkny0bdY0_1\tmotorcycle\njRh5WphQGDI_0\thorse\njRqdnQ8HlwQ_0\tairplane\njR7eq8CAmbs_0\tairplane\njR-Cbp3qBJI_2\thorse\njR-Cbp3qBJI_0\thorse\njSS6b2iz2hk_0\tknife\njSk-3X-hjyg_1\tknife\njStwl7WfsVE_0\tskateboard\njTAz5HO8mQw_0\tcat\njTHDoLyfTLc_0\tdog\njTQ5A95TKw8_0\tcat\njTYsK4JKns8_0\tgiraffe\njT1mDaHStHU_0\ttrain\njUDnkkvVKNo_0\tairplane\nLIw68irBLtE_3\tairplane\nLIzgqx7Ykxw_0\tairplane\nLI286rLHd0I_0\tbird\nLJGQA810BtE_0\tbus\nLJJuw5mLJ4Q_0\tskateboard\nLJhCGLht3Rw_0\ttrain\nLJhCGLht3Rw_1\ttrain\nLKe9a7L3vkk_0\tbird\nLKhjmARDv7k_4\tbear\nLKhjmARDv7k_6\tbear\nLKoaXogFTbc_0\tdog\nLKyQ2fBNVmw_3\tskateboard\nLK2-EMocZQs_6\tdog\nLK2-EMocZQs_1\tdog\nLK2-EMocZQs_3\tdog\nLK9zoUrrEHc_0\tskateboard\nLLJiqe0d06I_0\ttrain\nLLOwSRx9hxo_0\tbird\nLLVr7tG42kw_0\tmotorcycle\nLLW1jx3S-Hw_0\ttrain\nLLjDNseEw0c_0\tskateboard\nLL_DiAJ71rc_0\tbird\nLMGo4BXG4Yw_8\tknife\nLMRH29tlDrM_0\tcat\nLMrDuKEYJ3k_0\ttruck\nLM1djNtENzA_0\tcat\nLNQHybwdHRk_0\tairplane\nLNX244qUx7M_0\tdog\nLNntRLW2bHA_3\tskateboard\nLNntRLW2bHA_0\tskateboard\nLNntRLW2bHA_2\tskateboard\nLN6DT1DOaTg_5\tskateboard\nLOBD9yc5YPM_1\tskateboard\nLOMTlGqGyHc_0\tmotorcycle\nLOjc-npcSjs_0\tairplane\nLOjc-npcSjs_2\tairplane\nLOjc-npcSjs_4\tairplane\nLOjc-npcSjs_9\tairplane\nLOlUKQgr7Qg_0\tboat\nLOosqz3z8Xw_0\ttrain\nLOzh9vxSHPg_0\tdog\nLPQv6LdOZHo_2\tmotorcycle\nLPQv6LdOZHo_1\tmotorcycle\nLPZjxIqs8Uw_2\tairplane\nLPd_Y8gk5uI_1\ttrain\nLPgmaebC-L0_2\tboat\nLPtcpZXDhHw_0\tknife\nLPvsAAlZI_8_1\tbus\nLP3a2L1ZCyg_2\tdog\nLP8dyCxmCrI_2\ttrain\nLQAF34GzpMY_0\tairplane\nLQO68Aj4ons_0\tcar\nLQRuelaTZd4_0\tbear\nLQRuelaTZd4_1\tbear\nLQT4GnnPhA8_1\tdog\nLQbQVeZrwEk_0\tmotorcycle\nLQdP4gNX9Aw_0\tbird\nLQjzonTrY2o_0\tbear\nLQr5vK-X1fQ_0\tcat\nLQ2EDJSNIN0_1\tdog\nLQ2EDJSNIN0_3\tdog\nLQ4z96EA6co_2\tbird\nLRSii99-QIo_1\tzebra\nLRgsl5_TJVg_2\tskateboard\nLRgsl5_TJVg_0\tskateboard\nLRgsl5_TJVg_1\tskateboard\nLRtLr32oPAw_0\tskateboard\nLR7IHIbXtrE_0\tbird\nLSE0KHhFxps_0\ttrain\nLSMKaXjXnhE_1\tboat\nLSi1i5lSUjA_0\tdog\nLSqIpguEI04_0\tmotorcycle\nLSqIpguEI04_1\tmotorcycle\nLSvVMD-SF48_1\tbus\nLS8qQoB3Uw8_0\tdog\nLS8qQoB3Uw8_1\tdog\nLTEyQSswTVI_0\tbus\nLTQPc_WVFOw_0\tairplane\nLTQPc_WVFOw_1\tairplane\nLTQPc_WVFOw_2\tairplane\nLTQPc_WVFOw_3\tairplane\nLTaExiLK2S0_2\tbear\nLTaExiLK2S0_3\tbear\nLTaExiLK2S0_4\tbear\nLTaExiLK2S0_6\tbear\nLTaExiLK2S0_7\tbear\nLTjSA_-Q5DU_1\tknife\nLTkuM5IoNV4_0\tmotorcycle\nLUCDeZOOhlg_0\tcat\nLUUYKUhaYZs_0\tbus\nLUjqWGI9KSo_2\ttruck\nLUphe242a5g_0\ttrain\nLU4-QjhixQU_0\tmotorcycle\nLU4-QjhixQU_1\tmotor
cycle\nLU__7PPUMTo_0\tskateboard\nLVCMA3LXlkc_0\tairplane\nLVfXvn7elFI_0\tperson\nLVfrWLnu7T8_0\ttrain\nLWHshdXjBCY_0\ttruck\nLWQhidgjZno_0\tmotorcycle\nLWRXboX1o5Y_0\tmotorcycle\nLWTYrbFCPl0_0\tdog\nLWY9Y2YVtHA_1\ttruck\njUQUg-qsfgI_0\tmotorcycle\njUWm1Mc1Tno_0\tairplane\njVEM2JpS4sE_0\ttruck\njVZhyibQ31g_0\tcat\njV9-Lr_rsf0_0\tbicycle\njWCpff7m0LE_1\tairplane\njWCpff7m0LE_8\tairplane\njWCpff7m0LE_0\tairplane\njWCpff7m0LE_2\tairplane\njWCpff7m0LE_10\tairplane\njWGulD3X0qw_0\tcar\njWIFscsXRmo_0\tskateboard\njWLv1BQ4PsA_0\tbear\njWawsbm6dCc_0\tbear\njWfItNlOURk_0\tmotorcycle\njWfItNlOURk_1\tmotorcycle\njWruD-mHxrQ_0\tcat\njW4VRs_uVZw_2\tairplane\njW4VRs_uVZw_5\tairplane\njW4VRs_uVZw_0\tairplane\njW4VRs_uVZw_4\tairplane\njXBBnV6cop0_0\tcar\njXDxesHRKAc_0\tumbrella\njXLUgu4rET0_1\tcat\njXkzrsfYgbs_0\tdog\njX84bwkb-r0_3\tbus\njYBgSw-woGw_2\tbear\njYIWAGlIq9c_0\tskateboard\njYZmjlzKhL8_1\tskateboard\njYhAd9FFxqI_0\tumbrella\njY37CiJCKJk_0\tcat\njY9ihstGQwU_0\tcat\njZWITYFghgA_0\tcat\njZZBR49_vR0_0\tmotorcycle\njZiuOZwq7gQ_0\tmotorcycle\njaS19NIXdrk_0\tmotorcycle\njaVgyhuxK_4_3\tskateboard\njaVgyhuxK_4_0\tskateboard\njalIqFA40pI_1\tmotorcycle\njalIqFA40pI_2\tmotorcycle\njaoXgM9c7u4_1\tcar\njaovVHNORuA_0\tcat\njauLT1ElBPc_1\ttrain\njauLT1ElBPc_2\ttrain\njbN4y-wz5-s_13\tgiraffe\njbN4y-wz5-s_1\tgiraffe\njbN4y-wz5-s_4\tgiraffe\njbN4y-wz5-s_5\tgiraffe\njbN4y-wz5-s_11\tgiraffe\njbhxM5eNgO0_0\ttrain\njboQE0Z0280_0\ttruck\njbrhKjPDzhE_1\ttrain\njbwSKNFH66s_0\tdog\njb23jXcxaHE_1\ttrain\njb23jXcxaHE_2\ttrain\njb23jXcxaHE_8\ttrain\njb23jXcxaHE_9\ttrain\njb3uct7NumU_0\ttrain\njb4crk58m88_0\tskateboard\njb4672rSRIs_0\tdog\njcLbvoEUbj0_0\tairplane\njc2fijpD8vI_0\tbicycle\njc-IKl7He7U_0\tknife\njduOxfYHRGQ_0\tperson\njeBcjSSkUhw_0\tcat\njeFFdyPLUts_1\tboat\njeWf_4ARan0_1\tbicycle\nje8cw_bajbc_1\tcat\njfENtrpYNKE_2\tbear\njfENtrpYNKE_1\tbear\njfixAXjax5I_1\tmotorcycle\njfixAXjax5I_2\tmotorcycle\njfixAXjax5I_0\tperson\njgAt3qPg7A8_2\ttruck\njgD77Vh-X28_0\tmotorcycle\njgGLyRuFOdk_0\tbus\njglg4qcOpWw_0\tskateboard\njg7I2TXyQ2Y_2\tbus\njhQ4iIJ42Yw_0\tcat\njhSH0EjNy0k_0\tcar\njhjKdc7FtE0_5\tairplane\njiAVTB1keAQ_0\tbicycle\njiCp6fAMISg_0\tcat\njiJWjndM8hI_0\tknife\njjDZnXMMhEA_0\ttrain\njjKsYbTw1qk_0\ttruck\njjNxX05CDNc_0\tbird\nLWv0LbGIDi8_0\tcar\nLWxkJ4fux_I_0\tknife\nLWy-Lhb3YEk_0\tbear\nLWy-Lhb3YEk_1\tbear\nLW3bZPt1qrw_5\tboat\nLW7XQWZjBIw_0\tdog\nLXLI-Bzcsf4_2\tknife\nLXLmpEVYE5E_0\ttrain\nLXgItdZ5DXo_0\tairplane\nLYLuXQRCIJ4_0\tcar\nLYXMPTRr40M_0\tdog\nLYXMPTRr40M_2\tdog\nLYmsSNBP634_0\tknife\nLY-hwswMG4g_0\tcat\nLZJjKCpcAWA_1\tknife\nLZ_qufxYP3I_0\tcat\nLaA51BrvHGw_1\ttruck\nLaA51BrvHGw_2\ttruck\nLam8oTdJids_0\tcar\nLanX2twvMmw_1\tairplane\nLanX2twvMmw_0\tairplane\nLan3os3aUl8_0\tboat\nLbC7nqh0Uyg_2\ttrain\nLbEPmGgzUIE_0\ttruck\nLbvEMq_DQTU_1\ttrain\nLbv8FZelQCM_0\ttruck\nLcD_I0Lkw3k_0\ttrain\nLcD_I0Lkw3k_2\ttrain\nLceJwFxs3q8_0\tdog\nLdEeXsYfzE0_0\tcar\nLdLtHx09mII_0\tskateboard\nLdL-cFGaJqU_0\tbird\nLdRX8-r4Cpc_0\tcar\nLdggIc_gAew_0\tmotorcycle\nLeAl87F6eS0_2\tumbrella\nLeOCD9rZsSI_0\tbird\nLeX-zqgzN3k_1\tbird\nLeljDmw2CGU_0\tskateboard\nLfAbAKrmMq0_6\tgiraffe\nLfAbAKrmMq0_7\tgiraffe\nLfAbAKrmMq0_1\tgiraffe\nLfatUu2cH3Y_0\tcar\nLfbQRAjsucU_0\tcat\nLf5ebV_NH78_0\ttrain\nLgVi03EiPlQ_2\ttrain\nLgVi03EiPlQ_0\ttrain\nLgZrI3dxws4_0\tmotorcycle\nLgrPr2OxWcw_0\tgiraffe\nLgyj-vOk72M_0\tumbrella\nLhdXtQ8SbGE_1\tbird\nLhgyObbNmLI_0\tbus\nLhhzzaKmVO4_2\tmotorcycle\nLhm6JF_1lQg_1\ttrain\nLhnNboAgtNg_0\tcat\nLhtrfEijGHU_0\tairplane\nLiMriWExmQM_0\tboat\nLiZxvVZfUdU_2\tumbrella\nLiwliE18fA4_0\tmotorcycle\nLiwliE18fA4_1\tmo
torcycle\nLiwliE18fA4_2\tmotorcycle\nLizh5Kae5Nk_2\tknife\nLizh5Kae5Nk_4\tknife\nLiznFL6_r2A_0\tmotorcycle\nLjLWamF9HyA_0\tgiraffe\nLjjGe9bnQ3Q_0\ttrain\nLj0zBxRWoIU_0\tskateboard\nLkFbAjpWRAw_1\tgiraffe\nLkFlT3d8MuQ_0\tairplane\nLkmioXgRyo4_0\tcat\nLk7Z-AUDCuQ_0\tcat\nLlA5ioDqRns_2\tbus\nLlA5ioDqRns_1\tbus\nLlNCPsiSjOU_0\tairplane\nLlS3_VvB4Nw_0\ttruck\nLlfRY71K2AU_0\ttruck\nLliRBHO1A_E_0\ttrain\nLlplZ9JJtQw_0\tdog\nLlplZ9JJtQw_2\tdog\nLmFx-lJ6-_M_1\ttruck\nLmR0Ur4owgw_0\tbicycle\nLmT8BFH5c7k_0\tumbrella\nLmYKmKucl28_0\ttruck\nLm4mghtFu-I_0\ttrain\nLm5GStt7KBw_0\ttruck\nLm5GStt7KBw_1\ttruck\nLnGeYd1AsoA_1\tbicycle\nLnKLql5jAXo_0\ttrain\nLnLlD-mNTtE_0\tbear\nLnPyjqgA37I_0\tgiraffe\nLndUw9o_3ME_0\tskateboard\nLnhmeU6oRBE_0\tbus\nLntuuj_mi9c_3\tknife\nLnyfbZ7-fP4_1\tumbrella\nLnyfbZ7-fP4_0\tumbrella\nLnyfbZ7-fP4_2\tumbrella\nLnyfbZ7-fP4_3\tumbrella\nLn_tNsQVuwc_0\tdog\nLomkA_DJyEM_1\tbird\nLo2GqBe8-Qc_0\tbus\nLo8Q0MdVi9A_1\tbear\nLo8ZEKusM1o_0\tdog\nLpXfY3oQDIc_0\tskateboard\nLpXfY3oQDIc_1\tskateboard\nLpnkxmohHZ8_1\tairplane\nLpt6bE36Uuw_0\ttrain\nLpt8i9V2MK0_1\ttrain\nLp88aaB29zE_0\tzebra\nLqOv_DqIWEk_0\tboat\nLqf8Q1pPNFg_1\tknife\nLrIVNsObdso_0\tbird\nLrKKU5rjq38_2\tzebra\nLr-9DI7T7JE_0\tbird\nLr-9DI7T7JE_6\tbird\nLsdHOclMPh4_0\tdog\nLshP_zqoBc0_0\tknife\nLsuQhEjteSE_0\tdog\nLtGXT385l_I_1\tdog\nLtabCE1oaCw_0\tbird\nLtt24ke9SIA_0\tbicycle\nLtyHCo5uPrQ_0\tumbrella\nLuA9aRIic7s_1\tbird\nLuM1ie5yy70_1\tumbrella\nLuM1ie5yy70_3\tumbrella\nLuQiLJ7-B-8_0\tcat\nLuQxQm7FqD0_0\tcat\nLua1id9drCA_1\tgiraffe\nLuv05fYUS1Y_0\tskateboard\nLu6WLASNWIM_0\ttruck\nLu6rn2EQSEM_0\tmotorcycle\nLu6rn2EQSEM_2\tmotorcycle\nLvPDEznT9Yo_1\tbird\nLvgprOdn070_2\ttruck\nLvhxnDPWfXw_0\tknife\nLvv3Ei45X_4_1\tknife\nLvz3fP96sew_0\tdog\nLv7JaIYWXV4_1\tdog\nLv8u2aPVHmc_2\tbird\nLwChAirlUno_0\tskateboard\nLwMepJ25LgQ_0\tbear\nLwPB4qPCelk_2\tcar\nLwPB4qPCelk_0\tcar\nLwgyjrFlc5M_0\tbicycle\nLwiTfwL3bCs_0\tcar\nLxAhZAbzn7k_2\tbird\nLxjlAGLccRw_0\tmotorcycle\nLxlu3NusDCM_0\tbicycle\nLx0IybSITTc_0\tboat\nLx25sZ_GeqA_0\tmotorcycle\nLyOo_B0KLAs_0\tcar\nLyReFCR-oq8_1\tbicycle\nLyReFCR-oq8_0\tbicycle\nLyiT3ute8W0_0\tbird\nLyiT3ute8W0_1\tbird\nLyiT3ute8W0_3\tbird\nLyiT3ute8W0_4\tbird\nLyiT3ute8W0_5\tbird\nLy-uIzZCdn0_1\tbus\nLzMxggGTH1I_0\tmotorcycle\nLzP0t153jKw_0\tskateboard\nLzY_TxIbKpw_0\ttrain\nLzk6uj8FMsE_0\tcat\nLzp-Yej0-7E_1\tbird\nLztNNlg_fXs_0\tknife\nLz0Gxxs0FUE_2\tbus\nL0IXFlnu6Qg_0\tmotorcycle\nL0US3Aiu1q0_0\ttruck\nL0kRKO8zzsI_0\tbird\nL0kRKO8zzsI_3\tbird\nL0kRKO8zzsI_1\tbird\nL1EZ_RVwD8E_0\tcat\nL1LQOPj7NBs_0\ttruck\nL1U2YrjRao0_0\tbear\nL1VgJBGpBz8_0\tbird\nL1iiOGDSByA_0\tmotorcycle\nL19ZzBwAHrU_0\tknife\nL1_86Xd176w_3\tknife\nL2Efv5kJpc0_0\tskateboard\nL2FE5Lr0wnY_3\tbicycle\nL2FE5Lr0wnY_4\tbicycle\njjZl3tMuO6w_0\tdog\njjcoVigCzgg_0\tskateboard\njjk9P9gQq3E_0\tbus\njj-p0K2XoQY_0\tboat\njj_pv9SFrnU_1\tumbrella\njj_pv9SFrnU_0\tumbrella\njkGvuOC8azU_0\tmotorcycle\njkGvuOC8azU_1\tmotorcycle\njkKU7T0wpj4_0\tbus\njkdEq1MRNws_0\tcat\njkkk9vsCYVA_0\tcar\njkqKyvow-ww_1\tskateboard\njkqKyvow-ww_0\tskateboard\njk2gGx6dIWA_0\ttrain\njlA3_oF9j-Q_0\tmotorcycle\njluiJgeyCa4_0\ttruck\njluiJgeyCa4_1\ttruck\njlu4Ry8dDus_0\tcat\njmXmA9egY4s_0\tbird\njmXmA9egY4s_1\tbird\njmeVwD4p83w_0\tumbrella\njm8AZ0aSF0U_0\tmotorcycle\njnD_9KMnzpk_2\tskateboard\njnD_9KMnzpk_1\tskateboard\njnQYikiCbAM_0\tbicycle\njnQgVTaiaXk_0\ttrain\njnSm3vCtu1k_0\tdog\njnu28BEM2j0_0\tbird\njnwQHd-sNW0_0\tcat\njous_VGiSK0_0\tbicycle\njoxEhiwL-qg_1\tskateboard\njpBcdceCHgY_0\tskateboard\njpCdMdRzmuY_0\tcat\njpuFdyVJJwQ_0\tmotorcycle\njpuFdyVJJwQ_1\tmotorcycle\njpyidnScqN
Q_0\tumbrella\njpzKefnhMA4_0\ttrain\njqHtlrHk5Cw_0\tdog\njqO4FvS_v54_0\tboat\njqRXcc7rPaY_0\tcat\njqWXHWqSVX8_0\ttrain\njqu6Gjc1hCE_0\tperson\njq9ZPuTO7Rc_0\tumbrella\njrAyEPgy1LM_1\ttruck\njrLRiCFtlvY_0\tskateboard\njrNGiQLJ0ug_1\ttrain\njrg8oKSN6bk_1\tbird\njrg8oKSN6bk_0\tbird\njsJprPZCPvA_0\tboat\njskm6kDOao0_0\tcat\njslKL8yQ7v4_0\tbird\njslKL8yQ7v4_1\tbird\njsp_sWu7g7Q_1\tbear\njsx0cE948y8_2\ttrain\njtQGgQPHofk_0\tboat\njtWerSK0atA_0\tumbrella\njtqUFmuGnVs_0\tperson\njtx5yVxuLzA_0\tbicycle\njtx5yVxuLzA_2\tbicycle\njuC5lVOX-R8_0\tbear\njuC5lVOX-R8_1\tbear\njuMoEfLbbI4_11\tbicycle\njuUIMSiDGm0_0\tumbrella\njuownJlkGfA_0\ttrain\nju08Y0j4rAI_1\tcar\njvKKm9UbcbE_0\tcat\njvKqk7Yfq5Q_0\ttruck\njvdYM-W5Kmo_2\tbear\njvxjOOQa_JQ_3\ttruck\njwxSjxJVyOc_0\tdog\njxIyftPYPsc_0\tcat\njxIyftPYPsc_1\tcat\njxlDJ0D2Tec_0\tbicycle\njxn5iX8buaE_0\ttruck\nL2XOsdnKegA_0\tdog\nL2bV5Mh6tLM_0\tdog\nL2e6nVyZ33k_0\tcar\nL2gSKheIL48_0\tdog\nL2zsyBTtcqE_0\tbird\nL21bM4j4bEc_0\tmotorcycle\nL21bM4j4bEc_4\tmotorcycle\nL21sWlIIkHA_1\tskateboard\nL28I6_ASmq0_0\tmotorcycle\nL3F2ir5MPj4_3\tskateboard\nL3Q42kZ8Ap8_0\tbus\nL3oyk4iYySM_0\tboat\nL3urWJiuom8_0\tbear\nL32hlxmCYZU_3\tbicycle\nL32hlxmCYZU_6\tbicycle\nL32hlxmCYZU_7\tbicycle\nL32hlxmCYZU_14\tbicycle\nL4NZ3vAx87A_0\tboat\nL4kK9gTKA3Q_2\tbear\nL4w-P2UsvBE_0\tbird\nL5VC4bXm6Kc_0\tdog\nL508o9A8028_0\tbicycle\nL52ZiKJ5NLM_0\ttruck\nL5499EWzDaQ_0\tmotorcycle\nL6QaXTuDftA_0\tbird\nL6vLixMpRZg_1\tdog\nL6vLixMpRZg_0\tperson\nL63p00d7BPY_0\tcar\nL7TR8yCVhN0_0\tcat\nL7ZTQMPeHYo_1\tknife\nL7iHAg6bHw4_0\tbicycle\nL7rQQ4IVPrU_1\tskateboard\nL70Zv9DFAhc_0\tskateboard\nL71JgB-L1mA_0\tmotorcycle\nL779-Nw9GV4_0\tcat\nL780lAoEC2M_0\tgiraffe\nL780lAoEC2M_1\tgiraffe\nL8H_7qqaEOM_1\tmotorcycle\nL8SF7xF6Ucs_8\tbird\nL8h9dw2kYRA_2\tknife\nL9EAUBlNvLU_1\ttruck\nL9LWOPIuvcE_0\ttrain\nL9L-OlYNdL0_6\tknife\nL9Tx4-RNDqo_2\tmotorcycle\nL9Tx4-RNDqo_3\tmotorcycle\nL9Tx4-RNDqo_1\tmotorcycle\nL9Vt1klujtA_0\tdog\nL90g72YGdVA_0\tcat\nL97eqv7bBCE_0\tdog\nL985IUAQ8u8_1\tskateboard\nL-S4CNhlvlM_0\tcat\nL-w35NTF7vA_0\tcar\nL-0JgkugTvw_0\tgiraffe\nL_AcMGC96O8_0\tmotorcycle\nL_ZdaWupJcU_1\tboat\nL_xPWB4viT8_1\tdog\nL_xPWB4viT8_0\tdog\nMAJonEdmXNA_0\ttruck\nMAVqUxAjlbg_0\tskateboard\nMBAPF4RVq7E_0\tcar\nMBLHIupmPNk_2\ttruck\nMBLHIupmPNk_5\ttruck\nMBl4bkFRZUY_2\ttruck\nMBl4bkFRZUY_0\ttruck\nMBuwlS32gjE_0\tdog\nMC8Lal5Lp5Y_0\tcat\nMC-KkFD07Ts_0\tdog\nMDxAuy6D1ks_0\tskateboard\nMD5P0EFFnUQ_1\tskateboard\nMD8RTKTEaM0_1\tmotorcycle\nMEi_ikuUJoQ_0\tskateboard\nME0CETCuaK0_0\tboat\njyY5W5HiWUQ_1\tcat\njyeqCulSuVM_0\ttruck\njy_Dr_R-svo_1\tumbrella\njy_Dr_R-svo_3\tumbrella\njzRWRRcWffo_0\tskateboard\nj0BXwDs11NY_0\ttrain\nj0OALCZbAJQ_0\tbus\nj0ii12pbeag_0\tknife\nj0yk2O6HAHA_0\tbird\nj0_9iwi_dm8_0\tdog\nj1CQLHBLwew_0\tcar\nj1NePJe1agU_0\tbird\nj1XwtnPy1Ik_1\tbear\nj1rU13Z_fxc_0\tbicycle\nj1utZs4pDTc_0\tbicycle\nj10ev-4-0Fg_0\tmotorcycle\nj11_jPnp4Pc_0\tcat\nj2-VEpDwbyo_0\tdog\nj3X6elDpZ-Q_0\tbicycle\nj4K9kM9p16o_1\tbear\nj4Qv6RH4lPk_1\tbird\nj4U8EcQ8K34_0\tumbrella\nj4daTphUuBw_0\tcat\nj4mpJ3QE8VU_1\tcat\nj4ofs57G2Uk_0\tskateboard\nj4rMKhohDps_0\tbicycle\nj4zZbJTAcC4_0\ttrain\nj4zZbJTAcC4_1\ttrain\nj5EP2UNErRE_0\tdog\nj5Evt1HJ2ck_0\tskateboard\nj5ayq3AbImg_2\tbird\nj5uxE5IUOhk_0\tdog\nj6GdrMPrcNU_0\ttrain\nj6P1j6Ed1Hg_0\tboat\nj6Ybo1yk-lE_0\tmotorcycle\nj7v1htyJtdo_1\tboat\nj7v1htyJtdo_2\tboat\nj7xvqf1mrUo_2\tbird\nj707fRdtbEE_0\ttrain\nj8jip_gthjs_0\ttrain\nj8s5sMFYoiM_3\ttrain\nj8s5sMFYoiM_1\ttrain\nj82ZCaABxl8_0\ttruck\nj8-maioFCxo_2\tboat\nj924hdZilyY_0\tcat\nj-MwElKg8Tw_0\tcat\nj-VN0PFvkDg_0\ttrain\nj-a26pZGsKA_5
\tbicycle\nj-r3lQdwYeI_0\tboat\nj-r3lQdwYeI_3\tboat\nj-x8lbwsObQ_0\tmotorcycle\nj-0kVn7sEvQ_0\tmotorcycle\nj-0-IDS-OD4_1\ttruck\nj_DE_vsqSZg_0\tmotorcycle\nj_D7oxUpZqs_0\tbicycle\nj_D7oxUpZqs_1\tbicycle\nj_FCzH1rLDw_0\ttrain\nkABwo7h7ILg_18\tbicycle\nkABwo7h7ILg_13\tbicycle\nkANh1n3sh5M_0\tgiraffe\nkANh1n3sh5M_3\tgiraffe\nkAekmn2pgpc_0\tskateboard\nkAekmn2pgpc_1\tskateboard\nkAhVhIYl-GE_0\tmotorcycle\nkAhVhIYl-GE_1\tmotorcycle\nMFw-_3fTBzA_0\tbicycle\nMF06s9T8iJA_0\tskateboard\nMF06s9T8iJA_1\tskateboard\nMGFx6Irt70E_0\tknife\nMGMJ6ocyKXQ_2\tboat\nMGQw41RhBfc_0\tmotorcycle\nMG9MouhNLjY_1\tknife\nMG96iokcNoY_0\tcar\nMG96iokcNoY_1\tcar\nMHIEOK-O3Q4_1\tbird\nMHT9BbNzNJo_0\tknife\nMHqZCkvaub8_1\tcar\nMHsxwUMk-_s_8\tumbrella\nMIHg2KAYh5c_0\ttrain\nMIHg2KAYh5c_3\ttrain\nMIHg2KAYh5c_1\ttrain\nMIKCpSFDh4M_0\tbear\nMIKCpSFDh4M_1\tbear\nMIKCpSFDh4M_2\tbear\nMIKCpSFDh4M_3\tbear\nMInom2mFpwg_0\tskateboard\nMI2d7Rd8_Zs_9\tbicycle\nMI2d7Rd8_Zs_10\tbicycle\nMI2d7Rd8_Zs_2\tbicycle\nMI2d7Rd8_Zs_4\tbicycle\nMI2d7Rd8_Zs_5\tbicycle\nMJOztUhgARo_1\tbear\nMJvPtT5tzRI_0\tmotorcycle\nMJ3I-JfOG48_0\ttrain\nMJ6b6iOY7CI_0\tcar\nMK2aqzY-UTQ_0\tcat\nMLXY5iff2rU_0\ttruck\nMLZ5bpXr5fk_0\tbicycle\nMLrWgAcIumk_3\tknife\nMLrWgAcIumk_1\tknife\nMLtRUMzqhDk_1\tdog\nMLwCW5HBfWQ_0\tbicycle\nMLwCW5HBfWQ_1\tbicycle\nMLyrsP65yc8_0\tcat\nMMGw177uo60_8\tbicycle\nMMGw177uo60_11\tbicycle\nMMGw177uo60_0\tbicycle\nMMGw177uo60_1\tbicycle\nMMGw177uo60_2\tbicycle\nMMGw177uo60_4\tbicycle\nMMGw177uo60_6\tbicycle\nMMX4my6X-xg_0\tcar\nMMfLN7_khoc_0\tskateboard\nMMwk9bxedYo_1\tbird\nMMxfwNbWaxc_0\tbus\nMMxfwNbWaxc_1\tbus\nMMzNcR3qtX0_0\tknife\nMM9D2A52FM4_0\tcat\nMNBfv2S-yco_0\tdog\nMNDWyaUDfAM_0\ttruck\nMNKwR4IK04k_0\tbus\nMNnYExmY67E_0\tbus\nMNnYExmY67E_3\tbus\nMNuhuq3FP5Q_0\tmotorcycle\nMNuhuq3FP5Q_1\tmotorcycle\nMNuhuq3FP5Q_2\tmotorcycle\nMORtJq8MelU_2\tdog\nMORtJq8MelU_3\tdog\nMORtJq8MelU_0\tdog\nMORtJq8MelU_1\tdog\nMOR6ErlJIp8_0\tgiraffe\nMOcTGHSkER0_0\tcar\nMOgN13g3SzU_1\tmotorcycle\nMOxIwc0MqZ0_1\tcar\nMO5aNU1mc1s_2\tboat\nMPQqmw9gvF0_0\tdog\nMP8ETGMyhnU_0\tdog\nMQAJWDp31ag_0\tcat\nMQimJolkMRI_0\tcat\nMQ5mTW70Ebs_1\ttrain\nMRzphcX41T8_0\tumbrella\nMSWR-YqRwqk_0\tcat\nMSjYJFNM2HU_0\tboat\nMSjYJFNM2HU_3\tboat\nMSonF1662RI_3\tskateboard\nMSp3-aHmNP4_1\ttruck\nMSp3-aHmNP4_2\ttruck\nMSvmSEk-UJ0_0\tbicycle\nMSxdHgV7e6o_0\tcar\nMS7Emoy0Foc_1\tboat\nMTDl42dubw8_0\tbear\nMTr54KYSQBw_0\tperson\nMTvLNcYmHhQ_0\tcar\nMT-VkX2ZUYs_1\tbear\nMT-VkX2ZUYs_2\tbear\nMT_GWiXfC2k_0\tknife\nMUAuC-rgc9Q_0\tdog\nMUPAcFVQjlE_0\tzebra\nkAkZoxVhM3I_4\ttrain\nkAkZoxVhM3I_1\ttrain\nkAkZoxVhM3I_2\ttrain\nkAkZoxVhM3I_3\ttrain\nkAmtMpdj5F8_0\tdog\nkAsA28fm6YM_0\tdog\nkBZZqBNk68M_0\tcat\nkBg_1xTx4Dw_0\tcar\nkBsc-5sxeTw_1\tknife\nkBsc-5sxeTw_3\tknife\nkCWupS0PNHk_0\tcar\nkC0y-y4Y9zQ_0\tknife\nkC4_7iM24Uw_0\ttruck\nkC7fdR62Lto_0\tperson\nkDU_m-Zhi-I_2\tbicycle\nkDsGVRUxg9s_3\tbicycle\nkDsGVRUxg9s_4\tbicycle\nkDvYbh9_fvY_0\tdog\nkDwVR3eWyA4_0\ttrain\nkD0shq5M7Xw_1\tskateboard\nkD_zeOiIsTM_0\ttrain\nkEw-F2KrxLQ_0\ttrain\nkE3cb1gtxpM_0\tperson\nkFihVzuPlGI_0\ttruck\nkF9uWuyPP8g_0\tskateboard\nkGB7yQn8jpQ_0\tbicycle\nkGkvBOa6Ao0_0\tmotorcycle\nkHCbADkGOsE_0\tskateboard\nkHEfe-TDtS0_0\tmotorcycle\nkHkZCi873e4_1\tmotorcycle\nkH2Vmad_zzc_0\ttrain\nkH9YVTvwmpM_0\tbicycle\nkIGuIdHDwIw_0\ttruck\nkIasEX-cJb8_0\tcat\nkIqavvGxvh0_0\tbird\nkIyZZm3zk5M_0\ttrain\nkIyZZm3zk5M_1\ttrain\nkIyZZm3zk5M_2\ttrain\nkI14RuB6ab4_1\tboat\nkI9E5m5l4Uo_2\tbird\nkJFQOFR0l0w_0\tmotorcycle\nkJJuX1cGFYg_0\ttruck\nkJJuX1cGFYg_3\ttruck\nkJR59i4f5HA_0\ttrain\nkJR59i4f5HA_2\ttrain\nkJR59i4f5HA_4\ttrain\nkJR59i4f
5HA_1\ttrain\nkJUDpKKsNQ8_3\tboat\nkJYZ-XE8ZEQ_0\tcat\nkJuBcbws_zM_2\tcar\nkJuuymSuBLA_3\tboat\nkJ2eEJ07dR8_0\tcat\nkJ4rlYx4HDQ_0\tmotorcycle\nkKJAqMzsMHo_0\ttrain\nkKOKJLrWCro_0\tmotorcycle\nkKSyjiL5foc_0\tskateboard\nkKTvKA8cd-c_0\tbird\nkKTvKA8cd-c_2\tbird\nkKeaUBfwuG4_0\tdog\nkKfiOXnjX0E_1\tbird\nkKtawdL8xDU_0\tumbrella\nkLL_YMFYoQw_1\tcar\nkLL_YMFYoQw_3\tcar\nkLgtAl-xGI0_0\tbus\nkL3r_JUstGU_0\tbus\nkL7sfsNuNVw_0\tgiraffe\nkL7sfsNuNVw_1\tgiraffe\nkL777xHctO4_0\ttruck\nkMMe5H6THlA_1\tboat\nkMuQLvHlZM8_1\tskateboard\nkMuQLvHlZM8_2\tskateboard\nkM3Ml3gsG1g_0\tboat\nkM3yM5qONQc_0\tperson\nkNNLDq_wPc4_0\tdog\nkNQYLVUS5ag_1\ttrain\nkNQYLVUS5ag_0\ttrain\nkNTqRDpy6Jg_0\tbicycle\nkNVh6uD0bMs_0\tcar\nkNlVF3ROFLs_0\tdog\nkOOlwQ0DrQU_1\tcat\nkOjjXFA4JLo_0\tbicycle\nkOksVTxs6S0_0\ttruck\nkPEf41FB6w4_2\tbear\nkPH88UubFMg_0\tbird\nkPLn0enV644_0\tmotorcycle\nkPPya6oadAk_0\ttruck\nkPSuwjI94G8_1\tbus\nkP4KkSrY81s_0\tmotorcycle\nkP4KkSrY81s_1\tmotorcycle\nkP7xV2Efw9c_0\tcar\nkQBqt_vvAUc_0\ttruck\nkQHn-cRLiDk_1\tcat\nMVG65Om9g1k_0\tcat\nMVG65Om9g1k_1\tcat\nMVPQRjLFz6E_0\tboat\nMVRf770zXL0_0\tbus\nMVZinfPagDI_0\tbicycle\nMVhsNNsDFWo_0\tknife\nMVxJBHYueGI_0\tboat\nMVxJBHYueGI_1\tboat\nMV5174rsbEY_0\tbus\nMV-CnX4Gf7A_0\ttruck\nMWGRoXhqRgQ_0\tboat\nMW78cTfzq0c_0\tcat\nMXGO41E37k0_1\ttrain\nMXVOVBJlezc_1\ttrain\nMXW5J8Fq8aw_0\tbicycle\nMYW0loI0g8M_0\tdog\nMZJtj9J3P2w_0\tknife\nMZU8lpmJhxg_0\tbus\nMZaYMDyaATI_5\tskateboard\nMZaYMDyaATI_0\tskateboard\nMZfxKiKSuFU_0\ttrain\nMZfxKiKSuFU_1\ttrain\nMZfxKiKSuFU_2\ttrain\nMZr4cAj7j28_0\tmotorcycle\nMZtheeh470g_0\tcar\nMZxz9C8nBdA_0\tbus\nMZ4A6ItKCn0_2\tknife\nMaApAnpbJwE_0\tmotorcycle\nMaNGPVuxXqo_0\tbicycle\nMaUrOzoC1qE_0\tmotorcycle\nMaV9LY8Yf7c_1\tskateboard\nMaeWb_sv_KU_9\tbus\nMaeWb_sv_KU_10\tbus\nMaeWb_sv_KU_1\tbus\nMaeWb_sv_KU_7\tbus\nMaeWb_sv_KU_8\tbus\nMalEpweFuSM_0\tmotorcycle\nMarA93dcZrA_0\ttrain\nMbCJqlLjY_o_2\tknife\nMbK94OERQUw_1\tbicycle\nMbK-28LCQ1g_0\tboat\nMcV3_FGrKNw_1\tboat\nMccB4r2uPG8_2\tbus\nMctKaOAWQ2g_0\tskateboard\nMc_qufFsRZQ_0\ttrain\nMdP8tqMgy-c_0\tboat\nMdcfoMlgxyI_0\tboat\nMdcfoMlgxyI_7\tboat\nMdcfoMlgxyI_6\tboat\nMeGIovLiBUs_0\tcat\nMeNT1BqRoSk_0\tskateboard\nMeR6T05EfeY_4\ttrain\nMeR6T05EfeY_5\ttrain\nMedPaDPXclw_0\ttrain\nMe6y3gzfhGA_1\tcat\nMe7wQZBbtkw_1\ttruck\nMe9X6zA_WSI_2\tcar\nMe9X6zA_WSI_3\tcar\nMe9X6zA_WSI_0\tcar\nMe9X6zA_WSI_1\tcar\nMfEA9RwWf8s_1\tcar\nMfKpwmhyptQ_6\tknife\nMfQe_WreL6U_0\tcat\nMfVLnZLXmvw_0\tboat\nMfYYHsKxgn0_0\tcat\nMfYYHsKxgn0_1\tcat\nMfaYiIkR0D8_10\tdog\nMfe3mmOd7co_0\tskateboard\nMflUSzEyPQA_0\tdog\nMf1njOx66R4_0\tknife\nMf1njOx66R4_1\tknife\nMgR0ON5CM-E_1\tdog\nMgR0ON5CM-E_0\tdog\nMg7Ve43Durw_0\tzebra\nMg9oRrgGKv0_0\tskateboard\nMhFgGvNvIPU_1\tmotorcycle\nMhOdsv74XK4_0\tbicycle\nMhPIl5JGvTQ_2\tdog\nMhdkxaMWwb4_0\tdog\nMhfYe7VajGQ_1\ttrain\nMijD0ZqMorA_3\tbear\nMijD0ZqMorA_4\tbear\nMixmJ2mkl18_5\tmotorcycle\nkQhvp8FqRRI_0\tmotorcycle\nkQ0WAbN3uvE_2\tbicycle\nkQ0qYUhkgXE_0\tzebra\nkQ0qYUhkgXE_2\tzebra\nkQ27FYyayCg_0\tumbrella\nkQ9C8T343Bg_0\tumbrella\nkQ97WPM3Qw4_0\tskateboard\nkROqNf1kadg_0\tbicycle\nkRWaghM9Bng_4\tknife\nkRYejzNzz-k_0\tbird\nkRYejzNzz-k_2\tbird\nkRYejzNzz-k_5\tbird\nkRtAJBnrb0o_0\tcat\nkSnUCbQ4k4c_1\tgiraffe\nkSxPGqWydhQ_0\tcar\nkSxPGqWydhQ_1\tcar\nkTBAPJCn4AI_1\tcar\nkTNOY900Hbk_0\tcat\nkTVuc-2UjPI_0\tumbrella\nkTbS3XR-Xhc_7\tbear\nkTdT3aGZVmo_0\ttrain\nkTm1R3GaJzg_1\tumbrella\nkTyJyGREDR8_0\tboat\nkUX28ytNCwc_0\tcar\nkUcErGH2rjs_0\tdog\nkU8IsLpAlXg_0\tmotorcycle\nkU8IsLpAlXg_1\tmotorcycle\nkVCic6S6ITo_0\tknife\nkVmUxntjOEk_1\tskateboard\nkVxw5-K9zZk_0\tmotorcycle\nkVyJVrTWLwo_0\tcat\nk
VzNGKIHA44_5\tgiraffe\nkVzNGKIHA44_2\tgiraffe\nkVzNGKIHA44_3\tgiraffe\nkVzNGKIHA44_4\tgiraffe\nkWHw0OdDAes_0\tboat\nkWHw0OdDAes_1\tboat\nkWo2PlJB2Nc_0\tmotorcycle\nkWxJX4oVzMo_3\ttrain\nkXKTNNclCns_0\tdog\nkXOYPLKJDdI_0\tknife\nkXOYPLKJDdI_2\tknife\nkXVHu_jzgek_0\tknife\nkXj4YpwnHVs_0\tcar\nkXliGVQWoAE_0\tmotorcycle\nkXwzICrP2CA_1\tdog\nkX-rqtb_n5w_0\tboat\nkYAGyQOUOAw_5\ttrain\nkYAGyQOUOAw_6\ttrain\nkYAGyQOUOAw_9\ttrain\nkYRvBDpWk_0_0\tskateboard\nkYd1dxkZ7Q8_0\tdog\nkYh89aM71_c_0\tbicycle\nkYie2clM8Jg_0\tmotorcycle\nkYjiRbFWFuE_0\tumbrella\nkYwzLhWdjYc_0\tbird\nkY1mYWiL24M_2\ttrain\nkY1mYWiL24M_11\ttrain\nkY1mYWiL24M_0\ttrain\nkY1mYWiL24M_1\ttrain\nkY1mYWiL24M_3\ttrain\nkY1mYWiL24M_4\ttrain\nkY1mYWiL24M_5\ttrain\nkY9lrTOcuxY_1\tknife\nkZNZbhh6P3g_0\tcat\nkZrG7mMww7I_0\ttruck\nkZrgKUm3pUs_0\tboat\nkZ1L8FBg_P4_0\tcat\nkZ3A6bY6RHo_0\tmotorcycle\nkaKhLfdT3z4_0\ttruck\nkaNpALWiNSQ_0\tcar\nkadq7fGv_zg_1\tmotorcycle\nkao854-T3zw_0\tbear\nkaxFMN_9CfM_0\tbear\nkaxFMN_9CfM_1\tbear\nkazbC0JbsUY_1\tboat\nkazbC0JbsUY_0\tboat\nka1HMN9Mxho_1\tcar\nka8YGdEujsQ_0\tmotorcycle\nkbEenS2dRTc_0\tcat\nkbF3h-YQ7m8_0\tskateboard\nkbuWFd9Vthc_1\tumbrella\nkb2LQHXd2zk_0\tcar\nkb-A8wbnvQg_0\tbicycle\nkcBIvi6fhUo_1\tbus\nkcTwHA-N1cg_0\tbird\nkcip1032v3E_1\tskateboard\nkco1LYK4z_w_0\tperson\nkdIBzH30zKA_0\tdog\nkdIBzH30zKA_1\tdog\nkdP5V_afg7E_0\tskateboard\nkdRLqCUbWts_0\tbird\nkdUrK5I-cNo_0\tcar\nkdU-XJEwZsQ_1\tbird\nkd3DLyL1JMw_0\tbicycle\nkeGrBBWcGE4_1\tbus\nkeGrBBWcGE4_0\tbus\nkePvCa53REA_0\tgiraffe\nkePvCa53REA_1\tgiraffe\nkea2UOTXlhs_0\tcat\nkea4eM8Blz8_0\tdog\nketFGT3U5D0_0\tbicycle\nkexKkPOprms_0\tcat\nke3yWKL94kE_0\tskateboard\nMi4HJYsPBPk_0\tskateboard\nMjGAi_5coGY_0\tbicycle\nMjGAi_5coGY_7\tbicycle\nMjGAi_5coGY_5\tbicycle\nMjGAi_5coGY_6\tbicycle\nMjxkMQcgRss_1\tcar\nMkF-jfvzRJU_0\tbus\nMkGLvilh-P4_2\tdog\nMkIK8kdqU2I_0\tmotorcycle\nMkQzgwai9zk_0\tzebra\nMkYtT0L4_3A_0\ttruck\nMktDGOflp1w_0\ttruck\nMktDGOflp1w_1\ttruck\nMk82qF_xfzI_1\tmotorcycle\nMk9tGnGNkkE_0\tbird\nMlLHwysBUiY_0\tknife\nMlVr20XSJMY_1\tdog\nMmQIeOEPu9g_2\tskateboard\nMmQIeOEPu9g_0\tskateboard\nMmQIeOEPu9g_1\tskateboard\nMnE1EjTWbTA_2\tskateboard\nMnGGl7pusvI_0\tmotorcycle\nMnGGl7pusvI_1\tmotorcycle\nMnd7aZxjoEg_0\tbird\nMnvqegl_fME_1\tcar\nMnvqegl_fME_3\tcar\nMnvqegl_fME_8\tcar\nMnyV8-43fRY_0\tbicycle\nMn2Nul_w66I_1\tmotorcycle\nMn2Nul_w66I_3\tmotorcycle\nMn2_fRbVluE_0\tknife\nMoHDZuwBO4E_0\tcat\nMog-qUf6B1c_1\tcat\nMo6Q7lGmAw0_0\tskateboard\nMp42DoVxbWY_0\tmotorcycle\nMp91b_edytM_1\tdog\nMp91b_edytM_0\tdog\nMqAlMygAZto_0\tcat\nMqPKFAIxZpE_0\tdog\nMqlxERdGjdg_0\tmotorcycle\nMqvfJOEW4oE_0\tcat\nMrsXy6DL4DA_0\ttruck\nMrssB6CtGrM_1\tgiraffe\nMrvbaDZm6gY_7\tknife\nMrvbaDZm6gY_8\tknife\nMrwi7WoPJSs_0\tcat\nMrxYHk0ghfM_0\tboat\nMr1A4et0ESg_0\tbird\nMsFvL8N-3ds_0\tumbrella\nMsQJkEOyREY_0\tbicycle\nMsY_zz2OeKU_0\tmotorcycle\nMs8x8pjN7Fw_1\tbicycle\nMs8x8pjN7Fw_0\tbicycle\nMtIjkcXspsU_2\tmotorcycle\nMtfpgvzOlW8_0\tperson\nMtiQjguNpH0_2\tboat\nMtiQjguNpH0_0\tboat\nMt_4bFjyYuU_0\tcat\nMuLk_dOouJY_0\tknife\nMuOG8PoK21o_0\tbus\nMuVtFYK_nH0_0\tbird\nMuYixry0epc_2\tmotorcycle\nMuYixry0epc_0\tmotorcycle\nMuYixry0epc_1\tmotorcycle\nMu51W-lkSEc_0\tcar\nMvIYOnRinSo_0\tbicycle\nMvxRpbl0BBk_0\tbus\nMv6v4w7VDFk_1\tcar\nMv_9l8fWiP4_0\ttruck\nMwAM4o2GCuM_0\tcar\nMwHQb6ZryRA_0\tskateboard\nMwIKOqSMRwk_0\tcat\nMwLnGflxcqc_2\tzebra\nMwNsM6f6fNY_3\tbicycle\nMwNsM6f6fNY_5\tbicycle\nMwN7iYEim6k_0\tbird\nMwW14_GuwLg_1\tbus\nMwdX3PbgC34_0\tgiraffe\nMwjq136uMe0_0\tcar\nkfInF5cUU98_0\tmotorcycle\nkfInF5cUU98_1\tmotorcycle\nkfLnoXlGBvU_0\tdog\nkfhspLhCU5Y_0\tcat\nkgDOVDDZ9eQ_
0\tcat\nkgONObiF8Hg_0\tcat\nkgT-NsRkv1c_0\tcar\nkgco3sZv7BY_0\tcat\nkgi1KajW_ZU_0\ttruck\nkglv-2P5ow4_4\tbus\nkgrFzgXO9Q8_0\tskateboard\nkgsyAMgjuL4_0\tbus\nkgxQ03-tSek_0\tbear\nkg6RFppR4MM_0\tknife\nkhUURgtFYBY_1\tbicycle\nkhUURgtFYBY_0\tbicycle\nkhVST8w3Zzw_0\tskateboard\nkhlqzkfBCfc_0\tcat\nkhpJlBWPPr4_1\tcat\nkimZApwsJEY_5\tbicycle\nkimZApwsJEY_6\tbicycle\nkimZApwsJEY_0\tbicycle\nkimZApwsJEY_2\tbicycle\nkimZApwsJEY_3\tbicycle\nkimZApwsJEY_4\tbicycle\nkizrM5CZzPk_0\ttruck\nkjBdTAkRijw_2\tbus\nkjM0hJl-L44_0\tskateboard\nkjtOW8OAIeY_0\tmotorcycle\nkkC5lqQb0t0_0\tumbrella\nkkR7pnou7hc_0\tknife\nkkeBMT1ixs4_0\tboat\nkkkc9xwKGp8_1\tskateboard\nkkvU3dvMkSI_0\ttruck\nkk4KuU5X6Lk_0\tcar\nklGHWdeD-qw_2\tbear\nkldR5yJFeOo_1\tbicycle\nkldR5yJFeOo_3\tbicycle\nklgANznh5x0_1\tbicycle\nkl2buVrYbX8_0\tskateboard\nkl3_w8_h6ts_0\tskateboard\nkl4RYG6OCIY_2\tknife\nkmZFQEGncaI_2\tbicycle\nkmZFQEGncaI_0\tbicycle\nkmllekf2nKc_0\tcat\nkmoaGUqL6bI_0\tskateboard\nkmvCtYXRUhM_0\ttruck\nkm7aR2fTJlA_2\tknife\nkm-3wnNLVYY_0\tboat\nknDRZU9u-Lw_1\tboat\nknVcB-GeINU_0\tcar\nknqi3OAHNO8_0\tboat\nkoOxoaMnXZc_0\tskateboard\nkoOxoaMnXZc_1\tskateboard\nkphV7yVMBOQ_0\tbicycle\nkphV7yVMBOQ_2\tbicycle\nkqDbbFz-XQQ_0\tbird\nkqDxyoQKFfE_0\tcat\nkqVaHPJzEro_0\tdog\nkq4tOnX3m2Y_3\tbus\nkq4tOnX3m2Y_0\ttruck\nkrSKV36ocSs_0\tbear\nkrvyahlS1z4_0\tbus\nkryv5em-VHk_2\tbear\nksB15ebtJeM_0\tumbrella\nksCempldLAA_0\tskateboard\nksCempldLAA_1\tskateboard\nksCjOk8r4rU_0\tperson\nksSVtTRXRyI_1\tbicycle\nksk5uCVKU7Y_0\tskateboard\nksxTUcFqlZw_0\tknife\nksx219-g47A_0\tcat\nktHzii2XMh4_0\tboat\nktPLKpH7-mk_5\tdog\nktcodoKjIvE_3\tbicycle\nktcodoKjIvE_4\tbicycle\nktcodoKjIvE_5\tbicycle\nMwtWyQiagOk_0\tbicycle\nMwvYg837DFU_0\tmotorcycle\nMxEjkI5fRh0_0\tdog\nMxHBWltYQX0_0\tboat\nMxKuZbSiZ4s_0\tskateboard\nMxK1dXmYQU8_0\tknife\nMxr-1toRi3s_0\tskateboard\nMyS7UVUc55M_0\tcar\nMybir4gfQaU_3\tbird\nMzB160hQlFE_9\tgiraffe\nMzB160hQlFE_2\tgiraffe\nMzB160hQlFE_4\tgiraffe\nMzB160hQlFE_5\tgiraffe\nMzB160hQlFE_6\tgiraffe\nMzB160hQlFE_7\tgiraffe\nMz9ZTHPYJxk_0\tdog\nM0Ga521uzoA_0\tdog\nM0qQQArQdTU_0\tbird\nM088XJeXBS0_0\tcat\nM1UsEMPrCc4_0\tknife\nM1cuEQppjNk_0\tbus\nM1p1DBTuqmk_3\tbird\nM1p1DBTuqmk_1\tbird\nM1xxFVktlzw_1\tbird\nM1zDeqozcU4_1\tbus\nM2R_9l38IUQ_0\tbus\nM2uSqd8ohUk_0\tbus\nM3CUpLmpRBo_0\tcat\nM3OhLKUgQho_0\tcat\nM3P38sLk0pc_0\tdog\nM3tK5YBjyKI_0\ttruck\nM3tK5YBjyKI_1\ttruck\nM3tK5YBjyKI_2\ttruck\nM4CENhQ5vWo_0\tcat\nM4Hqq89bZiE_1\tdog\nM40QOQPocV4_1\tcar\nM45MyaeogPU_0\tcar\nM5BEqJFfJYw_0\tskateboard\nM5NRM7UQv5c_0\tcat\nM5bLnqKDa1U_0\tbear\nM5kj9SEKNAo_0\tbus\nM6POMFHs-ec_0\tbus\nM6bin6X9FSI_0\tknife\nM6eRY9q89aQ_2\ttruck\nM6tXmkLy-2Y_1\tbird\nM7465rUWBzY_1\tbicycle\nM8Lhm-CgqH4_0\tcat\nM8cFdveIy4g_0\tcat\nM8drJLCDOL8_0\tcat\nM8ea7gWeDQ0_0\tbird\nM8f0VhN1ZnY_0\tumbrella\nM8i-DGTEw9M_3\tskateboard\nM8i-DGTEw9M_1\tskateboard\nM8sMZ15CLIU_0\tskateboard\nM9McwXGtZnI_0\tcat\nM9QtHKxypyI_1\tknife\nM9UrZSSK1MA_2\tmotorcycle\nM9eiVambl5s_1\tdog\nkuRfhOqyXeY_0\tumbrella\nkuzyHmE3SI0_1\tknife\nku68PhgE8bk_0\tbird\nku7gA5ZLk1Q_0\tcat\nkvFSzJHIsVg_1\tknife\nkwDNLBoEQq8_0\tskateboard\nkwDX0_2B3A0_0\tumbrella\nkwGGXvXtsjI_0\ttruck\nkwY370WQYUg_0\tcar\nkwbt-wHLPkY_1\tcar\nkwlcEg9G1bE_0\tknife\nkwsp30ykR4U_0\tboat\nkxeSYfuQl-I_0\tbird\nkx1bCqhLcbY_0\tbus\nkx5tIvM-9dE_0\tknife\nkyAEyX8zMWQ_0\ttruck\nkyPXCwNh7Rg_0\tcat\nkyW_f8sv5iw_1\tgiraffe\nkye1Q_k-_Gc_0\tbicycle\nky1FAcaT3UE_0\tdog\nky6uivneqIg_0\tbird\nkzblQQcpTdk_0\tskateboard\nkzblQQcpTdk_1\tskateboard\nkzfxn1c7_xc_10\tbicycle\nkzg7y0rERTY_0\tbicycle\nkzi3zDJR9Bc_0\tdog\nkzpJkBQxgE0_1\tbicycle\nkzp3UEwOkJA
_0\tknife\nkzw5a8z9cXs_0\tbird\nkz6HYpF3pLo_0\tdog\nk0cUZwgJzB4_0\tumbrella\nk0uDHQea9sg_1\tdog\nk00mpKYHsuU_0\tskateboard\nk1F_TFA3Bbk_0\tbicycle\nk1LrJEfFKag_0\tmotorcycle\nk1NVg8uaPE4_1\tskateboard\nk1Q5wms4euk_0\tbird\nk1TOwPACsvY_2\tgiraffe\nk1TOwPACsvY_3\tgiraffe\nk1vz1ZSBSoo_0\tbicycle\nk2O0XiVn5kw_0\tskateboard\nk2QiX8c3t50_0\tbird\nk2SEBRgras8_3\tcar\nk2Z0W54JwB4_0\tskateboard\nk2bQG12smw0_0\tcat\nk2imYphEfo0_0\tcar\nk2ocqQxARpQ_0\tskateboard\nk2yx7C__3wY_1\tcat\nk3HKP8CV3CY_0\tbus\nk3LnBcn5zlU_0\tboat\nk3QuANDFgVQ_2\tboat\nk3QuANDFgVQ_3\tboat\nk3QuANDFgVQ_5\tboat\nk3fZgTTMj1g_0\tgiraffe\nk3fZgTTMj1g_1\tgiraffe\nk3im7HEvSCI_1\tbear\nk4D-Ql4Fg7c_1\tbird\nk4PWQfz5NGo_0\tmotorcycle\nk4U1AP6KV4E_1\tskateboard\nk4c6D3ZsdL4_0\ttruck\nk5Pp6BYXono_3\tbear\nk5R3cUyyyWo_0\tcar\nk5nvWBLlS2c_1\tboat\nk5nvWBLlS2c_2\tboat\nk5vlZTySXDk_0\tknife\nk5yJqWnvZzg_1\tbus\nk5yyV32-nOM_0\tmotorcycle\nk5yyV32-nOM_2\tmotorcycle\nk55nlQZwGz0_1\tboat\nk57rVPEq54k_1\tbear\nk57rVPEq54k_2\tbear\nk6Bwd6af64Y_2\tbear\nk6gc4du1FqU_0\ttruck\nk6l0hwjaeMA_0\tmotorcycle\nk6l0hwjaeMA_1\tmotorcycle\nk6l0hwjaeMA_2\tmotorcycle\nk60P5osD0rU_0\tbus\nk64DU45ej5M_6\tcar\nk64DU45ej5M_0\tcar\nk64DU45ej5M_1\tcar\nk64DU45ej5M_2\tcar\nk64DU45ej5M_3\tcar\nk64DU45ej5M_5\tcar\nk640Wtpq-mU_3\tumbrella\nk640Wtpq-mU_0\tumbrella\nk640Wtpq-mU_1\tumbrella\nk7TCyTff1aM_0\ttruck\nk7uTiiG-Ez0_0\tbus\nM-8Zbj9mU9U_0\tboat\nM_miIFgy1Ro_0\tbear\nNAGKrEjU7Sk_3\tbird\nNAGKrEjU7Sk_2\tbird\nNAkFaQBgOvo_0\ttruck\nNA9hxGtSLCM_0\tbird\nNA_DgxP18c4_2\tmotorcycle\nNBE97NAHACk_0\tgiraffe\nNBdhmPgSS2o_1\tmotorcycle\nNCNgKQCU8BM_1\tbird\nNCP6Cna8jtY_0\tskateboard\nNCQ5340WhY8_0\tcar\nNCSygygs2Dw_0\tskateboard\nNCWp95If4uM_0\tmotorcycle\nNCazYWutlOc_0\tboat\nNCoJmkRt2nE_0\tbicycle\nNDUhlmH9Rz4_0\tcat\nNDYT9jTE54Q_0\tbus\nNDYT9jTE54Q_1\tbus\nND_GyhH6zgI_0\tmotorcycle\nNEQIR06VuP4_1\tgiraffe\nNEQOLn6QBuE_8\tbird\nNESQ70PhJU0_1\tboat\nNElB9jKqhLc_0\tdog\nNFjb4XxSoHI_0\tskateboard\nNFye-cUktCg_0\tbicycle\nNFz_zzAU_Hc_2\tskateboard\nNFz_zzAU_Hc_0\tskateboard\nNFz_zzAU_Hc_1\tskateboard\nNF_o01qBrtI_0\tskateboard\nNF_o01qBrtI_1\tskateboard\nNGCjiEfG4C8_0\tskateboard\nNGM0enFRa7E_0\tcar\nNGO_7sJEeyk_0\tbus\nNGRBYn2OatE_0\tmotorcycle\nNGU-5KGKEJ0_0\tbear\nNGmJtkXyJpc_0\tcat\nNGmKyRRNL_E_0\tbird\nNGw5-auup1k_0\tcar\nNG7FgzWn8Gw_1\tgiraffe\nNG9SIDqXvic_0\tknife\nNHlayOfSZJc_0\tdog\nNHlsNDcNZqU_0\tcat\nNHmxckr22ws_0\tskateboard\nNIPnaoHgzdU_0\tbird\nNIPnaoHgzdU_1\tbird\nNIPnaoHgzdU_2\tbird\nNIvYcbJIYdA_0\tcat\nNI_YQKOQEvM_1\tbird\nNJeNAw2RnNc_0\tbus\nNJeNAw2RnNc_1\tbus\nNJeNAw2RnNc_3\tbus\nNJeNAw2RnNc_4\tbus\nNJ0O48Pkn2k_0\tbird\nNJ9DpLHaGl8_0\tskateboard\nNKLemqoJ_hA_0\tcat\nNK4942wyYgk_0\tbus\nNLKK4VUbuuI_5\tbear\nNLp8voZylqM_1\tknife\nNLsGPrwnRug_1\tbus\nNLsGPrwnRug_2\tbus\nNL3CG8KGwis_3\tgiraffe\nNL5j52SH-yQ_0\tbus\nNL9o4JgV25A_0\tdog\nNMJB2K_UOLc_0\tdog\nNMJLv-oYyNc_1\ttruck\nNMJLv-oYyNc_0\ttruck\nNMecCV-gtK8_1\tdog\nNM7OVTITkaA_0\tcat\nNNCjf9Qu2RI_0\tbear\nNNHOtBx0FOY_0\tmotorcycle\nNNkLZRrMEv4_6\tboat\nNNl4nD5_b_o_0\tskateboard\nk8NHRbiB2Dc_0\tdog\nk8OEoDpqSLk_0\ttruck\nk857sWPtmcs_0\tcat\nk9BuU6A21DQ_0\tskateboard\nk9HxprAZods_0\tumbrella\nk9KmR4MNI7o_0\tcat\nk9KtLV0IMgI_0\tdog\nk9PCp-8PFZ0_0\tdog\nk9PX9l8Fnlw_8\tbus\nk9PX9l8Fnlw_0\tbus\nk9PX9l8Fnlw_2\tbus\nk9PX9l8Fnlw_4\tbus\nk9PX9l8Fnlw_5\tbus\nk9VDPqCbqj0_0\tbear\nk9VVUD9wVxk_1\tboat\nk9zLR7VKKpE_0\tskateboard\nk9-PLHxxGHc_0\tcar\nk-DOe-pD_MY_0\tdog\nk-Nl-39bZnw_1\tskateboard\nk-SqR4BEw3s_4\tmotorcycle\nk-SqR4BEw3s_1\tmotorcycle\nk-izgq4Wj4E_0\tdog\nk-izgq4Wj4E_1\tdog\nk_X3oj841SQ_1\tmotorcycle\nk_e_YVhclfg_4\t
truck\nk_e_YVhclfg_3\ttruck\nk_iI2BJQpqo_0\tcat\nk_jXopyxdo0_1\tboat\nk_sLp7QKSu8_0\tboat\nk_tkXRmI_O0_1\tskateboard\nk_tkXRmI_O0_0\tskateboard\nk_vnzrtDfAw_1\tcat\nk_5e1d-vpBU_3\tumbrella\nk_5e1d-vpBU_4\tumbrella\nlAA5eXeYwpo_0\tcat\nlAFonTk_uSA_1\tbear\nlAI9mfwKMM8_1\tdog\nlAQxdRz4PlQ_0\tbear\nlA3btp7QIxg_0\tbus\nlBH0KOGRswc_0\tcar\nlBXWSN3ciPY_0\tmotorcycle\nlBsOiAR5dAk_2\tbird\nlBsOiAR5dAk_3\tbird\nlBsOiAR5dAk_4\tbird\nlBsOiAR5dAk_7\tbird\nlBsOiAR5dAk_8\tbird\nlByHH7yvxpA_0\tboat\nlB7j8Z4gGtQ_0\tcar\nlB_bnqdnexA_5\tbird\nlB_bnqdnexA_1\tbird\nlB_bnqdnexA_4\tbird\nlCYwepuY9qY_0\ttruck\nlCZry6FRpsk_0\tbicycle\nlCf6uL_GkYw_2\tbear\nlC0yidNH6B8_2\tbear\nlC4BoFWvHs4_3\tbear\nlDLYtKqlr5M_0\tbus\nlDf9b9Kr-24_1\ttruck\nlDgzFjqokik_0\tboat\nlDqk6pRbY3M_0\tbus\nlDybC3N70so_0\tcar\nlD63JOjqTDg_5\tbear\nlD63JOjqTDg_9\tbear\nlD63JOjqTDg_10\tbear\nlD63JOjqTDg_0\tbear\nlEG4DGADyEU_0\tbird\nlEIbERGmlJw_0\tumbrella\nlEWOScSt-Ks_0\tdog\nlEaMfPfi9wI_0\ttruck\nlEwJRP_FRW0_0\tdog\nlFYONMOuW_o_0\ttruck\nlFqrTC4j9AU_0\tcat\nlF3vWAJRnek_0\tmotorcycle\nlGPyv8wlqaw_1\tknife\nlGaQV9YhOac_0\tmotorcycle\nlGrVM91Cav8_0\tperson\nlG5xlt4odEs_0\ttruck\nlHKKhuJtJ9A_0\tknife\nlHXHAD73KC4_0\tmotorcycle\nlHX5VdjDPMg_0\tknife\nlHuiaqmISAM_0\tmotorcycle\nlHyHQQF-8K0_0\tcar\nlIE0SbW_gCY_0\tdog\nlIH_in2H5ds_0\tknife\nlIrvgqkirS4_0\tcar\nlIrvgqkirS4_1\tcar\nlI6hnnAL_54_1\tskateboard\nlI7VzYQQ8DY_1\tbus\nlJBeZTzXuSk_0\tumbrella\nlJJU-pzIbgs_0\tboat\nlJKxeHgRugQ_0\tbicycle\nlJa2bLMFljk_0\tknife\nlJa2bLMFljk_1\tknife\nlJa2bLMFljk_2\tknife\nlKC5LtWPL6s_0\tboat\nlKEgqjR4HeU_0\tbicycle\nlKEgqjR4HeU_1\tbicycle\nlKJZ4AYoO9g_0\tcar\nlKJZ4AYoO9g_1\tcar\nlKJZ4AYoO9g_3\tcar\nlKJZ4AYoO9g_4\tcar\nlKJZ4AYoO9g_5\tcar\nlKJZ4AYoO9g_6\tcar\nlKJZ4AYoO9g_7\tcar\nNOEix5l-1TE_1\tbear\nNOVqPOoUWiM_2\tbear\nNOmc38WuhVA_1\tzebra\nNOmc38WuhVA_2\tzebra\nNPX9qxaZXGQ_1\tboat\nNPc_EhpqV9I_0\tcat\nNPlhHkKnD-o_3\tbird\nNPlhHkKnD-o_1\tbird\nNPnIcXU4TO4_0\ttruck\nNPnJoNuZw64_0\tbicycle\nNP2YBNp1eMo_0\tbus\nNP8MrtR7UMQ_0\tskateboard\nNQRWmK2DAwo_1\tskateboard\nNQ7XVf2jPCk_1\tbear\nNRBtrgg-ACI_0\tumbrella\nNRGqiXyM4H0_0\tbus\nNRRxMVw0Fv0_0\tumbrella\nNRV62o4HAaI_0\tdog\nNRkeO8cWvlY_0\tskateboard\nNSEdAs2W7io_1\tbus\nNSrCO0JVjrQ_0\tbus\nNS6Z7neTE58_2\tbear\nNS7vapDr5vE_0\tdog\nNTJsuoSzIX0_8\tboat\nNTi-7LowE5E_4\tbicycle\nNTi-7LowE5E_0\tbicycle\nNTurL251ndw_0\tbird\nNTyAmrmpD-w_0\tcat\nNUOXJlGoyJk_0\tmotorcycle\nNURGtF3McGo_0\tknife\nNUU3df9bDmc_0\tmotorcycle\nNUhIeMVykto_0\ttruck\nNUkuVMR_rDA_0\ttruck\nNUkuVMR_rDA_1\ttruck\nNUo3_VxkQWs_0\ttruck\nNUo3_VxkQWs_1\ttruck\nNU5WfPjxGO4_1\tcat\nNU60EZnPyy8_0\tbird\nNVAF-TWNge8_0\tboat\nNVeRtjaMVVM_0\tcar\nNVz1RXwlQQM_0\tskateboard\nNWOVEKbfu_M_2\tcat\nNWwoSS6oanE_0\tbus\nNW6ZEfS5YY0_0\tdog\nNXHWi70uXME_0\tmotorcycle\nNXU1Yxq08KQ_0\tskateboard\nNXe33k8YYzQ_4\ttruck\nNXe6DkOAbbo_0\tcat\nNX2FQE2RlgI_0\tdog\nNX2FQE2RlgI_1\tdog\nNYBxFsoPtLU_7\tknife\nNYBxFsoPtLU_2\tknife\nNYVtLPBMGDA_1\tdog\nNYVtLPBMGDA_2\tdog\nNYpkdx_Wzos_0\tbicycle\nNYrd2o8DQhw_0\tbird\nNYsYKDH1T0Y_0\tbear\nNYs9voRwmTk_2\tmotorcycle\nNZGyAc3mNmM_1\tskateboard\nNZOBtVvtpfo_0\tbird\nNZoU9njpjBc_2\tbird\nNZoU9njpjBc_1\tbird\nNaOwM5jaBb0_0\tbear\nNaTP9E6Ee6k_0\tmotorcycle\nNahvbbnqXN0_0\tknife\nNaszpQMnSmM_0\tskateboard\nNbXn5vr55Ik_0\tmotorcycle\nNbnAyKWQOgU_2\ttruck\nNbnAyKWQOgU_3\ttruck\nNbz45at2suY_0\tbird\nNb1nL_IG2Tc_0\tumbrella\nNb4FhqzK_80_0\tbird\nNb9Ee0cdc90_4\tknife\nNb9Ee0cdc90_0\tknife\nNcD7EzR9VKc_0\tcat\nNcODwqAl8wA_0\tbird\nNcODwqAl8wA_1\tbird\nNcnPt-ksZkA_0\tmotorcycle\nNcnr9xhL4RE_1\tbird\nNcnr9xhL4RE_5\tbird\nNco2IqVnrXc_0\tcat\nlKiN4UeEuCQ_0\tc
ar\nlKrgSHU_lF4_0\tmotorcycle\nlKrgSHU_lF4_1\tmotorcycle\nlL9OwfLG-LQ_0\tskateboard\nlMPus-gGijc_0\ttrain\nlMw3GHYr5nI_3\tbear\nlM2lr9vONXE_1\tbird\nlNDNEdNtW4w_0\tumbrella\nlNLvw0Ga8IY_1\tskateboard\nlNLvw0Ga8IY_2\tskateboard\nlNLvw0Ga8IY_0\tskateboard\nlNShteFjBFI_0\tbird\nlNh4Dhf0JC8_0\ttruck\nlNj5zp4Gbsw_1\tbird\nlOGti3Hfk6A_2\tbird\nlOglyCevyZo_0\tmotorcycle\nlOzlZJwo_U8_0\tmotorcycle\nlO0DJaFrguw_0\tmotorcycle\nlO0Nas9ogL0_0\tbird\nlPG5xsRX0U0_0\tbird\nlP3Jv00bEG8_0\tbear\nlQf2-zTERI8_0\tmotorcycle\nlQ8AFjrjX64_0\tumbrella\nlRSTcmXYwzM_2\tknife\nlRyY7rtPGJ0_1\tdog\nlRyY7rtPGJ0_0\tdog\nlR-HPtCgbFY_0\tcar\nlSefRz_ad2I_0\tperson\nlS7IFw-rHNE_0\tcar\nlTNivynkdBQ_0\tbear\nlTNivynkdBQ_2\tbear\nlTW53YPXtYw_0\tumbrella\nlTgxSRoCADM_1\tboat\nlTgxSRoCADM_2\tboat\nlTgxSRoCADM_3\tboat\nlTgxSRoCADM_0\tboat\nlTyeSMENfFI_0\tdog\nlT1oYaEt3l0_0\tskateboard\nlT1oYaEt3l0_2\tskateboard\nlT1oYaEt3l0_1\tskateboard\nlUEz6tmtuxs_0\tdog\nlUQr1JtEFAM_0\tcat\nlUSPy6WOhvw_1\tboat\nlUk_G-9RjSE_0\tbird\nlUq042i-r3E_1\tdog\nlUq042i-r3E_2\tdog\nlVCS7_AhLDg_0\tcat\nlVKT0DahELk_0\tbus\nlVKT0DahELk_2\tbus\nlVOqUh5DjZE_0\tbicycle\nlVWFKjMWyF8_0\ttruck\nlVWFKjMWyF8_1\ttruck\nlVWFKjMWyF8_2\ttruck\nlVoO_SiGxpw_0\tcat\nlVohP88BOwU_1\tgiraffe\nlWDh4SPr76A_1\ttrain\nlWGBmSVTvwo_2\tskateboard\nlWLYqz3RhXs_0\ttruck\nlWkC8ABD6YI_0\tknife\nlWnVG1WyzTQ_0\tdog\nlW8axrSg7EY_0\tdog\nlXJGVOcVinA_1\ttruck\nlXkkzYM416M_12\tknife\nlXkkzYM416M_8\tknife\nlXkkzYM416M_11\tknife\nlXshoTSoReY_0\tmotorcycle\nlXshoTSoReY_1\tmotorcycle\nlXshoTSoReY_2\tmotorcycle\nlYC47pEoyKc_2\tskateboard\nlYEiGk0pa9w_1\tdog\nlYP4KB7dANc_0\ttruck\nlYcCLy33mJA_0\ttruck\nlYcCLy33mJA_1\ttruck\nlYrLCKi7wHw_0\tknife\nlYrvoVOM7i8_1\ttruck\nlYrvoVOM7i8_2\ttruck\nlYzirpo9X4Q_2\tknife\nlY38gkpHWQA_0\tdog\nlZWg3rt2bp4_0\ttruck\nlZWg3rt2bp4_1\ttruck\nlZWg3rt2bp4_2\ttruck\nlZgIg28WsqA_1\tdog\nNcs0SIaAZjk_0\tskateboard\nNdDPhB7JjOc_1\tcar\nNdFMcVN8fkc_0\tskateboard\nNdFMcVN8fkc_1\tskateboard\nNd2smOOuPs4_0\ttruck\nNd5Cyi1P2AQ_0\tperson\nNd5Cyi1P2AQ_1\tmotorcycle\nNesRw9JE-bc_4\tdog\nNesRw9JE-bc_0\tdog\nNesRw9JE-bc_1\tdog\nNe_T9PyoaOA_0\ttruck\nNe_T9PyoaOA_2\ttruck\nNfQ_F7iyFT4_0\tbus\nNfoq-vLwXMs_0\tcat\nNfuM3ceM9Lg_0\tbird\nNf4iPszryRI_1\ttruck\nNgA6Mi5Qj6Y_1\tcar\nNgHJhpedfLw_0\tcat\nNgfJ42fUH10_0\tskateboard\nNglZtOBkn1M_0\tboat\nNgp2Yvug4N4_0\tskateboard\nNg7YPssESZs_1\tumbrella\nNhDdHfwovA0_1\ttruck\nNhHYQ1QBPq4_0\tcat\nNhJWY87UJGA_0\tmotorcycle\nNhKgTGZXrk4_0\tmotorcycle\nNiN42Yupn8k_0\tmotorcycle\nNiQLFJ_8gI0_1\tbird\nNifFA8VfbMY_0\ttruck\nNjPnw9Ofph8_1\tbicycle\nNjr2CQDoQ0w_2\tboat\nNj1tu2uzjf8_0\tumbrella\nNj4IqLuQBd0_0\tcar\nNkHiSqSViG4_3\ttruck\nNkSVC1QmlzA_2\tboat\nNkXF30FQWUs_0\tbicycle\nNkajkrLx-Pg_1\tgiraffe\nNkdGD4jRmVk_2\tskateboard\nNkdGD4jRmVk_3\tskateboard\nNkdGD4jRmVk_4\tskateboard\nNkvfxcYCIfg_0\tperson\nNkxm_Grldgg_0\tboat\nNlKX0Q_a4qM_0\tbicycle\nNl2e8ERoEYk_1\tskateboard\nNl27zjpvGZk_0\tcat\nNmCxdejUxjE_2\tumbrella\nNmGnWjSHIGc_1\tdog\nNmGnWjSHIGc_3\tdog\nNmGnWjSHIGc_0\tdog\nNmHo6hH22gY_0\tcat\nNmRjRjuwWGU_0\tumbrella\nNmm4H7xWWeE_0\tgiraffe\nNmnOIU5yzmo_0\ttruck\nNm3Wkz8ClY8_4\tbicycle\nNm3Wkz8ClY8_0\tbicycle\nNm3Wkz8ClY8_3\tbicycle\nNnQubFQHcUU_0\tcat\nNnSwVsUnfj8_0\tdog\nNnV7SskfNiQ_1\tbicycle\nNnYCP4YouSI_0\tskateboard\nNnYCP4YouSI_1\tskateboard\nNncDYgsTFic_0\tmotorcycle\nNn1fsXlRDQg_7\tbird\nNoKz0p_h8xA_0\tcar\nNoRnxJ4D8OY_0\tmotorcycle\nNoglbvaRxAM_1\tcar\nNoglbvaRxAM_2\tcar\nNopFymjXZBE_0\tcar\nNosN0T3He9Y_2\tknife\nNowQILLv6pM_1\tmotorcycle\nNoxncYznLDw_0\tmotorcycle\nNoxncYznLDw_2\tmotorcycle\nNoxncYznLDw_3\tmotorcycle\nNoxncYznLDw_5\tmotorc
ycle\nNpbXizTCNgs_0\tmotorcycle\nNpciaYlS9Bs_2\tskateboard\nNpptiWtuy7U_1\tbird\nNp0p_ITfRiE_0\tboat\nNp0p_ITfRiE_2\tboat\nNqA0sKGQZbc_0\tbird\nNqD8w0_R9y8_1\tmotorcycle\nNqLEhuNiS-A_0\tknife\nNqzZbJJl3E4_0\ttruck\nNqzZbJJl3E4_2\ttruck\nNq-mC-BLk1c_0\tbird\nNrGByfXIMJc_0\tdog\nNrGHtOFFLxU_0\tmotorcycle\nNrJIz8M3oNM_0\tboat\nlaSVNAwUDQc_0\tgiraffe\nlaiFgjfWMS8_1\tbird\nlajujsJ1J4k_0\tbird\nlajujsJ1J4k_1\tbird\nlauIpA9lVMo_0\tskateboard\nla0ygpbR6t4_0\tdog\nla0ygpbR6t4_1\tdog\nlbCW72FyaQ8_0\tumbrella\nlbC8rsjkZ8Y_1\ttruck\nlbDdPmkMwnw_0\tmotorcycle\nlbSldeZXn6I_0\tskateboard\nlbZo-rTovyc_0\tskateboard\nlbod3X-5Z40_4\tbus\nlbod3X-5Z40_5\tbus\nlbzHPZpNNjg_0\tdog\nlcSqXrVIbwo_0\tmotorcycle\nlcWTw6rAYfI_0\tcat\nlcv8jXnPWQU_0\tcat\nlc6jM9I3ffc_0\tmotorcycle\nlc8hZxMLAr4_0\ttruck\nldjVc4u8LUc_1\tmotorcycle\nldqpSPYa-3U_1\tbicycle\nld5g39_bixY_1\tskateboard\nld5g39_bixY_2\tskateboard\nlew1kgMUujc_0\tcar\nlfPmXUBRa-k_1\tbird\nlfVb7VtGUAI_0\tperson\nlfYoLXfvmyo_0\tbus\nlf29DRtjGcY_1\ttruck\nlf29DRtjGcY_2\ttruck\nlf4Xwro4NOQ_5\tbus\nlgLHq8p_CnA_0\ttruck\nlgVXhalKM3w_0\tboat\nlgne-5wGRTg_4\tbird\nlgwnVArDAa0_2\tbear\nlg3udJdBBoI_0\tdog\nlg_4H9FLVog_0\tdog\nlhBsZjQzf8Q_0\tmotorcycle\nlhEN_T9FduQ_0\tknife\nlhoMpa49rvU_0\tumbrella\nlh1Brsyb0aE_0\tbicycle\nlh21_LSx_G8_1\tdog\nliDzsyAmMJQ_0\tmotorcycle\nliThgzeBkVY_0\tcat\nlite73A-c3o_0\tbicycle\nli8IvNy_DW4_1\tbird\nljrwXgV0j9o_0\tmotorcycle\nlj3DWkRI_HM_2\tbear\nlj3mqLiqSRw_0\tknife\nlj5bI1M_0ZA_0\tskateboard\nlj-BTMsCDdY_0\tdog\nlj-BTMsCDdY_1\tdog\nlkOFpGLmX9s_0\tcat\nlkYuyUsRfWE_1\tdog\nlkg_nXf_W88_0\tbicycle\nllBtQEKaglQ_2\tbird\nllFPEcbP7m8_0\tcar\nllWG8M6Fsrg_1\tskateboard\nllu7uI6yzns_0\tmotorcycle\nllu7uI6yzns_1\tmotorcycle\nllu7uI6yzns_2\tmotorcycle\nlmCsOrgM7zE_0\tcat\nlmVNyKFiuQw_3\tknife\nlmVNyKFiuQw_2\tknife\nlm-deiNDAW4_0\tmotorcycle\nlnFmVwj7oMg_1\tcat\nlnk0OtCMbBc_0\tcat\nln5IAoaoPHc_0\tdog\nNrX1AnOpS98_0\tbus\nNroEppStyZI_0\tbicycle\nNrvQhlD_Fuw_0\tdog\nNrvQhlD_Fuw_1\tdog\nNsCdsMqUNFc_0\tbicycle\nNsaAbiSbaCc_0\tcat\nNsdCvelNA0g_0\tmotorcycle\nNsgZVfgUWco_0\tskateboard\nNsgZVfgUWco_1\tskateboard\nNs78CA77Hmk_0\tbird\nNtHFEE2Ii0o_0\tknife\nNtQSi_L3_e4_0\tbear\nNttRY9GKNOE_1\tcar\nNt38ikEgqJg_1\tdog\nNt-UKy4Uq0o_0\tcar\nNuOq_HSf26I_0\tboat\nNucr0ksCppE_0\tdog\nNumUCmB1MLA_0\tbus\nNu6g6OfLbKU_0\tzebra\nNu6g6OfLbKU_1\tzebra\nNu-gGh3BQo0_0\tskateboard\nNvDafPMMZtg_1\tcat\nNvDafPMMZtg_0\tcat\nNvFUKJ9Y500_0\tbicycle\nNvTRLNn1Tk4_0\tcat\nNwC3jHQ65I0_0\tbear\nNwG3zY4-qHs_0\tskateboard\nNwHv08KS8WU_0\ttruck\nNwHv08KS8WU_2\ttruck\nNwgEA2yRlYk_0\tbird\nNwgEA2yRlYk_5\tbird\nNwlCLmmFUzM_0\ttruck\nNwoCpDkRUOc_0\tskateboard\nNwzkWW45Qx0_6\tbird\nNw1pLrkHm1E_1\tcat\nNw8ZySxnzIA_0\tcat\nNxPgLux4spk_0\tmotorcycle\nNxgst3FR84g_0\tcar\nNyOC1kV5fqc_2\tknife\nNyOVnxlZw44_0\ttruck\nNyQlYlDdA1Y_2\tskateboard\nNyg0BliJTCI_2\tumbrella\nNy14oMm9C9k_6\tskateboard\nNzIOn70DDCU_0\tbicycle\nNzfwqHNApI8_0\tbear\nNzqr9pq3W0g_0\tbus\nNzwcia0dVls_0\tbear\nNz5AnTEPNKY_3\tbird\nNz_Dn60wY8c_0\tdog\nN0p_wrAammI_1\tbird\nN0wFxDTDhrA_0\ttruck\nN0yYt90fBGo_0\tboat\nN049Vl1eC9E_0\ttruck\nN1C5Wk1HQEk_0\tcat\nN1jUvtD_RyY_0\tumbrella\nN1xm5YdzSfQ_0\tbird\nN13r5ZKqAZI_1\tboat\nN2GiHfyj2sY_0\tknife\nN2Y3LmbOWhM_1\tcat\nN2Y3LmbOWhM_2\tcat\nN2e24fXBD58_0\tboat\nN2u1zVHzrfc_0\tcat\nN3D5PnaCpHs_1\tknife\nN3D5PnaCpHs_2\tknife\nN3Iy7f2RrrQ_0\tmotorcycle\nN3OIM_qi7dY_0\tcat\nN3VKNNdiRhs_0\tumbrella\nN3ZGT5VDX7A_0\tdog\nN3vCQPsPb7k_0\tcat\nN3x4Fw8PZ04_1\tbird\nN4BazwxnEJU_1\tumbrella\nN4T6B8WAeyw_1\tbear\nN4bUNLwIt-I_0\tbicycle\nN4gBOlxfYUI_0\tgiraffe\nN5T8bgYdTg8_0\tbird\nN5cC5-506Yg_0\tmotorcycl
e\nN5uwMT9YWA8_2\tumbrella\nN6FCEWFj0vc_0\ttruck\nN6XH-20xsPk_0\tbus\nN6Xl8e3GRcY_0\tbird\nN6gcbwR93B4_1\tmotorcycle\nN6rvYTX52x4_0\tcar\nloFhsa4OXsA_0\tzebra\nloFhsa4OXsA_1\tzebra\nloFhsa4OXsA_2\tzebra\nloS5Iy7HDhY_2\tcar\nloyp0oi9idU_0\tcat\nlpPnun9oDq4_1\tboat\nlqEgRMyazN4_0\tdog\nlqi9uYhr1lU_3\tboat\nlqybkPUTuGk_4\tbird\nlqybkPUTuGk_0\tbird\nlqybkPUTuGk_1\tbird\nlqybkPUTuGk_3\tbird\nlrbJ-8myxJA_1\tskateboard\nlrd8TXYq2Co_0\tzebra\nlrgLAWtIFbQ_0\tbird\nlrk-LSpxnaQ_0\tbus\nlrsspehYW2Q_0\tcat\nlrusc_A2xpY_1\tskateboard\nlsQ4p_XwS3U_1\tskateboard\nlsW8rve_6F0_0\tbird\nlsslg2HK3as_1\tbird\nls7K9Ga_TDo_0\tcat\nls8cJ6QPPdI_0\ttruck\nltfbVFmlGNs_0\tmotorcycle\nltfbVFmlGNs_1\tmotorcycle\nlti3EMrk6hA_0\tbird\nltyDB0DzJ4o_0\tbear\nluZpSqhxjzc_0\tskateboard\nlujnNrfylcM_0\ttruck\nlu4gOMv2LmA_0\tdog\nlvW9JvQnv_U_0\tcat\nlvXow0J0_Z8_5\tboat\nlvpmaJx7Ydo_0\tmotorcycle\nlvxwGSPs5eo_0\ttruck\nlvxwGSPs5eo_1\ttruck\nlv79L0E9KbU_0\tcat\nlv8ApAxhQxg_9\tdog\nlwIzp1ny_cc_0\tbicycle\nlwqQ1SyQ6oc_0\tbird\nlwu1229kxGE_0\tumbrella\nlw-_X5H5dsA_1\tskateboard\nlxfLak4qc0w_3\ttruck\nlxxazO-lUhg_0\tskateboard\nlxz5eN6gYvE_0\tskateboard\nlx4WDd9A1jM_0\tcat\nlyBbm0su2N8_0\tdog\nlyDsv_jEl3M_0\tmotorcycle\nlylbDiRYA18_0\tskateboard\nlym5pBjKK44_1\tboat\nlyx_DnTpBx4_0\tbird\nlzAGCQoeAug_1\tboat\nlzISnRATBZY_0\tmotorcycle\nlzrv6Lmaqhc_0\tbicycle\nlz9wsaAdD3g_0\tdog\nl0HBjPE-vp4_0\tbicycle\nl0LztA4KLq8_1\tumbrella\nl0TccajPnLs_0\tcat\nl0YyZLT2r0Q_0\tdog\nl0dbu61iEXU_0\tcat\nl0kogcjKlvI_0\tbird\nl01YbT30Uzw_0\tcar\nl1PoAFZPnAI_0\tcat\nl1cfghmMFfA_0\tmotorcycle\nl1dkS9dCOZs_0\ttruck\nl1eSoNjG7g4_3\tcar\nl1smSqKCK4k_0\tperson\nl1wXtZDVtTw_0\tbear\nl120CJB_tWI_0\tcar\nl2Cytaq3_MU_0\tbird\nl2d3stMmMjs_0\tcat\nl2pGQEcySt4_0\tgiraffe\nl23teWgsK_Q_1\tskateboard\nl23teWgsK_Q_0\tskateboard\nN7HX62OM1Jo_1\tcar\nN7WtVRWgYEs_0\tbird\nN8RE_7TdVGo_0\tskateboard\nN8wDSOXX8q4_0\tcat\nN9TwNh9IZug_0\ttruck\nN9TwNh9IZug_2\ttruck\nN-bSoL4tlX0_0\tcat\nN-ehGzRtoj8_0\tbird\nN-4XvHMsGCk_0\tperson\nN-9RtI_ifsk_0\tmotorcycle\nN_MWs_Dxjio_0\tknife\nOAJTjsjrFlQ_0\tcat\nOATLx4-34zQ_0\tdog\nOAtOdcwMjgs_0\tskateboard\nOBDA-yKAC_k_0\tumbrella\nOBDA-yKAC_k_2\tumbrella\nOBLc4YWkCqU_0\tmotorcycle\nOBYJdeMHD3g_0\tmotorcycle\nOBlj7XKW4lc_1\tboat\nOBlj7XKW4lc_0\tboat\nOBti9g_xdjg_0\tbus\nOBuDg5pF8EM_0\tmotorcycle\nOBvMQQZSs6Q_0\ttruck\nOCEGSfdedcM_1\tdog\nOCYvV1-sQQQ_1\ttruck\nOCYvV1-sQQQ_0\ttruck\nOCijTz38zrU_0\ttruck\nOCpuPcuJN68_1\tcar\nOCp5hNHBPpU_6\tknife\nOC3VHGBHbMY_1\tdog\nOC3VHGBHbMY_2\tdog\nODXPmCSXZDc_1\ttruck\nODXPmCSXZDc_2\ttruck\nODbUQUd4jSU_0\tskateboard\nODdK6tzKWWs_2\tbicycle\nODdK6tzKWWs_3\tbicycle\nODlDtYOtoQs_0\ttruck\nODo-zlQ_GB0_0\ttruck\nODp6c6uSvaU_0\tgiraffe\nODuka2U9fkA_0\tbird\nOD4XXIos2Zo_0\tdog\nOEJox-XKatw_0\tknife\nOEJox-XKatw_1\tknife\nOEMh8A9j_pg_3\tbear\nOEQV-Uetx8M_0\ttruck\nOE0tYMQn8GU_1\tbird\nOFA22Poj7lQ_0\tbicycle\nOFA22Poj7lQ_1\tbicycle\nOFbK3M6Z_QU_2\tdog\nOFbK3M6Z_QU_1\tdog\nOFdr0zUfrlE_0\tbus\nOF2H-LBDSPk_2\tbird\nOF2H-LBDSPk_1\tbird\nOF6Up9vV9Qc_3\ttruck\nOGMTfwEYzHA_0\tknife\nOGNQnbR2jAw_1\tbear\nOGVemy4LnsA_0\ttruck\nOGbVuwjdEDU_0\tmotorcycle\nOGnQhL7HZyI_0\tbus\nOGsEC0i33BY_0\tknife\nOG7Gqq0yNXc_0\tskateboard\nOHWx9W6ECl8_0\tgiraffe\nOJ0c10BvtRY_0\tdog\nl3U_T7n5YD8_0\tbicycle\nl3YBS5nRxUY_0\ttruck\nl3lkSnsgzx4_0\tumbrella\nl3qhbFnoRvI_0\tcar\nl31h7cMiU1I_0\tbear\nl4LQx_ua4m0_0\tbus\nl4MLa-2lkQI_0\tbus\nl4dzsbhTXr4_2\tbird\nl4lv0qkvs10_6\tbear\nl43lNQ5Vq_s_0\tbird\nl4-nRuAZNyY_2\tcar\nl5FUU1e4Y60_2\tbicycle\nl5ecq1OhBsk_1\tskateboard\nl5ecq1OhBsk_0\tskateboard\nl508a0nbyQI_6\tbicycle\nl508a0nbyQI_13\tbicycle\nl508a0n
byQI_14\tbicycle\nl508a0nbyQI_18\tbicycle\nl6NgJ2NHnt4_1\tbear\nl6S8h_QnD7U_0\tcat\nl63MzTHehFQ_0\tcat\nl7p6AfqPX2Y_1\tmotorcycle\nl7p6AfqPX2Y_0\tmotorcycle\nl8-hpsjvPaw_1\ttruck\nl8-hpsjvPaw_2\ttruck\nl9PH4iTXdYs_0\tskateboard\nl9ZtaPU3mB8_0\tknife\nl9j2X0rGhIY_0\tbird\nl9qm2_xBYHQ_0\tcat\nl9urEyEnxnU_1\tknife\nl96fQdjYlLs_0\tknife\nl-MCmCPjH7k_0\tbicycle\nl-QCC522u8A_0\tcar\nl-eNrq-WUQo_0\tboat\nl-98mL8hxMY_0\tbicycle\nl-98mL8hxMY_8\tbicycle\nl_DmnPQxj7k_0\tzebra\nl_scPJDEOuI_0\tbird\nmAE8hqG3eSk_0\tbus\nmAPlm5rMa-w_0\tmotorcycle\nmA5ZTSfwetI_0\ttruck\nmBEMpccxmBw_1\tmotorcycle\nmBEMpccxmBw_0\tmotorcycle\nmBTsr9NKqos_0\tdog\nmBTsr9NKqos_1\tdog\nmBTsr9NKqos_2\tdog\nmBivNgtX2dc_1\tskateboard\nmB2K7Cqy5sA_0\tknife\nmB2K7Cqy5sA_1\tknife\nmB2K7Cqy5sA_2\tknife\nmCA3YMqp59Y_0\ttruck\nmCVUS1SHxdc_1\tbicycle\nmCaHiS25d_c_0\tbird\nmCipOiHzL24_0\tcar\nmCnfYEJ7_nM_1\tboat\nmCplUoipq_M_0\tumbrella\nmCshfLJNDZc_0\ttruck\nmC9gh-poTgc_1\tbus\nmC_yfZI-Kfw_0\tcar\nmC_8_BVmM48_0\tbus\nmDOnks0KH3c_0\tbus\nmDO2Jg5oyPM_1\tumbrella\nmDTcvH2cBAk_0\ttruck\nmDTxktaf2Z0_0\tcat\nmDio2Blh76Y_3\tknife\nmDio2Blh76Y_0\tknife\nmDio2Blh76Y_2\tknife\nmDoksuME2bk_0\tknife\nmECu0xa8vxM_0\tbird\nmEFIkGBIFT4_0\tumbrella\nOKHhm13mZYw_0\tbicycle\nOKJlHLunIJ4_5\ttruck\nOKL9IGXZDqg_0\tcat\nOKOBYUJfsW0_3\tbus\nOKVeF8WX7nM_1\tdog\nOKXlOHWMVYI_0\tbicycle\nOKXlOHWMVYI_2\tbicycle\nOKniUxVle4E_0\tdog\nOK1lt5Hbk8U_0\tbird\nOK1lt5Hbk8U_1\tbird\nOK72g05p_nY_0\tbird\nOLWhwdr2s3U_0\tmotorcycle\nOLqz23zKUZ0_0\tskateboard\nOMJA4N9BRjk_0\tbus\nOMJA4N9BRjk_1\tbus\nOMROj6nJzNU_0\tumbrella\nOMscf19CmfE_0\tcat\nOMszUYfxt-k_0\tdog\nOM7YDn8Aj8U_0\tcat\nONQt1uMKjzM_0\tcat\nONQ7_XR_YoE_0\tcar\nONvq-WMS04Q_0\tbus\nON25DCtbtZI_0\tbird\nON25DCtbtZI_1\tbird\nOOOsedHMhFE_0\tdog\nOPFx79LTPYQ_0\tknife\nOPFx79LTPYQ_1\tknife\nOPNrGuEJKfQ_0\tcat\nOPRxB1VUSzc_0\tbus\nOPZI6LUwe80_0\ttruck\nOPny4vHo5EQ_1\tmotorcycle\nOQWmlKTZbJA_1\tboat\nOQh45xm5OzM_0\tcar\nOQlHcCttP0Y_0\tmotorcycle\nOQ5Q0IvSVJw_0\tskateboard\nOROW-2FDArE_0\tknife\nORjDIPVlrpY_3\tboat\nORyOEpNkmQU_0\tbicycle\nORyOEpNkmQU_1\tbicycle\nOR1UJ2WJswk_0\tumbrella\nOR8th1OG-XE_0\tumbrella\nOSia7sePfOs_0\tdog\nOS2Ga4W91oU_0\tboat\nOTGZvd8HEBs_6\tumbrella\nOTGZvd8HEBs_1\tumbrella\nOTGZvd8HEBs_5\tumbrella\nOTK2nAcxHMw_0\ttruck\nOTSLZbr15Rk_0\ttruck\nOTXkN6YTPBY_2\tbear\nOTvtQllL8ho_0\tgiraffe\nOT1tUDnxHUY_1\tbird\nOT1tUDnxHUY_0\tbird\nOUDo6Wi3Mx0_0\tbicycle\nOUaP4Qe7K_k_0\tskateboard\nOU9OQRs4Ff4_3\ttruck\nOU9OQRs4Ff4_0\ttruck\nOVBUoFuLqko_3\tboat\nOVBUoFuLqko_4\tboat\nOVBUoFuLqko_5\tboat\nOVBUoFuLqko_0\tboat\nOVBUoFuLqko_1\tboat\nOV8AfAYiWos_3\ttruck\nOWe4Ah3rUkU_3\ttruck\nOWe4Ah3rUkU_4\ttruck\nOWwYp5TMtyo_0\tdog\nOW09PhbCZ2c_0\tcat\nOW9poTV3Pw0_0\tmotorcycle\nOXDBegRD_hY_4\tbear\nOXleFWP00RU_0\tskateboard\nOXn_z6r4tTM_0\tbicycle\nOX46gFmob50_0\tboat\nOYAOM3GxoFs_0\tumbrella\nmEhLlaG7ivE_0\tcar\nmEyJVUti9TA_0\tbird\nmFCrAjplP-s_1\ttruck\nmFQSD32phtQ_0\tmotorcycle\nmFoVk3mdfVs_0\tboat\nmFpufihJP34_0\ttruck\nmF3uYMbMsrA_1\ttruck\nmGAgv6gfUIA_1\tgiraffe\nmGP0JfjwxXU_0\tcar\nmGP0JfjwxXU_1\tcar\nmGwC1aGK8EQ_1\tmotorcycle\nmGwC1aGK8EQ_0\tmotorcycle\nmG6Uz7wciew_0\ttruck\nmHNOyEXbwsg_4\tbear\nmHORHQS-7WE_0\tmotorcycle\nmHPMxlukQ30_0\tmotorcycle\nmHfy3z8lzZY_0\tbus\nmHicqYMm5B8_0\tbird\nmHicqYMm5B8_1\tbird\nmHtWCmdt2ck_0\tcat\nmHwCC0jnHbI_0\tbear\nmHwgF2IQCd8_0\tmotorcycle\nmIn-Tkvx0xg_0\ttruck\nmIx7ZeZ2Vv8_1\ttruck\nmJ0xD-4leB8_0\tcat\nmKRUuWYJC2k_0\tmotorcycle\nmKRUuWYJC2k_1\tmotorcycle\nmKRUuWYJC2k_2\tmotorcycle\nmKRUuWYJC2k_3\tmotorcycle\nmKWmMLNNRAQ_0\tzebra\nmKgld1efJss_0\tbus\nmKu97ivRVSM_0\tknife\nmKu97ivRVS
M_1\tknife\nmLDjtK6d-W0_0\tknife\nmLGU-BL1agI_1\tcat\nmLG8EyllDhA_0\tcar\nmLIp-YLvQaA_0\tzebra\nmLgNPTUe_XI_0\tcat\nmLmtVR-AGCk_0\tbear\nmLpoizHo-v4_0\tdog\nmMG1DT2mUAo_0\tskateboard\nmMUflfP_ZMY_0\tcat\nmMXGos8VYQI_1\tdog\nmMt-gdadsY4_1\tdog\nmNFkEphgV18_1\tbicycle\nmNdM6zfb6FA_0\tcat\nmNeHO27e_i4_0\tbus\nmNeHO27e_i4_1\tbus\nmOMvL5XuAZs_0\ttruck\nmOVza6TV55E_0\tbicycle\nmOcxsTLCyfM_0\tumbrella\nmOjLK3sW2lA_0\tskateboard\nmO3CzDojFYs_0\tdog\nmO8cYs6iJlE_0\tcat\nmPCBb4ndGx0_3\tcar\nmPCBb4ndGx0_2\tcar\nmPPaPa0iD_c_0\tdog\nmPV3eyH3uiY_0\tbicycle\nmPW-nXWaC4U_0\tcat\nmP223OT32Rc_0\tknife\nmP223OT32Rc_1\tknife\nmP553XrHpVs_0\tmotorcycle\nmQD1eeRC1Q4_0\tknife\nmQf2FppJTEM_0\tbird\nmRFdLfB4a1s_3\tbear\nmRI6bXmeH0U_0\tknife\nmRMc_QxifPU_0\ttruck\nmRMc_QxifPU_1\ttruck\nmROsO1LIGpo_0\ttruck\nmRYB4i5ld-k_0\tdog\nmRkf0ciWPgI_9\tbird\nmRl54j1LWx8_0\tperson\nmRyO8jtjseY_1\tcar\nmRyO8jtjseY_2\tcar\nmR0m08J8B08_4\tboat\nmR0m08J8B08_0\tboat\nmR0m08J8B08_1\tboat\nmR0m08J8B08_2\tboat\nmSTIz-CdXqU_0\ttruck\nmSf7pQlzXuw_0\tgiraffe\nmSgbTXZAzDk_1\tumbrella\nmSvLPzkZzps_0\tknife\nmSxrYqw4oqg_1\tumbrella\nmSztwZ01Pck_0\tbus\nOYIPropF-hA_2\tknife\nOYTwB7sOFYE_0\tbird\nOYa8DOvcJkU_0\tcat\nOYf6rSUrwxc_0\tdog\nOYnjEcx19SM_0\tcat\nOZBLMb8bGX8_0\tzebra\nOZcS8vrufig_0\tdog\nOZeialzVvBQ_0\tbird\nOZqsh8FFeFo_0\ttruck\nOZqsh8FFeFo_3\ttruck\nOZqsh8FFeFo_4\ttruck\nOZstdGSfBBw_0\tbird\nOZ2Xf6zzI5Q_1\tskateboard\nOZ2Xf6zzI5Q_0\tskateboard\nOaR_KKoBRYA_0\tboat\nOai5vIFRADY_0\ttruck\nOaxb1TjNF5A_0\tboat\nObG3TG10dF0_0\tdog\nObLBCGg01UY_5\tskateboard\nObLBCGg01UY_1\tskateboard\nObLBCGg01UY_2\tskateboard\nObLBCGg01UY_3\tskateboard\nObLBCGg01UY_4\tskateboard\nObMci_3wRII_0\tboat\nObmxs3FqVc0_0\ttruck\nObol9FzC6qw_0\tboat\nOburzWcRnbc_0\tskateboard\nOb5o_Ufzxvo_0\tumbrella\nOb6-UrKFrTY_5\tboat\nOcFGISpeAn0_0\tskateboard\nOcQBa7E9-AI_1\tcar\nOcZG24cCgsU_2\tboat\nOchOHb4q-iE_0\tbicycle\nOcmRyP_n53E_0\ttruck\nOcuYOC6GylA_0\tcar\nOc1tfJzLD3o_1\tbus\nOc1tfJzLD3o_2\tbus\nOc1tfJzLD3o_0\tbus\nOdGHHAUYow4_0\tboat\nOdl4k8y8GfI_1\tskateboard\nOdo1ZvyEbqs_3\tbear\nOdo1ZvyEbqs_6\tbear\nOdo1ZvyEbqs_7\tbear\nOeJet0TZ0Ns_0\tcat\nOecO1BnSygU_0\tumbrella\nOecO1BnSygU_1\tumbrella\nOepCeq6zNOc_0\tumbrella\nOevlneuqSNg_0\tskateboard\nOe3qCUtDCoI_0\tbear\nOfD7c6vcSKc_0\tmotorcycle\nOfFZrl_Ltoo_0\tdog\nOfQ3Y3DEgNI_0\tskateboard\nOfZ9wyeuMaU_0\tskateboard\nOfcr6xsiMGY_1\tknife\nOfmW_n1WB-0_0\tbird\nOfpLj-uw2VM_0\tskateboard\nOfv2SMoyg_8_0\tboat\nOgG3xES-A9s_0\tbicycle\nOgG3xES-A9s_1\tbicycle\nOgtTZgAAtrk_0\tbear\nOg77fxfsfzI_0\tskateboard\nOg83XjWPr30_0\tbird\nOg_sRGRP2fw_0\tmotorcycle\nOhQqfPIVR_o_0\ttruck\nOhh5X9j8-P4_0\tskateboard\nOhvnlA9rzUA_0\tumbrella\nOh4vuNdjqGg_1\tboat\nOh4vuNdjqGg_3\tboat\nOh79QNRx0m0_0\tbus\nOiHa7vhbW0g_0\tumbrella\nOiT0hP6IU_0_1\tcar\nOidiasYmhhk_0\tdog\nOiuo__vi77s_0\tmotorcycle\nOi3BJVuj3f8_0\tbus\nOjst9j_7TPs_0\tmotorcycle\nOjxLYDs9O2w_1\tskateboard\nOkd1qAIUuZo_0\tskateboard\nOkd1qAIUuZo_2\tskateboard\nOlO9xdVfniA_0\tbus\nOlPObAsvFRE_0\tbear\nOlQykWy5_d0_0\tskateboard\nOlVZS0O7Xcc_0\tbus\nOlVZS0O7Xcc_1\tbus\nOlVofey46c8_2\tgiraffe\nOlVofey46c8_0\tgiraffe\nOldv3-_fn3E_0\tmotorcycle\nOlufwgkC9nA_0\tcat\nOl3C5MWakic_0\tbus\nOl63TPS0wjE_0\tskateboard\nOmTHe4jPR30_0\tumbrella\nmTnSFF649v4_0\tmotorcycle\nmTtOhVJYmco_0\tbicycle\nmTuXb1mo6ms_1\tmotorcycle\nmTwbZIC2mjs_0\tumbrella\nmUllN4tCjhg_0\tcar\nmVWf8BrbbQc_0\tskateboard\nmVZVZPz-0uk_0\tknife\nmVztYl0hyR0_1\tbird\nmWOuUa5VTIU_4\tbird\nmWSWZi7ef2Q_0\tbus\nmWULzZ-r0BE_10\tbear\nmWULzZ-r0BE_0\tbear\nmWULzZ-r0BE_1\tbear\nmWULzZ-r0BE_3\tbear\nmWULzZ-r0BE_6\tbear\nmWULzZ-r0BE_7\tbea
r\nmWULzZ-r0BE_9\tbear\nmW85x5O3sQM_1\tbus\nmXYQlH9le8Y_0\tdog\nmXt-xLcVJTM_0\tknife\nmXuPzw4I-wQ_0\tdog\nmXu238CeGfQ_0\tmotorcycle\nmXu238CeGfQ_1\tmotorcycle\nmX3SlrHHN8A_2\tknife\nmX3SlrHHN8A_3\tknife\nmYFsdZ6ZiHg_0\tskateboard\nmYYLIkI65fA_0\tcat\nmYgcUWeYKeE_0\tcat\nmYhujznmuic_0\tmotorcycle\nmYtEL2P4G64_2\ttruck\nmYtEL2P4G64_0\ttruck\nmY6M_QMVm6A_0\tdog\nmY6M_QMVm6A_1\tdog\nmY6M_QMVm6A_2\tdog\nmY6M_QMVm6A_3\tdog\nmZEPBKLKQLU_0\tskateboard\nmZStBRJGz0o_0\tbird\nmZWugKrC8fs_0\ttruck\nmZ0LxtaLk9s_0\tbicycle\nmZ0LxtaLk9s_1\tbicycle\nmZ1ae3QtMqY_1\tskateboard\nmZ6SXifL_5I_0\tcat\nmaANeKOpibc_0\tbus\nmaATqEbCdmA_0\tboat\nmaOsv3Gen0Q_0\tmotorcycle\nmagDXuphf6E_0\ttruck\nmavzqjj21eQ_0\tmotorcycle\nmbFrW58khSM_0\tmotorcycle\nmbtyAyprPhQ_0\tmotorcycle\nmbuozxoOynA_0\tbus\nmb9G4GF56RA_1\tumbrella\nmb9G4GF56RA_2\tumbrella\nmb-nes45JeE_1\tbird\nmb-nes45JeE_0\tbird\nmcCOvhuC86Q_0\tcat\nmcxoHsKM444_0\tcat\nmc0A1NsuIBI_0\tbird\nmdDoBuc7jag_0\tboat\nmdZbK8mOA5Y_0\tmotorcycle\nmdxbRZzm2Fo_4\ttruck\nmdzJDnEx5AI_2\tboat\nmd8Xi01GJ0Q_3\tbird\nmeRJPfPZTpw_3\tbird\nmebu5O8auic_1\tbird\nmehKWfZTJQE_0\tbird\nmfPFvq57cxM_0\tskateboard\nmf4LyMZ6wyY_0\tskateboard\nmgEZVZrBkrg_0\tbicycle\nmgEkK74q1Lo_0\tmotorcycle\nmgTCPe8eM00_1\tumbrella\nmgVB0o0U17w_1\tskateboard\nmgVB0o0U17w_0\tskateboard\nmhSSgOcQwd8_0\tcat\nmhqhGszzAR8_0\tcat\nmiC3NPxHofU_1\tbird\nmiQyhDocW3I_1\tdog\nOm1q-9YbJu0_0\tbus\nOm-NvWZY9XM_0\tbear\nOnKqSIvDmuM_0\tskateboard\nOnemsYazBrQ_0\ttruck\nOnemsYazBrQ_5\ttruck\nOnemsYazBrQ_1\ttruck\nOnemsYazBrQ_2\ttruck\nOnemsYazBrQ_3\ttruck\nOn3Yd3AHFp0_0\tdog\nOn3b0cn9QYE_0\tbear\nOn-GcAXLGZ0_0\tmotorcycle\nOn_5UKUJi7U_0\tcar\nOohVLB8HrmU_0\tbear\nOo2Ux9rWYGo_0\tskateboard\nOo8VLA_C0ho_5\tbicycle\nOpAPsb8a7ck_1\tbicycle\nOpD07kt9gdg_0\tmotorcycle\nOplcFe9OOMA_0\tboat\nOplcFe9OOMA_1\tboat\nOpqmXBQU87o_0\ttruck\nOp3764NveuQ_1\tbicycle\nOqKwAAWtANM_0\tcat\nOqPOCcEAHqk_0\tskateboard\nOqbhEJlCp48_0\tskateboard\nOqc407hvhn8_0\tskateboard\nOqjbl3c9LYU_0\tbear\nOqo2P7az_Jw_2\tmotorcycle\nOrhDfcZqq1E_0\tcat\nOrhipZ8lZHo_2\tbird\nOrhipZ8lZHo_3\tbird\nOrhipZ8lZHo_1\tbird\nOrmkaB0vrG8_0\tdog\nOr-E2m2p4X8_0\tmotorcycle\nOr--toMjK3I_3\tboat\nOsvYa6TnsFI_2\tcar\nOtIV8clF1-o_0\tbird\nOtIV8clF1-o_2\tbird\nOtKIh5W3Uro_0\tbird\nOtw43WNrlsM_1\tbicycle\nOtw43WNrlsM_2\tbicycle\nOtyYn5vEHbM_2\tskateboard\nOuBzZzA9Q7o_0\tbicycle\nOuJMq2UqA-s_0\tdog\nOuVznEsiyyA_1\tmotorcycle\nOui9ZgfJiJE_0\tskateboard\nOu1yCmmAuSY_0\tcat\nOvEJdKYqvF4_2\tdog\nOvEJdKYqvF4_5\tdog\nOvZguhO8UVQ_1\tbird\nOvlqAWflXBs_1\tbus\nOwQktS0dM3k_1\ttruck\nOwUWoVRKf7E_2\tzebra\nOxADHlAb7dM_0\tboat\nOxInmNOeLHY_0\tbicycle\nOxInmNOeLHY_1\tbicycle\nOxInmNOeLHY_2\tbicycle\nOxInmNOeLHY_3\tbicycle\nOxInmNOeLHY_5\tbicycle\nOxInmNOeLHY_6\tbicycle\nOxInmNOeLHY_7\tbicycle\nOxZdEZCJtcw_1\tmotorcycle\nOxkx4bWzOMo_0\tskateboard\nOxp9w62kg0Y_2\tknife\nOxp9w62kg0Y_3\tknife\nOx1idrJvs2E_0\tcat\nOx_8K3szIs0_0\tcat\nOyGSbm149i8_0\tboat\nOzBFCX0vpiU_0\tmotorcycle\nOzCvvptC7o8_1\tbicycle\nOzHYG5kpMbw_0\tcar\nOzItTAjpb9U_1\tknife\nOzwlwZq46z8_3\tbus\nOz89_rVdBV0_0\tknife\nOz89_rVdBV0_1\tknife\nO0JwQIk5pZY_5\tknife\nO0Xl3AF_T0s_0\tdog\nO0dforbCqKM_0\tcat\nO0lfImzhCM4_0\tdog\nO0p5eAP2AyA_0\tboat\nO0rSIIipDT0_0\ttruck\nO02oVGyCZDI_0\tmotorcycle\nO0_GC-1pCYk_0\tbear\nmj155rqWO3k_0\tmotorcycle\nmj5oMHI4Ch0_1\tskateboard\nmkPWdHTd5X8_0\tbear\nmkVYY1EvetE_1\tbicycle\nmkZA72VL1oI_0\tzebra\nmlAzMb61fYU_0\tcat\nmlJYoZVHztc_0\tbear\nmlT2XD9k5Ro_2\tbird\nmlophh4mK4A_2\tbicycle\nmluR2OjQTmU_0\tcar\nmlwpiHjyzIA_1\tmotorcycle\nml4BVi7cCV4_0\tbird\nmmnFugXdqlQ_1\ttruck\nmmnFugXdqlQ_0\ttruck\nmmojCWiaN
YI_0\tcat\nmm_Udf1FG0s_0\tcat\nmnB2hBuySsI_1\tbear\nmnB2hBuySsI_2\tbear\nmn_cuBRZu8M_0\tcat\nmoZR-AtZJnI_0\tcat\nmobg7uEQTmo_0\tumbrella\nmogyHm8Jiok_0\tbird\nmoh4TWSe9Fc_0\tumbrella\nmotFo9G-GLs_0\tskateboard\nmoyxRLHHeiI_0\tbus\nmo5ZpMFELUQ_0\tmotorcycle\nmpO9dBwTeW4_0\tbicycle\nmpYAM0x6L5M_0\tmotorcycle\nmphFmT6TzLM_1\tknife\nmphFmT6TzLM_2\tknife\nmphFmT6TzLM_3\tknife\nmp25XfIJhQY_0\tcat\nmp8USuQKinc_0\tbird\nmqUyhzbCpig_0\tmotorcycle\nmqjilBZByTI_0\tskateboard\nmq5DqmYGVM4_0\tperson\nmrJAakc7Fj8_0\tbus\nmrOsDCuEdRQ_1\tdog\nmrY8gIFiUhE_0\tcar\nmrhfyNpFMq4_1\ttruck\nmryDGEujJno_0\tmotorcycle\nmsNXnb1a02o_0\tknife\nmsbOXFTsSVU_0\tgiraffe\nmszokIKsdUk_0\tbus\nms0_k1aLULU_0\ttruck\nmtITgRv95Sw_0\tdog\nmtU7bHAsI8Y_0\tcat\nmtZHgLGJiu4_0\tskateboard\nmtmzPf2AZuI_0\tskateboard\nmtnURpE0wyE_0\tbicycle\nmtnURpE0wyE_1\tbicycle\nmtnURpE0wyE_2\tbicycle\nmtpTPJtG8F4_0\tmotorcycle\nmt_LZ5UsG_w_5\tknife\nmt_LZ5UsG_w_1\tknife\nmuKQy-1p4fg_0\ttruck\nmuWIt0X4pKQ_0\tdog\nmuZ7xPF8odU_2\tbicycle\nmueRS6nKTdA_0\tbicycle\nmujGcuAzOdo_1\tbear\nmujGcuAzOdo_4\tbear\nmulQIomc988_1\tbicycle\nmulQIomc988_3\tbicycle\nmuoqLEyrhhI_0\tdog\nmursOuNatdc_0\tboat\nmu65YolQZds_0\tknife\nmvEcWlHP6u4_0\tbicycle\nmvYBfdZkCe8_0\tdog\nmvb5jVJeuGE_0\tperson\nmvhEFfQeFCY_0\tbear\nmv2FHxOHSR0_1\ttruck\nmwAPVTEbZGM_0\tskateboard\nmwAPVTEbZGM_1\tskateboard\nmwBKrjOpxkY_0\tskateboard\nmwIroQ9RbXA_0\tbird\nmwrxbdZraRk_0\tcar\nmw5fQZ8EB5I_0\tknife\nO1KrpGSvXAY_0\tknife\nO19Mlhhzqgc_2\tbear\nO2ZR7HPYZCo_0\tcat\nO2u5126JYpY_2\tmotorcycle\nO3DA7qzf2s8_1\tbus\nO3DA7qzf2s8_0\tbus\nO3y2taxKvCA_2\tboat\nO4CfuT5BDcc_0\tskateboard\nO4VQQaJ07zY_0\tcat\nO5b3XcEGZ4M_0\tcar\nO54XRvo6VU0_2\tmotorcycle\nO54XRvo6VU0_0\tmotorcycle\nO59A3lMogSo_0\tgiraffe\nO59A3lMogSo_1\tgiraffe\nO6BXRuq_YcE_0\tdog\nO6BXRuq_YcE_2\tdog\nO6EtCByhFZI_0\ttruck\nO6Jf2yxCTuI_0\tcat\nO6Uln7GkqDA_0\tskateboard\nO6b3a--pX3E_0\tbird\nO6kqsEuKhis_0\tbird\nO69gCmR0LvA_2\tmotorcycle\nO69gCmR0LvA_3\tmotorcycle\nO7ReHsig5IQ_1\tknife\nO7Wrpfzb8_g_0\tbear\nO7lvzdzmX5k_2\tbicycle\nO8BNclEPo5w_2\tdog\nO8BNclEPo5w_1\tdog\nO8f0Dhn1as0_0\tumbrella\nO8sB46kfM28_7\tumbrella\nO8sB46kfM28_6\tumbrella\nO8sB46kfM28_13\tumbrella\nO9Duu2Un8AE_0\tskateboard\nO9Duu2Un8AE_1\tskateboard\nO9Duu2Un8AE_2\tskateboard\nO9EqKcj_CPs_0\tumbrella\nO9iWg3ZqLcU_2\tbear\nO-NZJ4-eoQ8_0\tbus\nO-ZUr1bQzp4_6\tumbrella\nO-kJ078YJq4_7\ttruck\nO-2S79hisI8_0\tzebra\nO-4CV4-x7Tk_0\tdog\nO_D7M00pmjQ_0\tmotorcycle\nO_PCiV3NICw_0\tdog\nO_bAX_ruSNQ_0\tskateboard\nO_fZm7Mblgg_0\tknife\nO_mRo8YLc50_0\tumbrella\nO_3VssPsSVQ_5\tbicycle\nPABLxf3U8qc_2\tbicycle\nPABLxf3U8qc_4\tbicycle\nPABLxf3U8qc_1\tbicycle\nPASMcbnOtUM_1\tbird\nPAZBEMKPQEw_4\tboat\nPAbB9I6MC_o_2\tboat\nPBD1IW-vA6Y_0\tdog\nPBD1IW-vA6Y_1\tdog\nPBQjiKBWtao_1\tbicycle\nPBQjiKBWtao_3\tbicycle\nPBqIT1T_Tl4_2\tumbrella\nPByJb40LNJ4_28\tbicycle\nPByJb40LNJ4_30\tbicycle\nPByJb40LNJ4_3\tbicycle\nPByJb40LNJ4_13\tbicycle\nPByJb40LNJ4_18\tbicycle\nPByJb40LNJ4_22\tbicycle\nPB8sWVNFkDw_1\tmotorcycle\nmxA8JbJ0Do8_0\tcar\nmxFga0703Mc_0\tbird\nmxMCBmJ5owQ_2\ttruck\nmxXH5aZCSJ8_0\ttruck\nmxYl5Y1KAiY_0\tdog\nmxZgNkjbyxk_1\tknife\nmxeuMHAWMxo_6\tknife\nmxeuMHAWMxo_7\tknife\nmxeuMHAWMxo_9\tknife\nmxsTfEQlVgM_0\tmotorcycle\nmxvG6gSVYuo_0\tbicycle\nmxwmtm7rKF8_0\tcar\nmxxiqhZzhEE_0\tmotorcycle\nmxyHDUSMhLs_0\tcat\nmx2i3CYeEEE_0\tbear\nmyRelcztkqo_1\tknife\nmyWzn06fmDI_0\tdog\nmyY1Ijlbknw_1\tbicycle\nmyY1Ijlbknw_4\tbicycle\nmyY1Ijlbknw_5\tbicycle\nmyY1Ijlbknw_2\tbicycle\nmymtiyldysk_0\ttruck\nmzGmbowEFfA_1\tknife\nmzMgXA_v8q4_0\tmotorcycle\nmzYPSSUS--w_2\tboat\nmzYPSSUS--w_0\tboat\nmzdD_0CKek
Q_0\tmotorcycle\nmzfrEqAhHeY_0\tbus\nmzm_D3J8zqQ_0\tumbrella\nmzyu28WsuFs_0\tmotorcycle\nm0MVwwL_0MM_0\tbicycle\nm0gukhoxW0Q_0\tskateboard\nm0gukhoxW0Q_1\tskateboard\nm0gukhoxW0Q_2\tskateboard\nm08CnM1FBR0_0\tcat\nm0_tPmnque0_0\tbicycle\nm0_tPmnque0_1\tbicycle\nm1Qhj9jYohk_0\tbus\nm1pFyDGuVzk_1\tskateboard\nm1pFyDGuVzk_2\tskateboard\nm2StZDAc1yw_0\tbird\nm2uQowbhYDc_1\tbear\nm3AM4AQLDo0_0\tzebra\nm3AM4AQLDo0_1\tzebra\nm3RCOnTUyMY_0\tboat\nm3RCOnTUyMY_1\tboat\nm3SOT8NCOEY_0\tbicycle\nm3cgfDs0_G8_2\tdog\nm3fctWcU4as_0\tmotorcycle\nm3sztS1QC3s_0\tcat\nm3uDjNrfbD8_1\tbear\nm35CwgXROHw_0\tcar\nm4qZSrgBZkc_0\tbird\nm4qZSrgBZkc_1\tbird\nm6NemUzZQFc_1\tmotorcycle\nm6NemUzZQFc_0\tmotorcycle\nm6S6MEQgo2E_2\tmotorcycle\nm6S6MEQgo2E_4\tmotorcycle\nm6hQABEUkQQ_4\tboat\nm6z3sbKYwcc_3\tbus\nm6z3sbKYwcc_4\tbus\nm669S-54lMc_0\tmotorcycle\nm669S-54lMc_1\tmotorcycle\nm7djLwb_a5k_0\tcar\nm7k5fJXTZPI_5\tbird\nm7xUarlXKEw_0\tumbrella\nm7xUarlXKEw_4\tumbrella\nm7xUarlXKEw_1\tumbrella\nm7xUarlXKEw_2\tumbrella\nm8B-pb1I7nc_0\tcat\nm8YA8dXocmg_2\tboat\nm8t6gPBCxr8_0\ttruck\nm9HGLakPqSo_1\tbear\nm-NEL2Jq0nQ_2\tcar\nm-dKTMwfPqo_0\ttruck\nm_JHW_eCKY0_0\tumbrella\nm_dOsn1chuA_1\tbus\nm_dOsn1chuA_2\tbus\nPCC9sJ4Gdxw_0\tcar\nPCeoeGBYrJU_0\tdog\nPCqa_yHJ32g_2\tbicycle\nPC2plr6JdQg_0\tumbrella\nPC_wbEzLNLQ_0\tbicycle\nPC_wbEzLNLQ_1\tbicycle\nPDU92To89cE_1\tbird\nPDlKUKo06lI_0\tknife\nPDvSiH5Pf_0_0\tbus\nPEC7E1t79A8_0\tcar\nPEJFRzyvIBc_0\tbird\nPEJvGdLGOjU_0\tzebra\nPEY59JrOz5I_1\tbird\nPEY59JrOz5I_0\tbird\nPEfpmwboH3w_0\tbus\nPEtsR4S5Zzg_0\tbicycle\nPE_zE5T1ayo_0\tcat\nPFJiRWGaPaw_0\tcar\nPFJiRWGaPaw_1\tcar\nPFa_RCiQVjA_0\tskateboard\nPFjuIzuDmJs_1\tknife\nPF8HAptOIC8_1\tcar\nPGEM0ys1sGE_0\tknife\nPGMimFwsl54_0\tcat\nPGP0PEOv3zw_2\tbear\nPGP0PEOv3zw_0\tbear\nPGipyYSRHso_0\tbicycle\nPGn623RKWNA_1\tcar\nPG8bMx6DuSo_0\tknife\nPHeQ1xoUBgg_1\tboat\nPHmnvFIAtHo_0\tbus\nPHxuey2u6UE_0\tskateboard\nPIDvuyKFIJ8_0\tcat\nPIT2XsuODRE_0\tbird\nPIa767e6xuQ_0\tcat\nPIkhnCxrF9g_0\tcat\nPInIdEVTPn0_7\ttruck\nPI5ROW9ewOg_0\tcat\nPJoSJpMWo0Y_3\tskateboard\nPKTJIVIuSFw_0\ttruck\nPKZXF6Hj0kw_0\tbird\nPKZXF6Hj0kw_2\tbird\nPKZXF6Hj0kw_1\tbird\nPKtfgOMwx4A_0\tdog\nPK-4bXZDtlA_1\tskateboard\nPLO2xY76oh4_0\tmotorcycle\nPLVEvFhXHAE_0\ttruck\nPLVEvFhXHAE_1\ttruck\nPLd8HlO4HYo_1\tcat\nPLd8HlO4HYo_0\tcat\nPLwQ0AHwZgg_1\tskateboard\nPLwQ0AHwZgg_2\tskateboard\nPL2FcMREy_0_0\tbicycle\nPMRnsvlMF4A_0\tskateboard\nPMUqAknVm2Q_0\tmotorcycle\nPMXmKup8jy4_0\tboat\nPMkiPjm9XdY_1\tmotorcycle\nPM028PEyjv0_4\tbear\nPNpDnymoq8w_0\ttruck\nPN6PB668zV4_0\ttruck\nPN86cQumWDU_0\tmotorcycle\nPN_b6R9HxwQ_2\tcat\nPOQalChDjmU_0\tskateboard\nPOW6F8MZMTQ_1\tbird\nPO-OnjGHjDk_0\tbus\nPPI6aG2QFaM_0\tbird\nPPdV273cZC8_0\tskateboard\nPPhYyYHNaQ4_2\tboat\nPPhYyYHNaQ4_3\tboat\nPP5_L_EZsmE_0\tbird\nPQI2zG7I8jI_1\tbus\nPQjM0fGHXds_0\tbird\nnARlDpJ1mzQ_1\tdog\nnAmX6FEKmTg_0\ttruck\nnAsHFcuT16U_0\tskateboard\nnAsHFcuT16U_1\tskateboard\nnBLWjCuzp2g_0\tdog\nnBPhMvA4QIs_0\tdog\nnBXKLM2hLN0_1\tcar\nnBtF1BDR8wE_0\tmotorcycle\nnCKBmlhUPYg_0\tcat\nnCPhfqQsjIQ_0\tmotorcycle\nnCPhfqQsjIQ_1\tmotorcycle\nnCe_XQHu77g_0\ttruck\nnCgjbB7wxoE_0\tbus\nnDsb271W8XU_1\tcar\nnEFtdboPB2w_1\tbear\nnEIawnnD8V8_0\ttruck\nnELgP3wAnm8_0\tdog\nnEM7mY_k1_4_0\tboat\nnEVFHD_9xCw_1\tbird\nnEtqWL5nz_U_0\tbus\nnEtqWL5nz_U_1\tbus\nnEyJKW3bMCc_0\tdog\nnE6lY5G16lE_1\tbicycle\nnE6lY5G16lE_2\tbicycle\nnE6lY5G16lE_0\tbicycle\nnFQvQPqMjpk_0\tcar\nnFZrdv6K4pg_0\tmotorcycle\nnFa5TGw-b5Y_0\tbicycle\nnF28ACSGHM8_0\tboat\nnF444n6UUJE_0\tbear\nnF444n6UUJE_4\tbear\nnGQ3Hq6P5tM_0\tcar\nnGnDoylbNm8_1\tbear\nnHAF0LI8CPk_0\ttruck\nnHAF0LI8
CPk_1\ttruck\nnHAF0LI8CPk_2\ttruck\nnHApjxTb0fI_0\tumbrella\nnHAt_MmKZtA_0\tdog\nnHRioXgb-Fo_0\tbird\nnHbHOfTnrtg_0\tdog\nnHbHOfTnrtg_2\tdog\nnHe8j-osZck_0\tdog\nnH9AXssn9vw_0\tumbrella\nnIIQLgiJpz4_0\tmotorcycle\nnIqnT8pJFz0_0\tknife\nnJF2wWsJCd8_0\tcat\nnJuhir_bIpw_0\tcat\nnJ6iwd_XQso_0\tumbrella\nnJ6uR6SE01w_0\tbicycle\nnKM_iCO6bKs_0\tbus\nnKS1tzA_Hrk_0\tskateboard\nnKUBzJ38GgY_1\tboat\nnK-2zxkNCuA_0\tcat\nnLED5Us6rMo_0\tmotorcycle\nnLL3PMe48dQ_0\tboat\nnLXX8_SfZs0_0\tcat\nnLn2LN33uxg_0\tcat\nnLx78Uv2dmc_3\tskateboard\nnMbLyO3605c_0\tknife\nnMo_-oHL7bU_0\tknife\nnMtxrG4hH5M_0\tskateboard\nnMyhi847s6A_0\tknife\nnNNF1j89RS0_0\tbear\nnNScwJL6ym0_0\tmotorcycle\nnNeaR2o9KMY_0\tboat\nnNwEBFJZT8U_0\tbird\nnOe7o_AaOUs_3\tskateboard\nnOfyHwhf35s_0\tbus\nnPBFLS60OYk_5\ttruck\nnPhpYRGfHlw_0\tbear\nnP5wigEk-3A_3\tknife\nPQsHE_w_Q5I_1\tknife\nPQuYVLwcT7k_0\tskateboard\nPQ4gPP2l3RY_0\tbus\nPQ9ZEkeKIzs_0\tskateboard\nPRIJbfolHpE_0\tumbrella\nPRIw6kIS_oM_0\tmotorcycle\nPRg6CE_exgE_2\tdog\nPRoAGpjxUIQ_1\tdog\nPSdh0lzfg3M_0\tbus\nPSrvUaBxbgU_0\tmotorcycle\nPS_CABKe3Yk_0\tmotorcycle\nPTKnZd28Sac_2\tdog\nPTORa3OCyoU_1\ttruck\nPTxm2ZRQbNg_0\tskateboard\nPTxm2ZRQbNg_5\tskateboard\nPTxm2ZRQbNg_1\tskateboard\nPTxm2ZRQbNg_2\tskateboard\nPT2XxI2FufM_0\tbus\nPT3felQmrwU_1\tbear\nPT6KXLLxhes_0\tbird\nPUFo51ngpe8_0\tbus\nPUeS5CCMoa4_1\tzebra\nPUgpXWoI6nw_2\tbird\nPUiSf8EuinE_2\tbear\nPU3x1IpbndQ_0\tknife\nPU5v_AtaKKw_9\tbird\nPU5v_AtaKKw_2\tbird\nPU5v_AtaKKw_3\tbird\nPU5v_AtaKKw_4\tbird\nPU5v_AtaKKw_5\tbird\nPU5v_AtaKKw_7\tbird\nPU-lRdkaqdg_0\tcat\nPVV-saboi8Q_0\ttruck\nPVXtjPyNMms_0\tdog\nPV6mXKbH058_0\tskateboard\nPWIWGwJZENs_0\tdog\nPWQGxn3c5iQ_2\tknife\nPWQGxn3c5iQ_0\tknife\nPWs7zuWiKZo_0\tbus\nPW7XGdRhgKI_0\tcat\nPW97rAj3_84_0\ttruck\nPXb9PHJghpA_0\tcat\nPYH5FxLfm3M_0\tbus\nPYOwGQUBJXY_1\tboat\nPYWfE8WhDKk_1\tknife\nPYohJALR7DA_1\tmotorcycle\nPYohJALR7DA_2\tmotorcycle\nPYsiftgJNrs_0\tmotorcycle\nPZEun35Hcoo_1\tdog\nPZNXXWorkrY_0\tmotorcycle\nPZSGccVPUm8_1\tbird\nPZjQiLyqHkw_0\ttruck\nPZoM9dv8P3A_1\tbear\nPZuGSUZ1N2w_0\tskateboard\nPZz86aIvTWU_0\tskateboard\nPZ3PfRXk2rQ_0\tcat\nPZ9YkHds_00_0\tdog\nPaVPMVUQwtM_7\tboat\nPaVPMVUQwtM_2\tboat\nPatPjxyHqvY_0\tboat\nPbPu-cnEMqo_0\tcat\nPbUb1IktyM0_0\tmotorcycle\nPbdnWP3AnKQ_1\tperson\nPbhIhdwp7nI_5\tknife\nPceERP83N7g_1\tdog\nPceERP83N7g_2\tdog\nPdER58jIvPg_0\tcat\nPdRRvS5p7TM_4\tbicycle\nPdRRvS5p7TM_0\tbicycle\nPdRRvS5p7TM_1\tbicycle\nPdgOy1B6ByE_0\tperson\nPdgOy1B6ByE_1\tmotorcycle\nPdkRSALRJOE_1\ttruck\nPdne4jISJMk_0\tbird\nPeXODrjPJpU_0\tmotorcycle\nPecvaJstdYE_0\tknife\nPejvg4LHBXw_1\tskateboard\nPeur7tMeMNc_11\tbicycle\nPeur7tMeMNc_12\tbicycle\nPeur7tMeMNc_20\tbicycle\nPeur7tMeMNc_21\tbicycle\nPeur7tMeMNc_5\tbicycle\nPew5sug67ao_0\tdog\nPfKS2L_bxBc_0\tcat\nPfOYq_uyVF8_1\tbird\nnQPFPYvmWtU_0\tskateboard\nnQd33JTaurM_0\tbird\nnQd33JTaurM_2\tbird\nnQmH_VIOI4o_0\tcat\nnRG70FCdevw_0\tbus\nnRP28gcIe5Y_0\tbird\nnRP8SwdbUGw_1\tbear\nnRr5gMvJ77k_0\tskateboard\nnSgcLfwMJu4_0\tdog\nnSvaQz0i9i8_1\tskateboard\nnSvaQz0i9i8_0\tskateboard\nnSz_BdDSYsk_1\tbear\nnS_SY6iDJ2U_3\tbear\nnTjbCPXR408_1\ttruck\nnTjbCPXR408_2\ttruck\nnTjbCPXR408_3\ttruck\nnTjbCPXR408_4\ttruck\nnTz3LA23B4U_0\tskateboard\nnUBgjOAcKBw_0\ttruck\nnUDvay-MfVs_0\ttruck\nnUVSuT7wfDs_0\tmotorcycle\nnUdbTm-FW0I_0\tbus\nnVAOU6r15Ww_3\tknife\nnVTMM3F16j0_1\tboat\nnVi9QbrUrjE_0\tmotorcycle\nnWvR8fiLxGw_0\ttruck\nnXD-zvpjC50_0\tcar\nnXG_fwbJQ-E_0\tcar\nnXjIIWFPSd4_0\tcat\nnXlSVy8CmMk_0\ttruck\nnXpq0p9VBXc_0\tboat\nnXqE-XROi78_0\tbear\nnXqQPuJmTZo_0\tcat\nnYYFquwhxeI_0\tcat\nnYqRuOF_Uao_2\tcar\nnYqRuOF_Uao_0\tcar\nnYqRu
OF_Uao_1\tcar\nnYut3zBSbuM_0\tbear\nnY0xtzTME34_1\tcat\nnY2XarSrm7Y_0\tboat\nnY2XarSrm7Y_1\tboat\nnY3BS_3Mq6o_0\tmotorcycle\nnY3fRfvoh9w_4\tbear\nnY3fRfvoh9w_0\tbear\nnY_icz32gn8_0\tcat\nnZHGbmVkhrE_0\tcat\nnZn4xAbcGSk_0\tcat\nnaE1svJuCTw_0\ttruck\nnaE1svJuCTw_1\ttruck\nnaR-9rNf5fE_0\tskateboard\nnalqTKM6890_0\tumbrella\nnalqTKM6890_1\tumbrella\nnalqTKM6890_3\tumbrella\nnbCix4zvF_E_0\tumbrella\nnbcH6NfapD0_0\tboat\nncuqh0iglYU_2\tskateboard\nncu8gbqMkMc_0\tcat\nnc9aHs1_xzs_2\tmotorcycle\nndBPYFAVIiM_0\tbird\nndJ2_mPZktw_2\tbear\nndJ2_mPZktw_1\tbear\nndMfXyYPfAM_0\tbird\nndNs3q8tY9U_0\tbus\nndO2b-r-Krs_0\tmotorcycle\nndO2b-r-Krs_1\tmotorcycle\nndj7VTH_PhE_0\tbird\nPfi9ZEQtgjY_0\tknife\nPfnFeL4ArA8_0\tskateboard\nPfpTZKfKeKY_2\ttruck\nPfpTZKfKeKY_0\ttruck\nPfpTZKfKeKY_1\ttruck\nPgBMaMqbYqA_0\tmotorcycle\nPgE6BAQmVQQ_0\tumbrella\nPhFFfxYo2_o_1\tdog\nPhJOcszed6A_1\tcar\nPhJ5rQ5VmeY_0\tskateboard\nPhjPRYTcJwQ_1\tcar\nPhyQoxFlTMU_0\ttruck\nPh8Vag9VxRU_0\tzebra\nPh8Vag9VxRU_4\tzebra\nPh8Vag9VxRU_1\tzebra\nPh8Vag9VxRU_2\tzebra\nPh8Vag9VxRU_3\tzebra\nPiO6F4X8k_M_1\ttruck\nPiO6F4X8k_M_2\ttruck\nPiRy-T8d0gQ_1\tskateboard\nPiRy-T8d0gQ_0\tskateboard\nPi_aEuQD5gA_5\tumbrella\nPi_aEuQD5gA_8\tumbrella\nPjAWqdid4rw_1\tumbrella\nPjBOLvrlicY_0\tcar\nPjBOLvrlicY_1\tcar\nPjBOLvrlicY_2\tcar\nPjTtsfl7KZ4_0\tcat\nPjjO6IaSiuo_0\tskateboard\nPjjV-pCjgqc_1\tbird\nPjk0d9eP2gI_0\tdog\nPjm-ptGWuWU_0\tdog\nPjpGwiZ8mK8_0\tbus\nPjuUsIXzSzQ_0\ttruck\nPjwfhUvbBNI_0\tskateboard\nPj9588RHCHM_1\tcar\nPkktNSL9IjE_0\tbird\nPlKRGU_XIzs_3\tboat\nPlfCXfMXcs0_0\tskateboard\nPlfCXfMXcs0_1\tskateboard\nPltDcKetGYw_0\tknife\nPl6ja9eNHzE_3\tskateboard\nPl6ja9eNHzE_4\tskateboard\nPl6ja9eNHzE_1\tskateboard\nPl6ja9eNHzE_2\tskateboard\nPml224S87BE_0\tbird\nPm_2At7P8Yo_0\tbus\nPnt2XmUpT8Q_1\tbear\nPnt2g-tHwK4_0\ttruck\nPn1VFdKk5vQ_0\ttruck\nPoL9E8Yc2vo_0\tcar\nPoL9E8Yc2vo_1\tcar\nPoUPC9WCdiE_5\tdog\nPoV7Wn66UTo_0\tbird\nPolaH6r1Qds_4\ttruck\nPolaH6r1Qds_2\ttruck\nPpI7DZdWcfc_0\tperson\nPpZHxI0N3Wo_1\tmotorcycle\nPptqwylntWQ_1\tboat\nPp6vch1kMqE_0\tcat\nPqJOWTjp0ww_0\tcat\nPqKlF5nnOFs_0\tmotorcycle\nPqNDvGH2-iM_0\ttruck\nPq7tfwAqhIM_0\tmotorcycle\nPrV4kyVAwWE_0\tbear\nPrynn7mNQdQ_0\tknife\nPsVhOsDIopI_0\tumbrella\nPsfddppUmSk_0\tskateboard\nPsgPXqr-N7A_0\tskateboard\nPsvVwYAeKEc_1\tboat\nPsytJKFxV8c_0\tboat\nPszGWhekz-Y_0\tumbrella\nPs9ReRjYLVk_0\tbird\nPs9f-iFqX4M_0\tskateboard\nPtL5k4ew4q0_0\tcar\nPtR7vRI9mn0_0\tmotorcycle\nPtVUPVUYld8_1\tskateboard\nPtnFOxat4hE_0\tbear\nPtq7-B4P9Bw_0\tskateboard\nPt04IRhfVFk_0\tboat\nPt1vVuKH3fk_1\tskateboard\nPuV7SV-FwOU_1\tskateboard\nneA0T50G8TU_0\tcar\nneA0T50G8TU_1\tcar\nneA0T50G8TU_3\tcar\nnewqX6GTbrA_0\tcar\nne8K6jHnOT8_0\tboat\nnfnKsQItZjE_0\tskateboard\nnfxMe31pjec_4\ttruck\nngAKsr62ACQ_0\tknife\nngOtFD7Fxd4_2\tboat\nngZtMG--t4I_2\tbear\nnga4aEZQhJw_0\tknife\nngslQPG3kEI_1\tbird\nnhI3C5y85gw_0\ttruck\nnhdMHfvazLY_0\tumbrella\nnhoO0Evj7OQ_0\tumbrella\nnhoO0Evj7OQ_2\tumbrella\nnh56dQ3T3Mc_2\tboat\nnh56dQ3T3Mc_3\tboat\nniBK6HGH16U_0\tcat\nni3trEPOXck_0\tbird\nnjBEUyoUzlQ_0\tbird\nnjK1OLFCvv4_0\tcat\nnjMC5HAlnMU_1\tumbrella\nnjnGmGuXNdE_1\tknife\nnjn4TkIDn0k_0\ttruck\nnkJxMYiG9Ho_0\tbus\nnkSvwnLvBmw_4\tmotorcycle\nnkSvwnLvBmw_0\tmotorcycle\nnkSvwnLvBmw_2\tmotorcycle\nnkVPvJ3Smrg_0\tcat\nnkZ6NDOt4r4_0\tcat\nnkv5eof4q_M_0\tknife\nnlAePf94uwk_0\tcat\nnlupdJzbyKs_1\tbird\nnl83jp96h9s_2\tknife\nnmmeE-Dfds8_0\tbus\nnmwFYDopqBc_0\tdog\nnmwFYDopqBc_1\tdog\nnnIGNFEnlw8_0\tcar\nnnNkJ09YO9M_0\tmotorcycle\nnnUkcXbXbFM_0\tumbrella\nnnhUxSjBHP8_0\tumbrella\nnoCrLkdGSXw_0\tbus\nnoGmFOxKIr0_0\tperson\nnoIHydna8tw_3\tt
ruck\nnonoyrFpKVA_1\tzebra\nnonoyrFpKVA_4\tzebra\nnonoyrFpKVA_5\tzebra\nnonoyrFpKVA_0\tzebra\nnosbeVXMgAk_0\tknife\nnqN2uJfit8o_1\tcar\nnqPkd_Quci0_0\ttruck\nnqWs5hqd8Ps_0\tbus\nnqbsnsBZULc_0\ttruck\nnqnjh-NO9go_0\tbus\nnq8oHNlU_BQ_0\ttruck\nnrJURcGigjE_0\tmotorcycle\nnrlcROgdPlI_1\tcat\nPumbYcoJ5zE_0\ttruck\nPu8rYMOC0Iw_0\tdog\nPu_KMtdCGZY_1\ttruck\nPvSzSsQ4YCY_0\tcat\nPvuGk2XhJW8_0\tbird\nPv77ig8kBgE_0\tcat\nPwBNm2_oKbQ_1\tzebra\nPwE-w-S8nQc_0\tcat\nPwRb6q11-rw_7\tbear\nPwRb6q11-rw_0\tbear\nPwRb6q11-rw_3\tbear\nPwRb6q11-rw_4\tbear\nPwRb6q11-rw_5\tbear\nPwgxDMnN1SA_0\ttruck\nPwmBtcc64nM_0\tzebra\nPwmBtcc64nM_1\tzebra\nPxN14d54as8_0\ttruck\nPxOYpOxjFFc_0\tcat\nPx02MS-Ywo0_0\tknife\nPyevrWYsc8k_0\tmotorcycle\nPyr-sHCH2wc_4\ttruck\nPyvyP3J13FI_0\tknife\nPyvyP3J13FI_2\tknife\nPy6rKt-beyk_0\tknife\nPy-bAIGcQ1Y_1\tboat\nPzZ-Jr7jMk8_0\tbus\nP0S7eBa6_S4_0\tdog\nP0e6zPkZO5s_1\tknife\nP1_bfvyTku0_0\ttruck\nP2NRNopueuo_0\tumbrella\nP2SgXG0mMWU_0\ttruck\nP2Wv0vXNCqQ_0\tzebra\nP2kLj1DZq3I_1\tbird\nP2kLj1DZq3I_0\tbird\nP2ldC-_7nrs_1\tboat\nP256TqMIJZk_0\tdog\nP3MLJSbWlpg_1\tmotorcycle\nP3jB1tXpVMw_0\tbird\nP3q6jIrZyo4_1\tdog\nP4jpdzY2as8_0\tdog\nP43doVXj3y0_0\tcat\nP5DcP_VLnP4_0\tbear\nP5Gd_8k2O5s_0\ttruck\nP5VAaJj-1Rc_0\tdog\nP5kFeiFmPxw_0\tperson\nP5xsJqm2v6c_1\tmotorcycle\nP5xsJqm2v6c_2\tmotorcycle\nP5xsJqm2v6c_0\tmotorcycle\nP5yrLRVD86M_0\tdog\nP6Qm9u9GIE4_0\tmotorcycle\nP72vKWjKtik_0\ttruck\nP741OzHLvig_0\tdog\nP8BX8WSWRm8_0\tbus\nP8K2yXmSMwY_0\tbird\nP8MCMBcqM00_0\tmotorcycle\nP8MCMBcqM00_1\tmotorcycle\nP8h9iD7kPRQ_0\tbear\nP80sglFzhRI_0\tbear\nnsS9iSqNMew_1\tbus\nntO6br-N89w_0\tcat\nntVDuucoRIk_0\tcat\nnuVxM9m1nb8_0\tmotorcycle\nnuVxM9m1nb8_2\tmotorcycle\nnvIi1SvX-sU_0\tdog\nnvXKI_MhTTE_4\tknife\nnvYTcYLFUvc_2\tdog\nnvdIoQ5mj64_0\tknife\nnvxwnGRXwZY_1\tdog\nnxJkhdCqhc0_0\tdog\nnxUe9yoeHvs_0\tbear\nnxYGMvfgi8g_0\tperson\nnxj_aavOM50_0\tboat\nnxmr9gg0ses_1\tbear\nnx9Uisdggps_3\tknife\nnx9Uisdggps_0\tknife\nnyOaHbw3DLo_0\tcat\nny2pC-BfLT0_2\tdog\nny2pC-BfLT0_0\tdog\nny2pC-BfLT0_1\tdog\nny3nZLL4cQ0_3\tmotorcycle\nnzGPh9yFDTI_5\ttruck\nnzQqdKnkQ9I_0\tzebra\nnzppX26-51c_0\tboat\nnzytVTFaYvs_0\tknife\nnzytVTFaYvs_1\tknife\nnzytVTFaYvs_3\tknife\nnz9DMQ9cPrw_0\tcat\nnz_YTLNErSY_1\ttruck\nn0P8wVonqY4_0\tmotorcycle\nn0T51DP8868_0\tbird\nn1VbuQk_3JY_0\tbird\nn1ZrqU8VSBA_2\tbus\nn2Xd8e_vz0w_0\tcat\nn2Xrvmq2r2I_0\tcat\nn2jvWkboChM_11\tbus\nn2jvWkboChM_10\tbus\nn2jvWkboChM_14\tbus\nn3EKpxnV5U8_0\tcar\nn3bFZVLqNvI_0\tumbrella\nn3iNRmzhO1U_0\tmotorcycle\nn3pRNFU0ovc_0\tbear\nn3pRNFU0ovc_1\tbear\nn38NmPI7Sss_0\tboat\nn4cdQF8d8UI_0\tknife\nn4mWuEmbbEM_0\tbird\nn5J7UxAi_70_5\tcar\nn5J7UxAi_70_1\tcar\nn5J7UxAi_70_3\tcar\nn5J7UxAi_70_4\tcar\nn5i5aZXPgok_1\tbus\nn5ojrsEczYM_1\ttruck\nn5wZ3Zin9uQ_0\tbus\nn5wZ3Zin9uQ_1\tbus\nn6cpTMT-Ci0_1\tcar\nn6sMWDd_j1c_0\tcat\nn6wMhru1Mx0_2\tcar\nn7HaOXaXWJw_2\ttruck\nn7NWTiq_W-c_0\tboat\nP9sfOBt9FI8_1\tbird\nP95Pyq4kglE_0\tknife\nP95Pyq4kglE_1\tknife\nP-EecPZ9zV4_0\tmotorcycle\nP-JbMZ89Hac_0\tcar\nP-SIr3rYBzg_0\tumbrella\nP-lf6syyjAs_0\tcat\nP-tXkGlSa_8_0\tmotorcycle\nP_A56tkbbmk_8\tumbrella\nP_A56tkbbmk_1\tumbrella\nP_A56tkbbmk_7\tumbrella\nP_un1_qBDWo_0\tumbrella\nQATQMMA9vo4_2\tmotorcycle\nQATjEG1LPL0_0\tbear\nQA4LOoc1Crg_0\ttruck\nQA__knfzZZM_0\tbird\nQBZUbx6SUyU_2\tbear\nQBbAz7q7E9c_0\tbus\nQCDUv9KNiWQ_2\tdog\nQCKzW_uA3vY_0\tmotorcycle\nQCl4OGNJdos_1\tbus\nQCqvd4xHZLs_0\tcat\nQCzgTA2cABU_0\tboat\nQDQgSF9ciHk_4\tknife\nQD4ioxu8LAk_0\tcat\nQEMoyw7o_f8_0\tdog\nQEQfoQOU_F8_1\tbird\nQFB5gDukoqg_0\tbus\nQGDhzG35q8c_0\tdog\nQGDhzG35q8c_1\tdog\nQGDhzG35q8c_2\tdog\nQGDhzG35q8c_3\tdog\nQ
GFSTul5MDQ_0\tknife\nQGcd6O1NAkY_1\tbus\nQGcd6O1NAkY_2\tbus\nQGv8jcDgmBY_0\tmotorcycle\nQG25-t2CqY0_0\tbus\nQG5tLrHw5Hk_0\tcat\nQHVkPy7f680_0\tcar\nQHVkPy7f680_2\tcar\nQHhXgNBSjV0_0\tumbrella\nQH2Vo_5h-x8_0\tcar\nQIe7ky6mJO8_0\tbear\nQIqf221MKYo_0\tbird\nQItwshU9sAQ_0\tcar\nQI65w7sMLtA_0\tcat\nQJIgRLU_fU8_0\tmotorcycle\nQJfS9bR2S4I_0\tcat\nQJsyPZ31U-0_0\tcat\nQKG7PXh0UoU_1\tbus\nQKG7PXh0UoU_5\tbus\nQKG7PXh0UoU_6\tbus\nQK9WWQe1WQU_0\tbus\nQLTztdEJ8Ts_0\tmotorcycle\nn7dIhGKEzWM_2\tboat\nn7hFNcaW9rw_0\tknife\nn77hlwjlW_Y_0\tdog\nn8IsRKE9S6k_0\tmotorcycle\nn8kFOAqnMao_0\tmotorcycle\nn9RozRHi7iI_1\tknife\nn9RozRHi7iI_3\tknife\nn9xiuvCd5Lw_1\tbear\nn-fT4fcLulk_0\tbear\nn-fT4fcLulk_4\tbear\nn-gEIxTHjBk_3\tbear\nn_EpRXVan0M_0\tcat\nn_J23TUQdl0_1\tbear\nn_PRUX4zrLw_0\tcar\nn_bIC-prc2E_0\tmotorcycle\noARh23g1-LA_0\tcat\noAhYK7brhk0_0\tdog\noAhYK7brhk0_2\tdog\noBDdj5mkGyc_1\tknife\noBraEPvaSi0_0\tbird\noBuzx2dwA_Q_2\tknife\noBzhDbxL57k_0\tbird\noCUkN7ySpf8_0\tmotorcycle\noCZ3WCK5BZU_1\tmotorcycle\noCf-LgXx6Dw_0\tbird\noDHO9J7vFwI_0\tboat\noDUJYHwNuS8_0\tbus\noDsRL8dvgLA_1\tbus\noDsRL8dvgLA_2\tbus\noF81nMQlA-4_2\tumbrella\noGMlnXjD9R0_0\tbird\noGuIyQiDsy0_2\tboat\noGuIyQiDsy0_0\tboat\noH-XJADp0FM_1\tbear\noH-XJADp0FM_2\tbear\noH-XJADp0FM_4\tbear\noH-XJADp0FM_5\tbear\noI5l1By4H7U_0\tcar\noI_peuU5xk8_5\tmotorcycle\noI_peuU5xk8_0\tmotorcycle\noI_peuU5xk8_3\tmotorcycle\noJD17uQnW_o_0\tdog\noJK_TUb7HoQ_3\tknife\noJLVcOe7CEU_0\tmotorcycle\noJervxxOCvY_0\tdog\noKTgwWf3FKA_0\tdog\nQLxMt8F3oYA_0\tcat\nQL4uK4sZxIU_0\tcat\nQL-hkYCV0BQ_0\tmotorcycle\nQMEIKO8LcEU_0\tmotorcycle\nQMGNMAZLRFY_1\tknife\nQMGNMAZLRFY_0\tknife\nQMHCb6-qyQE_4\tbird\nQMHCb6-qyQE_0\tbird\nQMHCb6-qyQE_3\tbird\nQMJHMIdkS0w_0\tboat\nQMVKAdAOrNY_0\tdog\nQNUGl2q9luk_6\tdog\nQNVeq1dY-gY_0\tbus\nQNV_xE7TePM_0\tumbrella\nQNV_xE7TePM_1\tumbrella\nQNaFT-Ch0Oc_1\tbird\nQNgnQe-MASw_0\tbus\nQNgnQe-MASw_2\tbus\nQNibPLG3_Q0_0\tdog\nQNibPLG3_Q0_1\tdog\nQNibPLG3_Q0_2\tdog\nQNrg73bCl7M_0\tbus\nQN5joVuigKw_0\tdog\nQOCUHjNieAs_0\tcat\nQOGKQmMhYE0_2\tknife\nQOQU7N2vIdQ_0\tdog\nQOcPhbRnGh4_0\tbird\nQOm8zog21wI_0\tbear\nQOp31EvHfRU_0\tcat\nQOs2s2r3hpY_2\tbird\nQOs2s2r3hpY_3\tbird\nQO1T0Gc_cJk_0\tbird\nQPwnbNFbZyY_0\tmotorcycle\nQQAQLPTkDwg_2\tbird\nQQAQLPTkDwg_0\tbird\nQQh4Cpr7tpM_0\tbear\nQQ7EaN8ArmM_0\tmotorcycle\nQQ-MUe-ni48_2\tmotorcycle\nQRXtuZBCXtA_0\tumbrella\nQRZ_xQK1gx8_0\tbus\nQRZ_xQK1gx8_1\tbus\nQR3BO_SYrpQ_0\tbird\nQR5EuXvYbms_0\tcar\nQSK1oOt_5R4_0\tknife\nQSld_dZQvpY_0\tbear\nQTPAOir-oYM_1\tknife\nQThuW0gGa20_0\tdog\nQTlzTtcPjwk_3\tcar\nQT0-oUhQtbk_0\tdog\nQT17xRXmBGA_0\tumbrella\nQVCd5pTgbds_0\tboat\nQVRM0OueKFY_0\tdog\nQVXv0Z1FCdg_0\tmotorcycle\nQVXzwEenImE_0\tbus\nQWBwnViynQA_0\tmotorcycle\nQWFR4XdQv2Y_0\tumbrella\nQWPkooq95So_1\tknife\nQWPkooq95So_2\tknife\nQWSsyFwwdO8_0\tdog\nQWl839SnUOs_0\tdog\nQW1BlOtH1bo_0\tcat\nQXAw2xD7Sgc_0\tmotorcycle\nQXB7sLTVqfM_0\tbear\nQXIGeVZ6Uqk_0\tbear\nQXVQ8S7aUB4_0\tknife\nQXjfaOwHSFo_1\tmotorcycle\nQXwh-lAa3Pk_0\tknife\nQXwh-lAa3Pk_4\tknife\nQXwh-lAa3Pk_5\tknife\nQY2pVib4cZE_0\tmotorcycle\nQZOPux7sysI_1\tdog\nQZOPux7sysI_0\tdog\nQZhaeUKdGYk_0\tmotorcycle\nQZpfX1aipco_1\tcar\nQZui5buTy7k_0\tbus\nQZ3FD2qszF8_0\tmotorcycle\nQZ3MWq6qwJI_0\tbus\nQaGjoVfIWLQ_0\tmotorcycle\nQaM6ny5gEFQ_0\tcat\noKY-KsLfJe4_0\tbird\noKY-KsLfJe4_1\tbird\noKbCNTwLJoI_0\tdog\noKe3Rcvn_TU_2\tcat\noK9TjDSQdSs_0\tcat\noK9erjaiRq4_0\tbus\noLRDfgRIJ-A_1\tbus\noLSjl-qN4M8_0\tdog\noLrou9S3K-0_1\tmotorcycle\noM_FQGUvPIk_1\tmotorcycle\noNFmLa8pU3A_0\tknife\noNLkf1j-v6Q_0\tcat\noNZOg6XoSrY_1\tdog\noNbWPkOIdxg_5\tcar\noNbWPkOIdxg_4\tcar\noNyfqJGJhrY_0\t
motorcycle\noPhE3ECqxf0_0\tbear\noPlhh62giKI_0\tcar\noPrG5_acHVU_2\tbird\noP0yHq-dlRY_0\tmotorcycle\noQV827pXDXA_0\tmotorcycle\noQXdls5ffZc_2\tbear\noQXdls5ffZc_0\tbear\noQXdls5ffZc_1\tbear\noQ7ARK51eHE_1\tdog\noQ7ARK51eHE_0\tdog\noR-7d677bYw_0\tmotorcycle\noSPVZs6_Bd4_0\tmotorcycle\noSVes8uNT5E_0\tmotorcycle\noSao8txZd7A_0\tmotorcycle\noSb17xrITtY_0\tmotorcycle\noSqq5UHBveo_0\tbear\noSxoAvNHNB0_0\tmotorcycle\noS60CV9BFs8_4\tbear\noTYr-qD5JOE_0\tbird\noTj1e8RI67A_0\tboat\noTlwKNdm3rE_0\tdog\noTuVBf1jiPM_3\tbear\noTuVBf1jiPM_0\tbear\noUHa0FV0wwM_1\tdog\noUVJrf3WBrs_1\tbus\noUVJrf3WBrs_3\tbus\noUuQYVAvtgs_0\tbird\noVUE-0XhhsQ_0\tcar\noVUE-0XhhsQ_2\tcar\noVUE-0XhhsQ_3\tcar\noV1vhE0ypUE_0\tcat\noV6wthYHnKA_3\tknife\noWFO_yss01s_0\tcat\noWI2O83zUJk_1\tcar\noWI2O83zUJk_0\tcar\noWYSJgX0THI_1\tdog\noXMW3YjDAqQ_2\tboat\noXaieymppqU_0\tcat\noX4YRc-No7Q_0\tdog\noYY_svQfTs0_1\tboat\nQahJqWjC1v0_0\tmotorcycle\nQakBz4K6hqw_0\tumbrella\nQbHAXTRKk8w_0\tknife\nQbHAXTRKk8w_1\tknife\nQbNU92uEUSc_0\tcat\nQbk_YIfY5q4_7\tknife\nQcLZ-b-0PxY_0\tboat\nQcU2S6m_GJk_0\tdog\nQcuHNJWb-AY_0\tcar\nQc0kbcpophI_0\tcar\nQc5ZW-ni9ZQ_0\tboat\nQeRfpcI_TTQ_0\tbear\nQebJi8pjWkk_0\tcar\nQeeG_4eNyg0_0\tdog\nQe1-M3oVaFs_1\tknife\nQfOdxYnCAKc_0\tbear\nQfOdxYnCAKc_2\tbear\nQfaVCQOGlMM_0\tmotorcycle\nQfgJh_s9H0I_1\tbird\nQfgJh_s9H0I_2\tbird\nQfr5Fc1k7Ic_0\tknife\nQfwCa3YapRg_0\tcat\nQgRbpAz8TuI_0\tbear\nQgRbpAz8TuI_5\tbear\nQgRbpAz8TuI_2\tbear\nQgXjMUMIe4Q_0\tcat\nQhbwOw5dHPg_0\tcat\nQhc3Bb_6Uq4_1\tmotorcycle\nQhc3Bb_6Uq4_0\tmotorcycle\nQhnEXqWFBuw_0\tbird\nQhxv39Tkzbs_1\tdog\nQiHJ2uYByjM_0\tmotorcycle\nQjV-g1D6Be0_0\tmotorcycle\nQjV-g1D6Be0_3\tmotorcycle\nQjV-g1D6Be0_1\tmotorcycle\nQjV-g1D6Be0_2\tmotorcycle\nQjdGUh1FtN4_1\tbus\nQjqhhoIx6nQ_0\tboat\nQj4Mfd45GOE_3\tbus\nQj4Mfd45GOE_0\tbus\nQkPH2LBso5c_0\tumbrella\nQkPLEWaH1bo_0\tcat\nQkkuZ_G7t48_0\tboat\nQkwI5-_QspU_0\tcat\nQk6G7eAHlCs_0\tdog\nQlcaO8pkzd4_0\tbear\nQliTvc637Yk_2\tboat\nQlieDL9xPyU_1\tmotorcycle\nQlxQKy1yzyI_3\tmotorcycle\nQmP4xj9S0mQ_0\tmotorcycle\nQmR3bvWDA1s_0\tboat\nQngGa73C1G8_0\tcat\nQnnV6lKKIgI_1\tknife\nQnuD7a8BM30_0\tdog\nQn9CU5O4FHU_0\tbus\nQn9Z0LVIxbo_0\tcar\nQoTopiP9k2o_2\tbus\noZLdU13R4uU_0\tmotorcycle\noZoTyJNjCJI_0\tbus\noZ6Py8Tx-sA_0\tdog\noZ9qkN9Q1X4_1\tbird\noaXGm1MdDoA_0\tcat\noajaYAOs_oI_1\tknife\noa_73oVbH38_0\tbird\noa_73oVbH38_1\tbird\nobbzKGrHOP0_0\tbird\nob70dcN35yg_0\tbird\nocNVbpQhB5g_0\tcat\nocPgZeXuFqs_0\tcar\nocj3mV2T-ls_1\tbird\noc4RRoFoUo0_0\tboat\nodsCgfz0yM8_0\tmotorcycle\noeIBPeBAEv8_0\tdog\noeVUkEvC3To_0\tboat\nofDmsqy24k0_0\tcar\nofJOKOICGco_0\tmotorcycle\nofvHImJKiAg_1\tbear\nofy3Sid451s_1\tbear\nogIewcLFxLo_0\tdog\nogLOXI-Kvcg_0\tknife\nogzWVQ5TC80_0\tcat\noh7uEf_YE40_1\tdog\noiItk_51540_5\tmotorcycle\noiKC4SxYNJE_0\tbus\noiRnmB7WQjQ_0\tbird\noiu_53B5AAc_0\tmotorcycle\nojFBoKltgfQ_0\tbus\nojFBoKltgfQ_1\tbus\nojFBoKltgfQ_2\tbus\nojQfL_XgMM0_2\tboat\nojz2xLrH-Ts_7\tcar\nokKrvzNb9IU_0\tcar\nokiIzmV8YLw_0\tcat\nokiIzmV8YLw_1\tcat\nokzrd8v1G-w_3\tboat\nomGx_muz0SY_1\tboat\nomngVtTFM1I_0\tumbrella\noms2XkgghV8_0\tboat\nQoqeX-W0RFw_0\tboat\nQoqeX-W0RFw_2\tboat\nQo0mxFOMVGc_0\tdog\nQpAWeYA1pc8_0\tcar\nQpDm5g1dELc_0\tbus\nQpD7CVh2Z_c_3\tknife\nQqdW9IMDHgs_0\tboat\nQqdW9IMDHgs_2\tboat\nQqdW9IMDHgs_3\tboat\nQqhZnuITXs8_2\tbird\nQqhZnuITXs8_3\tbird\nQqkblYN1YOg_0\tbus\nQrEjYyinITM_0\tcar\nQsQFhUd04jI_0\tmotorcycle\nQsQFhUd04jI_1\tmotorcycle\nQsV9BTogrKc_0\tknife\nQt78_24lkeM_0\tboat\nQu8xNQ6Vd04_0\tcat\nQvgmjwKuAeM_0\tumbrella\nQvqNodq3NxA_3\tbear\nQvsjDkJ_oho_0\tcat\nQwALBOsUby0_1\tknife\nQwYxgsacjx0_0\tknife\nQw9UvjSO9_Q_0\tbird\nQxx3WjrGmtE_2
\tbear\nQyc0xSSPT1E_0\tdog\nQzCvBtKWPjg_0\tperson\nQzPFEeJYDcE_0\tumbrella\nQz1R2sk37qg_3\tbear\nQz1R2sk37qg_5\tbear\nQz1R2sk37qg_6\tbear\nQz1R2sk37qg_7\tbear\nQ0HX6Jfnnb8_0\tbird\nQ0J1QbF_Vis_0\tbird\nQ0KhMTnvbxM_0\tbus\nQ01P6P7bm7E_0\tmotorcycle\nQ0-7SsSXMV0_0\tknife\nQ0-7SsSXMV0_2\tknife\nQ1RqyDERgxM_1\tbird\nQ1VXWNHzPqI_1\tcat\nQ1VXWNHzPqI_2\tcat\nQ197NAaQodY_0\tdog\nQ2Sop28spdM_0\tknife\nQ2bha73kLKM_0\tmotorcycle\nQ2vBCDtNAGI_0\tmotorcycle\nQ2zRXVl7bLI_0\tmotorcycle\nQ3ZxsgPKTGY_2\tbird\nQ3ZxsgPKTGY_3\tbird\nonoO4tamBlA_0\tknife\nonpRejbK_VE_0\tumbrella\nooJg7-nxmUw_0\tmotorcycle\nopOHceUyoXk_0\tcat\nopb_qoqO05s_0\tbird\noqUbqkDsSzI_1\tknife\noqvnxRx-0J4_1\tbird\noq4KPP5PYAo_1\tmotorcycle\norQkUDPfTg8_0\tboat\norTFjuPHzxU_3\tdog\norcE_uPKO_c_0\tbird\normZXNXni-U_0\tdog\nosYgSn6yOG0_0\tcat\nos3H6KzvGEg_1\tknife\notHFt4YAKeI_2\tdog\notvQKWvIXAE_0\tbus\nouFwG2YU59c_0\tmotorcycle\nouNsmVT6GRU_0\tcar\nouqFEe0ud_U_0\tmotorcycle\novHCJGK35r0_0\tknife\novHCJGK35r0_1\tknife\novQY7VA36gU_0\tbird\novRBelXjQ-A_0\tbird\novaFSf6jda4_1\tboat\novnkb_MuAlg_0\tbus\nov9yaGUtSEw_0\tbear\nov9yaGUtSEw_1\tbear\nowKiuZVov4U_2\tdog\nowaIraEDvqI_0\tumbrella\nowaIraEDvqI_1\tumbrella\nowb-43QL8Dc_0\tcat\noxKhcqfQV7k_0\tumbrella\noxZ42ECABUo_0\tmotorcycle\noxdCJK5GPS8_0\tdog\noxyS9oNIBaQ_2\tboat\noy52khlb79k_0\tcat\noy885M8rmDM_0\tbus\noy_Efqu_Zhk_0\tknife\no0CsAQaDp1k_0\tboat\no0VArHW9gpE_2\tdog\no0yyk1GchoE_2\tknife\no06poedEjtM_2\tknife\no1RqDbHx0IA_0\tumbrella\no12Lc5yZNco_1\tbear\no2E2ypLvzOo_1\tcar\nQ34_kBWh3QU_0\tmotorcycle\nQ4IH3ZOVKFQ_5\tbus\nQ4TELEHdcjA_0\tmotorcycle\nQ4YD_lW8JFE_1\tknife\nQ4afI-fku0A_0\tknife\nQ4d0z-q-UXQ_0\tbird\nQ4jZeoLzZXs_2\tbird\nQ5DrYh7pcTg_0\tcat\nQ5RabF9bK3o_0\tcar\nQ5cY3mt9NHI_1\tcar\nQ5cY3mt9NHI_3\tcar\nQ6Lg4c8W2XQ_0\tbus\nQ7SXsNoT9cc_1\tboat\nQ7TDTHQoPGc_0\tbird\nQ7TZ3TlDNzI_0\tbird\nQ7V8JjnLW_A_0\tperson\nQ7a4tWAU7-o_0\tdog\nQ8gHTSzR6h0_0\tcat\nQ807ZgwscUk_0\tcat\nQ9LvGsq1Mas_2\tbird\nQ9fbeFbARPY_0\tbird\nQ9qA-2ofuFc_0\tdog\nQ9qA-2ofuFc_1\tdog\nQ-JQokKqXZM_0\tmotorcycle\nQ-STF8c8RSE_0\tmotorcycle\nQ-S6ypfxn4w_1\tbus\nQ-VqbNMPAjE_0\tdog\nQ_a7bRv2dM0_1\tcat\nRAQAfTprH5s_0\tcat\nRAc8MyscjAA_4\tbear\nRAc8MyscjAA_0\tbear\nRAc8MyscjAA_3\tbear\nRAqMmf5FS_Y_0\tdog\nRBNNklw-NjE_0\tcar\nRBNNklw-NjE_1\tcar\nRBdpxD5mMy8_0\tcat\nRBssHo0ygdI_2\tcar\nRBssHo0ygdI_1\tcar\nRBvocl1t9qM_0\tcar\nRBvocl1t9qM_1\tcar\nRCzBVv_Vddo_0\tdog\nRC444E40nLY_0\tcat\nRC_ckl7o7sc_0\tdog\nRDq9wvYEiSI_0\tumbrella\nRD8OUO8u7oQ_0\tperson\nREBpFtJosSc_3\tbear\nREBpFtJosSc_4\tbear\nREBpFtJosSc_0\tbear\nREbm5i5vhcQ_0\tumbrella\nREbm5i5vhcQ_1\tumbrella\nREiwqNPkmew_4\tbear\nREiwqNPkmew_3\tbear\nREjT99mHV_g_0\tcat\nRFIE-agz3SA_0\tdog\nRFUZkHtGWvg_2\tbird\nRFUZkHtGWvg_1\tbird\nRFZG72_XG3U_0\tmotorcycle\nRFcz2p3w1oc_0\tbus\nRFhEq5WF9Io_0\tmotorcycle\nRFqSKdzXQFQ_0\tbus\no2z2zu4L1Ho_0\tcat\no3OdAgJnYlw_0\tumbrella\no3TpeQ7mhIQ_0\tbear\no4It_gqHKoM_0\tbus\no4It_gqHKoM_4\tbus\no4It_gqHKoM_5\tbus\no4bpCoFINtY_0\tbird\no4yKF7ZQge8_0\tcat\no4yxnKhoWrQ_0\tcat\no49yvv0vmJQ_0\tknife\no5TWf69h978_0\tmotorcycle\no5bJmNSZmGE_0\tcat\no6vw6_1pc_g_1\tperson\no6x94jhuMEw_0\tcat\no7UXYGmFww0_0\tknife\no8BqJTsAjnI_0\tboat\no8BqJTsAjnI_2\tboat\no8Gr9wZzcA0_0\tknife\no83uI_tdkrE_2\tcar\no9UpoUWgJWw_1\tmotorcycle\no9YqiVSTBVs_0\tmotorcycle\no9qB9kYt9Bc_0\tmotorcycle\no9vRwcqz30w_2\tbear\no98cAmKOAtk_2\ttruck\no_BpJHlv8bY_0\tcat\no_NYHfqWzBw_0\tcat\npAP3j2UmTAA_0\tcar\npAuz372kMrs_0\tboat\npAvBjM_cSCk_0\tumbrella\npA_f-DZ2FdI_1\tbus\npBj4KFDTwGg_0\tcat\npCPwOGObTcs_0\tumbrella\npCXmnj6vY7o_1\tknife\npCa3Tf27TcY_3\tbear\npCdwcy8npiE_2\tbear\n
pCfA0E-TIXo_0\tmotorcycle\npC9mu-CQ9fg_0\tcat\npDjjH1_G6Z0_1\tmotorcycle\npDjjH1_G6Z0_0\tperson\nRGT-FumEK7I_0\tcar\nRGXgv5gqM8k_0\tumbrella\nRGiE9-CME30_0\tmotorcycle\nRG6y27UUUMI_0\tknife\nRHHOcUqVF80_0\tknife\nRHSfZLRz95o_0\tboat\nRHrnX__15lI_0\tcar\nRIBigSX5_90_1\tbear\nRImslgwYbYk_2\tboat\nRIwUvnURoqs_0\tcat\nRI14PaJgb7E_0\tumbrella\nRJ95URcz63g_1\tmotorcycle\nRJ95URcz63g_0\tmotorcycle\nRKZ4YVnDywQ_0\tknife\nRKa1tJXFTAw_1\tcat\nRK8ZJaF2QHQ_5\tbear\nRK8ZJaF2QHQ_6\tbear\nRLP9M0bfpWo_0\tumbrella\nRMapunE2wEc_0\tboat\nRNPKsQSr2o8_0\tknife\nROfxuPZWET8_2\tbear\nROkJ79Y9T7s_0\tmotorcycle\nRPJ0SJeC5ck_1\tcar\nRPJ0SJeC5ck_2\tcar\nRPWms_VL6wY_0\tbus\nRPhdhEKBBAM_0\tmotorcycle\nRP81F6rIP4w_0\tmotorcycle\nRQ5liX_fOJw_0\tumbrella\nRREV1E0Mbhs_1\tknife\nRSXIvkOJQq0_1\tknife\nRSq71vJH9yc_0\tbus\nRStmsJCm7mo_1\tcar\nRSztnKS1IYI_0\tcar\nRTTysK1hBpg_0\tboat\nRTvVXaA35DI_0\tmotorcycle\nRT0tTVP14XE_1\tumbrella\nRT0tTVP14XE_4\tumbrella\nRT0tTVP14XE_6\tumbrella\npFCVfOX_UJ0_0\tumbrella\npGJMt9Jmk_Y_0\tcar\npGnZDXcCjSc_0\tbus\npHC850dBc-E_1\tcar\npHf0EP0QU9Y_0\tcat\npHueI1IUqzg_0\tcar\npIhqwiD8cks_0\tbus\npJXxn2DRWyI_0\tbus\npJYetmKuiE0_4\tbear\npJj28cMLcZc_0\tknife\npJl14EZ6-Mc_0\tumbrella\npKPRv5lL_DQ_1\tmotorcycle\npKz_g-J2O-A_1\tbus\npK1umZxS4nE_0\tknife\npLEV-uFmv6I_0\tcat\npLI_HgRsRow_4\tbus\npLQDtquQaSE_0\tbear\npLp7vmowqNs_0\tmotorcycle\npMHRlQ2NxeA_1\tboat\npMaT7qWMaV4_1\tbear\npMg2xwjkfVc_4\tumbrella\npNHKmiurxTg_0\tknife\npOCvwILBOCY_0\tboat\npOjuNMevoaM_0\tcar\npOq6RrgrXWY_0\tmotorcycle\npPyL4U8gYpM_0\tcat\npP22coNl6r4_0\tbus\npP5q-Bszfh0_0\tmotorcycle\npQMkOOTP0Lk_0\tcat\npSJypg6az1w_0\tbus\npSjKd_x9ycU_1\tboat\npSz961UYSrY_0\tmotorcycle\nRVvfyYc8jws_0\tumbrella\nRXAW31Vm7pU_0\tmotorcycle\nRXQ-E6_Y__c_1\tcar\nRZAlTTj0Z4o_0\tmotorcycle\nRZAlTTj0Z4o_1\tmotorcycle\nRZL2H_-y3vE_0\tumbrella\nRZrAehHE8aA_2\tknife\nRZrAehHE8aA_0\tknife\nRZ0yQkyeSd8_0\tboat\nRaZy_JiiJ3E_0\tmotorcycle\nRa48MJPLmUw_2\tmotorcycle\nRa48MJPLmUw_0\tmotorcycle\nRa48MJPLmUw_1\tmotorcycle\nRbQTcoldE8M_0\tbus\nRbRqkcC6l_A_0\tknife\nRb5tGSqtlFU_1\tmotorcycle\nRcSm0O0Ylc0_0\tcat\nRdNjlTlNbEA_0\tbus\nRdP6hW5p6ys_4\tcar\nRdUjywh70lM_1\tcat\nRdlWUo9fYmA_0\tmotorcycle\nRd4TvDZNwHs_0\tumbrella\nRfNyu5aooJs_0\tcar\nRfrtTbza00c_0\tboat\nRgBWTOo9hqo_0\tcat\nRgC0rdZCy2c_0\tmotorcycle\nRgFR8z8IzAQ_0\tcat\nRgUwlXzmX4Q_0\tboat\nRhYw3jSi0xY_0\tbus\nRhqz5maRjNs_0\tcat\nRh0zI8vpRWk_2\tknife\nRh7Y69j41EY_0\tbus\nRiCptCjnrqk_0\tcat\nRiOw5wO0xTg_3\tknife\nRid6twPtgIo_0\tcat\npTGbMPGsbCU_0\tcar\npTSbrP23T0s_0\tmotorcycle\npVCT-jEaSPE_1\tbear\npV8hPodV-zY_0\tmotorcycle\npXBltXzZZe0_0\tcar\npXcoix_wq4E_0\tcat\npZC4kceO-0g_0\tbus\npZJDlV5VS3Y_0\tmotorcycle\npZ7RohF8JgE_1\tknife\npaF1hQf-YFk_0\tboat\npalM4nIm6GU_0\tmotorcycle\npba0HVNnmbc_1\tmotorcycle\npcOsY0MSbh0_0\tbus\npcb_jPcg_U8_0\tbus\npcpHHo_gp-Q_0\tcat\npc2aHxzJDtQ_0\tcat\npdDVE4LsX54_4\tcar\npdDVE4LsX54_0\tcar\npdDVE4LsX54_1\tcar\npdDVE4LsX54_2\tcar\npdDVE4LsX54_3\tcar\npdDVE4LsX54_5\tcar\npd0IEWCwpUY_0\tbear\npd1BZjvbFNI_0\tknife\npgKdcFb2680_1\tmotorcycle\npg4m5Fi0Mhc_1\tcar\nRiq87Q_unPU_0\tcat\nRjDo0UDX9Ws_1\tknife\nRjItZnZQBKk_0\tcar\nRjqDxu3wf5o_0\tcat\nRkSzsg-k14I_0\tboat\nRktoQu-Wk0M_0\tcat\nRmFxIMl1tSU_0\tbear\nRmpv0oMhUCc_0\tbus\nRnEWcQNxWGY_0\tmotorcycle\nRnPY8wgKxj4_1\tcat\nRnQ-v8AJQbc_0\tmotorcycle\nRnjU70B_0cU_0\tbear\nRpTRF_oB1-I_2\tbear\nRpn1EcI_ESo_0\tknife\nRp8euBdhkR0_0\tmotorcycle\nRp8euBdhkR0_1\tmotorcycle\nRqs856i0jbs_0\tumbrella\nRrj0e5VSIgY_0\tcar\nRsw947loMaA_0\tcat\nRtSEfWF3PdI_1\tknife\nRtng6SCToEM_0\tcar\nRufUHX-TjyM_0\tbear\nRvHvTQC9Kr4_0\tbear\nRwC5kkt5VDU_1\tperso
n\nRwC5kkt5VDU_5\tmotorcycle\nRwVgY7zgnYM_0\tknife\nRwVgY7zgnYM_1\tknife\nRwYiNSlAYcE_0\tcar\nRwpY0u7t3vE_0\tumbrella\nRwp_dTfFI28_4\tboat\nRwz5T35lNgY_0\tcat\nRw5dzv79c-M_1\tmotorcycle\nRxLwy_iZqKg_1\tbear\nRxWhDOyHYNo_0\tcat\nphJS1iN6HFo_0\tumbrella\nphTyZcbKeQw_5\tbus\npihR4mhfwxM_0\tmotorcycle\npim0lzR8i1g_0\tcat\npix5Cxt_fUM_3\tknife\npjgi60dJalw_0\tcar\npjgi60dJalw_1\tcar\npjhNnA0142Y_0\tmotorcycle\npmszdloBDwA_0\tbear\npmszdloBDwA_2\tbear\npmszdloBDwA_5\tbear\npnMd28rPX7M_0\tmotorcycle\npncTBxEM4WM_0\tbus\npnjPhdpuKGc_0\tmotorcycle\npn0ZChK2ASs_0\tbear\nppAj6dnl62Y_0\tknife\nppAj6dnl62Y_1\tknife\nppJXGy7snUw_1\tknife\nppwjIgwParM_0\tboat\npq1swOh85gc_0\tboat\npq1swOh85gc_2\tboat\npq1swOh85gc_1\tboat\npriwWNrQnkI_1\tbear\nprwglbuvyZ8_1\tknife\nprw0IWDYBUM_0\tcat\npr3LOwTWNnk_1\tbus\npsOuOLCJNk8_0\tcat\npsTqTt0np_I_11\tbear\npsTqTt0np_I_3\tbear\npsTqTt0np_I_6\tbear\npsUASBNRwIE_0\tcar\npsUASBNRwIE_2\tcar\npsUASBNRwIE_4\tcar\nptCx-L_n2Yg_2\tbear\nptNC5ou_rOQ_1\tmotorcycle\npuZUIBS4Ceg_0\tcat\npuw9BfAKOHU_0\tbus\nRxiBbfFH3is_0\tknife\nRxiE2beIvjQ_0\tbear\nRyWLXS1Vrco_0\tknife\nRy4q0UokRjo_0\tmotorcycle\nRzWczJnyzmg_0\tcat\nRzWdM4_lg2c_4\tbear\nRzj5xv434WA_0\tbear\nRzrQOptkjFM_0\tmotorcycle\nRzrQOptkjFM_1\tmotorcycle\nR0hj1kAnMgs_0\tcar\nR0w6j1wmwo0_2\tknife\nR0w6j1wmwo0_3\tknife\nR1Fkwaa8CxU_0\tmotorcycle\nR2FlyNrjZBQ_2\tboat\nR2FlyNrjZBQ_1\tboat\nR2Fps165H9g_2\tknife\nR2XiIC1qbAM_0\tbear\nR2YmjDNC8oo_0\tbear\nR2duXYQhnFA_0\tcar\nR2sy6qbPc4c_0\tcar\nR23ZSmBA2Rg_0\tknife\nR3zhr1iboG0_0\tbus\nR4ktPNCb564_1\tbus\nR4vLajpLSMk_0\tcat\nR5CBlOfUL4w_0\tperson\nR5cIoEcqZ9E_1\tknife\nR5r3AIx_BoU_1\tknife\nR5r3AIx_BoU_2\tknife\nR6PuHPDiwPs_1\tcar\nR6f_t-MqO_s_0\tbus\nR6tsNuvoTus_0\tcar\nR6uZ5JpxQ88_0\tcat\nR6wk6JHQSeI_0\tknife\nR6wsV6cYN_w_1\tbus\nR7w-mdDyhG8_2\tknife\nR8TV702EIqs_0\tknife\nR8j0mjQR4lI_4\tboat\nR84Bj4PKOvE_0\tbear\nR84Bj4PKOvE_1\tbear\nR9LK4x3pO0Y_0\tcat\nR9L1I9EEE0g_0\tmotorcycle\nR9zDzUslz9g_0\tcar\nR9607CioN3U_0\tcar\nR99fGQRB6rM_1\tcar\nR-UGxl6KGoo_1\tbus\nR_LEKDTlVvs_5\tboat\nR_NxqXdz3RA_0\tcar\nR_UPR78XIvA_0\tknife\nSAFptHT-UpM_1\tboat\nSAFptHT-UpM_2\tboat\npvrO7c2imos_4\tcar\npwgqJO3yKHI_0\tcat\npwwdlKxLCqQ_1\tknife\npxBtDlmwesI_0\tcar\npxIlEGkEw5U_0\tcat\npxwl3iVkx08_0\tboat\npyAuY2v2U0I_0\tcat\npyTXP2GZRuM_0\tknife\npyTXP2GZRuM_1\tknife\npyTXP2GZRuM_2\tknife\npy0K3KEYfjA_2\tumbrella\npy0K3KEYfjA_4\tumbrella\npzZvI_g1S8M_0\tmotorcycle\np03u2BJIvyE_0\tbear\np03u2BJIvyE_1\tbear\np1p9QUFIi_8_0\tbus\np1_thBtA2-g_1\tbear\np2pRN03gXFk_0\tcat\np26eBX5AGCo_0\tboat\np3MF-uxvtWk_0\tbear\np32jOqTS5ec_0\tcat\np4MmW7gFlLI_0\tmotorcycle\np4MmW7gFlLI_1\tmotorcycle\np5NxEAfgmro_0\tmotorcycle\np5bLvlU8ua0_0\tmotorcycle\np5lUPYsz-HE_0\tcat\np5vt7l9pW-0_1\tmotorcycle\np5vt7l9pW-0_0\tperson\np5_O08ZNK_c_0\tmotorcycle\np6GkhJZsCi8_0\tcat\np6Rtu645O08_1\tmotorcycle\np6Rtu645O08_0\tmotorcycle\np6dBx3tBRr4_5\tbear\np6dCoZRaQOA_0\tboat\np6dCoZRaQOA_1\tboat\np6dCoZRaQOA_2\tboat\np7OlEbiu5to_0\tcat\np7WwUD62qfY_0\tmotorcycle\np7gjVQyX07A_0\tcat\np7pnYAaDqPI_0\tumbrella\np7sHze5SC0g_4\tbear\np8MEDllYMKg_0\tcat\np8RUtiaGu5U_0\tcat\np8ZUCNMnKpE_0\tcar\np89fuT8e_zk_0\tcat\np8-8JqAgtv0_0\tmotorcycle\np9XjLjpQX-8_0\tcat\np9by0qLqHOQ_0\tknife\nSAkHT1Ozg1c_0\tmotorcycle\nSAkHT1Ozg1c_2\tmotorcycle\nSA1Tb1XbngU_0\tcat\nSB1UBp1PVf4_2\tbus\nSDKsL-L7GbI_0\tknife\nSDbe9JVnITk_0\tknife\nSDk3Y3jzalg_0\tknife\nSEp92WMharw_0\tbus\nSExW2mVb1Mc_2\tcar\nSExW2mVb1Mc_0\tcar\nSExW2mVb1Mc_1\tcar\nSE5Rg8Qpb8c_1\tknife\nSFB2FGuZb6w_0\tmotorcycle\nSFMc-UCkcT8_0\tcat\nSF8c7EeFPPk_0\tmotorcycle\nSHcJfBJBQe4_0\tbear\nSHxyKRdKRc8_0
\tcat\nSHxyKRdKRc8_1\tcat\nSH1noq6GrKw_0\tknife\nSISqo1FBefA_0\tbus\nSIbLAYX2J_A_0\tbear\nSJAZnOnRtag_1\tbear\nSJsxWsiEuTg_0\tmotorcycle\nSKNl4frouUY_1\tknife\nSLEOr8bmm2w_0\tmotorcycle\nSLEOr8bmm2w_1\tmotorcycle\nSLzqvins4p8_0\tbear\nSMYpv_Ea3w8_0\tperson\nSM6BtnyDz5w_0\tcat\nSNZ0xGGmZvU_0\tknife\nSNhnfqJHoI4_0\tmotorcycle\nSNl4Gq_2aVQ_0\tbear\nSNrosAtwG2k_4\tbus\nSOYkQc-toMU_0\tbear\nSOYkQc-toMU_2\tbear\np-J0yyoF0lU_0\tmotorcycle\np_C9Zwt3N5c_0\tumbrella\nqAJSLnflSrQ_0\tcat\nqA5rC8MxCoA_2\tbear\nqCzILENpEWk_0\tboat\nqCz4ft26CAw_2\tknife\nqDobzjbo_aM_0\tcat\nqEcNn2_TQC8_0\tcat\nqEei5YCRiHA_0\tcar\nqEj3r8dtvKg_0\tboat\nqE5fKHWTLMw_1\tbear\nqFR-yuWiHVk_3\tknife\nqFR-yuWiHVk_4\tknife\nqFwugOO0pC0_0\tknife\nqGjYX-iNrPE_0\tboat\nqGohF2oMPS0_0\tmotorcycle\nqGxfRwBmBEc_0\tmotorcycle\nqGxfRwBmBEc_1\tmotorcycle\nqHKwI-35nNU_0\tmotorcycle\nqIIu-MIIYIE_0\tboat\nqINDYDOlPLA_0\tmotorcycle\nqIPydTwqwmI_3\tcar\nqIPydTwqwmI_0\tcar\nqIPydTwqwmI_1\tcar\nqIPydTwqwmI_2\tcar\nqIkNPwKd6ag_0\tknife\nqIkNPwKd6ag_1\tknife\nqInP3tWVtWE_0\tcat\nqJMxoAbx9YU_0\tboat\nqKxQVpaLChg_0\tbear\nqLfa8e4ffQY_0\tbus\nqL6LVXg4Vt4_0\tcat\nqMEMl1FFVIM_2\tumbrella\nSPRByN4TiFg_1\tboat\nSPsOjXxZymk_1\tboat\nSQ_ChhUwWng_0\tbus\nSRUB2kzDBTk_0\tperson\nSSFOqr1ARgI_1\tumbrella\nSSaN8vntuYs_0\tbear\nSTTRwCtQ8_8_0\tboat\nST6aA292Pos_0\tmotorcycle\nSUMc-5fiNzQ_0\tmotorcycle\nSUnPNgAE_ho_2\tboat\nSUyRs3xvc9c_0\tcat\nSVBc-W37yW0_0\tumbrella\nSVSMGxy8Z6I_0\tcat\nSVXaBPnNWO0_0\tknife\nSVXaBPnNWO0_2\tknife\nSVt7vQ8LYZU_0\tbear\nSV70cwNA6o8_0\tknife\nSWJyq_mITbE_0\tboat\nSXmy9BLHr84_0\tbus\nSXvXN3waFWs_6\tbear\nSYCg5NuWc60_0\tmotorcycle\nSaHw7yyoeJg_0\tcat\nSaSgclGWGwE_1\tmotorcycle\nSaSgclGWGwE_3\tmotorcycle\nSa1iRLR4d_c_0\tbus\nSa4L2rdyD10_0\tknife\nSbWCXCuXBqY_1\tbear\nSe3XbBA4N4o_3\tknife\nSe3wtx4DzwE_5\tbus\nSe3wtx4DzwE_1\tbus\nSe3wtx4DzwE_2\tbus\nqMlYXZy1Tow_0\tbus\nqNfS9Y5zs-Y_0\tcar\nqOaABf_zb9U_1\tboat\nqO7qHolBYj4_0\tbus\nqO8D0E7MjOI_0\tcat\nqPGkJRPae6A_0\tbus\nqPMDgkgSTnA_2\tmotorcycle\nqPaox7otsVI_0\tknife\nqPwAWEtJBqA_2\tmotorcycle\nqPwAWEtJBqA_0\tmotorcycle\nqPwAWEtJBqA_1\tmotorcycle\nqPyR7CpZ6l0_0\tknife\nqP88t7GfZc8_0\tknife\nqQaIW7IjCZo_3\tmotorcycle\nqQaIW7IjCZo_0\tmotorcycle\nqQaIW7IjCZo_1\tmotorcycle\nqQdtuBd-SgI_0\tknife\nqQlsMjenbfE_2\tknife\nqQ5tf8s7KrE_0\tbus\nqRO6U_tg6SE_0\tcat\nqR4kw8rf-FU_0\tmotorcycle\nqSQGG-K89mg_1\tknife\nqSgOYqBt_8k_0\tbus\nqSnoKy6T22k_0\tmotorcycle\nqTKtODdEZIg_0\tcat\nqTut_O_LppA_0\tbear\nqT00uOC9JpQ_0\tcar\nqUuTEKdKNNg_0\tcar\nqU7DT4ipQHw_0\tcat\nqVSnhT0Luh8_0\tcat\nqVyAlx4rMTo_2\tbear\nqV7U9CRjZGI_0\tcat\nqWN8i7sJyVg_4\tumbrella\nqWcXQWy7yw8_1\tbus\nqW-zRq8VTV0_0\tboat\nqX8RcjE0tjs_0\tmotorcycle\nqX-YEHlu0Kg_2\tknife\nqZWxhCk8AX0_0\tknife\nqZf1fw737A8_1\tcar\nqZyxILyLOv0_0\tknife\nSfZLu5uG7mc_0\tcar\nSgDdyLB3fFo_1\tmotorcycle\nSgHH9KN_nkY_2\tmotorcycle\nSgOvlqqKbEI_0\tbear\nSgSsk-eeClA_0\tcat\nShHLzcBozxo_1\tboat\nShPl28Zw1kU_8\tcar\nShPl28Zw1kU_3\tcar\nShPl28Zw1kU_7\tcar\nShPl28Zw1kU_9\tcar\nShaLoFJZv-M_1\tknife\nShhC84AwZ04_0\tbus\nSh6uHJRUnP4_0\tcat\nSiSP3Kko4VM_0\tbus\nSi3psXQA46c_0\tbus\nSjLNVLIdpbc_0\tcat\nSj0pcvct_3k_0\tmotorcycle\nSkLwUmczAMo_2\tknife\nSkLwUmczAMo_1\tknife\nSkVIH0IZI1I_0\tmotorcycle\nSlBZM22tlSU_0\tknife\nSlIzgQZ63h4_1\tknife\nSlWmnHWeqIE_0\tboat\nSlYqzpZkWho_0\tbear\nSmCvuBfyU5o_0\tmotorcycle\nSmCvuBfyU5o_1\tmotorcycle\nSn8nb_cv5K4_0\tmotorcycle\nSn8nb_cv5K4_1\tmotorcycle\nSo5dCmgNRtU_0\tbear\nSo-dFj7N07Y_0\tcar\nSpGfQe7sWIQ_0\tmotorcycle\nSpuAy2Z1ejE_0\tboat\nSpx8fHkY0Ac_0\tbear\nSqUzKvBRVmQ_0\tcat\nSqkoepvLN3c_0\tmotorcycle\nSqkoepvLN3c_1\tmotorcycle\nSq-LvVdVwhc_4\t
bear\nSrBwCHcEe4g_0\tcat\nSrPgW-L7Gps_0\tbear\nSrTxMAryank_0\tknife\nSsQb12lMU_w_1\tcar\nSsQb12lMU_w_2\tcar\nqZ0egYy10zs_0\tcat\nqaKYHGIZ8tU_0\tcat\nqantWNz3Z-k_0\tbus\nqc1U41zjMfI_0\tknife\nqeSfa-Xin3s_0\tbear\nqfZHHSjai5Q_3\tmotorcycle\nqfZHHSjai5Q_5\tmotorcycle\nqfZHHSjai5Q_0\tmotorcycle\nqfZHHSjai5Q_4\tmotorcycle\nqfZHHSjai5Q_6\tmotorcycle\nqf4dZ323eu4_0\tcat\nqf5FQP-vjpY_3\tbus\nqgYBD0GBerg_0\tknife\nqglTXvFe5vw_0\tmotorcycle\nqgr1pdkQkKM_1\tknife\nqhTOaoL2B54_0\tbus\nqhgQ0_y6Jr8_0\tmotorcycle\nqhyihSkbubs_1\tbus\nqiW4cUVZCJA_0\tmotorcycle\nqjfkIHC3sNA_0\tbus\nqj1y76m_WFg_1\tcar\nqklXdTo1CKQ_0\ttruck\nqlGmmBY7ITI_0\tcat\nqlGmmBY7ITI_1\tcat\nqlfCKWLj_xU_0\tboat\nqlvwUVksAC4_0\tcat\nqnaQOGGmyhI_1\tmotorcycle\nqo2tG-wOpLI_3\tcar\nqpBRU2SONe0_4\tbear\nqpNPlLO7Wdo_6\tbus\nSsWwZCQR8pA_1\tbus\nSs6lM7iutJ0_2\tboat\nSs-ENa079_Y_0\tcar\nStg0xs4yv5A_3\tbus\nStg0xs4yv5A_1\tbus\nStg0xs4yv5A_2\tbus\nStoHoHg6XHo_0\tmotorcycle\nSuoVrAXkHsM_1\tboat\nSv-Xsjm8Seo_0\tboat\nSwfda4hcQzo_18\tumbrella\nSwfda4hcQzo_0\tumbrella\nSwfda4hcQzo_3\tumbrella\nSwrxLGIVuNg_1\tbus\nSw01FqLPH0o_0\tmotorcycle\nSxxBAhDGWzU_1\tcar\nSybtH9db7tI_1\tboat\nSybtH9db7tI_6\tboat\nSybtH9db7tI_0\tboat\nSybtH9db7tI_4\tboat\nSybtH9db7tI_5\tboat\nSyk5Jc9_tQA_1\tboat\nSywBQoMh8Q8_1\tcar\nSzD0AW8MKxY_1\tcar\nSz3ay4xexe0_0\tmotorcycle\nSz3oWSS6V3s_0\tbus\nS0AoM2Xz64Y_0\tmotorcycle\nS09dKnW798o_0\tcat\nS12WKCebYHg_0\tboat\nS2YoTKzOHW8_0\tumbrella\nS3O_xjPQToU_0\tknife\nS4lNN0zJE4A_0\tcat\nS49Hdfpc-SI_1\tboat\nS5VjgUVKjV0_0\tcat\nS5Z4g_SORHc_3\tknife\nS5Z4g_SORHc_4\tknife\nS6crKzUWKYI_0\tumbrella\nS6ksiMdECu8_0\tumbrella\nqp11ZgRmeck_1\tmotorcycle\nqqd7FMwn5Ks_0\tcat\nqqmk0BKAubw_0\tboat\nqqo83uqRldw_0\tmotorcycle\nqqumKQ_igJQ_0\tmotorcycle\nqqumKQ_igJQ_1\tmotorcycle\nqrHPEAVq_yE_1\tboat\nqrJljeVBE-k_0\tboat\nqrJljeVBE-k_2\tboat\nqrTOqXRwHqM_1\tbear\nqrTm-7zA5FM_1\tmotorcycle\nqrU7MAMf42A_0\tmotorcycle\nqrfZoDvW7wI_2\tbus\nqsFkwL9ikBE_6\tumbrella\nqsFkwL9ikBE_0\tumbrella\nqsbpGZepU_4_0\tmotorcycle\nqs4ACjrDQvo_0\tcat\nqtEJPGYfmb0_0\tmotorcycle\nqtQNJD43Z30_0\tknife\nqthVtX1KeJY_0\tcat\nqtmXJD337Sg_0\tcat\nquMSh4JZfSE_0\tbear\nquSzbk4CkBE_0\tcar\nquZjkqmOTys_0\tcat\nqvAPzGCqVG0_0\tbus\nqvAPzGCqVG0_1\tbus\nqvCVL7reF8g_2\tbear\nqwBsDRYIhwg_0\tcat\nqwI3fCK486I_0\tcat\nqwZ_bpVY018_1\tbear\nqwcgkEVHQS4_1\tmotorcycle\nqxwgvTIA0Oc_0\tumbrella\nqykj452YYlU_0\tboat\nqzjG5RMNfB0_0\tcat\nq0tjDTtHr00_3\tknife\nq1LbqldHuM0_0\tknife\nq1QElQCedrc_0\tumbrella\nq15Lr3-V3qI_2\tmotorcycle\nq2K3ctdaVGU_0\tknife\nq2MasRNKQxI_0\tbus\nq2NfowB59fs_0\tmotorcycle\nq3J7hUfBGGQ_0\tcat\nq4EXWy685Wo_0\tperson\nq4EXWy685Wo_3\tmotorcycle\nq4EXWy685Wo_6\tmotorcycle\nq4EXWy685Wo_7\tmotorcycle\nS7SEfKdokC0_1\tbus\nS7-k1XdAR7Q_0\tcat\nS8BbQRnxfqY_0\tcat\nS8WFgIrdEyI_0\tcar\nS9LooqaA-VA_0\tcat\nS9wDiwQMla8_0\tperson\nS9wDiwQMla8_1\tmotorcycle\nS9wDiwQMla8_2\tmotorcycle\nS9xCWTCFhNc_0\tmotorcycle\nS-T-e07Bgys_0\tmotorcycle\nS_K_nwYUS2o_0\tcat\nS_09gd9e0zE_0\tboat\nS_5w6lmw0DI_0\tknife\nTAzjOrAfzFM_0\tcat\nTA1NbMN7gNo_0\tmotorcycle\nTBvuwl0phUE_0\tmotorcycle\nTBy---hD-FA_0\tbear\nTB9qJG8A-H4_0\tcar\nTCS6svwO2AE_0\tboat\nTCVj-PtxnsQ_0\tbear\nTDSmQkKnGFU_1\tcar\nTENive2WCAw_0\tcat\nTFUV5Dy2MvE_0\tmotorcycle\nTFu5bNUW02Q_0\tbus\nTIZr3Y-PLQM_1\tknife\nTIpoS2Jymv8_2\tknife\nTJJgVPay9LE_0\tbus\nq4zFevdC3-w_1\tknife\nq5D67534lFM_0\tmotorcycle\nq5ESvcujAps_0\tperson\nq5wOimcVyaI_0\tcat\nq6YyhMSTSjg_2\tbus\nq6YyhMSTSjg_3\tbus\nq65QzEDi_jo_1\tmotorcycle\nq8nG4OvfGhY_0\tcat\nq8oKL5zvWZw_0\tcat\nq9QycGD31Co_0\tcat\nq9ZSVLXRUx8_1\tcat\nq9p4QZdwQ0I_0\tboat\nq-Sw3Dx1Xb0_0\tknife\nq-lbxXK_UY8_0\
tbear\nq-nt9k61jqQ_2\tboat\nq_NnyABqOFg_3\tboat\nrAcvNOp95pA_0\tcar\nrApBsMx8ZjU_1\tumbrella\nrAtKVQ_h94Q_1\tcar\nrBLqbf-KdaY_0\tcar\nrBjCxCwLz84_0\tcar\nrBl7T312SPQ_0\tcat\nrBnSmzTRsqE_0\tcar\nrCAA1xoobto_0\tcar\nrCOxllaoO64_0\tbear\nrCrQRhaJeAA_0\tbus\nrDEW_AdTSH4_1\tcat\nrDEdeXsgOdU_0\tumbrella\nrEL7A7rKARs_3\tknife\nrFF0purpqAU_2\tknife\nrGgvqpRsaew_0\tbus\nrGlpoWppAfU_0\tcar\nrG4cDTukyNw_0\tcar\nrG4ld81Rxt8_0\tcar\nrHHUlsaTde8_2\tbus\nTKCXvzTT2ws_0\tumbrella\nTMyv9XNlPGQ_0\tbus\nTQWq_YDrKc0_2\tknife\nTQm0C-2ersM_8\tboat\nTQm0C-2ersM_10\tboat\nTQm0C-2ersM_1\tboat\nTQm0C-2ersM_5\tboat\nTQm0C-2ersM_6\tboat\nTREARdQ16GQ_0\tcar\nTREARdQ16GQ_1\tcar\nTSQwlIeADdw_0\tbear\nTSQwlIeADdw_1\tbear\nTSQwlIeADdw_2\tbear\nTSpUcayboiM_0\tcar\nTS7UuEszy9E_0\tcar\nTTQQky-HcCs_0\tknife\nTTdbV_lHq_s_0\tcat\nTUrnPZr3eXs_0\tbus\nTVjvTR7CrNE_0\tknife\nTVvo40ERO9Y_0\tcat\nTW6cU7OYa60_1\tcat\nTXrnNVUe53o_0\tboat\nTXsQGHJjWhI_2\tknife\nTX2BAlXe5IA_0\tboat\nTX2BAlXe5IA_2\tboat\nrIUepAhKVnM_0\tcat\nrIc3ZEnqjQA_0\tumbrella\nrIezbmq7N9U_3\tbear\nrI79TJwwnW4_3\tknife\nrJGGo2bI150_0\tbear\nrJGGo2bI150_1\tbear\nrJGGo2bI150_2\tbear\nrKiQjOPzf0s_0\tcat\nrKs2bGgU29k_0\tcat\nrLm1866Q28U_3\tumbrella\nrLm1866Q28U_0\tumbrella\nrLm1866Q28U_1\tumbrella\nrNlm7i1BcaQ_0\tcat\nrNw1jiERG4I_1\tcar\nrOtd7pdh-zY_0\tcat\nrO0qo7r4TTc_0\tcat\nrPCOxxRwiTM_0\tbus\nrP6vb-cxVcI_0\tbus\nrQBwAWkz3Ao_2\tboat\nrQBwAWkz3Ao_0\tboat\nrQBwAWkz3Ao_1\tboat\nrRL4f466oNQ_0\tumbrella\nrR9vwlyXtYs_0\tbus\nrSNfdcbzEhE_1\tboat\nrSNfdcbzEhE_2\tboat\nrSNfdcbzEhE_3\tboat\nrSNfdcbzEhE_6\tboat\nrSNzuWEgSeg_0\tcat\nrSWYvSf29vQ_1\tcat\nrTM-3OYHQZA_0\tbear\nrTM-3OYHQZA_9\tbear\nrTreVVS3XVg_0\tumbrella\nrUcsGq10bCk_0\tumbrella\nrWLG9njOx1k_0\tcar\nTYuoW3gezZ4_1\tcar\nTZFETDh9bQo_1\tbear\nTZFETDh9bQo_3\tbear\nTain2YW14ok_0\tumbrella\nTb943q0WnTY_0\tcar\nTcfdUbzZcIc_0\tknife\nTcnKT-jCrxQ_1\tbus\nTcnKT-jCrxQ_0\tbus\nTcnKT-jCrxQ_4\tbus\nTdmeXkKeGmE_0\tknife\nTdxsosl1CIk_0\tumbrella\nTeF2gxyzjF8_4\tknife\nTeM8oPJR8nM_2\tbus\nTeM8oPJR8nM_4\tbus\nTeM8oPJR8nM_7\tbus\nTeSMF-Tw8b8_0\tbus\nTf8ZmK4GZYU_0\tbus\nTf9piH7b4Js_1\tbus\nTihSkV4th6I_0\tumbrella\nTimXSaV1u4M_2\tbus\nTjs55_3zB_o_0\tknife\nTjvHNNlcym8_0\tknife\nTjvHNNlcym8_4\tknife\nTj-U_ZtaHe0_0\tboat\nTkmEiKe_Uto_0\tboat\nTkuUMAPSGiU_1\tcar\nTnN1RBRfLnE_0\tumbrella\nTnN1RBRfLnE_1\tumbrella\nTnXDBpRvE_U_0\tbear\nrWw_OZqgPk8_3\tbus\nrYlL6avPERw_0\tcar\nrZDchhWp8lc_1\tbus\nrZ7XejB4nyk_0\tboat\nrawi3Ka9Oew_1\tcar\nrawi3Ka9Oew_0\tcar\nrbONk59p13Q_0\tbear\nrbWOxoprQ2M_0\tbear\nrbXmAC9QV2A_0\tcar\nrbjK97ECn_A_0\tboat\nrcrE_BJU-n4_0\tknife\nrcrE_BJU-n4_2\tknife\nrfksy8z9X40_0\tcar\nrgWglS6-TTw_1\tknife\nrhIa7DWBXUM_1\tcar\nrjVLfZDg-1g_0\tboat\nrk9SO8fR7-0_1\tbus\nrk9SO8fR7-0_4\tbus\nrlBfiB0epGw_1\tknife\nrlLJTjn9vkk_0\tumbrella\nToclpwxGMe8_0\tbus\nTpKpXHgy7yw_2\tknife\nTpKpXHgy7yw_5\tknife\nTqPnQuSGm2Y_0\tbus\nTqZZfXdm7D0_0\tcar\nTqnj4qeawHg_0\tboat\nTqsQOw3CqXo_0\tbus\nTrXkieSIkII_0\tboat\nTsfcgwFff0k_0\tbear\nTsrQwMo3niY_1\tbear\nTs8Wofx6QYY_0\tcar\nTusmYht5g7o_0\tbus\nTvbiwdoAnv8_0\tboat\nTvvBAOBoHFU_1\tumbrella\nTwEihF94LGQ_0\tumbrella\nTwSkZlbuaEU_0\tbus\nTxUm-m-jFQM_0\tknife\nTyV9inNHHAE_0\tbus\nTy_FDwb_nLY_2\tcar\nT0Mp-gJmMlU_2\tbear\nT0Mp-gJmMlU_3\tbear\nT0tT7l2X1_g_0\tbus\nT1Zywya-PcI_2\tcar\nT1Zywya-PcI_3\tcar\nT1Zywya-PcI_1\tcar\nroNPRQwafcU_2\tbus\nroNPRQwafcU_5\tbus\nroW8_xIYVAk_0\tknife\nroXQ3vv08_A_0\tbear\nrqA8P346qIQ_1\tboat\nrqDqbsbIcc8_0\tbus\nrq5jwk8hqYA_0\tbus\nrq5jwk8hqYA_1\tbus\nrriv5ZJYcJI_1\tknife\nrsMmhzkVg_0_0\tboat\nrta_HO-3L_A_3\tbus\nrwH7x0MR_38_0\tboat\nrwS5mEyV7Go_1\tknife\nrwS5mEyV7Go_2\tknife\nr
wcVAIM0TvE_0\tbus\nrwcVAIM0TvE_1\tbus\nrwu0xKkvzvA_0\tknife\nrxRxMZ6DIjw_2\tumbrella\nrxSJHCdoi0c_0\tbear\nrxm15TcjWqQ_0\tknife\nryBGF3WFvsY_0\tbus\nryBGF3WFvsY_1\tbus\nry0Pnb8VkxU_0\tbus\nry0Pnb8VkxU_1\tbus\nry0Pnb8VkxU_3\tbus\nrzDa9eW_dpg_3\tcar\nrzDa9eW_dpg_5\tcar\nrzOhM6n6Amc_0\tboat\nT21Uim3jGuo_1\tbear\nT3wZwUQ_7q4_0\tumbrella\nT5ZgfFcAd94_0\tbus\nT6QiKZd4bH0_0\tknife\nT7h2fJLtABk_0\tknife\nT8C-sLfGg3A_0\tboat\nT-5AESRu0pM_0\tcar\nUAptbKXXoJI_1\tbear\nUBk45sVKl_o_0\tumbrella\nUCnTA86V3o0_0\tknife\nUDmjHWk8iRk_1\tbear\nUE1kUiVy7LA_1\tcar\nUFPrfB6_TJY_0\tbear\nUFQmHju3MrM_0\tbear\nr1JK0UIvyoM_0\tbus\nr1YNttJqXjI_1\tbear\nr2GN4IDacgM_0\tboat\nr2GN4IDacgM_1\tboat\nr2GN4IDacgM_2\tboat\nr2GN4IDacgM_3\tboat\nr2sw-3mWNEQ_1\tboat\nr4U8cMe6_Uo_0\tumbrella\nr4cneWcmGJc_0\tbear\nr4cneWcmGJc_1\tbear\nr43KKtRQNxw_0\tknife\nr5c09tdbF3U_0\tknife\nr6HzXMpwuOg_0\tboat\nr7V8M9vMX8I_0\tboat\nr8oV5neCRZc_1\tbear\nr-Wqqn-oS_0_0\tbear\nr_squ5DWzV0_0\tbus\nsAa0aLc0rvM_0\tbus\nsAo-z30biYY_0\tcar\nsAqB_9DrpiU_0\tboat\nsCGJB9oAeHo_0\tcar\nsCX1zbdQvbE_0\tboat\nUHvwjd6eSDY_0\tcar\nUH6GKx07mu0_2\tbear\nUIlo6WvfABM_0\tboat\nUJ7xasCu9yw_0\tknife\nUKdl8BrKy4g_0\tknife\nULTTzu_-eQI_2\tbus\nULgPda0ny1Q_0\tboat\nULxGPhbhuwI_0\tumbrella\nUMQ6fAZTiLo_0\tumbrella\nUNfKxOwP1V8_0\tbear\nUNyq1SNbNPk_1\tbear\nUP2WXifDFc0_0\tbus\nUQdjo1v_Hv0_0\tcar\nUQrP0Wa7bfA_0\tbus\nUQ90qkTMSes_0\tumbrella\nURiNDCZBU7E_1\tcar\nURmMAndDPfQ_0\tboat\nUSYudaDNkeU_2\tknife\nUSYudaDNkeU_3\tknife\nUTx1Fw7nQcQ_0\tbus\nUVGq9IRroYo_0\tboat\nsDSmkWE8qw4_0\tknife\nsEnhkLttWlw_0\tbus\nsFgXir9g_Os_0\tcar\nsF2EQhRNlQc_0\tumbrella\nsGpQTqemybM_0\tbear\nsGzXdAI4YSQ_0\tbear\nsG_AruJlxiw_0\tumbrella\nsJA7-N7htNo_0\tbear\nsJL716urwpY_1\tcar\nsJL716urwpY_0\tcar\nsJTLB7bgb0k_1\tknife\nsJsEpKneYMs_1\tbus\nsMm8f8vBx7c_0\tumbrella\nsOQWtx6GiR4_1\tumbrella\nsOQWtx6GiR4_0\tumbrella\nsOvnHbg6d_8_0\tumbrella\nsPDY-ey2kNA_0\tumbrella\nsPDY-ey2kNA_1\tumbrella\nsQEBpH647Mw_0\tumbrella\nsQJr7LooP_s_1\tboat\nsQftML4HXiU_1\tknife\nsQvi3OxMoKU_0\tbear\nsQvi3OxMoKU_1\tbear\nsQvi3OxMoKU_2\tbear\nUWJIq_1uAnA_0\tboat\nUXDmIQTthAE_0\tknife\nUYRhIhbuh34_0\tboat\nUanzlUcmoJY_1\tbus\nUbj2t-7KcJk_2\tcar\nUb5O76sDojg_0\tcar\nUcBLQsI3Dzs_0\tcar\nUcKyiCjpXoA_3\tbear\nUdFEBlYt9tM_0\tumbrella\nUdaAkO2f_pU_0\tbus\nUeQLdrnbe8E_1\tbear\nUeQLdrnbe8E_3\tbear\nUgHNBgeg9cY_3\tknife\nUgh33I0Qxi4_0\tumbrella\nUgkXJsrPys0_0\tumbrella\nUhgJaZWsdCQ_0\tknife\nUhupGJ7k3Q0_0\tknife\nUhvhrEMHY0E_0\tboat\nUhwOdFtF8os_0\tbus\nUiZ3tYMpOic_1\tumbrella\nUjTdR_85bTo_0\tumbrella\nsSPe9VqmSuU_2\tbear\nsS-GtompdcQ_1\tboat\nsUhpJsSmrzA_4\tboat\nsU-mmzCCGmg_0\tbus\nsVbrxAG6jtA_0\tcar\nsVkPUjUh0UQ_0\tknife\nsV9ymK-zZ8A_4\tbus\nsV9ymK-zZ8A_6\tbus\nsWfQh6SsvG0_1\tboat\nsW7n8r3vvl8_1\tknife\nsXwrjhXbAwA_0\tumbrella\nsYE45Xnof5I_3\tbear\nsY1i3-cQv70_2\tboat\nsY3G5eOlysI_0\tbus\nsY_jGNxKdYw_0\tknife\nsY_jGNxKdYw_2\tknife\nsaBAx3Xw2PE_0\tbus\nsbR26E99_A8_0\tbus\nsbmsWqsHD9M_0\tbus\nsb1unJ1sby8_0\tknife\nsb1unJ1sby8_4\tknife\nscFiRRTU5jg_1\tbear\nscJFbu3WboQ_1\tcar\nsc-BJ-WirDo_0\tbus\nsdHNJK0mfWQ_3\tbus\nsdd5ViCUDwY_1\tbus\nsfVwMcMm77E_1\tumbrella\nsfVwMcMm77E_2\tumbrella\nUjxwNRWfxBo_2\tbear\nUkBlnrNOssQ_1\tbus\nUlLwBfXpz4A_1\tbus\nUmAOVqCB6UM_0\tbear\nUmBxMf5cHV4_0\tknife\nUmewKWpE2qE_0\tcar\nUrRiUQPaxic_0\tumbrella\nUrxeEW4FBq4_1\tumbrella\nUtvo55GUNyg_1\tbear\nUutgI7H2EPc_0\tbus\nUutgI7H2EPc_2\tbus\nUutgI7H2EPc_4\tbus\nUutgI7H2EPc_5\tbus\nUutgI7H2EPc_6\tbus\nUvsMOU9XGYk_0\tcar\nUvsup5BdpLM_0\tcar\nUwlk3sF-l38_0\tknife\nUxD-6ScNF1U_0\tbus\nUx3oyD0wLig_0\tboat\nUx_-m16Ntqs_0\tbear\nsgDzqYTo0GI_0\tcar\nsgDzqYTo0GI_2\tcar\n
sghMPNg9wB0_0\tbus\nshgKQ2FcjfM_1\tknife\nsiNixoeB9Ew_0\tcar\nsi8Uk6frpqI_3\tknife\nsjBWnj8kKVs_1\tbear\nsjESht-PXb0_2\tbus\nsje-nlCBYAk_0\tbear\nsk5gj6VnXds_0\tboat\nslGCyLNlI3w_0\tumbrella\nslgsRri0IUU_0\tbus\nsli0aHrS-l4_0\tknife\nsoPkYPTLD-Q_1\tboat\nsoe3qmwZTEE_3\tknife\nsoe3qmwZTEE_4\tknife\nsplTIYA-rtY_3\tknife\nsrUGXKwzLf0_0\tbear\nU0G9nt_JMp4_3\tknife\nU1jXflUgiSo_2\tknife\nU1p1HQ3ZsUo_2\tcar\nU1tGGfRyOzY_1\tcar\nU3BQYG5-Koc_0\tbus\nU3pwXnANDgk_0\tknife\nU3pwXnANDgk_6\tknife\nU4nccTmpY0A_1\tbus\nU7N--AsibJc_1\tknife\nU7fW1r0kRYw_1\tcar\nU7-_NQlr8l0_1\tbus\nU8EGQyjwfEQ_0\tcar\nU85wCYoCIZ4_0\tknife\nU-B7Xkx_rF0_1\tknife\nsuQJeplwaco_1\tbus\nsvZPjH3EGcI_3\tcar\nswj8kdhr03w_0\tbus\nswkyfcVE17I_1\tumbrella\nsyJ4LBRPwjs_1\tknife\nsyY8MaSUvJI_0\tcar\nsyfJEZrVzqA_0\tbus\nsy9XCn-ebrE_0\tcar\nszClXDUETvQ_0\tumbrella\nszW2Gonojss_0\tknife\nszXVjlTlt3w_0\tbear\nsziUCgMKvrM_0\tbus\nsznHM_K2obc_1\tbus\nsz6Zoh7MfnA_0\tbus\ns0ABooHpZjo_0\tknife\ns09Dr7gZ5G8_0\tboat\ns1t73kIOSQU_2\tbus\ns2BVmX4vImY_0\tknife\ns2gkrcGsOxU_1\tbear\ns2nioy3J4RY_3\tboat\ns2nioy3J4RY_1\tboat\ns2nioy3J4RY_2\tboat\ns2qgkHBVQxo_0\tbear\ns2qgkHBVQxo_1\tbear\ns3lwoM0rD2U_2\tboat\ns3-sF0tSY8w_0\tumbrella\ns6BicsP9eBk_0\tknife\nVA3OWlsrD28_0\tumbrella\nVBPWsv5FfbU_0\tbus\nVBPWsv5FfbU_1\tbus\nVBr3P_OGawE_0\tknife\nVB6eUS7LSfM_1\tboat\nVCCevTa32Ng_0\tcar\nVDz1RZU6x5c_0\tbear\nVESEWamKy10_0\tcar\nVFv1UuT7klg_2\tknife\nVGAYYimByOM_0\tcar\nVGwSM3IXcJ0_0\tboat\nVG_OHq6R1AE_0\tbear\nVHiMLGyNYgQ_0\tcar\nVIASAf569_k_0\tcar\nVIxj6BV3kgM_0\tumbrella\nVJZpavOgVEo_0\tumbrella\nVLaCK3u84vI_0\tumbrella\nVMLuyFD54AQ_2\tboat\nVMXrHUjXjyQ_0\tboat\nVMXrHUjXjyQ_1\tboat\nVMi5mAdZyZI_1\tknife\nVMs0jemUzI0_0\tknife\nVNuYRPiFrus_0\tbear\nVN-BCqBlrhs_0\tcar\ns8vzssNUlOA_0\tknife\ntAOx6NFDD9I_0\tknife\ntAxbjy_edDI_0\tumbrella\ntBOSPNFbuv8_0\tumbrella\ntBQRfKeIYZc_2\tbear\ntBgtSnOMOwM_0\tbear\ntBh6HxQHmrs_0\tknife\ntCZLl-MZJp8_0\tcar\ntDYPtg0At_Y_0\tbear\ntE42n_1PW6w_0\tbus\ntFfqpeBbvr0_0\tumbrella\ntFjlTZqwoWI_0\tbear\ntGycfa97LVU_1\tbear\ntIX4eIYzfD8_0\tknife\ntIX4eIYzfD8_1\tknife\ntIs05U9pd04_3\tknife\ntIs05U9pd04_1\tknife\ntIs05U9pd04_4\tknife\ntIs05U9pd04_5\tknife\ntJXbZyaUOD4_0\tcar\ntJhfshKvRmE_1\tbus\ntJhfshKvRmE_4\tbus\ntJ01Y3R3Qmg_0\tumbrella\nVOcplsa6Gq4_2\tknife\nVOcplsa6Gq4_5\tknife\nVPI_Nm3GHHc_5\tbear\nVPI_Nm3GHHc_2\tbear\nVP0u_E6FOsY_1\tcar\nVR_V9WaFYn0_0\tumbrella\nVSj9dXwt7zI_0\tbus\nVSxoLvaJN2Q_1\tbus\nVUcCABjVSO0_0\tcar\nVU2lUX4NdkM_0\tknife\nVU2lUX4NdkM_1\tknife\nVVg7sbsw9vY_0\tbus\nVWpm6_Uhis0_2\tboat\nVX9TPrjMcOg_0\tknife\nVX9TPrjMcOg_4\tknife\nVZ5r0BHRf84_0\tboat\nVaW7Go5pX-c_0\tumbrella\nVa50KanUO94_0\tumbrella\nVbA0B1JcpNY_2\tknife\nVbeIRLOQ5pI_0\tbear\ntKCjJuulqx4_2\tbear\ntKCjJuulqx4_3\tbear\ntKCjJuulqx4_4\tbear\ntKN3Qo0oUoc_3\tknife\ntNvGTzks1yw_0\tcar\ntNvGTzks1yw_1\tcar\ntO0igm1AwqU_0\tbus\ntPae9uGqDog_2\tbear\ntPzWEC_9_H4_3\tknife\ntQpyrprwwc0_0\tumbrella\ntR2sDFGND7g_0\tbear\ntSEneDiCrqg_0\tbear\ntTFTWquOTi8_0\tbus\ntTjbx39rZMk_0\tbus\ntT2pUZ0W33A_0\tbear\ntUHf6Ynx_vI_0\tknife\ntVJE-0uNX1s_0\tboat\ntVTkAh80t5I_0\tumbrella\ntVuL82POt-I_1\tcar\ntXMBGjGduCM_2\tknife\ntXsMGHCKw7U_1\tboat\ntXwfqREzEtI_0\tboat\ntYGp2PFiAUE_0\tknife\ntYas1z25M_4_2\tknife\ntYcNeSisfpI_0\tbear\ntYdhIaTDwiE_1\tknife\nVdLohVQNC5Q_0\tknife\nVdLohVQNC5Q_1\tknife\nVdLohVQNC5Q_5\tknife\nVdLohVQNC5Q_6\tknife\nVeUIJlyGjkY_0\tcar\nVekx17G8mkk_0\tbear\nVfBqMWT6aRM_0\tknife\nVfKgW5eSGsk_0\tumbrella\nVhmj1OGGQuc_1\tbear\nVhn-8bCU70s_0\tbus\nVh21adwevRU_0\tbear\nViXmx_D5BAY_0\tknife\nViXmx_D5BAY_3\tknife\nVizxeIzWEFw_0\tcar\nVjF-G6FQooU_0\tboat\nVjS5w2pc0tA_1\
tboat\nVjvpOU349zY_0\tbear\nVkDn2-1H23o_0\tumbrella\nVkDn2-1H23o_3\tumbrella\nVk43AD4O_hc_0\tboat\nVnrw6Fjmj8I_0\tbus\nVnwwgTO4w_k_0\tumbrella\nVn4aKSlYXX4_3\tbus\nVppPgMZqfEQ_0\tboat\nVp0kah4_m6w_0\tboat\nVp0kah4_m6w_2\tboat\nVqHSuVVKfjs_0\tbus\nVqo2RiAzLnU_1\tcar\nVrnm_kf7OCs_0\tboat\nVsDgOcOWqXw_0\tbear\ntYofvh4_3K4_0\tbear\ntadYkEU5suY_1\tknife\ntbIUesoKv9Q_1\tbus\ntb_hKPkH2co_0\tknife\ntcFQ5kE3PKM_0\tcar\ntcFQ5kE3PKM_1\tcar\ntcSHrlGTFJc_0\tknife\ntc912gGdckQ_0\tboat\ntdjDSO8NFx4_0\tknife\ntdpAPPsHlDQ_1\tbear\nteJyM5tywno_1\tbus\nteQkZqDa1lw_0\tknife\nteb83RDwop4_0\tbear\ntgSfan8G7wo_0\tcar\ntgVXG7H_acI_0\tumbrella\nti3J-8aWPcw_0\tbear\ntjldcvPuif8_0\tbear\ntj4mnSXX2DM_0\tcar\ntm2bmSBR4uE_0\tknife\ntoiMoCxSyKY_2\tboat\ntos1ELGZH0M_2\tumbrella\nVs3Mi3Ch_EQ_0\tbear\nVtHzTaDh4WM_0\tbear\nVtHzTaDh4WM_1\tbear\nVt8DAmG3nHs_0\tcar\nVu4xkIEs6U8_0\tboat\nVvXxRawsOCs_1\tknife\nVvXxRawsOCs_4\tknife\nVwYEgB5HOD0_1\tbus\nVxdUG7Sinyw_0\tcar\nVyDNhpvCuc8_0\tbus\nVyfIuIcelhc_0\tumbrella\nVz3wJsLA_gI_0\tbus\nV0NnR8HLSbo_0\tumbrella\nV0o8kxcOZRc_2\tbear\nV1a9QcSegdw_2\tumbrella\nV1dqjmHNyIY_0\tboat\nV23vmoZYoVw_0\tbear\nV4o7I9cLp-g_0\tbus\nV6nKvvfzWpg_0\tboat\nV64pvhB8sKU_0\tcar\ntrAReSHvUdQ_0\tcar\ntrAReSHvUdQ_5\tcar\ntrAReSHvUdQ_6\tcar\ntrAReSHvUdQ_1\tcar\ntrAReSHvUdQ_2\tcar\ntrAReSHvUdQ_3\tcar\ntrAReSHvUdQ_4\tcar\ntsNhgDUKwHw_3\tknife\nttdTnGOIBmA_0\tumbrella\nttdTnGOIBmA_3\tumbrella\ntvVLkJ0HTQQ_3\tcar\ntvew-P2UPL4_0\tumbrella\ntwiEfNprSoE_0\tknife\ntwiEfNprSoE_1\tknife\ntw7jf9U2-kM_2\tbus\ntxpIIsM1T8U_0\tbear\ntx2dZF1Ckxk_0\tknife\ntx5tKODiGuo_0\tknife\ntx5tKODiGuo_1\tknife\ntyO37NBAS1Y_0\tbus\nt1UtwxOBGvE_1\tknife\nt1vrE0cEB80_0\tbus\nt10FRgv9o5M_0\tbear\nt10FRgv9o5M_4\tbear\nt14PUW9SINk_0\tknife\nt31z17N5skw_0\tknife\nt31z17N5skw_1\tknife\nt31z17N5skw_3\tknife\nt31z17N5skw_4\tknife\nt33TQH8-7tg_2\tboat\nV9UCv2qhsxc_0\tcar\nV9ulnUIQGJU_0\tbus\nV9ulnUIQGJU_6\tbus\nV-KNIu_PsaQ_0\tbus\nV-NvBHig1i0_0\tbear\nV-tMggTxBu4_0\tknife\nV_Bb7A55f-c_0\tcar\nV_dJ2KuqfOA_0\tboat\nV_dJ2KuqfOA_1\tboat\nV_t8pbEf8bA_1\tboat\nWB7fT2tI7Pg_5\tcar\nWCSEuwFm7KU_1\tcar\nWCfc8YGLu1o_1\tbear\nWCfc8YGLu1o_3\tbear\nWDgLmrXq4vg_0\tumbrella\nWHLIJlNh3TQ_1\tknife\nWHQXE5tuTXk_0\tcar\nWHUaoqVF57g_0\tcar\nWIdj4ovuDWQ_0\tbear\nWIdj4ovuDWQ_1\tbear\nt4oaGCoTBZc_0\tcar\nt42tnyTtYWE_0\tboat\nt7OKXKxjHls_6\tbear\nt8X-x_7pv94_0\tcar\nt_-dK1Xhg90_0\tknife\nuAjqm8B-aio_0\tknife\nuB_Hurzj4s0_0\tcar\nuGEDuDcqqvU_0\tboat\nWJ2A2XRRTw4_1\tbus\nWJ_vIH7FJsQ_0\tcar\nWKDhXr_5mbI_0\tknife\nWKKFM7oRSd0_0\tbear\nWKS6aq75gk0_3\tknife\nWKV4j8-G1Nc_0\tknife\nWKfQfA_YQTY_3\tknife\nWKubVTrND7s_1\tknife\nWKzUT3zOIU8_0\tknife\nWLxzHH6iJlk_4\tboat\nWMSu-XOQe5w_4\tbus\nWMSu-XOQe5w_0\tbus\nWMgP1z0x0Io_0\tbus\nWOVTnN-HcZ0_1\tbus\nWOxTA78OlZU_0\tknife\nWPqEyeVtih8_0\tbus\nWPuItCUuEkY_1\tknife\nWQAr1enuPKw_1\tbear\nWQX6ptTAKHg_0\tknife\nWSc0kYKLGTg_0\tbus\nWStgEyiPBBE_0\tcar\nWSvHn5XJq0Q_0\tknife\nWS0DayzAv80_1\tboat\nWS0DayzAv80_2\tboat\nWTXytzbF5lU_0\tumbrella\nWT69VoU2Hps_0\tcar\nWVx9vOoutGo_0\tbus\nWWKuCF2FuYk_0\tcar\nWWm9iMkKk-g_0\tknife\nWW7ib8XAVz0_0\tboat\nuHqj6xQGOYg_3\tbus\nuHqj6xQGOYg_4\tbus\nuHqj6xQGOYg_6\tbus\nuHqj6xQGOYg_7\tbus\nuIKZlXUoHOc_0\tbear\nuJMFDY-BKiQ_1\tbear\nuJMFDY-BKiQ_4\tbear\nuKdOuLYJjrg_0\tknife\nuK-zcpEE8nE_5\tboat\nuLdXkXRsHok_0\tumbrella\nuMK6b2TG8rc_0\tbear\nuMV37U-DNUQ_0\tcar\nuMciOwjd0GU_0\tcar\nuMciOwjd0GU_1\tcar\nuMd1DmjxAZQ_1\tcar\nuMj3V0s7mUo_0\tbus\nuM_jxm7bFp8_0\tboat\nuNDkbmlEYeQ_0\tbear\nuO7OtV3J1AY_0\tbear\nuPE1o5dCYDc_0\tbus\nuQhMkVrdghM_0\tbear\nuRLAyu-3l0A_0\tknife\nuStpLanz0fU_0\tcar\nuTAqzBGMDOc_0\tbus\nWYwRW_t4jb8_
0\tcar\nWZK5IqBtpGE_3\tknife\nWZgxjIvc2nk_0\tboat\nWaEyVBSggwQ_1\tbear\nWaaW6bElWCM_0\tcar\nWb20JaIrr8M_0\tknife\nWb20JaIrr8M_2\tknife\nWcNlbTBZM64_0\tumbrella\nWdIATjW74Pc_0\tboat\nWdYFXDv4TEo_1\tcar\nWdgTHJurLx0_0\tumbrella\nWd0xTEH2d9k_0\tboat\nWejCws8AoxE_1\tknife\nWejCws8AoxE_2\tknife\nWejCws8AoxE_3\tknife\nWe4_tuFKyGE_0\tknife\nWf6hHpxRW_Y_4\tknife\nWgx6hhiRLoA_0\tpotted plant\nWjiMUA6_CkY_0\tboat\nWlm2mLKCMlM_1\tbus\nWlsN6HURFTc_0\tbear\nWmFqo8n67Ok_0\tbus\nuWi9-84kTFQ_1\tbear\nuXHJHV0bwUk_2\tbear\nuXe9WOlTFcs_0\tbus\nuXe9WOlTFcs_1\tbus\nuZgcOYmazsw_0\tbus\nuaJ1g0xJ4QY_0\tbus\nual32V7-KJo_0\tboat\nua_5GosOa-c_1\tbear\nubFoUAh6d4g_1\tknife\nubOiomYqbNs_2\tknife\nudSE-6UkgwM_5\tumbrella\nue1CIlwhPEs_0\tumbrella\nufFT2BWh3BQ_0\tbear\nugWs4v6DbUw_0\tbear\nugsJ5cOmFTg_1\tboat\nuhXcL98XNCY_5\tumbrella\nuhXcL98XNCY_1\tumbrella\nWoxbRmDfLeI_0\tumbrella\nWoxbRmDfLeI_1\tumbrella\nWpCyx-QCMec_0\tbus\nWplsTumdQf8_0\tboat\nWqFFUvf-YJk_0\tknife\nWqxU9aIFmNY_0\tumbrella\nWr5BjrtC4Ts_1\tknife\nWsEiHZFGeFs_3\tumbrella\nWsaP8FyRUCc_0\tcar\nWses8y3NyJ4_1\tbus\nWs9V_B7mqJI_0\tknife\nWuTHL7GtG-8_3\tknife\nWvGzCV5ICZM_1\tboat\nWvuZRZqhxk4_3\tknife\nWvuZRZqhxk4_5\tknife\nWvv8cOXaAZI_0\tbus\nWv-Weuc4E1A_0\tumbrella\nWwLtxfDC7ok_0\tboat\nWxWXB9hf7n0_0\tcar\nW0kDpFkg6xU_0\tboat\nW1z3EAv-eJw_0\tbus\nujnUCtI7gzI_0\tbus\nuj4TRH5r_ww_6\tbus\nuklsFjegS-w_0\tbus\nulzto7-Hl64_3\tbus\nul__w-oqHrw_0\tbus\numjU9X1kuYg_2\tcar\numjU9X1kuYg_4\tcar\numjU9X1kuYg_1\tcar\nuoGBYfJo5Xg_0\tcar\nuo1J9BUgQmk_0\tboat\nurRNkZvzuHI_2\tknife\nurmSoxyi9Vo_0\tboat\nurmSoxyi9Vo_2\tboat\nutmsGeHFdvI_0\tboat\nuuBKDGoTmGY_1\tcar\nuu-UptVYr_A_3\tcar\nuvV7cblR4qc_5\tumbrella\nuvZOzZjBKXY_0\tbus\nuwL5LYln0EM_3\tbus\nuwL5LYln0EM_4\tbus\nuwL5LYln0EM_5\tbus\nuwL5LYln0EM_6\tbus\nuwx7UKo4jcg_1\tboat\nuwx7UKo4jcg_0\tboat\nuwzHiGF1YMM_0\tboat\nW2z3SxorVnI_0\tknife\nW2z3SxorVnI_1\tknife\nW38vB3cw2fA_2\tboat\nW4Is7CI2Sfo_1\tumbrella\nW47ZA0onzb4_0\tknife\nW5dSTfMCj-U_0\tboat\nW5zIkmZyS18_0\tbus\nW51Spbo8SQQ_0\tknife\nW6YCv9ZVVOc_3\tboat\nW6uCEMEi7_E_0\tbus\nW7JkNuRYNr0_2\tknife\nW7JkNuRYNr0_3\tknife\nW7JkNuRYNr0_4\tknife\nW7JkNuRYNr0_5\tknife\nW7yqHDA_RMU_0\tknife\nW8EKt6cG0E8_3\tbus\nW8EKt6cG0E8_7\tbus\nW8EKt6cG0E8_1\tbus\nW8xqW-QD_B4_0\tknife\nW87M2lQeWNk_0\tbear\nW87M2lQeWNk_1\tbear\nW-ZpC_K7Df8_0\tcar\nW-x__78AyrI_0\tboat\nW_Wc7lFraRg_0\tbus\nW_v5wpcibRM_0\tboat\nW_2LqiQ_ico_1\tknife\nXAa2L1v8iJM_1\tumbrella\nXBAOFn8KXFo_0\tbear\nXBn6P-IKuis_0\tperson\nXBssw3bqXL0_2\tbear\nXCZv_AjZo08_0\tknife\nXCu0Ea4zHuQ_2\tbear\nXDtfr902CVM_0\tbus\nXD1OYmmeKic_0\tumbrella\nXD1OYmmeKic_2\tumbrella\nuxFX6p61oPY_0\tknife\nuxlDad59mFc_0\tboat\nuyWVUOcgZHg_0\tbear\nu1OhTXTmuWM_5\tbear\nu1TvbkpmEbs_0\tcar\nu1vMDzyFxzI_0\tbus\nu2BVfAFQ1zU_3\tknife\nu2BVfAFQ1zU_2\tknife\nu2EDuPJijZ8_4\tboat\nu4K3jRl7Gag_0\tcar\nu4S9mlFpt0s_0\tbear\nu4uwaq4uf54_3\tcar\nu4uwaq4uf54_0\tcar\nu6XGBXhCJ18_1\tknife\nu7STs8FCy_g_0\tbus\nu-1HZJXwFHo_0\tumbrella\nXF8B5xjRCF0_0\tcar\nXF8B5xjRCF0_2\tcar\nXF_oHXRGd1o_0\tboat\nXGRZLrZC9zY_0\tboat\nXIlybSpq0mg_0\tbus\nXJmn9i57K3g_0\tbus\nXLvSaN_M6lE_0\tcar\nXL0B2niNRCw_2\tbus\nXMlEA_yRojM_0\tknife\nXMyio1ZckJc_0\tbus\nXQBtgwUzEL0_0\tcar\nXQX5y5BQykU_0\tbus\nXQ6u2yTbu_0_0\tcar\nXQ7UbbPjnDo_1\tknife\nXRenv5AHI_8_0\tboat\nXRpgkCuziGY_0\tumbrella\nXSI7M8s2Tc0_0\tbus\nXS4ow1Wcaro_0\tcar\nXTm-jN1RVHA_0\tumbrella\nu_YKLGqrMKQ_1\tknife\nu_gN-dXNRHI_0\tknife\nvARZcTna8NU_0\tboat\nvBEaeqdPsho_4\tcar\nvBEaeqdPsho_3\tcar\nvDT-DShjnjU_0\tumbrella\nvEMHY2cT6kA_0\tbear\nvEi5gkcTDGY_0\tbus\nvE9zapt1WdI_3\tcar\nvFSRrtT5AL8_0\tbus\nvGbt_XsSaVk_0\tknife\nvGi-DjriLLs_0\t
umbrella\nvHAlsHYE3mo_3\tcar\nvHAlsHYE3mo_0\tcar\nvHXM9IJdVcM_0\tumbrella\nvIQAK-4lMOc_0\tumbrella\nvIgmRBC2ayQ_0\tumbrella\nvJl9QkAbpc8_0\tcar\nvKxCl7DzJjI_0\tknife\nvK8dgvZ5B6A_0\tumbrella\nvLA-mHM7MAQ_0\tknife\nvL-6uNdrCV4_2\tknife\nvN54ADSnJmE_0\tbus\nvOKH_DIjvAU_3\tknife\nXUkTknKOdrs_4\tknife\nXVa23hmwe-E_0\tumbrella\nXVrNN52RTEs_2\tcar\nXVrNN52RTEs_3\tcar\nXV694aCXY8Q_0\tboat\nXW6BQWpl3bI_1\tboat\nXZl5Luzj6v0_6\tbear\nXaSsc3noeLs_0\tboat\nXbHWOyNM3Bw_0\tbear\nXbHeGzyGejE_0\tbear\nXbWrCVe09YA_0\tboat\nXcLl0qSs9bU_1\tknife\nXcifNE0anDo_0\tknife\nXcifNE0anDo_1\tknife\nXc1jzGFyrnE_0\tcar\nXc5LW1FIVE0_2\tknife\nXc5LW1FIVE0_3\tknife\nXdu-98BUgmA_0\tknife\nXd7VbtoAdb0_0\tcar\nXeOwt5KeVfA_2\tcar\nXeR1DgyOa9o_0\tknife\nXekvrqFtazY_0\tbus\nXeplLROyXyA_5\tumbrella\nXgBTEQN_ZxA_2\tbus\nXgBTEQN_ZxA_4\tbus\nXgBTEQN_ZxA_7\tbus\nXhSmPb3cA_A_1\tknife\nXhSmPb3cA_A_3\tknife\nXiEeY5R56EQ_0\tknife\nvOy0N09kGEE_0\tumbrella\nvO56uCHmSjg_0\tumbrella\nvPVpX6GPY5Q_0\tbus\nvPVpX6GPY5Q_1\tbus\nvQ_8ry_dx68_3\tboat\nvRhGvmXk2js_1\tboat\nvRzpk-thwA0_0\tbus\nvTvjeXsP7TM_1\tcar\nvTwSeYRU_WQ_0\tcar\nvTwSeYRU_WQ_2\tcar\nvUKk9LqKVpA_0\tboat\nvUKk9LqKVpA_1\tboat\nvUg2Sr7Jl-Y_0\tumbrella\nvVKZzTBvsF4_1\tbear\nvVNCUA8hss0_0\tboat\nvVUbZCrCqEU_1\tboat\nvV72xGim-is_5\tknife\nvWMiT73g5-k_0\tboat\nvWO0tyaGuaM_0\tumbrella\nvWUAzQ_EEJ4_0\tknife\nvW_aJr-PSvA_0\tbus\nvW_o48lG_0I_0\tbus\nvXX9FmlwVlk_1\tbus\nvXX9FmlwVlk_6\tbus\nvXX9FmlwVlk_0\tbus\nvXX9FmlwVlk_2\tbus\nvXX9FmlwVlk_4\tbus\nvXaLFnwvrX4_0\tbear\nvXvR0RiGzj4_1\tcar\nvYROjLzMqvY_1\tbus\nvYROjLzMqvY_2\tbus\nvYROjLzMqvY_3\tbus\nvYwdLoOa0Rc_0\tumbrella\nvYwdLoOa0Rc_1\tumbrella\nvY1sAfu99Es_2\tbear\nvZznldYVwGA_0\tboat\nvbfWHUjHR2k_0\tbus\nvcdEtOGEEcU_1\tbear\nvcdEtOGEEcU_0\tbear\nvcdEtOGEEcU_2\tbear\nvch6R3EO9Ec_0\tknife\nXjHJiHO6onE_5\tbear\nXmVv2wQSvjs_1\tcar\nXoJahpK73EM_0\tboat\nXoqPCnlpymI_2\tknife\nXpDVw5mS058_0\tboat\nXp591jCTBOA_0\tbear\nXqfkP1lAkyE_4\tbus\nXqfkP1lAkyE_5\tbus\nXqfkP1lAkyE_2\tbus\nXq-5DHWJ1pk_1\tbear\nXrh68BP53Gw_0\tcar\nXriRhjtrlLE_0\tcar\nXu-ZZl_L38Q_2\tboat\nXv9eEVcD2P0_0\tbus\nXwvKtur_QEk_0\tknife\nXxHnDkI1NdQ_0\tbus\nXxHnDkI1NdQ_1\tbus\nvfzGrdk_Mxo_0\tbear\nvhrRnvGSMMY_2\tboat\nvhrRnvGSMMY_5\tboat\nvhrRnvGSMMY_6\tboat\nvhrRnvGSMMY_8\tboat\nvh4BHzMwVT8_2\tboat\nvh4BHzMwVT8_3\tboat\nvi4ktD0dAD4_0\tcar\nvkfdn7gkQh8_1\tumbrella\nvknUR0K4MqM_0\tbus\nvlNLyHxz1TY_0\tboat\nvlaeAly1nZc_0\tboat\nvmr5UiZekic_1\tbear\nvo0WWdM7UCs_0\tbus\nvo6Uzhx2fcw_0\tboat\nvpItyB8epmQ_4\tboat\nvp8NiaEmk2M_0\tbus\nvqeybXtIwxE_3\tumbrella\nvrK5lDQJnmc_0\tcar\nXy1w-6sjVS0_0\tbus\nXzj_w2QkjRg_0\tumbrella\nX0iu2HmUYfY_0\tumbrella\nX0nevXM5278_0\tcar\nX1drOgA68EU_0\tbear\nX2zWe7ayseQ_1\tbear\nX3ST-FA3VS0_4\tbear\nX4YaqObAEns_1\tbus\nX4kxk4G-BOs_0\tbear\nX4kxk4G-BOs_1\tbear\nX6Y6e6qsVOc_1\tbear\nX6tuO-hL1cg_0\tboat\nX6z7yGyP3UY_0\tboat\nX7AJSe6kUz4_0\tboat\nX7PChwjgRog_0\tboat\nX7mkuAPcpg0_0\tbus\nX8Wc00FiJn8_1\tbear\nX8lHVX9uGm4_0\tcar\nX9dNz1MhFTM_0\tcar\nvtOaPYxGauU_0\tboat\nvwp5f1sTcOM_2\tboat\nvxEizaWVZ2E_0\tcar\nvx7S4ISNz90_0\tbear\nvzKEVGD3E3w_0\tboat\nvzKEVGD3E3w_1\tboat\nvzmWbtFBxb0_0\tbus\nv0DjGmLiuao_0\tcar\nv0P7DOSAooM_0\tboat\nv0Uh3fazz7A_4\tbear\nv4CWziKFAvg_0\tboat\nv4CWziKFAvg_1\tboat\nv4TWD1hSObU_0\tumbrella\nv4TWZQM-t_M_0\tboat\nv4wheqJ7qmw_0\tcar\nv4-PEShPpKo_1\tcar\nv4-PEShPpKo_0\tcar\nX_1xeuzdJII_3\tbus\nYAI5kxAVlag_0\tbus\nYAS9QgwaKuA_3\tbear\nYAacEL8GB8Y_0\tbus\nYCTBEauAnvs_0\tboat\nYCT0ue2AdNE_0\tumbrella\nYC0SWC1thDM_2\tcar\nYDxjfXnUsjA_0\tbus\nYFb4IgdgsQI_1\tboat\nYGm0A03QK-0_0\tbus\nYJklsCjPlRE_0\tcar\nYJrYjEZ4Hfo_1\tbear\nYLNAOu0nAaM_1\tbus\nYMWEbvBeA2k
_0\tcar\nYNOl5XssrmA_0\tcar\nv6RTPFSqVAo_0\tbear\nv6d52nxP9CI_0\tboat\nv6d52nxP9CI_6\tboat\nv6d52nxP9CI_2\tboat\nv7R5EfiWsMU_0\tboat\nv7mxF1u1eJA_0\tboat\nv74SVFcInoY_0\tbus\nv77um2oiCmw_1\tbear\nv8vdjpigkqA_3\tbear\nv9EO_34zhPY_0\tbus\nv9dJjyyqJ14_0\tbear\nv-_nfHjdDrM_0\tcar\nwAJI2wAjCLA_0\tcar\nwAktmcUSj0Q_0\tbear\nwAsEbrNlx-Q_0\tcar\nwBEyQdKDniA_0\tbus\nwDOuWmULTDo_0\tbus\nwDwRfk2Ka7A_2\tumbrella\nwFuYr5TAoA4_0\tcar\nwFuYr5TAoA4_2\tcar\nwGqMuP3z6nY_2\tbear\nwHdnCnPBax4_0\tumbrella\nwHrdTEho0Do_2\tbus\nwItLJ3GVPHo_0\tumbrella\nwIzhSLqL-4M_0\tboat\nYPR6uiSn_PI_0\tbus\nYPR6uiSn_PI_2\tbus\nYPWoY6sseHw_2\tbus\nYP9HVTyFrM0_0\tumbrella\nYQRaUcLNZjw_1\tcar\nYRmCe16K5EI_0\tumbrella\nYRxTciapqLc_0\tbear\nYSFyOBQNQzc_1\tumbrella\nYSOeyn1SUIc_0\tbear\nYSx79S6HsRE_0\tboat\nYSx79S6HsRE_1\tboat\nYVueKFH38pQ_0\tumbrella\nYWAY2hVlXwU_1\tboat\nYXC4y1_fd5M_1\tboat\nYYjM_RIWUWk_0\tbus\nYY-G2b46dbU_0\tbus\nYalvFPYggIo_0\tbus\nYbsAJsBizWo_0\tcar\nwJbu3nAVmh8_0\tcar\nwJ-qeIIyve0_1\tbear\nwKlqztWBWCE_0\tbus\nwLXsUww1z0Y_1\tbus\nwLXsUww1z0Y_2\tbus\nwMW3eYDAmiM_0\tcar\nwN6DTQLhQo0_0\tboat\nwOAtMDJ1DIU_1\tbus\nwOqLqQhPKNs_2\tbus\nwPCVya7FjXI_0\tbear\nwPcWihBU6Fc_0\tboat\nwPjzhuBuZ_E_0\tcar\nwPrTnHfCQy0_0\tbear\nwP83jrOriho_5\tboat\nwP83jrOriho_1\tboat\nwP83jrOriho_3\tboat\nwQY4K0ZN5RY_0\tbus\nwQY4K0ZN5RY_1\tbus\nwQY4K0ZN5RY_3\tbus\nwRJ_foSdk2g_0\tumbrella\nwRs7_Un28R0_0\tbus\nwSaf-OQyJzM_0\tboat\nwSkaSUiYB60_0\tboat\nwUG-UKf5xOM_2\tbear\nwUtwwmbus0k_0\tbear\nwVI9BeWuM68_0\tbear\nwVX6wPj2U5M_0\tbus\nYcrP36sQwVc_5\tbear\nYepGVMeHePw_1\tboat\nYe3mi53K_Oo_2\tboat\nYgouPUMM7w8_0\tbus\nYhZT5GU-dEY_0\tbear\nYiDVwrN1Djs_3\tbus\nYi8XHxZACGY_0\tbus\nYlGg5v-AWZc_2\tumbrella\nYlnMI5yk7FU_0\tboat\nYmRfW-9QwH0_0\tcar\nYodCYpx5p8o_2\tbear\nYogxE9OtHGE_0\tcar\nYogxE9OtHGE_2\tcar\nYozOMrrhBWk_0\tumbrella\nYozOMrrhBWk_5\tumbrella\nYo8IaFdsDHQ_0\tumbrella\nYo8IaFdsDHQ_1\tumbrella\nYpGGnhGqqkc_0\tcar\nYpv2bwSbJbg_0\tbus\nYpyrD-P9emk_1\tbus\nYq3H6FwjqwQ_2\tbear\nwXg6MT7--Ms_1\tbus\nwYO_Z3tO-P0_0\tcar\nwYO_Z3tO-P0_1\tcar\nwYO_Z3tO-P0_2\tcar\nwaGAoKeMDbo_2\tbus\nwaZHoBhYNXM_2\tcar\nwan2A1Zp9pg_0\tumbrella\nwa4LKNmoGCI_0\tbus\nwbBafnofeHM_1\tbus\nwcLRQ5lDklc_2\tbus\nwcRJMRP7TtY_0\tcar\nwcUHhJA9ynY_0\tumbrella\nwcUHhJA9ynY_1\tumbrella\nwc6z479m8VU_0\tknife\nwePYCAT9VWI_0\tboat\nweUGYN9mO8M_0\tcar\nwe9P1H3yM9s_0\tumbrella\nwgn5GA4Kt_w_0\tbus\nwioe2rgDFxQ_0\tbus\nwi_60seXhMg_0\tumbrella\nwkCC1-6dZZc_0\tbear\nwkRF61CxvWQ_1\tboat\nYsJGlSMV6fc_0\tbear\nYsKpyV6dNVU_0\tumbrella\nYsKpyV6dNVU_6\tumbrella\nYukb6C-FiPs_0\tbus\nYyqN8OKq7-k_0\tcar\nYy9Cj5ayVow_4\tcar\nY2esC00COVs_0\tumbrella\nwkhiKomfWwo_0\tboat\nwku7FWw9zok_6\tbear\nwmN3gF7czBE_0\tboat\nwoB4lneU8v4_2\tboat\nwoB4lneU8v4_5\tboat\nwoB4lneU8v4_3\tboat\nwonqKYd_Hkc_0\tboat\nwulomSbG8Ww_0\tboat\nwwHyMOLjtHw_0\tcar\nY8gjbHlOSpg_1\tcar\nwz-CYTAvpJA_0\tcar\nwz-CYTAvpJA_1\tcar\nw1xC4CowaVk_2\tbear\nw2d7ZPHVRsQ_0\tcar\nw4QoeqK4vN4_0\tboat\nw5KKrxi32ZU_0\tboat\nw5RAGrRh6N0_0\tboat\nw85PvG-O3JQ_3\tbear\nw-RoxIo67S8_0\tbear\nw_dzHMbP1wk_0\tcar\nxAdflusGMAM_2\tbear\nxAdflusGMAM_1\tbear\nxBQVhJr5tn4_0\tcar\nxBQVhJr5tn4_1\tcar\nxBW2dB1aHqE_1\tbear\nxE-fIbBizEc_0\tboat\nxIjuSe8NERE_0\tboat\nxIr-46lqsbs_4\tboat\nxI3wdcR9GOU_0\tbear\nxJaqlEqJIsg_0\tcar\nxKUjAAXXark_1\tcar\nxKjnn1lJsUE_0\tboat\nxLl8JlHPals_0\tbear\nxL0aucx8LjA_0\tcar\nxM1N_JeMAns_0\tcar\nxNfYVO0HOWA_0\tbear\nxNfYVO0HOWA_1\tbear\nxNqzZtEMt6A_1\tcar\nxOQ_zqhFFoQ_0\tcar\nxOQ_zqhFFoQ_1\tcar\nxOQ_zqhFFoQ_2\tcar\nxPgexGqlrpM_0\tboat\nxQ2ursLiV78_0\tboat\nxVl7ISxNOBo_1\tboat\nxWfIV6ykSZU_0\tumbrella\nxYRbcgZcjTo_0\tboat\nxZdiy-peZpE_0\tbear\nxc
C48didfYg_0\tcar\nxds7aav_WA0_0\tumbrella\nxeEFpaZutxQ_2\tcar\nxeEFpaZutxQ_0\tcar\nxemv_TG3nHo_2\tboat\nxf7e7HpnDAI_2\tumbrella\nxhLH-f-e2Ds_0\tbear\nxhLH-f-e2Ds_5\tbear\nxhLH-f-e2Ds_1\tbear\nxhLH-f-e2Ds_3\tbear\nxhLH-f-e2Ds_4\tbear\nxhYRRVSUjcI_0\tbear\nxh6_xD0_FUY_0\tumbrella\nxi1l0PNYmVU_0\tcar\nxi1l0PNYmVU_1\tcar\nxk-PCxxgLyQ_0\tcar\nxlSq_r-1VZI_0\tcar\nxlTBS98u4Xk_1\tboat\nxl03KNG3qcY_2\tbear\nxl03KNG3qcY_3\tbear\nxmXEOSj-QR8_0\tumbrella\nxm61skXJVHY_0\tbear\nxm7yMjZR_HM_0\tcar\nxniXqwdU3rM_1\tcar\nxn_6GQGdyww_0\tbear\nxoL1TWqV2UY_8\tcar\nxoL1TWqV2UY_3\tcar\nxoL1TWqV2UY_4\tcar\nxoL1TWqV2UY_6\tcar\nxo93ACxVFCE_0\tcar\nxu3hCCY1M98_0\tcar\nxvJ-vgSlRFQ_1\tbear\nxyUFBTV5sfA_1\tboat\nxyUFBTV5sfA_5\tboat\nxzFwd6rktG8_1\tbear\nx1PZyiPtcD0_2\tbear\nx1PZyiPtcD0_0\tbear\nx2MUZI0ckUs_0\tboat\nx51qh-jbh2w_0\tcar\nx8bgasvRg_0_0\tcar\nx_PtUMz2m3g_0\tumbrella\nx_yZa__92dU_0\tbear\nyE9ySV90e2U_2\tbear\nyFdbcjv2scY_0\tbear\nyFwt2mHmJQw_2\tumbrella\nyFyTQPoWKrg_0\tcar\nyGYLwBmuRVI_0\tbear\nyGYLwBmuRVI_1\tbear\nyGq_wX2hSms_0\tcar\nyHFbPuIOGec_0\tboat\nyMVPEp44IcU_1\tcar\nyNYzTl3zuSA_0\tcar\nyOeQRz1L-6w_0\tboat\nyPx8JYuB8jo_5\tbear\nyTEPer0Bvnk_0\tboat\nyTr7cqNxVw8_0\tboat\nyVwePYmRfaA_2\tboat\nyVwePYmRfaA_0\tboat\nyV3gYczZGSU_0\tboat\nyWKpg3C3HRA_0\tumbrella\nyWQT0KUXmZs_0\tcar\nyXA2s-Ylkx4_0\tumbrella\nyYt1-j5ltQg_0\tbear\nyZOWsBbP8Dw_1\tboat\nyafgzvvEBsk_0\tcar\nygqn0Cw0cJg_0\tboat\nykAF4z2vPRI_1\tcar\nynSIMn0mh5Q_0\tcar\nynuXudWT-jg_1\tboat\nyqDO3G8QSxs_2\tboat\nysudb_DYv1E_0\tbear\nytzy45KRs4k_0\tumbrella\nyy-1Eaz2SGI_4\tboat\nyy-1Eaz2SGI_5\tboat\nyy-1Eaz2SGI_6\tboat\ny26dbfVQaAI_0\tcar\ny3HDa7ZvWW4_0\tumbrella\ny5rlUzgK0z4_0\tumbrella\ny6l_Xj3A7dU_0\tbear\ny6nMm6sNieE_0\tbear\ny6oa4gTfIaw_0\tboat\ny7_Teuq-Jd4_0\tumbrella\ny-J-zu3KYKk_0\tboat\ny-lv7_3azcQ_3\tbear\ny-lv7_3azcQ_1\tbear\ny-lv7_3azcQ_2\tbear\ny_Kbef75lDk_0\tumbrella\ny_OvZEh5PxQ_1\tumbrella\nzA7rl-0pCw4_1\tbear\nzBCRUfv1YVo_0\tcar\nzBomR9gjgg4_1\tcar\nzCnqglOaM40_0\tboat\nzC1J8hrm_FI_0\tboat\nzGOI3Uds1-A_0\tcar\nzGvuvfZeouY_0\tcar\nzHwK-Ov5Dn8_1\tbear\nzIGdWP0BOPc_0\tcar\nzIoLntgax_4_0\tcar\nzIrTQvy-DtU_0\tumbrella\nzKN-t-wHfVw_0\tcar\nzOxKFs0x_-M_0\tcar\nzPUoexM4GJg_1\tbear\nzS4G-dKS3dg_0\tcar\nzUYNrm52mG8_0\tcar\nzU9O4EpnP8g_0\tboat\nzW4j5HFdFCE_1\tbear\nzW9G9_luulU_6\tboat\nzW9G9_luulU_8\tboat\nzX70EOhK1IA_4\tboat\nzX70EOhK1IA_0\tboat\nzX70EOhK1IA_2\tboat\nzX70EOhK1IA_3\tboat\nzYNSRTs7wcI_0\tboat\nzZMZCzV930Y_0\tboat\nzaXvp0LSorI_0\tumbrella\nzcIJlqUAlyQ_0\tboat\nzcdpKM2gDkA_3\tbear\nzdWOfDZyRWg_0\tcar\nzdp6LbsF3Fo_0\tcar\nzdp6LbsF3Fo_1\tcar\nzglydzoqdNw_1\tcar\nzhSMuVKY4jM_1\tboat\nzhgbbZA2jZo_0\tcar\nzj0QGbLx2Ek_0\tumbrella\nzkC1ygaZUL4_0\tcar\nzkFlovQ2F80_2\tumbrella\nzkFlovQ2F80_4\tumbrella\nzkFlovQ2F80_0\tumbrella\nzkYqOEAbTTE_0\tcar\nzk5BFmxsRfQ_1\tcar\nzmXJ3VmO_yQ_0\tbear\nzmXJ3VmO_yQ_1\tbear\nzn_LOCSgnBI_0\tcar\nzobMJDgPWmM_0\tboat\nzpW9Kjtbu7g_1\tboat\nzp4-YNYr-l8_0\tcar\nzqDdt_wpfcM_0\tbear\nzqyhnAN5qnA_0\tcar\nzq-AjPBQb3w_0\tumbrella\nzsszkZnE24M_0\tcar\nzsszkZnE24M_1\tcar\nzwKNqBmI95k_0\tumbrella\nzxfyvjQQ0QY_0\tcar\nzxuleRJc5Pw_1\tboat\nzySbpWHTUUI_2\tumbrella\nzzDlzbpuFUg_1\tcar\nzzOYV3PIwDo_1\tcar\nzzljeIZDjM8_0\tcar\nz1CT7NYPStE_0\tboat\nz1CT7NYPStE_2\tboat\nz1DFtYFOfsQ_0\tboat\nz1GcDqMXI5U_0\tbear\nz1WPNBklZbo_0\tbear\nz3V1O449zY8_0\tcar\nz3V1O449zY8_1\tcar\nz3V1O449zY8_2\tcar\nz32BNdijIPo_0\tcar\nz4C0C5AtXd8_1\tbear\nz4Nk6je-k5E_5\tbear\nz4Nk6je-k5E_6\tbear\nz4Nk6je-k5E_2\tbear\nz4Nk6je-k5E_4\tbear\nz4YdhKjeNQk_0\tcar\nz5PqRVPhGGo_0\tbear\nz56C-TtwATI_0\tcar\nz6Bzk_B2FVo_1\tumbrella\nz6gL7THeOz4_0\tcar\nz8GzZUKj04k_0\
tcar\nz8QYapjsTBo_0\tbear\nz8WzXJMRLkg_1\tbear\nz9CJpzFuqHU_0\tboat\nz-gqhqI7U10_0\tumbrella\nz-n_qZEuRko_0\tumbrella\nz_CWMOiNpzY_1\tboat\n0Ah0DHbJ6Uw_0\tbear\n0B-l9QmJK3I_0\tcar\n0DHXMcNUn60_1\tumbrella\n0EEILwHA4Dg_0\tumbrella\n0FRiwnN3Wv8_0\tbear\n0FUPhsPv9vs_0\tboat\n0FUPhsPv9vs_1\tboat\n0GR555fb7uE_1\tboat\n0GR555fb7uE_3\tboat\n0Gal36CHm94_0\tcar\n0Hf-spRN8iA_0\tbear\n0H81H-1s398_0\tcar\n0JkwSF_s82I_0\tumbrella\n0JxUW6X6VTA_1\tcar\n0JxUW6X6VTA_2\tcar\n0LY3jcKxA2E_0\tboat\n0NN0x0UcFVI_0\tcar\n0NgLxOGQPPM_1\tcar\n0Nh6NERAbQM_0\tumbrella\n0NyneL4SB78_0\tumbrella\n0O2cDoxCAhA_0\tcar\n0PqvPOqRHik_0\tbear\n0ROl0QaHTgU_0\tboat\n0ThOYMXH3Mw_0\tumbrella\n0TyHCEslM-4_0\tboat\n0UGD0u7LEPY_0\tcar\n0UVJn4oJR3I_0\tcar\n0Vu78K6ZsOk_2\tbear\n0XETGtPrUR0_1\tboat\n0XrWsyRsBYs_1\tbear\n0YWXAZlIFZE_0\tcar\n0YWXAZlIFZE_1\tcar\n0YaZ8lrPQJc_0\tboat\n0YaZ8lrPQJc_2\tboat\n0YaZ8lrPQJc_5\tboat\n0ZJeQYZxfGQ_7\tbear\n0ZJeQYZxfGQ_6\tbear\n0agrBEPe_w4_2\tbear\n0bx9mbPU7zo_0\tumbrella\n0c5dV9e0rL0_1\tcar\n0hafN9Sygek_1\tbear\n0jL3xw-Gfq8_2\tboat\n0kyg-HgBo7o_0\tboat\n0lXT8w6Nvz4_1\tcar\n0loh5Nhb32w_0\tbear\n0lyjvzKFjn0_1\tbear\n0lyjvzKFjn0_2\tbear\n0mIwwe5irHk_0\tcar\n0mSZED2I97w_0\tcar\n0mSZED2I97w_2\tcar\n0mSZED2I97w_1\tcar\n0oHtf7nx8m0_0\tcar\n0oHtf7nx8m0_1\tcar\n0peaciSDgqg_0\tboat\n0rIli5nmkus_0\tcar\n0sAim6AJwgY_0\tcar\n0sAukk-qZs8_1\tcar\n0sWjMW4aW_Y_0\tbear\n0sbXLfSaBvk_0\tumbrella\n0tapt-cyoSY_12\tbear\n0vC1j_r-gPc_1\tboat\n0vun54M7U5c_0\tumbrella\n0wXgXCqnblk_0\tumbrella\n0wzUHyuc5JE_0\tboat\n0zKI3bZagm4_2\tboat\n01aEu9jy-zA_0\tcar\n02AiKGZAu3k_2\tbear\n02bMGGTZE_M_0\tboat\n04FPpXq4qHc_0\tumbrella\n04FPpXq4qHc_5\tumbrella\n04jEe0lfdos_0\tcar\n04p58ydbAvM_0\tcar\n05VoMpLo7Cc_2\tboat\n05rSMaVX3yA_1\tboat\n06kAyBeWx5c_1\tumbrella\n08Fj_YF5X8Q_2\tbear\n0-Jhv9dONP4_0\tbear\n0-zDto8pBU4_0\tbear\n0_ByJ0bAD70_1\tbear\n0_P-fui2MeI_0\tboat\n0_soacANAc8_0\tumbrella\n0_2dsK8nudw_0\tboat\n0_2dsK8nudw_1\tboat\n0_2dsK8nudw_2\tboat\n1EIBn1zqhJA_0\tboat\n1Fv0cFr9B_Y_0\tbear\n1Gd-hUsNAsQ_0\tbear\n1Gd-hUsNAsQ_5\tbear\n1HhUsmUQmRY_0\tboat\n1KnTTBiP4ig_0\tumbrella\n1LKTvGMlL60_0\tbear\n1MVBovgEi4s_0\tbear\n1OvseXyo27E_0\tumbrella\n1PYMTwN-dl4_0\tboat\n1REcM5EtrZg_0\tboat\n1REcM5EtrZg_1\tboat\n1SQF7Tb6pUA_2\tbear\n1T4c050qGWo_0\tboat\n1UGqDCwd0TU_2\tbear\n1VziogDsYAs_1\tbear\n1WOfnEUurGM_0\tboat\n1YelAl0OQQg_0\tbear\n1anH_WthXTc_0\tumbrella\n1anH_WthXTc_1\tumbrella\n1avrrmB_Q5s_3\tbear\n1cbY1pGpdhM_0\tumbrella\n1cy1p57Z49c_0\tboat\n1dmbrwAgFuc_0\tbear\n1fPDeE9SwYI_6\tbear\n1gbd0C2wJrI_2\tbear\n1huEYUsV2ng_0\tboat\n1iD7yA3Elk4_0\tumbrella\n1iLq0PGfeCs_1\tboat\n1irtTU-RM8g_0\tboat\n1lCEFERcEKg_1\tboat\n1lSGhF2K_lM_3\tbear\n1l-NcYZKF8w_0\tumbrella\n1miy1sfneCI_0\tbear\n1qIgbCRt2C4_0\tbear\n1qknV5a5WQA_5\tbear\n1rt4XRA4RHE_0\tbear\n1rt4XRA4RHE_3\tbear\n1v8UDwaLZOk_1\tboat\n1yym4MiYTrs_0\tboat\n1yym4MiYTrs_1\tboat\n1zGry9uSuEs_0\tboat\n10oedSsXbw0_0\tbear\n14R96gxvKtU_1\tboat\n15ImffljXUs_1\tumbrella\n16BnXZheZE8_0\tboat\n18XvETJJDqA_0\tbear\n19ID_DbSclo_1\tbear\n19vhT11oPv4_0\tumbrella\n1__PWUxtAJI_0\tboat\n2Da3689mFHo_0\tboat\n2DimBSzdfPw_0\tboat\n2Fo-71zWO5Q_0\tbear\n2F9aM3isFOg_0\tboat\n2HDMk0mGW_w_0\tumbrella\n2IWPUKQEQc0_0\tboat\n2Irm_qCNQ_g_10\tbear\n2Irm_qCNQ_g_2\tbear\n2Irm_qCNQ_g_4\tbear\n2IyAOD0OkOg_0\tbear\n2I_k7e8QpWI_1\tumbrella\n2LWxx48-zmY_0\tboat\n2OYJuEnLK_w_0\tumbrella\n2O-9dVZBFm4_0\tumbrella\n2PL1rgU3jQ4_3\tbear\n2Pxvoh1PnpM_0\tumbrella\n2QOthN0H0jo_0\tboat\n2UBlre798kQ_0\tboat\n2U7mw3Z_nrI_1\tbear\n2ZeSJRQEIDg_0\tumbrella\n2huYkh1UAa8_0\tboat\n2j5p2kIFnF8_0\tboat\n2kAmyrOg2is_0\tumbrella\n2l4-4yNg4uM_0\tbear\n2
l4-4yNg4uM_1\tbear\n2nWt5S5AcdM_0\tbear\n2oAbMVTBupI_2\tboat\n2olUVemt4wc_0\tumbrella\n2rbAoA6KuZ4_0\tboat\n2rzjzIvxob0_0\tumbrella\n2sDjXjM3vuk_4\tbear\n2sgrwTqPz-Q_1\tumbrella\n2vC56ILIWK0_1\tbear\n2w5-fxqKaR0_0\tboat\n2xzgP87zGDM_0\tboat\n20nMgEiCqVs_0\tbear\n223bkVsFvUg_0\tumbrella\n23-uEh5ygBE_0\tboat\n24kbYgf2_xM_0\tboat\n27Yd0qtplBs_0\tboat\n2_VfwSLic7o_0\tboat\n3EBKN0vh_8Y_0\tumbrella\n3EQ8WatEGfM_1\tbear\n3FBfwZ1vctY_0\tboat\n3GXWmiQHAA4_0\tboat\n3Hc48OCKEaQ_0\tbear\n3ICqGhWY-HU_0\tbear\n3IOrKwocmOM_0\tbear\n3KUAz0bb87g_0\tumbrella\n3KqDceVP3xg_4\tboat\n3MqGpNqj-fo_2\tbear\n3M5VwMaIzvc_0\tbear\n3PN8pPy1PLc_1\tbear\n3PN8pPy1PLc_4\tbear\n3PuByhkRjdA_0\tbear\n3P8-bKeMTDU_0\tbear\n3P8-bKeMTDU_1\tbear\n3QQYEFonITE_0\tumbrella\n3SJI7j-hBwU_0\tumbrella\n3SbQY-gSjTI_1\tbear\n3SofVK5wM1k_0\tbear\n3T5iqGlQLn8_0\tbear\n3T5iqGlQLn8_4\tbear\n3UJ24QWw0js_0\tbear\n3UUo8exclHk_0\tumbrella\n3VZuzA8i9tI_0\tboat\n3ZWFSRxFKp8_4\tumbrella\n3ZwOfZ6mdTE_0\tumbrella\n3cBiXmqHBLE_0\tumbrella\n3eH1SNLDT7U_1\tboat\n3fiWerkBy1s_0\tboat\n3fm54fM2fh0_1\tboat\n3kOuqiigfhM_0\tumbrella\n3khbnSUKCjw_0\tumbrella\n3khbnSUKCjw_3\tumbrella\n3khbnSUKCjw_5\tumbrella\n3khbnSUKCjw_1\tumbrella\n3leEAIEn6wg_1\tbear\n3oFuTv4g5QE_0\tumbrella\n3oFuTv4g5QE_2\tumbrella\n3ohEBnBnt7o_2\tumbrella\n3pli8lLuPF0_1\tbear\n3qGBc-85DMI_1\tbear\n3q0pJjI8W5o_0\tbear\n3v6DRHFQTz0_1\tumbrella\n3yct6bNJF9c_1\tboat\n3zhjI0Cn1AM_1\tbear\n3z0lIa162ps_0\tbear\n31PMTcBL5-o_1\tumbrella\n31PMTcBL5-o_0\tumbrella\n32GDx70-6cQ_2\tboat\n351brnq0Ryk_1\tboat\n38Tbojzrw80_3\tbear\n3__l885Wkz4_0\tbear\n4A-5QKpDBFE_0\tbear\n4A-5QKpDBFE_1\tbear\n4BbVz6UbHFY_1\tbear\n4GTfq2m-SnY_0\tbear\n4K0agSc78Js_0\tumbrella\n4K0agSc78Js_1\tumbrella\n4MUu-MomyB0_1\tbear\n4N85gqVvlWU_1\tboat\n4OQGDsYtfSg_0\tboat\n4QdM0aAdf4g_3\tbear\n4Qf9iJ-IMDg_0\tbear\n4R5HjEAW6Y4_0\tboat\n4ViaowUogyA_1\tbear\n4ViaowUogyA_3\tbear\n4VxP7VQ-WtQ_0\tbear\n4XCmBo2k6Hc_1\tboat\n4h2kJG8rDAk_1\tboat\n4h8E8d4P5ms_0\tumbrella\n4iktvQjNLS8_6\tboat\n4lyoTIuPa9s_0\tumbrella\n4rxmIDjvHvo_0\tumbrella\n4td5npVxACw_0\tboat\n4td5npVxACw_2\tboat\n4td5npVxACw_3\tboat\n4td5npVxACw_1\tboat\n4u8RQi7_xUQ_1\tboat\n4zYtj8BG_ZA_0\tboat\n4z3XNRP4Qvk_0\tboat\n40Ogw6O8g2M_0\tumbrella\n42-2FjqvBRw_0\tboat\n44nxZjEYqLI_0\tboat\n45HOGdlAVq0_2\tumbrella\n45HOGdlAVq0_3\tumbrella\n45HOGdlAVq0_6\tumbrella\n46Sp7L3iKK4_1\tboat\n47mMBnGHuOE_7\tboat\n48IdCSlEHlM_0\tumbrella\n48pGfV-z-x0_0\tboat\n5AhKWEjMmUw_0\tumbrella\n5AzSuHB6_jc_0\tumbrella\n5Ce6X4i25i4_4\tumbrella\n5Ce6X4i25i4_0\tumbrella\n5EaEfiCIEcA_4\tumbrella\n5EaEfiCIEcA_3\tumbrella\n5FZykf07mxY_0\tumbrella\n5FZykf07mxY_1\tumbrella\n5FviZXBOPWk_0\tumbrella\n5H6nBOIIziQ_0\tumbrella\n5IdOF-nnOkU_6\tboat\n5I2hW9gRRwU_1\tboat\n5JubFWZKmZc_1\tumbrella\n5Kf5KxsLCmI_0\tboat\n5PxBf16_oMg_0\tumbrella\n5WUSwyO4k7A_0\tumbrella\n5XWfGTUYLbQ_6\tumbrella\n5Y3Lrgpl6s8_0\tumbrella\n5dL3vGF_-ug_0\tboat\n5e9luwmv6mU_0\tumbrella\n5g_ugz2HmKM_2\tboat\n5iYpaHYUElI_0\tboat\n5iYpaHYUElI_3\tboat\n5iYpaHYUElI_5\tboat\n5nMhK15X4R8_2\tboat\n5rT33oH7aV4_0\tboat\n5srF-BzF_go_0\tumbrella\n5suoa4TFYd4_0\tumbrella\n5vMpwDm27VM_0\tboat\n5vyqdnOWivc_3\tumbrella\n52m9SGVaiW8_0\tboat\n521jpaMoQ58_2\tboat\n537tF6-uRB4_0\tumbrella\n561s-m-0mqU_0\tumbrella\n561s-m-0mqU_2\tumbrella\n561s-m-0mqU_3\tumbrella\n582V5-HF4yg_0\tboat\n582V5-HF4yg_1\tboat\n597l2xVl9Tc_0\tumbrella\n6C42Di7bIpE_1\tboat\n6FG49plD8TQ_0\tboat\n6FQz5w7HaKg_0\tboat\n6JGioFiqwww_0\tumbrella\n6JLdACYt7D4_1\tumbrella\n6MVLpYA1t8E_1\tboat\n6MVLpYA1t8E_3\tboat\n6OEFFwKhAFw_0\tboat\n6PVjXDW7JlY_1\tboat\n6Sxb0d7xIys_0\tboat\n6Ug54vSsrio_0\tumbrella
\n6WP3KFUYTrM_0\tboat\n6XrW8Yjd16I_0\tumbrella\n6c0RAJO-AGg_0\tumbrella\n6inTfRLx_58_0\tumbrella\n6it-xMMovj4_2\tumbrella\n6khDUjxTmdo_0\tboat\n6mvP_NKlIHg_1\tumbrella\n6qpeBvh9pqs_0\tboat\n6rowMK5ERz8_2\tumbrella\n6sN56W9U7tY_2\tboat\n6tLtEuKyj1E_1\tboat\n6tQrO26kwOY_0\tumbrella\n6t0mbpnPPdg_0\tumbrella\n6t55VfdtMWE_4\tboat\n6t55VfdtMWE_7\tboat\n6t55VfdtMWE_8\tboat\n6t55VfdtMWE_0\tboat\n6uM7MFSH15g_0\tumbrella\n6uvJft-l1R0_3\tboat\n6yCsWwj87QI_0\tboat\n6zxrdodJut0_0\tumbrella\n61RreGvIPOk_1\tboat\n66WmMvvZOxI_0\tumbrella\n68C7HGRrJ8o_0\tumbrella\n68kx9VUVhzE_1\tumbrella\n6-Nh0bY1nUk_0\tumbrella\n7HD-o1yj47U_0\tumbrella\n7NXmDbHoJn0_3\tumbrella\n7NXmDbHoJn0_5\tumbrella\n7NXmDbHoJn0_6\tumbrella\n7RcyfoxqADA_0\tumbrella\n7WKzOMuf3Cg_1\tumbrella\n7a_nsGmUZNU_0\tumbrella\n7kSyhlnimb8_0\tumbrella\n7kaTL52xbiY_0\tumbrella\n7tlbytb63z4_0\tumbrella\n7uR1cEVdMDo_0\tumbrella\n7ydX3wCeOgk_0\tumbrella\n71k1TftUiYE_0\tumbrella\n76ljAryU9Bw_0\tumbrella\n78lA-eJGUn8_0\tumbrella\n7-ugeb_4vqE_0\tumbrella\n7_k6DM-PlXg_0\tumbrella\n8AZtNaOO_8A_1\tumbrella\n8FhIv4h9D3E_0\tumbrella\n8FhIv4h9D3E_1\tumbrella\n8H88MFohrUM_0\tumbrella\n8SuTrZ6xu2E_0\tumbrella\n8d_Vt2SWIvg_0\tumbrella\n8fsRltS2ul4_0\tumbrella\n8nReKSsSgGE_0\tumbrella\n8oOer9PS53g_3\tumbrella\n801xOkfqjkM_0\tumbrella\n84Ber6V3IrA_0\tumbrella\n84zKfCKtsDo_0\tumbrella\n9CGTYEUn-mo_2\tumbrella\n9JFicuESmEA_0\tumbrella\n9JiMiflDI68_0\tumbrella\n9J4O20b9qnY_0\tumbrella\n9S2mGfudahk_0\tumbrella\n9UVLb_-RbfA_0\tumbrella\n9bFrwgSSAkQ_2\tumbrella\n9bFrwgSSAkQ_4\tumbrella\n9bFrwgSSAkQ_0\tumbrella\n98OOq0Wh904_0\tumbrella\n99uO6qHrhsU_0\tumbrella\n-PaNPkpeFdI_0\tumbrella\n-PaNPkpeFdI_4\tumbrella\n-Z3_Ixwl1YY_0\tumbrella\n-bA7JdKB0LA_0\tumbrella\n-d9Vg5j5vZU_1\tumbrella\n-eJmt-GItyI_0\tumbrella\n-k8FuC01N5E_0\tumbrella\n-0y7A0GDVY8_3\tumbrella\n-0y7A0GDVY8_5\tumbrella\n-0y7A0GDVY8_7\tumbrella\n-3TIfnTSM6c_1\tumbrella\n-3TIfnTSM6c_2\tumbrella\n-98I0B3kkqw_0\tumbrella\nAAVVg5xx0p8_0\tperson\nACB01WGxOSM_0\tskateboard\nACDc6tGnXXQ_0\telephant\nADWNgv6trag_0\tperson\nADznOfGgfj8_0\tperson\nAEEVGgiuS5c_0\tperson\nAEHbOzlbmOQ_0\tdog\nAEJTsQNMkME_0\tbus\nAFlkSTJ-mF0_0\tdog\nAGRV17_1OS0_1\tbus\nAHsZ4FTQ8Ew_0\ttruck\nAIViQtfacts_2\thorse\nAJBtOVA1KSw_0\tperson\nAJbQP-rIwCY_0\tperson\nAJ9ODXcnhVo_0\tperson\nAJ9ODXcnhVo_1\tperson\nAKBq0oH8IOM_1\ttrain\nAKBq0oH8IOM_3\ttrain\nAL9dFpjFlLM_0\thorse\nAM-TjLTvBSU_5\tbear\nANA-pgSAzGI_0\thorse\nANVnK2HmZno_1\tairplane\nANVnK2HmZno_7\tairplane\nANeOKwjvX7w_0\tdog\nAPP17gURiBU_0\tbear\nAPP17gURiBU_1\tbear\nAPTYyEYJfOY_0\tbird\nAQD8YBCTSPs_0\tumbrella\nARaILMtc8fs_1\tperson\nARsokXpl07Y_1\tboat\nARsokXpl07Y_2\tboat\nASPK-ZSB9Ts_0\tperson\nASfv8cmreoA_0\tperson\nASfwyHCtnIU_0\tperson\nAS5LvQT9rrQ_0\tperson\nATy91FTiYvU_0\tperson\nAVF8lCKe6os_2\tumbrella\nAWRcJpWTPwQ_0\tperson\nAWtY9Y2mPso_0\tmotorcycle\nAWwDsm1WnKE_1\tknife\nAXjDlIFY7ww_0\tboat\nAYAkMpj_MHA_2\tbicycle\nAYAkMpj_MHA_5\tbicycle\nAYAkMpj_MHA_6\tbicycle\nAax6L0Qqgio_0\tbird\nAcYd7y_-V74_0\tperson\nAdY55Q3qVK0_2\telephant\nAgbIDWiOXQ8_0\tperson\nAgsYgmA19z4_0\tperson\nAhWU-QUzOOA_0\tperson\nAiqGEAjF6QI_0\ttrain\nAiu6EH4a8v8_0\ttrain\nAiu6EH4a8v8_1\ttrain\nAiu6EH4a8v8_6\ttrain\nAixV6QSGqto_5\tbird\nAixV6QSGqto_6\tbird\nAjj7WZLukdw_0\tmotorcycle\nAjpbAriY8rU_0\tperson\nAlab3dEYXM0_0\tperson\nAoAoH9yb6zY_11\tbear\nAoAoH9yb6zY_6\tbear\nAo7Sa2afCb4_0\tperson\nApDgLQUsEqc_0\tbicycle\nApakHefqWv0_2\tairplane\nAqIG0zk2bpg_0\tperson\nAqTXLh7DtcM_0\tperson\nAqTXLh7DtcM_1\tperson\nAqdoD9jkBFc_0\thorse\nAqj7VnXQt4s_0\tcow\nAq4dBqb2SbQ_0\tperson\nArgYRdhvlc0_0\tskateboard\nAsPXe7qUyuI_
0\tperson\nAuLrPQqrKV4_0\tmotorcycle\nAuY8vITQrsE_0\tcow\nAvBm7iHiDdI_2\tboat\nAvSgTHXgSXQ_0\tcow\nAwVdVzh1Eh0_0\tperson\nAwvDMOeS7no_0\tperson\nAwzt30r0OLQ_1\tbus\nAw2t3AalW4s_4\telephant\nAyh_2ithjCE_0\tcow\nAyh_2ithjCE_1\tcow\nAyh_2ithjCE_2\tcow\nAylQiap7dj4_2\tbear\nAylQiap7dj4_3\tbear\nAy9QToaaTGc_1\ttruck\nAy_a2OkcdEk_0\tperson\nAzVvPUazPYk_0\tmotorcycle\nAzzlFx32dQs_1\tboat\nA1RSx6j_ra0_9\telephant\nA1RSx6j_ra0_4\telephant\nA1RSx6j_ra0_6\telephant\nA27YZAfJmrc_0\tknife\nA27YZAfJmrc_1\tknife\nA3E72P24pf8_0\tperson\nA3cgW1rDOcI_0\tperson\nA32Fi06yKpU_0\thorse\nA5U6AHe9_4A_0\ttrain\nA5pUgLCQq9k_0\telephant\nA5pUgLCQq9k_2\telephant\nA5pUgLCQq9k_3\telephant\nA63BoLTUNAM_0\thorse\nZBzVnA8zj6Y_0\tperson\nZB45YyN1WUM_0\tbus\nZFYGhJKiw5w_1\tgiraffe\nZGfOCwbu-PY_0\tperson\nZHTMfW1eaW0_0\tcat\nZHURcze8rOI_0\tperson\nZIJUWQKzzsQ_0\tperson\nZJgwacILoAw_0\tperson\nZMgP2kxv5E8_1\tperson\nZM3wX5zgKOA_0\tperson\nZNXnJahaXIY_0\tperson\nZOc4wfLX2Jo_0\tcow\nZOnuSLp6asQ_0\ttrain\nZPQNucbAjBM_0\tcow\nZQITHWk17a0_0\tbicycle\nZQxmb_nVoH4_1\tcow\nZRUXj8o10Po_0\tperson\nZSnP5B6NiI8_0\ttrain\nZTqDuCZVTmM_1\tairplane\nZTqDuCZVTmM_5\tairplane\nZU3AYv2eU74_0\tmotorcycle\nZU4XQbNaYQc_0\tknife\nZVZWEWzZg50_1\tbird\nZVjep3tDJjU_0\tperson\nZWL6CshdsuY_1\tcow\nZWogXn8xs7E_0\tmotorcycle\nZXU4Uua3l0E_0\tcar\nZYOUZjfZMhk_0\tcow\nZYS0h2pAK6M_0\thorse\nZYm5iVw0YdE_0\ttruck\nZY8pG-I5Ax8_1\tbicycle\nZZBBcTBPmis_0\tperson\nZZpckGIvGTI_1\tboat\nZana4yKDGxY_3\tskateboard\nZana4yKDGxY_1\tskateboard\nZbnxzLt8FJk_1\tdog\nZbnxzLt8FJk_0\tdog\nZcXtrHkjobw_0\tperson\nZelRUJyMMkw_0\tperson\nZeqhN6ndscE_0\tperson\nZe8cOn59rW4_0\tperson\nZe8cOn59rW4_1\tperson\nZj1TAkYHlQo_0\tperson\nZj7GzCIi_9c_0\tperson\nZlEiOICCDdc_0\tperson\nZlH8Hd961FM_1\tknife\nZl30Oy50PfQ_0\tperson\nZmXKvpkfHZA_0\ttrain\nZmdvunyqJB8_0\tbus\nZqTkqkEbXEk_0\tcow\nZrPn3BODZJM_1\tperson\nZrPn3BODZJM_0\tperson\nZuBD3A8Vecs_0\tbird\nZuEbZKmjxaA_0\ttrain\nZuEbZKmjxaA_1\ttrain\nZu7udgxuUkk_5\tairplane\nZu7udgxuUkk_6\tairplane\nZu7udgxuUkk_1\tairplane\nZu7udgxuUkk_2\tairplane\nZu7udgxuUkk_3\tairplane\nZvadVS1LnQU_0\tbus\nZvadVS1LnQU_1\tbus\nZvadVS1LnQU_2\tbus\nZwLvs9JUsFY_0\tperson\nZw4-vF-vOMk_0\tperson\nZxO4Gd5fhOg_1\ttrain\nZxO4Gd5fhOg_2\ttrain\nZxX6DBopv30_0\tskateboard\nZyEA24Ud3EM_0\tperson\nZyM24-ekpz8_0\tperson\nZzBvzlzuw4M_0\tperson\nZ03ZC9qmwDc_0\tzebra\nZ1N0xBj_H3E_0\tbird\nZ1ns6XidhT8_0\telephant\nZ2S6XnfE5vI_0\tperson\nZ2kb4LiQJUU_0\ttrain\nZ2zB-gtDgOM_1\telephant\nZ22DSYtblFo_0\tbicycle\nZ5rHikLjARg_0\tperson\nZ6XKceRI1bE_0\tbus\nZ6XKceRI1bE_3\tbus\nZ6XKceRI1bE_6\tbus\nZ6XKceRI1bE_10\tbus\nZ6qQE2_jsIM_0\tskateboard\nZ68yTt3upjk_0\tmotorcycle\nZ8SxFPbnptI_0\tperson\nZ8pujku9bPw_0\tperson\nZ9vZk0io0fw_0\ttruck\nZ9vZk0io0fw_1\ttruck\nZ-R7-Ww03t8_0\tknife\nZ_kKBbIzdXM_0\tperson\nZ_pwMCnOdk4_0\tknife\nZ_pwMCnOdk4_3\tknife\nZ_0227AsAvk_0\tbus\nA_a1H0EO64s_0\tperson\nA_a1H0EO64s_1\tperson\nA_pc9ov1cT4_0\tperson\nA_weMKVolQM_3\tbear\nBBC4Jmlky4Y_0\thorse\nBBHBoewIXhw_1\tumbrella\nBBHBoewIXhw_3\tumbrella\nBBHBoewIXhw_4\tumbrella\nBCKR989ZYyM_0\tcar\nBCKR989ZYyM_2\tcar\nBCpaJ-tEv-0_0\tcar\nBFP7MT8RM8U_0\telephant\nBF7cTjrTSwY_0\tcow\nBF8d91cJS3o_0\tperson\nBGcAVF0Zi_o_0\tperson\nBGzetX8Dz-M_0\tcow\nBHurVVjld8Y_0\tperson\nBIUeggZa3SU_2\tperson\nBIUeggZa3SU_0\tperson\nBIUeggZa3SU_1\tperson\nBIfedkd3HEg_0\tboat\nBJaAlMv6b_U_1\tmotorcycle\nBKKSiAed9CI_0\thorse\nBKtAnbXVk1E_0\tperson\nBLCEb_seyUs_0\tairplane\nBLCEb_seyUs_1\tairplane\nBL8o-tdhlxs_2\ttrain\nBL8o-tdhlxs_3\ttrain\nBMhmY9_ltFc_0\tperson\nBO7KZKb9bkQ_0\tcow\nBQRwIXopDJw_0\tperson\nBQRwIXopDJw_1\tperson\nBQswg--xiy8
_1\thorse\nBRd8dUMN0a4_0\tknife\nBRmtavy2ZEo_0\tperson\nBR0NNg6gLLo_0\tperson\nBSo8wjoZ7zc_0\tskateboard\nBTSUQrxC6l4_1\tbus\nBUHULgt_7DA_2\telephant\nBU3iU3zJnDI_0\tperson\nBU8sEPifL08_0\tperson\nBVTVHHm7vkA_0\tboat\nBWNTXqGixw8_0\tbird\nBZUE0vDhMvk_1\tknife\nBb2fkGYxp2E_0\tperson\nBckXjb2o93U_0\tperson\nBdHNtn10UKE_1\thorse\nBeXziIDAJDc_0\tperson\nBgHV_87CxNI_0\tumbrella\nBgXr-bSqMIo_0\ttrain\nBhO0SwB8Ee4_0\tperson\nBh4m74dLZaM_0\tperson\nBlYWgnhwvkM_0\telephant\nBlYWgnhwvkM_2\telephant\nBmZNFBFj-ws_0\tperson\nBm2yaWXwgjY_0\tknife\nBpXhq5Awd3U_0\tdog\nBrC6VbCzRGc_1\tknife\nBrHslMc3UMQ_0\ttruck\nBscLJpi3AJc_0\tperson\nBv8WeZ_zrJc_2\tbear\nBzEC1EEC2ts_0\tperson\nBzXWK-LODVo_0\tperson\nBzbzymdK_TM_0\tperson\nBz6Od4GfW6A_0\ttruck\nB0DRHTdmeK4_0\tknife\nB31JkzyQDkg_0\tbear\nB5GVudI81dM_0\tdog\nB6nArbkcRek_0\tmotorcycle\nB6sR2aqScR4_1\tbus\nB7IP-2uNuWs_0\tskateboard\nB7yxjI6dz4s_0\tmotorcycle\nB8iZGZlQcsg_0\tperson\nB8opNd6uzmY_1\tperson\nB9GQwzI2Eqk_0\tdog\nB92X9Xn1P2s_0\tperson\nB-CJ8miJKPs_2\tcow\nB-n15EytPtQ_0\tperson\nB_WnXKd-oZk_0\tperson\nCADW3z8x4AU_0\tskateboard\nCADyh6laNA0_0\tmotorcycle\nCA3wWkrNnRs_0\tperson\nCBSNFKeTnpA_0\tbird\nCCyZAt2Js0U_0\tcar\nCE-LfFDfGKQ_0\tperson\nCE-LfFDfGKQ_1\tperson\nCFN40hxKxM8_1\tairplane\nCFPhXPCobFg_0\tperson\nCGg2FXjvvOA_0\tperson\nCH3phgDW5Fc_0\tperson\nCINfsd8LiOU_3\thorse\nCINfsd8LiOU_0\thorse\nCINfsd8LiOU_2\thorse\nCIqkbJoJhBI_0\ttrain\nCKmnpW6gboU_1\tboat\nCKmnpW6gboU_0\tboat\nCLtQxCqTzcY_1\tknife\nCMgYFnnxQUU_0\thorse\nCOcbSVCp4ig_0\tbicycle\nCOcbSVCp4ig_3\tbicycle\nCOcbSVCp4ig_4\tbicycle\nCOcbSVCp4ig_5\tbicycle\nCRF7PcgB2yQ_2\tbus\nCSnhpel7FTA_0\tperson\nCSriNtLepLs_1\tskateboard\nCVmBocpXeTc_0\tbus\nCWCfCeYh2bA_1\ttrain\nCWvjAYt5eR4_0\tbus\nCW9n8Gahfgg_0\tcow\nCXT98GHNtRU_0\tperson\nCZ-Sh-SXaRQ_0\tperson\nCan5eao1S3Y_0\tbus\nCbB-71R_n9M_1\tmotorcycle\nCbpAv8c2Vsg_2\tcar\nCbpAv8c2Vsg_3\tcar\nCb3iufTFMEU_0\tperson\nCc2vs8vuPmU_1\tbird\nCc8E7aTdEVM_0\tperson\nCdain96L-q0_0\tbus\nCd7g3ZoA5tQ_0\tbus\nCeN22koBQRM_0\tperson\nCe2jOHHBDLk_0\tmotorcycle\nCe7IPtXkNcs_0\tperson\nCfqkbrB0Yy8_0\tperson\nCf2jOSj7eRg_2\ttrain\nCjbhKc3Vjpo_0\tperson\nCkEVvGqgVkQ_1\tknife\nCl13SbLP0hE_2\thorse\nCl13SbLP0hE_3\thorse\nCl13SbLP0hE_0\thorse\nCl13SbLP0hE_1\thorse\nCl-lB_jS8Wg_1\tbear\nCnMMdc6syXM_2\tumbrella\nCoxzc_S3ID0_1\tknife\nCpLMLRdeJJ0_0\ttrain\nCpN-qOO6Qm4_2\tairplane\nCpyK9j001RY_0\tperson\nCqNEwP8PwS4_0\tbear\nCqNEwP8PwS4_1\tbear\nCqYiAanNpo4_0\tperson\nCqbu8vOsszI_0\tcat\nCr5p4NYIR44_0\tperson\nCttKQip6B2E_0\tperson\nCuGu45Z4lt8_0\tknife\nCvszgVrLsgA_0\tperson\nCwYG2Hf6-NY_1\tcow\nCwvR1fjMeSU_1\thorse\nCyuollntwZ8_0\tdog\nC1dCZ9W6WIM_0\tperson\nC2x3rdWMAyg_0\tdog\nC3lwMd_rlG0_0\tperson\nC5MrhYouFTc_0\tcow\nC5SKibJTnR4_0\tcat\nC6dANICzCcg_0\tperson\nC6xJeHO8XSE_0\tperson\nC7NXymSnEFw_0\tbird\nC8ExRKjU1vY_0\ttruck\nC8V2-wEjv5A_1\tcow\nC8sUABBP0Jc_1\tbicycle\nC8sUABBP0Jc_2\tbicycle\nC80bmA0XrjM_0\tperson\nC886JwUWvxw_0\tskateboard\nC-Tal1XUc8o_2\tperson\nC-zp91eJqtk_3\tbird\nDApDao4fUqQ_3\thorse\nDApDao4fUqQ_1\thorse\nDApauH43Ivo_0\tbicycle\nDBArY7gHuoY_0\tcow\nDBsBTVJNxS8_0\tdog\nDBsBTVJNxS8_1\tdog\naCNvyXSuG6w_0\tperson\naCVmJCtuPeg_0\tbird\naCVmJCtuPeg_1\tbird\naDMk7CwLIxM_0\ttrain\naERiDkn_gkY_1\telephant\naEwD6TC8S4w_1\tbicycle\naFEOvm-1KvA_0\thorse\naHM4Dj-2y8o_0\tairplane\naI0y0wY4LQw_1\tperson\naI0y0wY4LQw_2\tperson\naJAd-MiEsfk_1\tperson\naJWETVChAE8_0\tperson\naJoKSWtqs0g_0\ttruck\naLYtaO_J2_U_0\tperson\naLbjxTwAV7o_0\tperson\naMDD0PenhaM_0\tcow\naMgj1BUBexw_0\tperson\naNgAUBTbUUM_0\tperson\naNmgrcJxdw8_0\tmotorcycle\naN2a-rDAYDQ_0\tdog\naN2a-rDAY
DQ_1\tdog\naOhumbyx05c_0\tcat\naQcTwMVs1Zk_0\tskateboard\naQcTwMVs1Zk_1\tskateboard\naQx68fklEXA_1\tdog\naSGod2MJ5ww_1\thorse\naSq5ZqH_K7E_0\ttruck\naTAXvSNkuvc_0\tbus\naUFxg301s68_1\tskateboard\naUsTtvWAzAc_0\tperson\naV8S5HLSI_o_0\tperson\naWHaR4ExDpk_0\ttruck\naWIZBHwtII8_0\tmotorcycle\naWgH9T2sGkE_0\tboat\naWmC8Tbgy9A_0\ttrain\naXa5YE_AmKg_0\tperson\naYAuay_bTaw_0\tcat\naYVEZrX4mE0_2\tbear\naZRYQJd-5CQ_0\ttrain\naZRYQJd-5CQ_4\ttrain\naZRYQJd-5CQ_3\ttrain\naZRYQJd-5CQ_6\ttrain\naaZxOcHxPec_0\tperson\nab_RTkwBG_4_0\tperson\nacy4aJnh9SU_0\tperson\nac68trlkEnw_1\thorse\nadsmRxlAJo4_0\tdog\nafE4YqgaPlw_0\tskateboard\nafU2vHgUvaw_7\ttrain\nafU2vHgUvaw_2\ttrain\nafU2vHgUvaw_3\ttrain\nafkiqhwTeRQ_0\tperson\naiOHs3hApm0_0\tskateboard\naiOHs3hApm0_1\tskateboard\naij190b9wtM_4\tbear\nakWe9oXeKzA_0\tperson\nak1XT_Nl7VU_0\tairplane\nak4CfFF9Bpk_0\tperson\nalbeyJBtKD8_0\tperson\nalp0ImrbacI_0\tdog\nal12VKid_P8_0\tperson\namyr6d2Ns6M_0\thorse\namyr6d2Ns6M_4\thorse\namyr6d2Ns6M_6\thorse\nao9LHpxNCqY_0\thorse\napLT3-LKJgE_1\ttruck\napXNcHROKyY_0\thorse\naqp_quyEngw_0\tairplane\naspR9ca28CY_0\tperson\nas3DGRDezaA_0\tperson\natElNgnFvlk_0\tperson\nat-Ex-CnRX4_0\tairplane\nat-Ex-CnRX4_1\tairplane\nau_kgqsZlMU_0\ttruck\navRC7M3_kuA_0\tbird\nawnORAEMUIg_0\tperson\naytqFnOdBLA_0\tperson\nazLbVm88Dzc_3\tairplane\nazLbVm88Dzc_2\tairplane\nazXlb1cxVGQ_1\telephant\na1qoB1eERn0_0\tperson\na2-lZhKXx9E_0\ttruck\na3In51YCqMg_0\tdog\na3T8T1R2wAc_0\tbear\na45XOJQaDQI_0\tperson\na5dffDLeZsI_0\tairplane\na7hjIfPGJqI_0\tcat\na74_tj_B-YA_2\tknife\na74_tj_B-YA_1\tknife\na8v0k4Bz_QA_0\tperson\na9jgDU5THOU_0\tperson\na97S4U5ezQw_0\ttruck\na97S4U5ezQw_1\ttruck\na-M2_3j67qI_4\tknife\na-M2_3j67qI_5\tknife\na-M2_3j67qI_6\tknife\na-NeSgN26Zo_0\tbicycle\nbAKQZ0F7LFw_0\tperson\nbA10PjxgV3w_1\telephant\nbBPKh_BPJ50_4\tbear\nbBPKh_BPJ50_1\tbear\nbBW4swLrEHE_0\tperson\nbB6tIraYEaI_0\tskateboard\nbCDw1dn7M1Y_0\tcar\nbCDw1dn7M1Y_1\tcar\nbCWM39xLsYs_0\tskateboard\nbDFkztSgMko_0\tskateboard\nbD6xZhJfhMU_0\ttruck\nbFnzGS_doNQ_0\tperson\nbGFRHhc7zUI_1\tperson\nbGZtGWULlF0_0\tskateboard\nbGZtGWULlF0_1\tskateboard\nbIOpYFVLesY_0\tperson\nbJviDDrUSwA_0\tmotorcycle\nbKB6ESqkOic_1\ttruck\nbKRAinEnagU_1\tmotorcycle\nbKRAinEnagU_0\tmotorcycle\nbNXcPzWMXsw_0\tcar\nbN43crdYDJE_2\tbus\nbOL9YHt5u-o_0\tskateboard\nbOL9YHt5u-o_1\tskateboard\nbOofbwD246U_0\tperson\nbPKew4jsGkE_0\ttruck\nbPRVRL4x5T0_0\ttruck\nbQkneVc9gaA_0\tairplane\nbQ64JFsWSf0_0\tbicycle\nbRWbXGRwlVY_0\tperson\nbS1Z1k6laqY_0\tperson\nbUqFsPoDKBE_0\ttrain\nbVP58EONEm4_0\tcow\nbW4nHswGFPo_0\tmotorcycle\nbW5IvSesbV0_0\telephant\nbXR-iz0NfrA_0\tcat\nbZDsNeqNn9I_0\tcar\nbZDsNeqNn9I_2\tcar\nbZDsNeqNn9I_3\tcar\nbZDsNeqNn9I_5\tcar\nbZIU-ajwk6Q_0\tbicycle\nbZIU-ajwk6Q_1\tbicycle\nbZ6Tq0KWSsU_0\ttruck\nbZ6Tq0KWSsU_2\ttruck\nbanaB07Fu9c_0\tbear\nbcKUeyEaRPw_6\tbicycle\nbdhq0SKEqe4_0\tperson\nbd3b9R30l-E_0\tperson\nbeDuTpy1tg4_2\thorse\nbeDuTpy1tg4_0\thorse\nbeLkXAaP78Y_0\ttrain\nbe30TAE-gq4_0\tperson\nbfQSyBsTmE4_0\tumbrella\nbgSSzKax51E_1\tmotorcycle\nbgSSzKax51E_0\tmotorcycle\nbhoUxK8FSqc_0\tperson\nbhuPA9toCGY_0\tperson\nbiIFNnX2Nl4_0\tskateboard\nbiu2ssO3dRg_0\tbus\nbjRPge2oFgU_0\tknife\nbjV04dzuqhk_1\telephant\nbjdIG6B5zn0_0\tperson\nbjdIG6B5zn0_1\tperson\nblPLp16K1XY_2\tbicycle\nbmJ_QDIRS2U_1\ttrain\nbmJ_QDIRS2U_2\ttrain\nbmJ_QDIRS2U_3\ttrain\nbmLsrJHQQ14_4\tknife\nbnBORorLvmk_0\tperson\nbnBORorLvmk_1\tperson\nbnVGsydNrg8_0\tairplane\nbnVGsydNrg8_1\tairplane\nbnZbj1dD0qs_0\tumbrella\nbn0I2aJB5Ps_0\thorse\nboMU1mjUSDw_0\tskateboard\nbo8M-OTk4J0_0\tperson\nbpw3BCxYYU4_0\thorse\nbqoDChNwIYY_0\tu
mbrella\nbrJqQ_iH2VE_0\tperson\nbrMVhyEZLfo_0\tperson\nbs5AY2jipno_0\ttrain\nbtL-vruELoA_0\tperson\nbtq7gMuqMuo_1\tperson\nbtq7gMuqMuo_0\tperson\nbvEJDHpRNoI_0\telephant\nbvVfFv57gN4_0\tbus\nbvVfFv57gN4_4\tbus\nbwhPTEvGmIo_0\tperson\nbydgNyGwoys_0\tperson\nbziUK-7O0lY_0\tdog\nb0Z6qKhuldo_0\tskateboard\nb0sKQDUFTos_0\tperson\nb1s-jYD36GQ_0\tperson\nb4Wua_98Y9U_0\tperson\nb4d_9Yc0MwY_0\tbicycle\nb4qC2fctnLU_0\thorse\nb4zSrjPtOfs_0\tbicycle\nb5CJtpeG1Lc_0\ttrain\nb5CJtpeG1Lc_2\ttrain\nb5CJtpeG1Lc_1\ttrain\nb5mOcLykYeQ_0\tcow\nb9VOmo_86Ds_1\tperson\nb_W4BWH1i_A_1\tperson\nb_W4BWH1i_A_0\tperson\ncBxo9bPINJc_0\tskateboard\ncCEImigNo38_1\ttrain\ncDHZtfsI_gM_0\ttrain\ncDHZtfsI_gM_1\ttrain\ncDmkhESohro_0\tboat\ncEcTernKOqU_0\tperson\ncEcTernKOqU_1\tperson\ncGJLuwZIG5s_0\tgiraffe\ncGJLuwZIG5s_1\tgiraffe\ncGJLuwZIG5s_2\tgiraffe\ncGwjfCPO-7k_0\tcar\ncH0sXpOxvy0_2\tbird\ncH9u1pCWp2U_0\tperson\ncH_SL9CR8y4_3\tdog\ncIxdxFkZ7y8_0\tdog\ncIxdxFkZ7y8_1\tdog\ncJvh4GqZn-s_0\tperson\ncKQQVTnOzBk_0\thorse\ncLULEYFoBPc_2\tcow\ncMdjRuUhBIs_0\tmotorcycle\ncMdjRuUhBIs_1\tmotorcycle\ncMwa9cC304w_0\tcow\ncMwa9cC304w_1\tcow\ncNDYJRBsIOY_0\tdog\ncPlqWSd2TUc_0\tperson\ncP-p4R-JZxY_1\tbird\ncRBw9lx-EKA_1\tbus\ncR2-4m174EM_0\tbird\ncR-AWpc5zTs_0\tperson\ncTujx-TutbA_1\thorse\ncUrajeQPzpQ_0\tumbrella\ncUrf-ZwPzxI_0\tperson\ncUwPVOboe0k_0\tperson\ncVng1vleWNY_0\tperson\ncVrxfV0w29w_0\tperson\ncXZ7JY7YQmE_3\tbird\ncYdqN1oPRdY_0\tperson\ncagT3K3Ep3s_0\tskateboard\ncagT3K3Ep3s_1\tskateboard\nca8rEbHYMXg_0\tcow\nca-ko46j2fQ_6\tairplane\ncbL66gVAa5Y_0\tcow\ncctYyTO8OtU_0\tperson\ncc3mBIHi-GU_0\telephant\ncdNz1OLa1tU_0\tcar\ncf_U0G5W8BI_0\tperson\ncggX7PRYUh0_0\tperson\ncg_5uaJjLHk_0\tperson\nch_23jXJ_vA_2\tdog\nciCfkv5831Y_0\tairplane\ncih9W0SPGYA_0\tbird\nciwNB-l9a88_0\tperson\ncjHlHkhg0z0_0\tperson\nckFwzL1Ot94_0\ttruck\nckV9ay1lm7A_0\tairplane\nclZo-o5v1EA_0\telephant\nclvCQPta7y0_2\tbird\nclvCQPta7y0_0\tbird\nclvCQPta7y0_1\tbird\ncmTPsZ9x3PE_0\tcat\ncmW0Y4KGI7g_0\tgiraffe\ncnhhgh_z5NU_0\tcow\ncnqT4u0k3sM_0\tumbrella\ncpK8K6JD_GM_0\tairplane\ncpK8K6JD_GM_2\tairplane\ncprvb4cW5x4_0\tmotorcycle\ncqd8PRxMakA_0\ttruck\ncqvjKRFEi8M_1\tcar\ncrys7VEeUgU_0\tperson\ncskBHjsDXEs_0\tcow\ncso6B_84BFA_0\thorse\nctm9x2MaZuk_0\tcat\ncxu1qpzXobY_1\tbird\ncxu1qpzXobY_12\tbird\ncxu1qpzXobY_0\tbird\ncxu1qpzXobY_2\tbird\ncxu1qpzXobY_4\tbird\ncxu1qpzXobY_5\tbird\ncxu1qpzXobY_6\tbird\ncxu1qpzXobY_7\tbird\ncxu1qpzXobY_8\tbird\ncxu1qpzXobY_9\tbird\ncxu1qpzXobY_10\tbird\ncxu1qpzXobY_11\tbird\nczO8IPcAO1A_0\tperson\nc1FBptbYp3I_0\tperson\nc1FBptbYp3I_1\thorse\nc2T3VDriTaY_0\tknife\nc39xfJcSlxk_0\tdog\nc4kbPHdCIE8_1\telephant\nc43mnrjx2MU_0\tbus\nc5fPKbV5cAM_0\tperson\nc53j9l_w3Cg_3\tdog\nc7gnf6G7Jpw_0\tskateboard\nc7oqQy2Fvlw_0\ttruck\nc8JhzKh1i7s_0\tperson\nc8JhzKh1i7s_1\tperson\nc8gBv0b5g9w_1\telephant\nc8iU4McayiU_0\tperson\nc8iU4McayiU_1\thorse\nc8u5Y95o7jE_0\tskateboard\nc84BjBiic4s_0\tmotorcycle\nc93WuBjZeRk_0\tperson\nc-nMPinePds_0\tcat\nc_aupqZy-14_0\tairplane\nc_o91IPAB-c_0\tumbrella\ndAHCPltzogA_0\tbird\ndAP6fuArseQ_5\telephant\ndAtQR4dHPgE_0\tperson\ndA0WQ_RubaI_0\ttruck\ndBzXNQJRzls_0\tcat\ndCJFMDQBPb4_0\tboat\ndEIuy8LjAxc_0\tcar\ndElaQ10vYqg_1\tmotorcycle\ndHMFcv4UnmU_1\tbus\ndIP3FoGUXDQ_0\tperson\ndJYqTnxujb0_0\tperson\ndJnLznNE29w_0\ttrain\ndJnLznNE29w_1\ttrain\ndJ9qJezt6do_0\tcar\ndJ9qJezt6do_1\tcar\ndKmrUcJ9rJY_0\tperson\ndKmrUcJ9rJY_1\tperson\ndK3_HiQMH4o_0\tdog\ndMFsGGvkSVU_7\tairplane\ndMFsGGvkSVU_0\tairplane\ndMFsGGvkSVU_3\tairplane\ndMFsGGvkSVU_5\tairplane\ndMFsGGvkSVU_6\tairplane\ndNByeKh4gnA_0\tperson\ndNJ0q9QKzmY_0\tboat\ndNQYo7
REyBU_0\tperson\ndOkb5WhLZGU_0\tperson\ndO0uu_fVUVI_0\tcar\ndO0uu_fVUVI_1\tcar\ndO4Jxsf987s_0\tbus\ndO-OrWse3dA_0\tcar\ndPCSntP-29E_0\tperson\ndPCSntP-29E_1\tperson\ndP7je2qU_QA_0\tdog\ndQIlnQxMIKo_0\ttrain\ndQIlnQxMIKo_4\ttrain\ndQIlnQxMIKo_5\ttrain\ndSAlTJeDlfQ_0\tperson\ndTvJyUKKshw_1\tperson\ndTzaYePj1gY_1\tcow\ndT5gXQAE-Qk_0\ttrain\ndT5gXQAE-Qk_2\ttrain\ndT5gXQAE-Qk_3\ttrain\ndUpoYuxpKPM_0\tperson\ndVTCCi__Z4Y_1\tperson\ndVte44AGoEE_0\tknife\ndW4RjdpTaJo_0\tperson\ndXYYgzjwm8w_0\tperson\ndXf-d5rkqdA_0\thorse\ndZv4xXpV6js_0\tboat\ndaeBFAZFQhU_0\tperson\ndbXKW9_L9sE_0\tbird\ndbwBzQuj1uA_0\tperson\ndc5oaWIkfwg_0\tcat\ndc-iaCwezlU_0\ttrain\ndeO0aj59T8o_0\tperson\ndfU8DcWDX8U_0\thorse\ndfU8DcWDX8U_4\thorse\ndgcW3TkPLmk_0\tboat\ndilCe3bivVk_0\tbus\ndi59PG3l25w_0\tbicycle\ndi59PG3l25w_1\tbicycle\ndjsh1r_W6ko_0\tperson\ndjt1lzJn7ak_2\tbird\ndlYwqfTRqoo_0\tperson\ndl-bg8WPGZs_0\tperson\ndmk3Cedj6g0_0\tperson\ndn006hdarCg_5\telephant\ndn006hdarCg_4\telephant\ndn006hdarCg_6\telephant\ndn006hdarCg_7\telephant\ndn006hdarCg_10\telephant\ndn7iBi1t7UI_0\tcow\ndn83BrM71W4_1\tboat\ndoOsOyiHItw_0\tperson\ndpqVH2tgA3E_0\tperson\ndqlk6F07Cxw_0\tmotorcycle\ndrohCN_vwC8_0\tmotorcycle\nds7JGeImFXo_0\thorse\ndtsLwaO2des_0\ttrain\ndt5TzAZByk0_0\tperson\nduROYI-AZlk_0\tperson\nduROYI-AZlk_1\tperson\ndutryxrzRjE_0\tumbrella\ndvDxOc2VWhc_0\tperson\ndvP5Dsp8EZA_2\tdog\ndvTIkEA7rOc_0\tperson\ndvvoKcQ5OOQ_3\tbear\ndvx9-0cVEYc_0\tperson\ndwQuyR9XFVM_0\tskateboard\ndxcnKYynkEY_1\tcow\ndxmxpyj3WVk_0\tknife\ndxmxpyj3WVk_3\tknife\ndyUVa3ZQVFg_0\thorse\ndzitRPrX410_0\tcow\ndzpcdtcQLfY_0\tmotorcycle\nDEnqBEwPykc_0\tperson\nDFCqlvY5OFY_1\tbus\nDFXptvzN9V8_3\tumbrella\nDFqSvoSh-qA_0\tcat\nDHEtea1hPBc_0\tperson\nDHwUCu0rrvc_0\tboat\nDJ_neeMWAuw_2\tdog\nDLsYDXqthiY_0\tskateboard\nDMBbH5HyOME_0\tperson\nDMn3ruRAObI_0\tperson\nDMyjVWCLbes_0\tperson\nDM6e1vEjYeM_0\tbicycle\nDM6e1vEjYeM_6\tbicycle\nDND0C3XD7mQ_0\thorse\nDOQilAKERwk_0\tumbrella\nDOmE1dA6CoQ_0\tperson\nDQJ4cPhVhFg_0\tairplane\nDT895n1nqqY_5\tbicycle\nDT895n1nqqY_4\tbicycle\nDUO7S4ma320_1\tcow\nDUO7S4ma320_0\tcow\nDU9GDCN25lI_0\tperson\nDV4bDUzPAIU_0\ttrain\nDWxidp6TWlg_0\tairplane\nDXhV8uXKo7w_0\tcow\nDXxF81ZJ_Jo_0\tcow\nDX1_rKFVugE_0\tdog\nDYBLqnRCo7g_0\tcat\nDZ2-5rYAUVk_0\ttrain\nDasqUqgdRv0_0\tdog\nDbNVb8C-Au8_0\tperson\nDbcdvAsVI48_0\tperson\nDcZSisTgSJs_0\tairplane\nDc9pWTcUNXY_5\tbear\nDeVQ3mr19Sw_2\tskateboard\nDeYmal3wAoE_2\tdog\nDeYmal3wAoE_0\tdog\nDfOuxNA9lro_1\tgiraffe\nDfXOTMc9IyM_1\tdog\nDfbPDcLTZEo_0\tairplane\nDf89T9IxDvc_0\tperson\nDf93ocrYlyY_0\tperson\nDgBuwqAbIkI_0\tskateboard\nDgBuwqAbIkI_1\tskateboard\nDhA0S7lPFVw_9\telephant\nDhA0S7lPFVw_0\telephant\nDhA0S7lPFVw_1\telephant\nDhA0S7lPFVw_2\telephant\nDhA0S7lPFVw_4\telephant\nDhA0S7lPFVw_5\telephant\nDhA0S7lPFVw_6\telephant\nDhA0S7lPFVw_7\telephant\nDhA0S7lPFVw_8\telephant\nDhEO4MuDBOc_0\tdog\nDhJAQCycHJs_0\telephant\nDhU-e-L13WM_0\tperson\nDhU-e-L13WM_1\tperson\nDhU-e-L13WM_2\tperson\nDiLGyNCykDE_0\tskateboard\nDjQx_qEnXko_0\tairplane\nDkMltyvC5l4_0\tperson\nDmPTbBo32qI_0\tbear\nDmzlB4KBLN4_0\tbird\nDm-XQKFA-BQ_0\ttruck\nDni4lPw5oH0_0\tperson\nDnzZd_9JlAA_0\tcat\nDoB18AvtSxQ_0\ttrain\nDofzMEokur0_0\tperson\nDonLBf92rMc_0\tdog\nDpp4k_BzZY8_1\tairplane\nDqcEAexhJ10_0\tcar\nDr6LfvQ_qKo_0\tcar\nDs_4eRyQDPo_2\tboat\nDuLk58XzeyA_0\ttrain\nDuv1XrdytdE_0\tcow\nDu4jlCLKZds_0\tperson\nDvjMMfcCq3U_0\tperson\nDvuTkGshMjA_2\tcow\nDvx0WVMuXVw_3\tboat\nDw4--8weqIA_0\tperson\nDx0LbiFgvPI_0\ttruck\nDyY1MPuGf5w_3\tdog\nDzUJVl_Pej0_0\tperson\nDzV-LWU5GoY_0\tperson\nD0b7xYmwl-M_0\tskateboard\nD0fhKhpAhJM_0\tzebra\nD
0jRA5TKT-o_0\tperson\nD1vTDW7YDTk_0\tperson\nD2hRnCm0JtM_0\tperson\nD2oV8BC0iq8_0\tperson\nD21mLV716vI_0\tperson\nD32GncZb51Y_3\ttruck\nD4Jcg1u1Z-o_0\tperson\nD5maMxzZBe0_0\tperson\nD5m40zCfU8E_0\tperson\nD6E0xgBBquU_0\tperson\nD68oMT6tpc4_0\tperson\nD7H1UQbgDOw_0\tcow\nD9RGgV3fKds_0\tbird\nD_a5TQmLY-Y_1\tperson\nEBJ5jExrVqY_0\tcow\nEBLJ9v0QSrU_0\tcar\nEBUmagxsoV8_0\tperson\nEC8ftAGy2qA_2\tskateboard\nEDBDHaRqToc_0\tdog\nEEZKnzcn-v0_0\tcat\nEEfiTwozdM0_0\tcow\nEExHYyuWa-o_6\tbird\nEExHYyuWa-o_2\tbird\nEExHYyuWa-o_5\tbird\nEFRywDKULxc_1\ttrain\nEIl3WAxkNwc_0\ttrain\nEJJXpIiBEuw_0\tcow\nEJrj49l1N8k_0\tairplane\nELPjTNVxWfM_0\tperson\nEL-2TiSSQJg_0\tbear\nENPh0zyq2wo_0\tmotorcycle\nEOAADsR4IpM_0\tcow\nEP3xfG5_2i8_0\tcow\nEQN5hODdb6o_0\tskateboard\nEQ09ewMQn8Q_2\tbird\nEQ09ewMQn8Q_0\tbird\nEQ09ewMQn8Q_1\tbird\nEQ9vXT_IFYQ_7\tbird\nEQ9vXT_IFYQ_3\tbird\nESxRPsxVX-U_0\tcar\nETxRky6I39w_0\tperson\nEVD8F2ZOBbI_0\telephant\nEVYb5simSY0_0\tumbrella\nEWOehvvAvqU_0\tperson\nEXK2mcPIoBI_3\tskateboard\nEXK2mcPIoBI_0\tskateboard\nEXK2mcPIoBI_1\tskateboard\nEXK2mcPIoBI_2\tskateboard\nEXeKX_vOTvc_1\tcar\nEd-cfsA3BsU_0\thorse\nEeQOKiPASgY_0\tperson\nEfAYg1FMY-4_0\tbear\nEfAYg1FMY-4_5\tbear\nEfAYg1FMY-4_4\tbear\nEfSd4ucOXKs_0\ttruck\nEfbKwoMA6Kk_3\thorse\nEgpujPNldhs_0\ttrain\nEhQXwVQsngU_0\tboat\nEj0A86Eu1p8_0\tperson\nElHgkP_L8Eg_0\tairplane\nElTbW5itOAs_0\tcar\nElTbW5itOAs_3\tcar\nElTbW5itOAs_4\tcar\nElTbW5itOAs_7\tcar\nEmvEUer4CVc_0\tumbrella\nEnIkH0jrzaI_0\tskateboard\nEn6a3Ed7fvk_0\tperson\nEo5s8ykuzbU_0\tperson\nEpBZ77zmngM_0\thorse\nEpPw2JoHiTQ_0\tperson\nEqPK8xdf8hQ_0\tperson\nEqdBE21XAks_2\tumbrella\nEqdBE21XAks_3\tumbrella\nEqdBE21XAks_4\tumbrella\nEqz3xG4mWTs_0\tperson\nErN8-oTPkq0_1\tperson\nEr-RnWQrUac_0\tcat\nEsvPqOf-zEA_0\tperson\nEtIj5IUtn-g_0\tairplane\nEtIj5IUtn-g_1\tairplane\nEtIj5IUtn-g_2\tairplane\nEtMlgBveP58_0\tdog\nEtMlgBveP58_1\tdog\nEtkDITl8mEM_0\tperson\nEwlCKB77dYo_4\telephant\nEwlCKB77dYo_2\telephant\nEwlCKB77dYo_3\telephant\nEwqkMKutzBE_1\tknife\nEw-67eGgZAI_1\tmotorcycle\nExRpjMcFoBY_0\tdog\nEzRrohN-4ss_0\tskateboard\nEzZW0lM284U_0\tskateboard\nE2DbbyoqLg0_0\tperson\nE2DxfZPPu5Y_0\thorse\nE2DxfZPPu5Y_1\thorse\nE2DxfZPPu5Y_2\thorse\nE5erp1mhTzk_2\tbear\nE7CsRpWElOo_0\thorse\nE76rAl8oksk_0\tdog\nE9ARkaJcz2M_0\tperson\nE9J03vUxTZQ_0\ttruck\nE9w2-Y4d3MM_2\ttruck\nE9w2-Y4d3MM_0\ttruck\nE-ea5keAG3Y_0\tperson\nE-jpkZw_MdU_0\tmotorcycle\nE_cxlc0vrMg_0\thorse\nFBA18EyY2eI_2\tboat\nFBQpWJPC5pQ_0\tperson\nFBQpWJPC5pQ_1\tperson\nFBo954IqOlo_1\tbicycle\nFBo954IqOlo_5\tbicycle\nFBo954IqOlo_0\tbicycle\nFBo954IqOlo_2\tbicycle\nFBo954IqOlo_3\tbicycle\nFCICeCD4dKc_0\tperson\nFCypWBdHWb8_0\telephant\nFDKvBZH5LZE_0\thorse\nFD89Oq7BclA_0\tskateboard\nFETKMmV7P70_0\tmotorcycle\nFETKMmV7P70_1\tmotorcycle\nFEbVjS5-4ps_0\tperson\nFEsMY2y49d0_0\tperson\nFFuW_UWBVpU_0\ttrain\nFHRrYqTZExQ_0\tperson\nFID77dKUAU8_0\tcat\nFITKtv4tf7w_0\tcow\nFIi2mEV5dfQ_0\tskateboard\nFIi2mEV5dfQ_1\tskateboard\nFIvujc5oqIY_0\ttrain\nFJDKoEDLbNc_0\tairplane\nFLsLXPchOx0_0\tknife\nFMV_-mdKV8U_0\thorse\nFNNrfAuIQmo_1\thorse\nFNpd4DJ9LBA_0\thorse\nFPrcQJh9INg_0\tperson\nFQMXzPIoL14_2\tbird\nFQ-_p0lM-FM_1\telephant\nFRxSISi7wV4_0\tbicycle\nFSFW4QxV8-0_1\ttruck\nFUlVrltDAOk_0\tbird\nFWNxjmydNdU_0\tperson\nFYVNE1zYmyA_0\tperson\nFZrXRU5CxC8_0\tboat\nFaG9RreeG6M_6\tbicycle\nFaG9RreeG6M_2\tbicycle\nFbF-nKQx0WI_0\tperson\nFcP50mFdaYM_0\ttrain\nFdPApnQkBVQ_0\tbird\nFdPApnQkBVQ_1\tbird\nFdlDAmvsrR0_0\thorse\nFd1uYmMhzPE_0\thorse\nFedOlGadIYU_0\tbird\nFgd7fHxPhBs_0\ttruck\nFhQLl40AANQ_0\tbicycle\nFhvdS8wJkrI_5\tbicycle\nFhvdS8wJkrI_1\tbic
ycle\nFhvdS8wJkrI_2\tbicycle\nFhvdS8wJkrI_3\tbicycle\nFiCIZpT08B0_0\tcow\nFiD6UZuDr1M_0\tperson\nFjFwrTEJK1U_0\tperson\nFjmcQfLBpvQ_0\tperson\nFkSfwpb1Gss_0\tperson\nFkhru_XyPSU_4\tbicycle\nFkhru_XyPSU_1\tbicycle\nFlOaA91Qa2M_0\tcow\nFm7Z44jVp_A_1\tperson\nFm7Z44jVp_A_0\tperson\nFnIpAhpGTps_0\tperson\nFn0IWwSVPlk_0\tperson\nFotm2Ewrdr8_0\tdog\nFphk_JpP4JY_2\tbus\nFp2WKSG1qGw_0\tperson\nFrFv1rYtAws_0\ttrain\nFr298zXE9O8_0\tumbrella\nFshCFVUSBXY_0\tperson\nFsiLiUl9I10_1\tdog\nFs0LVU4qKSs_0\tskateboard\nFtEi5TPqRiA_0\tdog\nFuWY9thbtxw_0\tairplane\nFu9EsTmh8z0_0\tperson\nFvCCkxW3sv8_0\tperson\nFvDNYPmcXjQ_0\tbear\nFvDNYPmcXjQ_5\tbear\nFvDNYPmcXjQ_1\tbear\nFvDNYPmcXjQ_3\tbear\nFvHW0PyfZ_Q_1\tskateboard\nFvHW0PyfZ_Q_4\tskateboard\nFvHW0PyfZ_Q_5\tskateboard\nFv542o8y6aE_0\tperson\nFyEliJtlQIY_0\tperson\nF0PPPvVTNnE_3\tbear\nF3iJ9TqS-lE_1\tbear\nF3iJ9TqS-lE_0\tbear\nF39H1yTLerI_1\ttrain\nF4xCJHUMGsE_1\telephant\nF47hXNWC3K8_0\tcat\nF48wdm2YukQ_0\tbicycle\nF48wdm2YukQ_5\tbicycle\nF5Cc5wQJvhI_0\tperson\nF5Tm5BM0oaM_0\ttrain\nF5unbOiULNM_0\tmotorcycle\nF5unbOiULNM_1\tmotorcycle\nF9B5cLZb3T4_4\tbicycle\nF-OWsiGzRg0_0\tperson\nF_bZObIr47Y_0\tbicycle\nF_bZObIr47Y_1\tbicycle\nF_dg4Hi5ZU0_0\tcar\nF_xLwEhMPdY_0\tperson\nF_8rnxkAIgQ_0\tperson\nF_88eTR1pKU_0\ttrain\nGAMoEnodBZ8_1\tbicycle\nGAZx8145Hkk_1\tperson\nGAZx8145Hkk_0\tperson\nGCW28zxN9vk_0\tperson\nGDM2ctXPkmg_0\tperson\nGD5lsE86vOA_0\tcar\nGE2nS7Zbkrc_0\tairplane\nGE6JO6nrE2A_0\tperson\nGF9unI6hEMI_0\tairplane\nGGULYyv3_eY_0\telephant\nGGULYyv3_eY_1\telephant\nGGVYYc0KNWc_0\ttruck\nGHTZcjImEqk_0\tperson\nGIJMEjX04dI_0\tperson\nGIM6FHDMp0A_0\tperson\nGJTjlO1FJpo_3\tbear\nGJTjlO1FJpo_5\tbear\nGKyxtLTjXUU_1\tmotorcycle\nGLG6II1JYko_0\tbird\nGLpNrOwqNXc_0\tperson\nGLvmwdOjsHE_0\tcow\nGOEqT5_bhls_1\telephant\nGOVFUFYsINQ_2\telephant\nGOfP3fxCTvw_0\tperson\nGPPKPFCI-Kc_0\tperson\nGPSXltbv0f4_0\tmotorcycle\nGP5anr-xMfw_0\tperson\nGRluMAZzu8c_0\tairplane\nGSlWcX28sLk_0\tperson\nGUMAgiab8bg_0\tperson\nGUQmoD1aWhw_0\ttruck\nGUS7BLoHHPk_0\tairplane\nGVNmuLeQ6pA_1\tairplane\nGVNmuLeQ6pA_2\tairplane\nGWBEjzdOLjI_0\tgiraffe\nGWBEjzdOLjI_1\tgiraffe\nGWBEjzdOLjI_4\tgiraffe\nGXMBH6OujvQ_0\tperson\nGYM460lVV-k_0\thorse\nGYQO-VevHpI_0\tperson\nGYYxgR_VGFQ_0\tdog\nGZSlxtl9bj4_0\thorse\nGZSnngz0VX4_4\tdog\nGZhWdIsibfs_2\tbear\nGaierMnR4Xk_1\telephant\nGbe74-OWIo4_0\tperson\nGbwJhzDrFtI_0\tairplane\nGceLsS4AwH8_1\thorse\nGcjSF4Uyl74_0\tperson\nGdoD65Qn6kE_0\tcat\nGeOos0BFCSY_0\tbus\nGf_4plKc8tw_7\thorse\nGk8oy0G3dRU_0\tperson\nGlAH7-Rf8gc_1\ttruck\nGm9yMiay9Is_2\tskateboard\nGm9yMiay9Is_3\tskateboard\nGnTFmN4UNrI_0\tmotorcycle\nGn6ltyIKgcs_0\tperson\nGoXxeDaopwo_1\tperson\nGokzf7T4oVU_0\tcat\nGpE5cmO_2kQ_0\tskateboard\nGpE5cmO_2kQ_1\tskateboard\nGq7NQWGviWU_0\ttrain\nGsLJXtf6RC0_0\tperson\nGuMiw_OwxlM_0\tknife\nGubE6GTKTVc_0\tperson\nGubjV1tFrVA_1\tumbrella\nGvRQ4QZHPGc_8\tbicycle\nGvjv4DJftts_1\tcat\nGv5P6ORl-1M_0\tperson\nGwAGS0xPZDQ_0\tperson\nGwY5WqLjTcM_1\tcow\nGwY5WqLjTcM_0\tcow\nG0C4XEsjKGU_1\tbird\nG0i_9qeBwm8_0\tairplane\nG0sAxRZi6m4_0\tcar\nG1doEZFbv70_0\tairplane\nG1gPj-UK_gw_0\tcow\nG107tKapVcQ_0\tgiraffe\nG16fmAfdp9A_1\tzebra\nG16fmAfdp9A_2\tzebra\nG2gyuboBt-E_0\telephant\nG2gyuboBt-E_1\telephant\nG3jqix8WiYE_0\tperson\nG5jg_wMMXmU_0\tperson\nG6iN1OKj_eE_0\telephant\nd0G8DzwenzU_0\tperson\nd2ugQO5Z8M8_0\tairplane\nd3_3kfZ7rkc_0\tboat\nd3_3kfZ7rkc_2\tboat\nd4cTjVsUbIA_0\tperson\nd44bp_UDYOQ_0\tcow\nd6vOtyrW2eQ_0\tmotorcycle\nd6vOtyrW2eQ_1\tmotorcycle\nd6vTXY--7zw_6\ttruck\nd6xRfIz84Og_1\tcat\nd8GWgCsv0fo_0\tperson\nd8kSiPkTvek_1\tbus\nd9IW6kCjfmA_0\tknife\nd9
IW6kCjfmA_1\tknife\nd9YRdtwcTOo_0\tmotorcycle\nd-CkujEJl24_0\tzebra\nd-6-T4gkBTk_1\tcow\nd_eu3LZxECY_0\tmotorcycle\nd_eu3LZxECY_1\tmotorcycle\neBIZSQg7pV8_0\tairplane\neBSijengaq4_0\tperson\neBVE2h6i3Do_0\tperson\neByIZzEh-DA_1\tdog\neByIZzEh-DA_2\tdog\neCzDpCe6xvc_0\thorse\neDUR6UTxYhk_0\tperson\neFXZRDC38No_0\tbird\neGVUtZXFcmY_1\tcat\neJn0yGDjytc_0\tcat\neKcJ2alScW8_0\tcow\neL4uMBEG4gE_0\tbus\neMsvM8G2Z0s_0\ttruck\neM0KTbh6EZE_0\tperson\neN0JRkzxVPw_0\telephant\neOeuY4ZbTt8_0\tbird\nePiG-qPeJ6c_1\telephant\nePiG-qPeJ6c_3\telephant\neQEBmp37ZMQ_0\tperson\neQ6zyKVuU2s_0\tperson\neROdacH1GEk_1\thorse\neRsf1_omRf4_2\telephant\neRsf1_omRf4_5\telephant\neRsf1_omRf4_6\telephant\neRsf1_omRf4_9\telephant\neRsf1_omRf4_12\telephant\neRsf1_omRf4_13\telephant\neRsf1_omRf4_14\telephant\neRsf1_omRf4_15\telephant\neTfXd1DQ6mc_0\tdog\neU_B2dXyBkI_0\telephant\neVAEQdogSqk_1\tperson\neVLFX7RZOJM_0\tperson\neVnnuxmvpM8_0\tperson\neVnnuxmvpM8_1\tperson\neVnnuxmvpM8_2\tperson\neWU6Kk9K6lI_0\tairplane\neWZHute7e6Q_0\tperson\neXAJwsjltWs_1\tairplane\neXAJwsjltWs_7\tairplane\neXvofXrEuU8_0\tperson\neZFqrD8MAKk_0\thorse\neZFqrD8MAKk_1\thorse\neZc2BPYt4rU_0\tperson\neZ9Qy0zfLb8_1\tdog\neaoH4_TdTt8_0\tperson\nea2xP5nm53M_2\tknife\nea_yr_40TRY_0\tairplane\nebc-oEY_eDM_0\tcow\necksf6PLvhw_1\tdog\nedx1TW6jRFg_0\tperson\nee6Zcz8Pyfk_1\tcow\nee6Zcz8Pyfk_2\tcow\nefczZtAK28w_1\tdog\negbQbEuLDlE_0\tcat\negfoTu4gtZo_0\tbicycle\negg1WCEyuTw_0\tperson\negmCEe7OgiE_0\tperson\nehxHGWKtaAg_0\tperson\neh9YpbAcMZE_0\tperson\nejRwmx3kUI8_0\tperson\nej0xIcEXWiU_0\thorse\nekfKlK5w3Lg_0\tperson\nekwoV0dpRwI_0\tperson\nekwoV0dpRwI_1\tperson\nek7bnCHGZq0_0\tskateboard\nelB6RfDJA6M_1\tdog\neljiGrMEYiQ_0\tperson\neljiGrMEYiQ_1\tperson\nemISA6YzHZ4_0\tbus\nemISA6YzHZ4_2\tbus\neoIk6xjgQ-4_3\tbicycle\neomNxgG_ivE_1\tumbrella\neomNxgG_ivE_2\tumbrella\neomNxgG_ivE_3\tumbrella\ner7oQRfciJ8_1\tperson\neuESct6MMNg_0\tperson\neuU-dtl6yyA_0\tperson\nevyGgkwoEpU_1\thorse\nex_t3nR28rg_0\tbird\nex_t3nR28rg_1\tbird\nex_t3nR28rg_2\tbird\nezrZuVfbOPs_0\tperson\nezyFfdIkCCQ_0\tcow\nez5RcUDpMoI_0\tbear\nez5RcUDpMoI_4\tbear\ne0cc8KmRgDE_0\tperson\ne0cc8KmRgDE_1\tperson\ne1VJlGQGYTA_0\tumbrella\ne37RxtyP9nk_2\tperson\ne37RxtyP9nk_1\tperson\ne5Q4wIVJR40_0\tperson\ne5a3Z_wlpUU_0\tperson\ne6FwS_DOE-U_1\thorse\ne6FwS_DOE-U_0\thorse\ne6xVrcpMa9Y_0\tcat\ne8Bc9zwTFnE_0\tperson\ne9G1bOd8GlA_0\tcar\ne9QeTOo4XBE_0\tperson\nfBYtizIh0wc_0\tcow\nfCVsRanBID8_0\tperson\nfDWKYttA3fM_1\tumbrella\nfEA-xCaKqfI_0\ttrain\nfEWxV64teMY_0\tdog\nfEpH1AFdSqs_0\tperson\nfFGF5gVW6UU_2\tbicycle\nfFGF5gVW6UU_0\tbicycle\nfFGF5gVW6UU_1\tbicycle\nfFIVNddMFuc_0\tperson\nfFT1LpdsEhQ_1\tcow\nfFmghP5NQVA_1\thorse\nfFw23dFiBDs_0\tperson\nfGJKT5ttUQw_0\tperson\nfHFCYOUh3vU_0\ttruck\nfJJuwfeoaWI_0\tcat\nfJnC2nKYQVQ_0\tmotorcycle\nfMl60_fkMfc_0\tknife\nfMu0OmctSTI_1\tairplane\nfNTptXtpsoo_0\tcow\nfOyaDea7Al4_0\tperson\nfPA_KgXi5v8_0\tbird\nfPA_KgXi5v8_2\tbird\nfP7EpJzJt0A_0\thorse\nfQRAi5pN1Fg_0\tbicycle\nfQRAi5pN1Fg_1\tbicycle\nfRB4jD1Uecw_0\tperson\nfRSu9-lyuaU_0\ttruck\nfRoEX_9tHtM_0\tperson\nfSB_aY8HhJI_0\tperson\nfSFjxB1XU2E_0\tperson\nfTd-8VbsXus_1\tairplane\nfUNAhHKf_OA_0\tcow\nfUva5AKNiPE_0\tperson\nfUva5AKNiPE_1\tperson\nfU8NxbaMKu0_0\tbus\nfWD8TEXWtek_0\tbear\nfYBeigFqN7Q_0\ttrain\nfYBeigFqN7Q_1\ttrain\nfYWFh5BSEyg_1\tcow\nfYup3iPmtHc_0\tperson\nfbAOGfYPur0_0\tperson\nfcFwbcMNdUo_0\tbird\nfcFwbcMNdUo_1\tbird\nfdMa18fwj14_0\tperson\nfdQFJz9IOso_0\tumbrella\nfd73v3-Qjqk_0\tknife\nfeMxoQY38A8_0\tperson\nfeMxoQY38A8_1\tperson\nfeNEI7bD5HI_0\tbus\nfeO8Ip4MOn4_0\tcat\nffQKiGKTDaA_0\tbird\nffr6_
q8liAc_0\tperson\nffr6_q8liAc_1\thorse\nfhVVVY5XhDI_1\tknife\nfhWE0XDoxjM_0\tairplane\nfh9tibERtYI_0\tperson\nfiKs6mdtsmM_0\tcow\nfiVKh-Q-iY0_0\tmotorcycle\nfkGWb9_HVsA_0\telephant\nfk85Ace_-LM_0\tdog\nfmE9seWSDfs_0\tumbrella\nfmosIu7__Wc_1\tperson\nfmrqs2YvNCQ_0\tperson\nfm4syrPib5M_0\tperson\nfnKNDlQq-JY_0\tperson\nfoWPkPNDqyU_0\tbird\nfoWPkPNDqyU_1\tbird\nfojim3ViD7Y_0\tperson\nfpI0N9Lv5V8_0\thorse\nfpv4fALQXpQ_0\tperson\nfqWa-DUPAGw_0\tperson\nG8IUU0gjlEI_3\tboat\nG88QbXTQ6LI_0\tskateboard\nG9Sdd3czaTk_0\tdog\nG-kF2D98oms_1\telephant\nG-2yXvawYec_0\tperson\nG-5iXA4ERtM_0\ttrain\nG__uy4I0Kzw_0\tperson\nHAOmPeNNjNc_0\tbus\nHBUeO1WOFFk_0\tmotorcycle\nHBbWtsju37w_0\tboat\nHBw-J_3WlCY_0\tcat\nHF8ZrMgnyo8_0\tdog\nHJYmTdBHVvU_1\telephant\nHJYmTdBHVvU_2\telephant\nHJ08tJU-IIA_0\tdog\nHKNkm0t39B4_0\tcow\nHKRKZksEGro_0\tperson\nHMfFCe-og9A_1\tbus\nHMt7kgP0MC0_0\tperson\nHM8XKdebDvI_0\tboat\nHNBF7AppAQQ_0\tdog\nHNheLARZ64w_0\tbicycle\nHNheLARZ64w_2\tbicycle\nHN-3LaZVuCs_0\tcar\nHONOO3gmDec_1\tperson\nHP6UlpPulc8_0\tbicycle\nHQ3nHqG24O0_1\tcow\nHRF40e3Tbvw_0\tbicycle\nHRF40e3Tbvw_2\tbicycle\nHRRhkyr7U5E_2\ttrain\nHRcVM9md3Xg_0\tcow\nHTrUPWOXlvI_1\tperson\nHTrUPWOXlvI_0\tperson\nHULLjmpSRUI_0\tcow\nHUssZ9c2Qvs_0\ttruck\nHW8Z7IdfuIg_0\tperson\nHYCFQjnuXBI_0\ttruck\nHY4XBjJWJYg_0\ttruck\nHY9NQ2zNtGc_0\tcat\nHZVvEd_Tg_g_0\tperson\nHZngEEoQWDA_0\tperson\nHaMmo5SdpUo_0\tperson\nHaVnQ_P5HdQ_0\ttrain\nHacYwonTy6w_1\tskateboard\nHbWinZWeK2U_1\tdog\nHbhmAMorGaw_0\tperson\nHeOWa0NNB0g_0\tperson\nHg0fRYqZQ3U_0\tperson\nHi384VDSwXw_1\tbird\nHjo95Vo38qU_0\tperson\nHksncw-BlKU_0\tgiraffe\nHlWb7xQHFKI_0\tdog\nHmH4hitBoc4_0\tperson\nHoSTe-9VUJA_0\tcow\nHpdyNV4GqbM_0\tperson\nHpdyNV4GqbM_1\tperson\nHsGPGwN7vSk_0\tperson\nHugie4Q6leo_0\tbicycle\nHvKC4fLwUYw_1\tperson\nHvKC4fLwUYw_0\tperson\nHvOisoEmjKg_1\tairplane\nHvU4Jz4Gd1k_0\tcow\nHv_d6KPoSgA_0\tskateboard\nHwZUDp7yxxk_0\tperson\nHxPskaUPSXg_0\tcow\nHyHQRrpWhpk_0\tboat\nHylH7-rD0wA_0\tbird\nHzEm2GlGzhc_1\ttruck\nHzTD_opfrqI_0\tcar\nH0QTCKxJmLY_1\ttrain\nH1Oxjm0NqCg_0\tperson\nH2GwgpAKbzY_0\tdog\nH3HrWs1HITE_0\tcow\nH3S_DkPBWtw_0\telephant\nH3S_DkPBWtw_7\telephant\nH3S_DkPBWtw_1\telephant\nH3S_DkPBWtw_2\telephant\nH3S_DkPBWtw_3\telephant\nH3S_DkPBWtw_4\telephant\nH3S_DkPBWtw_5\telephant\nH3S_DkPBWtw_6\telephant\nH3XF5rAtuJA_2\tperson\nH3XF5rAtuJA_0\tperson\nH3a-C6RRYyo_0\tperson\nH5mmSHRHeOA_0\tperson\nH6TuJxifX64_0\ttrain\nH6w4nf5H4U4_0\tbird\nH6y9C6Ndy2A_0\tbird\nH6y9C6Ndy2A_1\tbird\nH7XZ5716KnI_0\tperson\nH7z05uOIPRM_1\ttrain\nH92s5sHsotk_0\tairplane\nH-4EZAh3ZiE_0\tbus\nIA1FFP5WN-4_0\tbear\nIA1FFP5WN-4_2\tbear\nICj693xC5DY_2\tairplane\nICj693xC5DY_0\tairplane\nICj693xC5DY_1\tairplane\nICxHfkE0XCo_0\tperson\nIDx8_34ETTQ_0\tperson\nIEyymbAxp24_0\tdog\nIFS0QSfnbaM_4\tknife\nIFS3ILjlHkY_2\ttruck\nIF_auR-0fxM_0\tknife\nIGv9j-RQi0k_0\tdog\nIG0UmL5bvEo_0\tcat\nIHFF7DOpF4Q_0\tmotorcycle\nIHmYV5ymU08_0\tcow\nIKEUMXjIyTQ_0\tcar\nILZvGBKYYrE_4\tbus\nILZvGBKYYrE_0\tbus\nILZvGBKYYrE_1\tbus\nILZvGBKYYrE_3\tbus\nIMTbwAOJNIc_1\ttrain\nIMh4AHUZ2HQ_0\tperson\nIM4EBlgTTOg_0\tbus\nINlrdk7hgl4_0\tknife\nIOQt3fFTSVc_0\thorse\nIO7-lFsWvl0_0\tbicycle\nIO7-lFsWvl0_2\tbicycle\nIPEJs-vLCV4_0\ttruck\nIPEJs-vLCV4_1\ttruck\nIRpgjSP4pLI_0\tperson\nIUJGm3Iu0Bs_1\tbicycle\nIUgsoj74aWQ_0\tperson\nIVlnjlVA5rc_1\tbicycle\nIXP1ML1tdZQ_0\tbus\nIXRxjnkOJeo_1\tmotorcycle\nIXenlPUsqrc_0\tperson\nIZvOv7tCr00_1\ttrain\nIcRjjKSX5uc_1\tperson\nIcRjjKSX5uc_0\tperson\nIcnle27cmMM_0\tbicycle\nIdVZJW1HC9E_0\tairplane\nIdVkEz2IF7w_0\tcar\nIeb9oZ9eB8I_0\tdog\nIfWSlkR8DbU_0\thorse\nIf1zPOV0idg_0\thorse\nIf1zPOV
0idg_1\thorse\nIh2gG0269H8_0\tbus\nIjQXXK4uYVY_0\tdog\nIlMHPX2VcGw_0\telephant\nIluTkrIqsVg_1\telephant\nIluTkrIqsVg_3\telephant\nIluTkrIqsVg_6\telephant\nIo7bj1jNpPU_0\tcar\nIpjQJZ42zyQ_0\telephant\nIpjQJZ42zyQ_1\telephant\nIpjQJZ42zyQ_2\telephant\nIpjQJZ42zyQ_3\telephant\nIpwI5VTWHLc_0\thorse\nIpwI5VTWHLc_2\thorse\nIqy4PPX-Tlc_0\tperson\nIsHTpd2cnvI_0\ttrain\nIthz7KSWCxU_0\tbus\nIudK7ch_IIg_1\tairplane\nIvRDw_IA0_s_0\tcow\nIwve-3lTmMk_0\tperson\nIyLshk4jlyo_0\tcat\nIygCvE4_amo_2\tbird\nIygCvE4_amo_3\tbird\nIyjFl1Hhk3Q_0\tperson\nIz4XK2zNDUU_0\tperson\nI1wuUCQbXLc_0\tumbrella\nI2DkTg8wPnI_0\tperson\nI2WoCDTXONA_0\tperson\nI2WoCDTXONA_1\tperson\nI2lh579NY2s_0\tbird\nI45pfwCBczo_0\tperson\nI6ESaCg4z_8_0\tperson\nI6TvXxQTtZQ_1\thorse\nI6TvXxQTtZQ_0\thorse\nI6TvXxQTtZQ_2\thorse\nI8OfOokt6YU_0\tperson\nI8XhyDacLtU_1\tbird\nI8m0QjcQlSo_3\tbicycle\nI8m0QjcQlSo_4\tbicycle\nI9ivT_P5G18_0\tperson\nI_k5qXHxb0Y_2\tknife\nI_k5qXHxb0Y_0\tknife\nJBkwLPruJe0_0\tperson\nJBlDwXJFbQc_1\tumbrella\nJDZiLsus2es_1\tskateboard\nJDvfPX9cFDg_0\tdog\nJEpTSJRO3co_0\tperson\nJG2tVzjxhao_0\tbird\nfsAEg5w8xTg_0\tperson\nfsCwAYYI4js_0\tperson\nfsKTO8ksQ90_0\tperson\nftMQOwvHDF8_1\tcar\nftns38_MSTM_0\tcow\nfvxc7ruCiYk_0\tcow\nfvxc7ruCiYk_3\tcow\nfv8aFklHmko_0\tskateboard\nfwEvL-luHlw_0\tairplane\nfwEvL-luHlw_1\tairplane\nfwt8LzF8Mic_0\tperson\nfyZImQFj_Y8_0\tcow\nfycK7kJWV1I_0\tumbrella\nfzr3kw3BDDo_1\tairplane\nfz6ONSUlvNY_0\tperson\nf0i5E4DOFc8_0\tbus\nf2SctRCBZQc_0\tcar\nf3Z5d9I7rIw_0\tknife\nf4fxmsxPzrg_2\telephant\nf5LEkr56Efg_0\tperson\nf5Uz-TuMQ0Y_0\thorse\nf5ZpGBYuJ7o_0\tboat\nf5kAHBPObsw_1\tcow\nf6fZjMRJgoM_0\thorse\nf63aow5BRAI_5\tbus\nf65rTlprptk_0\thorse\nf7yNS6ltUFk_0\tperson\nf8H7Ns8cw-c_1\ttrain\nf8rXEKktSCg_0\telephant\nf_VqZJyJ4GM_0\tmotorcycle\ngAHcWn06srk_0\tperson\ngB0-eGpMj50_0\tperson\ngB2asNpe3zY_0\tperson\ngB7jSQgkcMM_1\thorse\ngCDC8R7IB7k_0\tperson\ngCwe-o1nqBc_0\tmotorcycle\ngCwe-o1nqBc_1\tmotorcycle\ngC9z8IzG83s_2\tbicycle\ngDEk1TWuZug_2\tperson\ngDG5Xr2p2y8_0\telephant\ngDHnBnqogX0_1\tairplane\ngDHnBnqogX0_0\tairplane\ngDbZj1O36VU_0\tairplane\ngDihz5aZLyA_0\tbus\ngDihz5aZLyA_2\tbus\ngEkiX2yFQm0_0\tcat\ngEnLlmMhxfE_0\tperson\ngGNmKI2M8i4_0\tperson\ngGd6hYCKdEs_0\tbird\ngHMCfvdZzMM_1\tperson\ngHYzGPx8f_4_0\tzebra\ngHYzGPx8f_4_1\tzebra\ngIx12Q8A3p8_1\tperson\ngJwtAwSqEow_0\ttrain\ngKAPbj9esXI_0\tskateboard\ngLqb3YuVttM_0\tumbrella\ngMRigFNGMeY_0\tperson\ngNfQargrILo_1\tcar\ngOFgWsujZaI_0\tcat\ngOWc7VBEwMo_0\tcar\ngPEMf91dil8_1\thorse\ngPSB23kv5Uc_0\tperson\ngPhL52Mj1_A_1\tmotorcycle\ngQ1qmNZzaTo_0\tboat\ngRDFlfzM_iI_4\telephant\ngRDFlfzM_iI_6\telephant\ngRDFlfzM_iI_1\telephant\ngRDFlfzM_iI_3\telephant\ngRMJhsEuiAc_0\tmotorcycle\ngRMJhsEuiAc_1\tmotorcycle\ngRMJhsEuiAc_6\tmotorcycle\ngR29_U82QeE_1\thorse\ngSJbrV0vy8M_0\tperson\ngSz16yrF9yA_0\tperson\ngT0yjYUmf90_0\tcow\ngUGlSiBvfOs_1\tmotorcycle\ngU8s5nxyBDk_0\tairplane\ngU8s5nxyBDk_1\tairplane\ngV3CcNeVZcY_0\telephant\ngV3CcNeVZcY_1\telephant\ngWkTSRUqxoo_0\tperson\ngW6HdCsty0U_0\tknife\ngYLohMps12s_0\telephant\ngYLohMps12s_3\telephant\ngYLohMps12s_4\telephant\ngYLohMps12s_1\telephant\ngYLohMps12s_2\telephant\ngaKGYmLxJVU_3\tbicycle\ngagJEV--3Pw_0\tperson\ngdAVi92ZfSc_0\thorse\ngdx96NpU6BY_6\ttrain\ngd4UfPes3YI_0\tcow\ngeEXytMwfq0_0\tperson\ngePAI8wYSdw_0\tperson\ngfTVuceAzNs_0\telephant\ngg8YzsSulrQ_0\ttruck\nghciPMerSc0_0\ttruck\ngiWDg00GIDw_1\tskateboard\ngig9B4ecK3w_0\tperson\ngiy_SOmkBY8_0\tumbrella\ngjnyg97XwnA_0\tperson\ngk-cycr3xjo_0\tperson\ngmVDmxVI7n0_0\telephant\ngpV4Qlx6YrA_6\tbus\ngqLSqmK3m74_0\tmotorcycle\ngqZYY0m_TuM_0\tmotorcycle\n
gsrvWcnpNP4_1\tmotorcycle\ngsrvWcnpNP4_0\tmotorcycle\ngtVr7urU8c8_0\tperson\nguDQk0hVgU0_0\tbird\nguFTeFvjr9Y_0\tbird\ngu3DTnVjNQM_0\tknife\ngwXwH2Cs3BY_0\tknife\ngxHGnBrpPZs_1\tairplane\ngxHGnBrpPZs_2\tairplane\ngxKuLTUNhp4_0\thorse\ngx7PFNpHd_A_0\tperson\ngyaP7qiRxfY_0\tcow\ng1OZWFLSspQ_0\tmotorcycle\ng1rQZNA6yyo_6\tcow\ng1rQZNA6yyo_0\tcow\ng1rQZNA6yyo_1\tcow\ng1rQZNA6yyo_2\tcow\ng1rQZNA6yyo_3\tcow\ng1rQZNA6yyo_4\tcow\ng1rQZNA6yyo_5\tcow\ng3HXJNMlAsM_0\tairplane\ng3oqxu4AhBw_0\tperson\ng3swsx-acTI_1\tdog\ng3swsx-acTI_0\tdog\ng3vbaqnLXn8_0\tcow\ng4bayrAEhIU_0\tumbrella\ng5rUJOptHXQ_0\thorse\ng5ty_7So5Dw_0\tcow\ng51pzrSssl4_0\tperson\ng8M5d--ghFM_0\tperson\ng8vKB3IU1JY_0\thorse\ng8wHQVpij-I_0\tperson\ng9eN0FHn4-E_0\tdog\ng-EAZ6gVcic_0\tmotorcycle\ng-pVcRyPQG8_0\tcow\ng-yHAyCA2KI_1\thorse\ng_C47ek7TmI_1\tknife\ng_C47ek7TmI_4\tknife\ng_C47ek7TmI_5\tknife\ng_QHWoQgmFQ_0\tperson\ng_QHWoQgmFQ_1\tperson\ng_Tk-SESaYI_0\tperson\nhBHt6mnfUeo_0\tbus\nhBMZHx3_cTs_0\ttrain\nhC69bGTvLBo_0\tskateboard\nhD3Bn03GXNQ_1\tdog\nhFNAxcRpGBM_0\tskateboard\nhFSygfNIY_Y_0\tskateboard\nhFex_TS-aUo_0\tperson\nhGnscWmehTI_0\tcar\nhG9efPyerw4_1\thorse\nhHdBCtElIQg_0\tboat\nhHlqyr11RiI_0\tperson\nhIWM6v4zcSM_0\telephant\nhKoGkl1wyCU_0\tperson\nhON0t9Dzay4_0\tmotorcycle\nhP1ViN_WadY_0\tcow\nhR-utsUhYSg_0\tperson\nhSAUbt6-Yjc_0\tknife\nhSAUbt6-Yjc_1\tknife\nhSeHymINF98_1\tbus\nhTaEY4YCVqM_0\tairplane\nhUjzfhyM30Q_0\tairplane\nhUjzfhyM30Q_4\tairplane\nhUxguQsLvcs_4\tknife\nhUxguQsLvcs_5\tknife\nhUyAVmRxAzM_0\tperson\nhU_dAA1A0X0_0\tperson\nhU_9cs_qw1w_0\tperson\nhVjyHhYH6Ss_1\tairplane\nhVjyHhYH6Ss_2\tairplane\nhVowH5-Ss4I_0\ttrain\nhV4tEsm-F5s_0\tairplane\nhZdxBk4cjmg_0\tbus\nhaiW7jpl3wY_0\tperson\nhcJBaxNIvE4_1\tperson\nhcJBaxNIvE4_0\tperson\nhcV4RZPeRbo_0\tairplane\nhcuLD1cn9GA_0\tperson\nhdUc4uUYh0E_0\tboat\nhfWfYFG2O94_0\tperson\nhgagtwzScGQ_0\tperson\nhhFOwnYOLl0_0\tgiraffe\nhhLyE41H8nE_0\tmotorcycle\nhhNlg3Ws9Dc_0\tperson\nhhyVc2wsXVk_0\thorse\nhhyVc2wsXVk_1\thorse\nhh432zDMgPo_0\ttrain\nhiKbm0rqEb4_3\tskateboard\nhiN_kULL84o_5\tumbrella\nhiN_kULL84o_4\tumbrella\nhkEV_E85Jzw_0\tcar\nhkSv_YxmN7w_0\tperson\nhlZDJrpJzPU_0\tperson\nhljwk2WbXGY_0\tperson\nhmSeUlyLLak_0\ttrain\nhnZvUHrA3CY_0\tperson\nho6sg-47RD0_0\tairplane\nhqNhKf3a69Q_2\ttruck\nhqYyvTeOvas_0\tbear\nhqaNlwG0DNU_1\tperson\nhqrmbVw_EwQ_0\tcat\nJIuyqZCU5zY_0\tcow\nJKiG_pk4lSE_0\tperson\nJKmvEldBeEQ_0\tcow\nJKsodtdUW-o_0\tboat\nJMLFZcONQAs_2\tskateboard\nJMLFZcONQAs_5\tskateboard\nJMMci7hryUQ_0\tmotorcycle\nJMMci7hryUQ_1\tmotorcycle\nJMMci7hryUQ_2\tmotorcycle\nJNUhCGqPlFg_0\tbicycle\nJPHPd13gaL8_0\tcar\nJQrDalAaP4w_0\tperson\nJQrDalAaP4w_1\tperson\nJQz6IarIr4E_1\tperson\nJRAVv2LgiGo_0\tskateboard\nJRUvqZtBMrM_1\tknife\nJR0QfXOOmaA_0\tperson\nJSml3dguiUk_0\tmotorcycle\nJTFT_iJGFUE_0\tperson\nJUdUxjC2LRE_0\tbus\nJWU6vdEt_OU_0\tperson\nJWgjcmMh62o_0\ttrain\nJWgjcmMh62o_3\ttrain\nJW0-hEA4v9A_0\tperson\nJXIh3fJ4Jv0_0\tperson\nJX8ODdMUi7g_0\tbird\nJZC15tOV-eg_0\thorse\nJZMOzYwcTA0_0\tperson\nJasH0KtinHY_0\tairplane\nJasH0KtinHY_3\tairplane\nJa5jdE_8qio_0\tperson\nJbyTZ-esDXM_0\ttruck\nJbyTZ-esDXM_1\ttruck\nJb93SMKg5-k_0\tperson\nJcVOyLTTvKA_0\tperson\nJc18AfXzLZU_0\tperson\nJc18AfXzLZU_1\tperson\nJd7uOTcPvY8_1\tcar\nJeWRfjjRMQk_0\tperson\nJerVzlWZwac_0\tbus\nJe-lnjK_8fk_0\tperson\nJfjkltN0lZc_2\thorse\nJfobA6aKaas_0\tdog\nJftQEHHdO5w_0\ttruck\nJgaE8KDwg7k_1\tbird\nJgaE8KDwg7k_2\tbird\nJgc2PQ8Swbo_0\tcow\nJgkj9pj3-tc_1\thorse\nJhdyYrqxn_g_0\tmotorcycle\nJh7o2iR-lRg_0\tperson\nJijsSnHthXE_0\ttrain\nJio_xBodQxY_0\tperson\nJjQ8bdq_eXk_0\tperson\nJjtkwX4npyw_0\tperson
\nJlG7Wzz4uU8_0\tcar\nJlG7Wzz4uU8_2\tcar\nJmkUuTj-Nks_0\tumbrella\nJmtuhGXlqmY_1\tairplane\nJnNJksYeB18_0\tcar\nJoKod4XDE6o_3\tbird\nJoKod4XDE6o_0\tbird\nJoKod4XDE6o_2\tbird\nJp6_g7oF2lQ_0\tcow\nJqEprl56N4I_0\tskateboard\nJrIoaRmcs6o_0\tcow\nJrNq6Z5YSoc_0\tperson\nJrUHo8zVwpo_0\tbus\nJsjz8hiE_iU_0\tperson\nJt7Ojtx0TMs_1\tcar\nJt7Ojtx0TMs_3\tcar\nJwBYrXUHdZ8_1\thorse\nJxTKws5Dx_8_0\tcat\nJxjXZYfiem4_0\tdog\nJx9mLWFxpnc_0\tdog\nJyYBZBogBvs_1\tboat\nJyduNnkZOiY_0\tperson\nJyrP5u2MuSo_0\tmotorcycle\nJzcc0pjgA5c_0\tperson\nJzjRC1xYwy8_0\tdog\nJ02u46SlewE_0\tperson\nJ1GtEDNcsHQ_1\thorse\nJ2JOoOxaJdw_0\tperson\nJ2bB5BgR-5Q_0\tbus\nJ2hdK_vuyyw_0\tmotorcycle\nJ2ycUTr0lJQ_0\tcat\nJ4T_QA6J7kw_0\tboat\nJ4T_QA6J7kw_1\tboat\nJ4T_QA6J7kw_2\tboat\nJ40neYxbEYA_0\tskateboard\nJ5-Z9tNISPw_0\tcar\nJ6klPNMhLKc_0\tcow\nJ7I-QXddTIk_0\tperson\nJ7hnNI0jtws_0\tperson\nJ8ITxacusCI_1\tperson\nJ8ITxacusCI_0\tperson\nJ9-8Qe3BWoI_0\tbicycle\nKARqX_agLpU_0\tknife\nKAgU6SrQTlQ_0\tumbrella\nKAgU6SrQTlQ_1\tumbrella\nKArVkjxSGpM_0\tperson\nKBCIbwknDew_1\tbicycle\nKCeuwWEv3ZU_0\tperson\nKCi4f4Hp6oA_0\tairplane\nKC5ECqMiTLU_0\tskateboard\nKD84e88aqHU_0\tperson\nKD84e88aqHU_1\tperson\nKEpHRYH8r28_0\tgiraffe\nKGdIJzBVugY_0\ttruck\nKHqFOBeHCwU_0\tboat\nKIOilXstQLY_0\tperson\nKIOilXstQLY_1\tperson\nKJ2kEj3C5HU_0\tairplane\nKKWUDcCI6yU_0\tcat\nKML2msVr5mE_2\telephant\nKMNAnjpGqv4_2\ttruck\nKNIVWRv3awA_0\ttruck\nKOmUta2sIgk_0\tperson\nKOsm1GUs46s_0\tmotorcycle\nKOza4PGcE0M_1\tbear\nKPLDdfk8hIg_0\ttrain\nKPLDdfk8hIg_1\ttrain\nKP7RzxyTTAU_1\tairplane\nKRKxqkfpetI_0\tperson\nKRNWPLnvZz4_0\tperson\nKR7Ah1hw5gA_0\tperson\nKS8S3STq2W4_0\tbird\nKS8S3STq2W4_1\tbird\nKTkhMglNlCE_0\tperson\nKTpwnsz498Q_4\thorse\nKTpwnsz498Q_6\thorse\nKWYD2iyUmgk_0\thorse\nKXIJLUzQi5Q_0\tperson\nKXMlBQiVeEg_0\ttrain\nKXPGShfFlU8_0\tperson\nKX9MjIikBU8_3\tbicycle\nKYc-vKtN0DI_0\tperson\nKY4mXNDM8I0_6\telephant\nKZdOpoUJ3Nk_0\tperson\nKcg7gY3WD7M_0\tperson\nKcg7gY3WD7M_1\tperson\nKeJWqAV0EgA_4\tumbrella\nKeJWqAV0EgA_6\tumbrella\nKedkADy9tBc_2\tknife\nKedkADy9tBc_4\tknife\nKgDguip9mZM_1\thorse\nKgDguip9mZM_2\thorse\nKg0XH4mez1A_0\tcow\nKho8jpdZzTs_0\tskateboard\nKjd7D98QULc_0\tairplane\nKkdLE8EkzQ8_0\tcat\nKkw7ZPCEz5w_0\tperson\nKk-2ajLfeh8_0\tcat\nKk_LtYOgQXA_0\tboat\nKmLYFD7xykY_1\tcar\nKmwqg1uRPRE_0\tperson\nKnQuff1ffzM_0\tskateboard\nKoRqIzHBQks_0\ttrain\nKoq5YYiN1tc_0\ttrain\nKpHpGcL_jEc_4\tbird\nKpHpGcL_jEc_3\tbird\nKpfTioA2qKw_4\telephant\nKpfTioA2qKw_5\telephant\nKpfTioA2qKw_0\telephant\nKpfTioA2qKw_1\telephant\nKpfTioA2qKw_2\telephant\nKpfTioA2qKw_3\telephant\nKppX5i4QRZ0_0\tumbrella\nKqsBJAhU_Dc_0\tcat\nKrRVwTPG26w_3\tdog\nKsE43Lli_3U_2\thorse\nKsE43Lli_3U_3\thorse\nKskL-dN784o_0\tairplane\nKtfQRtfJQ8s_2\tskateboard\nKxDh7a8_AmU_0\tperson\nKy4ahEexJUc_0\tairplane\nKzDLvBPcQew_2\tknife\nKzMFSHS4xVs_0\tbird\nKzOxVUsduDY_3\tknife\nKzt2eSUr1rY_0\tdog\nK0IvSLIQbgQ_0\tbird\nK0SktTNMXQU_0\tmotorcycle\nK2WsSTHs45g_1\telephant\nK2WsSTHs45g_3\telephant\nK2oIvJd-d-A_0\tperson\nK4IN8pNA--U_1\tperson\nK5C2Y3JvXCU_0\tskateboard\nK7TOmJ6RB_8_0\tskateboard\nK89ScUqJx5E_0\tperson\nK8_u8_NkoAk_1\ttrain\nK9L-BYQcepo_0\tbear\nK9pgB6KH-EY_0\tcow\nK-laAofNBgs_0\thorse\nK-xigT3f2VA_0\thorse\nK-0pug6xNEI_3\ttrain\nhuFyV9NBOBY_0\tperson\nhua1XfGRDoc_0\thorse\nhulGMGXPaBE_1\telephant\nhvXgMKsetW8_0\telephant\nhxBjbg6s174_0\tperson\nhyNwXcKelY0_1\ttrain\nhyNwXcKelY0_0\ttrain\nhzUpr73wZz0_0\tairplane\nh0jkFTI3qmI_1\thorse\nh1Hv9HnMe70_0\tcar\nh1zuISckIeI_0\tbus\nh10iwpJO4pQ_0\ttrain\nh2vHhQ7_MT4_0\tskateboard\nh3Fo82UBMRY_0\tdog\nh3IHNdoTXT0_0\tperson\nh3PBWibdVUc_0\ttrain\nh3RgUc0
oY-c_1\tknife\nh3RgUc0oY-c_2\tknife\nh3t75PNg778_0\tperson\nh3uSlke3koc_0\tmotorcycle\nh4qpt2FEbC0_1\telephant\nh5JnAInpuSo_0\tmotorcycle\nh5JnAInpuSo_1\tmotorcycle\nh7_4qHh7Vas_1\ttruck\nh8TnGCoSVeQ_0\tairplane\nh8fKxUGKz8k_0\tmotorcycle\nh8fKxUGKz8k_1\tmotorcycle\nh-pm7wD31Ss_3\ttrain\nh-pm7wD31Ss_0\ttrain\nh-pm7wD31Ss_1\ttrain\nh-pm7wD31Ss_2\ttrain\nh_VG9OpleKc_0\tmotorcycle\nh_VG9OpleKc_1\tmotorcycle\niAZV9nCf3RE_0\tmotorcycle\niA7evYzMygE_2\tknife\niDBpYSvahjE_0\tperson\niDHjOnhAKA8_1\tskateboard\niE75sptNwbs_1\ttruck\niE75sptNwbs_2\ttruck\niFVwtlc6IYE_0\thorse\niFdOAHM4xDg_0\tperson\niFwPDZE4778_0\tskateboard\niG4PvtWoxG8_3\tcow\niH6Vlg0k330_3\tdog\niH6Vlg0k330_5\tdog\niH6Vlg0k330_6\tdog\niIWFuFa7Z4M_2\tperson\niIWFuFa7Z4M_0\tperson\niIWFuFa7Z4M_1\tperson\niIzXR3qRt48_0\tperson\niI08dGJAOMs_4\telephant\niI08dGJAOMs_3\telephant\niJcf4PhS_SQ_0\tperson\niKzpo0D7b_8_0\tcat\niK-7fByPADo_0\tperson\niMdJ5Xlz0hU_0\tknife\niMeNXU67sVg_1\tskateboard\niNiiX6P-kqA_1\tdog\niOxVi3Tq4ts_0\ttrain\niPlXCYJ6F7w_0\tskateboard\niQ-tckw9_uk_0\ttruck\niRzm-CyyW-E_0\tperson\niSNNmpWe3LA_0\tperson\niS7wej_vrvM_0\tperson\niVBDQ5wm-0w_4\tairplane\niVTAxc633DE_0\tperson\niXrLhQgf8HM_0\telephant\niXrLhQgf8HM_1\telephant\niX4gVag7ShI_0\tperson\niYL_l0MxgMY_0\tbird\niYlgi1z6nYI_0\ttruck\niavLgJ3_05c_3\thorse\nicVQnqL0xPI_2\tboat\nidkGZQeYvJ0_0\tskateboard\nigbftnGj4-o_0\tbicycle\nigg-y1toBvA_0\ttruck\nihkqhIpO_hw_0\tperson\nijbDg16cIC8_1\tbus\nik4t0sIEmTI_0\tperson\niltKgr5JKI0_0\tperson\nil5UMLzlQts_0\tbus\nimDfH3So8XU_0\tcar\nimDfH3So8XU_1\tcar\nim4bCIqpJns_1\tbicycle\nim4bCIqpJns_2\tbicycle\nim4bCIqpJns_0\tbicycle\nip1Y5qjDYfQ_0\tairplane\nip_oGEZ6zMw_1\tperson\nirvGAW8bqAw_2\tbus\nisbtQ06yVM8_0\ttruck\nitNqceL9dLM_0\tcow\niuii5XHcAYA_1\tdog\niulQVUJanzg_0\tskateboard\nivGBks6evlo_2\tdog\nivSQWqs_u1I_0\tbear\nivpPLs-cqxA_0\tcar\niwHJDgGVuCA_0\tairplane\niwHJDgGVuCA_1\tairplane\niw7zrlRPMo4_2\thorse\nixgGTHdobNI_0\tperson\niyDedQNhiYI_0\tcat\niyaI71EqLsg_0\tperson\nizHN9JUwtJ8_0\tboat\nizQ74nq9zh4_0\tcow\ni0QLe6YR7yo_0\tperson\ni1OlP2Sq0a0_2\ttruck\ni1xqjStfSsc_0\tperson\ni2SgjtgmsE0_0\tperson\ni5DfO7_n0q8_0\tcow\ni5GkqX44npg_0\tcar\ni5JWZKdNOac_0\tmotorcycle\ni5JWZKdNOac_1\tmotorcycle\ni6mzD2HGWOA_0\tairplane\ni6sR2IY4-Ck_0\tcow\ni8JA178zd0s_0\tcow\ni8Z9-KSMCTA_0\tbicycle\ni8syjc7Erco_0\tmotorcycle\ni-EijejS9Oc_0\tperson\ni-eCNLw3hVU_0\tbird\ni_l48nIXjxw_0\thorse\njBYa-gqwSeY_1\tcow\njCiTA9oIryk_0\telephant\njCuDdMn9sYA_0\tperson\njDGrgBt83DU_7\tcar\njD33e45nuRw_0\tbear\njD5K1zGLtvc_0\tskateboard\njEE_ZlDJ4cc_0\tcow\njEzxW8ylxK8_5\tairplane\njEzxW8ylxK8_1\tairplane\njEz3EToUAg8_0\tperson\njGCLsWhdTds_0\tumbrella\njHhJLxyr960_0\tbicycle\njIqTFAgBLpc_0\tdog\njJkZrKOehcQ_0\tperson\njKD0oOyMl2g_0\tperson\njLO5kFd36OY_0\tbird\njMLgjCQWQY0_0\tperson\njMmH8xfY1kw_0\tcow\njMyxNu6YkEQ_4\tboat\njN5jdXmBv2Y_0\tbird\njN5jdXmBv2Y_1\tbird\njN5jdXmBv2Y_2\tbird\njN5jdXmBv2Y_4\tbird\njN5jdXmBv2Y_6\tbird\njPouarzO-e4_0\tcat\njQPz-9OfXRM_0\tzebra\njRQuCIsXz1c_0\tairplane\njRUeQo3V1bk_0\tperson\njR366TYYsuo_0\tperson\njSkwPkAAiFM_0\tperson\njTNzSUl_zOQ_2\telephant\njUzhGHE_jgE_0\tperson\njVYzDs5YRM4_0\tcat\njVoxxEKEOFo_0\tmotorcycle\njX_taNw8FFg_0\tskateboard\njY4Dh-UAAaY_8\tskateboard\njZBMDKFS5D0_0\tperson\njbp8mHJfHGI_0\tperson\njcYNP_FWkA0_0\tperson\njcne18p2r2c_0\tcat\njdttJqwg_3o_0\tmotorcycle\njfSY_UCtq-w_0\tmotorcycle\njfTXT98Naic_0\tcow\njgQiUggCu7A_0\tcow\njjTgUBAd4D0_0\tcow\njjq2PAHcLiA_1\tperson\njjq2PAHcLiA_0\tperson\njlBGbg_CJz0_5\ttrain\njlBGbg_CJz0_6\ttrain\njlOOUqYlNNY_0\tmotorcycle\njlgECDznb0g_0\tbear\njl7oYVm0X
34_0\tbird\njnU2n55I_LU_0\tdog\njouq30Wmqxg_0\tmotorcycle\njouq30Wmqxg_2\tmotorcycle\njo6o9BwKsUQ_1\telephant\njqPPsrUULY8_0\thorse\njtWUSSp-JiY_0\ttruck\njuS7DvjMPoo_0\tperson\nLCzQs5ybibU_0\thorse\nLDwE_VIc9Zc_0\tcow\nLEL3OcoqV8k_1\tknife\nLEPsxGhXYxY_2\ttruck\nLEPsxGhXYxY_3\ttruck\nLEXpJRLTRak_1\tbear\nLFWlRG2B-w0_0\tbus\nLFWlRG2B-w0_2\tbus\nLFWlRG2B-w0_3\tbus\nLGvjU4PVcC0_0\tboat\nLGvjU4PVcC0_1\tboat\nLGvjU4PVcC0_2\tboat\nLIC3D63R3HU_0\tperson\nLIhhU9j6MI4_1\tcow\nLLD46pbwbiU_0\tperson\nLLiy-k-4-OM_0\ttrain\nLLvpoIlozKU_0\thorse\nLLvpoIlozKU_1\thorse\nLO0IsJZeXhU_0\telephant\nLO0IsJZeXhU_1\telephant\nLPzXMvYB97A_0\tperson\nLTh-XAE8m3M_2\ttrain\nLURSawdSS9k_0\tdog\nLUsb9vk1q6U_0\tknife\nLU539OYJ_z8_0\tperson\nLXHO99b-uAQ_0\thorse\nLXHO99b-uAQ_5\thorse\nLX0HL9qztic_1\tumbrella\nLYPeAbFVTQw_0\tperson\nLZCq31MG3yY_0\tperson\nLZEMKs6H53w_0\tperson\nLZNlxXE0_2s_1\tskateboard\nLZNlxXE0_2s_2\tskateboard\nLZNlxXE0_2s_3\tskateboard\nLZ3S39QfkKA_3\tbicycle\nLbHrVQR9f24_0\tcow\nLcvMMvrPIug_1\tcow\nLdNi4yjT3yE_0\tperson\nLdusiqJFR6I_0\tperson\nLesCJsHdAU0_0\tcat\nLe2725PKYQk_0\tdog\nLfUSKsg8JoQ_0\tcat\nLfhPiqIDAcI_0\tperson\nLgbwFATbwhs_0\tcat\nLhF7TJOwt8o_0\tmotorcycle\nLhOMGvkzP28_0\tperson\nLhOMGvkzP28_1\tperson\nLhkFN7f676g_0\tairplane\nLh1QrEwtBxU_0\tskateboard\nLiS31CevvvA_0\tperson\nLiS31CevvvA_1\tperson\nLjRWmJThZrA_0\tperson\nLjyZ7Djyq1U_0\tperson\nLkP8lgpmCJc_0\tairplane\nLkfML7bjGg8_0\tperson\nLmCzQ6WrePM_0\tbus\nLnYz8cQsrWk_0\tcow\nLpTBcxby8_U_0\tcat\nLpT4VBLapqM_0\tcar\nLpjbdSyW__A_1\ttruck\nLqm0JTDlIaU_0\ttruck\nLtIW9sP55N4_0\tperson\nLuC8ON_75l4_0\tperson\nLuRLF2TroVk_1\tairplane\nLunFMJp3_Uc_0\tcat\nLup2fypzuD4_0\tperson\nLurlbycI8WQ_0\tperson\nLvd7WBHnDpk_0\ttruck\nLwxi57QRroE_0\tperson\nLyPkKroSsaU_0\tbird\nLyPkKroSsaU_2\tbird\nLyPkKroSsaU_7\tbird\nLz7uf7cmfAU_0\thorse\nL0Y9j9DtU1o_0\tdog\nL0mqjqU7pmw_0\tperson\nL1C1GJZuI6U_0\thorse\nL1TihVYcfII_0\tbear\nL1xr5gaSzeQ_0\tbicycle\nL2lJenTKrLU_0\ttruck\nL2lJenTKrLU_3\ttruck\nL2lJenTKrLU_5\ttruck\nL22pyXEUjv8_0\tbird\nL22pyXEUjv8_1\tbird\nL5px8rMqxRY_0\tmotorcycle\nL8Q0lJgaUi4_0\tzebra\nL-3-1978GvI_0\tknife\nL-6R2vuKWhc_1\ttruck\nL--TMS61Zvw_1\tboat\nL--TMS61Zvw_5\tboat\nL_dOv3wd1ZM_0\tperson\nL_nI4_2RbTU_0\tknife\nMAmHLoJdmc8_0\tcow\nMENNFokPNbU_0\tairplane\nMG8-IGrKVxc_0\ttruck\nMG8-IGrKVxc_2\ttruck\nMG8-IGrKVxc_3\ttruck\nMG8-IGrKVxc_5\ttruck\nMH1GdFqE_lo_0\thorse\nMH1GdFqE_lo_2\thorse\nMH1Kct5RCRg_6\tairplane\nMH1Kct5RCRg_10\tairplane\nMIkxezmilfY_0\tperson\nMI6x6FrXJqs_0\tknife\nMI9BIgkOBjI_0\thorse\nMJ9vJFTTV5c_0\tperson\nMKiCrBXtflw_0\tcat\nMK8Jm3I4In4_0\tdog\nMK8Jm3I4In4_4\tdog\nMMiSt9MNne8_0\ttrain\nMNve0XPgcGA_1\tbird\nMN1A5E3jNSE_0\thorse\nMPJu68gBGfI_0\tperson\nMPMudxdiIds_0\ttrain\nMPfgu6-snaM_0\tbird\nMQ1o_7gpp5E_0\tperson\nMQ1u8IEmFSA_0\tperson\nMQ3HhLmsCik_0\tperson\nMRNJmLLkjPc_1\tmotorcycle\nMRNJmLLkjPc_2\tmotorcycle\nMRqfEOhWW48_0\tperson\nMSItPvVCUN8_0\tcow\nMSd5Ecl5-W0_0\tperson\nMSnEnQ0psW8_0\tcar\nMV6MGXhQwFQ_0\tcat\nMWbnSN-7WG0_0\tcow\nMWt4P6HWxMM_0\thorse\nMXEcQSFwng0_0\tcat\nMXTzea4MeHc_1\tcar\nMXoVDyewPBE_0\tperson\nMYFPnJIKK5k_0\tperson\nMYpdq9KvK8o_1\tumbrella\nMasaNQLCMGE_0\tperson\nMbRvEKuvR04_0\tskateboard\nMb6r1es0AbU_0\tcat\nMcdl3s6oQrc_3\tbear\nMcdl3s6oQrc_1\tbear\nMe-clc6PGkA_2\thorse\nMe-clc6PGkA_3\thorse\nMfYpMzLWST8_0\tcat\nMgFhoihDD1U_0\tperson\nMkmpoid1BvA_1\ttrain\nMokOHR3wImM_0\tcat\nMqBTk3ITQ8c_5\telephant\nMqBTk3ITQ8c_3\telephant\nMrWZEUtDBq8_0\tdog\nMuyIuhdszH0_0\tperson\nMuyIuhdszH0_2\tmotorcycle\nMvKMtFVP5NU_0\tperson\nMvbZEiffy8s_0\tperson\nMvuGj1qR4Ic_0\tmotorcycle\nMvxUj_Du2IY_0\thorse\n
Mw6Cu1mPanU_1\tcow\nMxtJwd0GBkA_0\tairplane\nMzTsjMauBH8_0\ttruck\nMzrv2OCC2GE_1\tperson\nM0TTCr9jjgc_0\thorse\nM12KvkF1Nec_0\tperson\nM40gbbuNuL4_0\ttruck\nM5p7jyvEgPk_1\tknife\nM52oDxJEXk4_2\thorse\nM52oDxJEXk4_0\thorse\nM7Kcv9fUrhA_0\tcow\nM9CCnnc8m8k_0\tgiraffe\nNAInb4dMC_E_0\tairplane\nNAInb4dMC_E_3\tairplane\nNAsDBYDNhwY_0\tcat\nNDxs_vxhhME_1\tperson\nND-VrJY7mU0_0\tperson\nNEsCBcZFajg_2\tairplane\nNEsCBcZFajg_5\tairplane\njvlyXCBSuCk_0\tperson\njwYviTYbJYs_0\tcow\njxL3F-iB2S8_0\tbus\njxmsNv20V50_0\ttrain\njyrY4oyyA7M_0\tperson\njzNOBsi5TtQ_0\tcow\njzeFDGEt_iQ_0\tperson\nj4UJ80q_s3c_4\tskateboard\nj4UJ80q_s3c_5\tskateboard\nj4t-Otp9ES8_0\tperson\nj6XmNyG8nYE_0\tbear\nj8SM6uLadmU_0\tmotorcycle\nj8aX3NuEnxc_1\tairplane\nj8aX3NuEnxc_0\tairplane\nj93wwDC_a2I_0\tskateboard\nj_tT90ISNnc_0\tskateboard\nj_6ZWhyOOcA_0\tperson\nkBKG0SaNbdw_2\tcow\nkBYFlPJJx-s_0\tperson\nkCHOoDF-pXo_0\tcat\nkCQIRLEi88s_0\tperson\nkCefZaEK9M4_0\tperson\nkCt3G72NjyY_0\tmotorcycle\nkEx2sgiyKpY_0\tdog\nkG5vclMyg7w_0\tskateboard\nkHIZAi1E9gU_0\tcow\nkH3Hwla_MUM_0\tperson\nkI7523l1Tu4_0\thorse\nkI7523l1Tu4_1\thorse\nkLwsGbEsMjs_5\telephant\nkLwsGbEsMjs_1\telephant\nkL52zPMgsXM_0\ttruck\nkMIRREOoSt0_0\telephant\nkOqKBgGRd_c_0\tboat\nkQu7xcJmp6w_0\tairplane\nkRLl2HLijWc_0\telephant\nkRqsESioKVM_0\tperson\nkSWUU8Ef-Rg_0\tcow\nkSXkd4PYX9M_0\tbear\nkSm9E8WwGYY_0\tperson\nkTT6onfYUug_0\tbicycle\nkZcfsku1oJ4_1\tbicycle\nkarZg0Iifks_0\tskateboard\nkavU8zKXrEY_0\telephant\nkbD6iXQ3P6M_0\tcow\nkb4GuHpwuSw_1\tcow\nkdPgKSrjVYQ_0\ttrain\nkd9Tn_hyeb4_0\tdog\nkeka7aToy_E_0\tperson\nke2Ap6Zvq64_0\tcow\nke2uXJrB9WQ_1\tbird\nkfL1KEY53AM_0\tperson\nkfMMMSNZWeM_0\tgiraffe\nkgcb2y-aw8s_1\ttruck\nkhicinfB1nY_0\tperson\nkhr1-lWZOOw_0\tbicycle\nkixX1ga8yrw_0\tperson\nki51QTz_6iw_0\tbus\nkjhcR5ljaDU_0\tcar\nkksfStf04pc_0\tperson\nkk41Jjw-BpQ_0\thorse\nklxQpVdft5E_1\tbicycle\nkmIUPZSNl5A_0\tairplane\nknFBzlhmDMk_2\tskateboard\nknFBzlhmDMk_3\tskateboard\nkoomOoaIF0Q_0\tmotorcycle\nko4el3e0QFI_0\tbird\nkqE2rNzUnvU_0\tcow\nkqJJ6_2vGtU_0\tmotorcycle\nkqiHy-EzdcQ_0\tairplane\nkqiHy-EzdcQ_1\tairplane\nkqiHy-EzdcQ_2\tairplane\nkrD5WtdljCc_0\tbird\nkrR-lFUTXHo_0\tcow\nksbdMzGs-gs_0\tperson\nksbdMzGs-gs_1\tperson\nktCRlGt6408_0\ttrain\nktcXRj-Vz6c_0\tbus\nktcXRj-Vz6c_1\tbus\nktvaX1ALzwE_0\tmotorcycle\nkwMNSTE0h8U_0\tbus\nkwMNSTE0h8U_1\tbus\nkwyn-eed9l4_1\tbird\nkx2jH9V7vYM_0\ttrain\nkz0gVW9uWkc_0\tskateboard\nk1C25MTUso4_0\tperson\nk1Y6Y1yocF0_1\tknife\nk1qT5GtPmQo_0\tbear\nk2fCUP9H4cw_0\tskateboard\nk24lvYKkK5g_0\tboat\nk3hYFu55iGE_0\tperson\nk3hYFu55iGE_1\tperson\nk3pTU4KNdvE_0\ttrain\nk4tqy4pdlNs_0\thorse\nk5MmpG9afSM_2\tbear\nk5UoGZZb_RY_0\tcat\nk5oey7bw5kA_0\tperson\nk5-IPGgeCPc_0\tperson\nk5-IPGgeCPc_1\tperson\nk8OboASs470_0\tskateboard\nk8OboASs470_1\tskateboard\nk9COlD7u1tI_0\tknife\nk-tdE0VAFkc_1\tperson\nk-tdE0VAFkc_0\tperson\nk_E-cIymiis_0\ttrain\nlAZQZSK_9bk_0\tcat\nlCc5-WmCZJk_3\tdog\nlCc5-WmCZJk_5\tdog\nlDWAsuKkv5Y_1\tbird\nlFObiVRO-BQ_3\tairplane\nlGAGodreVjQ_0\ttrain\nlGJB2hhw5pI_0\tcat\nlIbOGzXhSW8_2\thorse\nlI-A6pFtkLQ_0\ttrain\nlI_jxWxWivM_0\tdog\nlJXfbIuwTIQ_1\tcow\nlJccP5OJjZ8_0\ttrain\nlKBO-dakd8w_0\ttrain\nlLyfm0vbHrw_0\ttrain\nlL_4QscWdx4_0\tperson\nlM0yKqnWblw_0\tperson\nlNJbOSFK9N4_1\tskateboard\nlOFTlhNmKD8_0\tbus\nlOQf3A_3lPI_0\thorse\nlOWmL3mpSeA_0\ttrain\nlOvB2zlHw8w_0\tdog\nlO-XTKPQb5I_0\ttrain\nlPapZHOAdzk_0\tbicycle\nlP5lgBlsH0U_4\tairplane\nlP5lgBlsH0U_1\tairplane\nlP5lgBlsH0U_2\tairplane\nlQDy9Mri-18_0\tperson\nlQsTpo0uOIw_1\tboat\nlQuFC-E7VUM_0\tperson\nlQuzpkDKFQ8_0\tperson\nlRuif4Zc7CI_0\tboat\nlSZa4pAHgV8_0\thorse
\nlS-5gEkB0_o_0\tmotorcycle\nlTTquh-jLwM_0\tcar\nlThBPb6HI1U_0\tcat\nlVeIr8AFTjY_0\tperson\nlWT2t48q164_0\tmotorcycle\nlYSpeuL7-oo_0\tumbrella\nlZOTAg9Fofw_3\tbird\nlZVwQoLPjBU_0\tgiraffe\nlZVwQoLPjBU_1\tgiraffe\nlahDGDRe7X8_0\thorse\nlcKDCt1eWqg_1\tknife\nldQGB8gzRjA_1\tcow\nldhdyBduVoU_1\tcow\nlf_tYVzrap0_0\tperson\nlge9f_bgAOk_0\tperson\nlgzIpgcvPvU_0\tperson\nlhNv9zDa1ug_0\tcar\nlhadIxHkaVg_1\tperson\nlhadIxHkaVg_0\tperson\nlhnQuOIF-2c_1\tperson\nljLO1myCfoA_1\tknife\nljayNZQpp-I_1\thorse\nljayNZQpp-I_5\thorse\nljeTwRM6DWE_0\tperson\nlkvdy3Hejpw_0\tperson\nll6gTyUguMY_0\thorse\nll6m5MTpf4o_0\tperson\nlmpKSF0cXSc_0\ttrain\nlnfEV2dRfm4_0\tmotorcycle\nln0_FGR8B08_0\tperson\nloVlMj9Dhkk_0\ttruck\nlotZh71qMks_0\tperson\nlpcqEaZD_Xk_5\tbicycle\nlpcqEaZD_Xk_0\tbicycle\nlpcqEaZD_Xk_1\tbicycle\nlpcqEaZD_Xk_2\tbicycle\nlpcqEaZD_Xk_3\tbicycle\nlpcqEaZD_Xk_4\tbicycle\nlqu4tjd3Zg4_12\tbear\nNE9AhZPTVFY_0\tmotorcycle\nNFF4UemeH8g_0\ttruck\nNFSj66emNbM_0\tcat\nNGS9BrtLJ0I_1\tboat\nNGvpnRrWSKc_1\tbear\nNHLBjlX2jeg_0\tperson\nNHgh88y4e80_1\tcar\nNHpM-oBMIRk_0\tdog\nNHrjnZsJWOw_0\tperson\nNID_0E0tn_g_0\tcow\nNJQNZ36lsvw_2\ttruck\nNJm81cIGO98_0\tskateboard\nNJ22Hynv9s4_0\tumbrella\nNJ22Hynv9s4_1\tumbrella\nNJ7MXR2AaoY_0\tcow\nNKQfFcfr6Ko_0\tperson\nNL1iy1TKtRI_5\tcar\nNL1iy1TKtRI_1\tcar\nNL1iy1TKtRI_2\tcar\nNL1iy1TKtRI_3\tcar\nNL1iy1TKtRI_4\tcar\nNMCijcIa_XU_2\tknife\nNMhR_Z4Rq7g_0\tperson\nNNbRF02KnGM_1\tskateboard\nNQiMeD83sMw_0\ttruck\nNQiMeD83sMw_1\ttruck\nNQsnyZmQoPw_0\telephant\nNQsnyZmQoPw_2\telephant\nNQve9Yujb14_0\tperson\nNRaAEznVIxQ_0\tperson\nNTGqC7kOGAw_1\tbird\nNTRX6gLV_04_0\tbus\nNUSnWbhvmQs_0\tcow\nNVzCor2-ZpI_1\tzebra\nNV-p8Vp-bdA_0\thorse\nNWAQ1is2w98_0\tairplane\nNYIqB-l8eKk_0\ttrain\nNZ5OIYTIoYQ_0\tperson\nNaCksn1bbv4_0\tairplane\nNaCksn1bbv4_2\tairplane\nNaEokN7Nh-U_2\tknife\nNadzcUmXDTk_0\tperson\nNbJ2gM5KJTM_0\tcat\nNbJ2gM5KJTM_1\tcat\nNdXmkm9jcPA_1\tairplane\nNd6ceCmRYBI_0\tbird\nNeXVfNsggZw_0\tcow\nNfEzlo6-i_4_0\ttrain\nNfEzlo6-i_4_2\ttrain\nNfEzlo6-i_4_3\ttrain\nNhi9730yIzM_0\tdog\nNhskHQ9bqlo_0\tcat\nNhvr0y1tqjk_0\tperson\nNiP4AEjiwxs_1\tboat\nNio43-cQPh0_0\ttrain\nNi_TSyCk1Ak_0\tcat\nNjknyzAAQpM_0\tperson\nNlOjGoYPj9Y_0\ttruck\nNlTLvOcpoEA_0\telephant\nNlVEu_8kdoI_0\thorse\nNlVEu_8kdoI_1\thorse\nNljV4UjnFJc_0\tmotorcycle\nNnRWY12wxUk_0\tperson\nNnVFfTO9-q8_0\tperson\nNo84NOV3Pwk_1\tskateboard\nNpZj-n9_STU_1\tbird\nNqwxEAASrCo_1\tairplane\nNr9t7GeBwQY_2\tskateboard\nNsbG9FcyTFk_1\telephant\nNsbG9FcyTFk_4\telephant\nNsbG9FcyTFk_2\telephant\nNsbG9FcyTFk_3\telephant\nNuKyL_c3YcQ_0\tcow\nNulXMVhoGhU_0\tknife\nNuutxSJHULc_1\tcow\nNvkF9R1HsJc_0\tcar\nNxTnPIBFKdE_0\tairplane\nNxjnp7dqCdc_0\tcow\nNxqGplqsmNk_0\tperson\nNyKq-nq-KlQ_0\tperson\nNzAEnNO5-fo_0\tbicycle\nNzAEnNO5-fo_3\tbicycle\nNzAEnNO5-fo_4\tbicycle\nNzAEnNO5-fo_5\tbicycle\nN0LEywKxW9o_0\tcat\nN0e8A9q9tyU_0\ttrain\nN1OYtZSKdKQ_0\ttrain\nN1OYtZSKdKQ_3\ttrain\nN1pTdHcekjU_0\tcar\nN28sspen6dM_3\tbird\nN28sspen6dM_1\tbird\nN3ffRSq8s7M_2\tcow\nN6nP6NLTaG0_0\tmotorcycle\nN7Bv6ZMyBrU_0\tskateboard\nN9vkS7ish9k_0\tcow\nN_5Xf4hpanE_1\tdog\nN_5Xf4hpanE_0\tdog\nOBQQMo8mWLE_0\tperson\nOCA5rhgrl48_0\tperson\nOCLVaKMFCZg_1\tbicycle\nODI8kcB_dSs_0\ttruck\nODJSlRRM1Uo_0\tcat\nOD4XsgCwIKk_0\tperson\nOD9vhbbeBAE_0\thorse\nOEhrO1p2agU_0\tperson\nOGOf9vbNJB8_0\tperson\nOG8Nfns4uh0_0\tcat\nOHEyq1pCfZ8_0\ttruck\nOIV8ASYsqZc_0\tskateboard\nOIV8ASYsqZc_1\tskateboard\nOImLl2ufWqI_0\tcow\nOJktr2-sJmY_0\tmotorcycle\nOJktr2-sJmY_2\tmotorcycle\nOKbNtRotT5w_2\thorse\nOKbNtRotT5w_5\thorse\nOKbNtRotT5w_7\thorse\nOK-2ALhNWts_0\tbird\nOLpvIpNUgY4_0\tperson\n
OLyGncmosSs_1\thorse\nOL_lZw3lqE4_0\tperson\nOMm3ReCUyGA_0\tperson\nONlvohUS-io_0\tcow\nOOC45SMJl6M_0\tbus\nOPIxLQwJLaM_1\tcow\nOPbyoGG-M_E_0\thorse\nOPm_iAWIO2o_1\tknife\nOR4OEYlOndk_0\tmotorcycle\nOSRtFznjiro_0\tmotorcycle\nOSUOKZdfiXQ_0\tperson\nOS6SXRjK0rU_0\thorse\nOUeSqgMRLUg_0\tbird\nOUrVDMMYK-4_0\tperson\nOWBXMvAtmcA_0\tcow\nOWqaj3O-u6E_0\ttrain\nOWqaj3O-u6E_1\ttrain\nOWqaj3O-u6E_3\ttrain\nOWvRHFQJ-5g_1\ttrain\nOXjc7JlWYwk_1\tbird\nOXpPVrdEoko_0\telephant\nOXpPVrdEoko_1\telephant\nOYCDyQPt5rU_0\ttruck\nOYRmTydmqZo_0\tcow\nOYugCmogPD8_0\tbear\nOZver3igS6U_1\tzebra\nOZy-0MSWC7o_0\tperson\nOZ5z2K-vIYg_0\tmotorcycle\nOb4ur_FS9xM_0\tdog\nOdLj2La07lM_0\tboat\nOdnylLd12pU_0\tskateboard\nOdsXUxBBISo_0\tairplane\nOePFLxtDg7k_0\thorse\nOflyVi689KA_0\tskateboard\nOg9LiinXMtw_0\tbus\nOjx6OtSIA3k_0\tperson\nOmdbd0YsB2o_0\tairplane\nOmdbd0YsB2o_1\tairplane\nOnRL69PzM4I_0\tbicycle\nOo3Uhz6L-cs_0\tperson\nOpEMSVRTyxk_0\tdog\nOpJl0GUiLQI_0\tperson\nOptQqflXY_g_9\telephant\nOptQqflXY_g_0\telephant\nOptQqflXY_g_4\telephant\nOptQqflXY_g_5\telephant\nOptQqflXY_g_8\telephant\nOqmbWcekMxo_0\tperson\nOrPfakDZX64_0\tperson\nOrwr1k0mKho_0\tperson\nOrwr1k0mKho_1\tperson\nOtHHLfag4xg_2\tknife\nOumTAMPogf4_0\tperson\nOvQFDkMjctE_0\tperson\nOyDNx0iCGUM_0\ttruck\nOyKi2PGJERI_0\tperson\nOyKi2PGJERI_1\tperson\nOyhAS52bQMA_1\tperson\nOyhAS52bQMA_0\tperson\nOzORAIgrZOg_1\tknife\nOzQFkM92we8_1\tdog\nO0o_u_t5Y6w_0\tbus\nO2TgLtQU7PI_0\tknife\nO3GPSL92hYw_0\telephant\nO4UhXpMuxJI_0\tperson\nO5PlzlxQuPc_0\tdog\nO5796OHwBy8_0\tbear\nO6cWlrockUQ_2\thorse\nO8s1bsDJrwc_0\tperson\nO9dxeSLiF9A_0\tskateboard\nO9dxeSLiF9A_1\tskateboard\nO90WVIgQwww_0\tperson\nO9_riOoIpKo_4\ttrain\nO9_riOoIpKo_6\ttrain\nO9_riOoIpKo_10\ttrain\nO_hypcyZCFo_0\tairplane\nlryNU4SKncc_0\tcow\nlrzxlHguluE_0\tbird\nlr7T4YcCWSU_0\telephant\nlr7T9GuNUMY_0\tcat\nlskWmTPa9Gk_0\tperson\nls34lS6fGzw_0\tperson\nlt7kXXW5D-c_0\tbus\nlvdU2uEdpnA_0\tboat\nlv6aYZguv6k_0\tperson\nlxXwMvanqo4_1\tboat\nlznoTW8tuLI_0\tbus\nlznoTW8tuLI_1\tbus\nlznoTW8tuLI_2\tbus\nl0J9Km2lk2I_0\tperson\nl0TirY4L7Es_1\thorse\nl0TirY4L7Es_3\thorse\nl3yFwpak_LA_1\thorse\nl38pNVKwDeo_0\tbird\nl4sdxYUAiJQ_0\tperson\nl4_P74HRriU_0\tperson\nl5GlzRyX39s_0\tperson\nl5GlzRyX39s_1\tperson\nl5WawiGWVxg_0\tperson\nl6cEGnOtFZg_0\tairplane\nl682n6ZmpNk_0\tperson\nl7Mmo3ow8qo_0\tperson\nl7kq2yqxPQc_4\thorse\nl7kq2yqxPQc_2\thorse\nl8r-mOc3-3U_1\tperson\nl9QgZQGtQWI_0\tmotorcycle\nl-4jrxgMGTQ_0\tskateboard\nmAEnlKe67pQ_0\tbicycle\nmAhzB1TH8mU_0\ttruck\nmAj62XUNkIM_0\thorse\nmBgSYaKydZY_0\tperson\nmC5X6MO2y9A_0\tperson\nmDf5zsFFweg_2\tknife\nmDf5zsFFweg_1\tknife\nmFbUnWMAreQ_0\tperson\nmGDfepYDRRE_0\tperson\nmHFxPudSk8c_0\tmotorcycle\nmIFnGYdf0po_0\tperson\nmJm2UYBiD8w_0\tcat\nmJo7aqOfRww_0\tairplane\nmJ6qCcS_-AQ_0\tperson\nmJ-DsFbUPUg_0\tmotorcycle\nmKBs2L-xwdU_0\tperson\nmLVHfKExUNU_0\tboat\nmMdGNbPpLKQ_0\ttruck\nmMy70TxInmA_0\tperson\nmNpEoUW_OPI_0\tknife\nmOFqvrGzJiE_1\telephant\nmOFqvrGzJiE_2\telephant\nmOkmKyBZoXI_0\tperson\nmP6-RR-Vuv0_3\ttruck\nmR1y0XlZhQ4_0\tperson\nmTeNKWTwFcs_0\tperson\nmU7E6pi9PFU_0\tbear\nmU7E6pi9PFU_2\tbear\nmWeNwTJwEmo_0\tperson\nmWhw719wEH4_0\tperson\nmXBKJjrxqmc_0\tknife\nmXekeIascCc_0\tperson\nmX_4T1I2ux4_0\tdog\nmYwEvpKN2-Q_0\ttrain\nmZ0VxiELg9A_2\tmotorcycle\nmZ0VxiELg9A_0\tmotorcycle\nmaiqraHgwgg_0\tskateboard\nmbZZ48h5pnY_0\tperson\nmboIIChd8tY_0\tbicycle\nmcR2Fi6wQj8_1\ttrain\nmcR2Fi6wQj8_0\ttrain\nmciQ3fR1QTE_0\ttruck\nmeAfvCGeyyU_0\tperson\nme-WjezBU4U_0\tmotorcycle\nmflX-nwtpzs_0\tskateboard\nmgSJL9uL49w_0\tbus\nmgSJL9uL49w_1\tbus\nmhDnVhRMCHc_5\tcow\nmhDnVhRMCHc_0\
tcow\nmhDnVhRMCHc_1\tcow\nmhDnVhRMCHc_2\tcow\nmhDnVhRMCHc_3\tcow\nmhDnVhRMCHc_4\tcow\nmhIULm3ssFk_2\tairplane\nmiJ1b0bNn9M_0\tperson\nmiLapj3u_5g_0\tcat\nmiR8Xeb7SM0_0\tumbrella\nmi4j0PrR-Gs_0\ttruck\nmi4j0PrR-Gs_1\ttruck\nmjSUb46nTjs_0\thorse\nmj2ClgQE_Q0_3\tskateboard\nmj2ClgQE_Q0_2\tskateboard\nmj_R3ENyiKM_0\tperson\nmnOoqy7I3L8_0\tskateboard\nmns4vFzs4_8_1\tskateboard\nmns4vFzs4_8_0\tskateboard\nmnwyrMq92so_0\tperson\nmoBNY2JjuEQ_0\tcow\nmoc2yPvW_JU_1\tperson\nmpA3PWbdVWc_1\tbus\nmp-cHp44pXo_0\tbird\nmp-cHp44pXo_1\tbird\nmqI9CDpsCDE_0\tcat\nmqYD18pFqm8_0\tperson\nmrnDERbyZcM_0\tskateboard\nmtO9ioY8AHY_0\tperson\nmuk5R25UV1A_0\tperson\nmungFWJMSsg_0\tdog\nmwRNyFvem8g_3\ttruck\nmyYMS85ltwo_0\tskateboard\nmyiCWmM3XN4_1\tdog\nmziKTFuKVco_0\tperson\nmznC1uLm_j8_0\tskateboard\nm0z25TJV2vU_0\tperson\nm1VAqMAJ-Lw_0\telephant\nm2DUDsR4tWA_1\tbus\nm2Sr_Q8JpcI_0\thorse\nm2Sr_Q8JpcI_2\thorse\nm2Sr_Q8JpcI_3\thorse\nm2-nK6oZ08E_0\thorse\nm2-nK6oZ08E_1\thorse\nm3u_pETGaMw_0\ttrain\nm4Ozpr8E1EE_1\ttrain\nm5mSFt43spE_4\tmotorcycle\nm7VhCUoV_Dw_0\tperson\nm77tPf0Ulb0_0\tperson\nm8THukZrE7w_0\tperson\nm86BSOvJvS8_0\tperson\nm9hdxJE9HQE_2\ttrain\nm95nb4Vl_R0_0\telephant\nm-Ry10-IgWg_0\thorse\nm-sLdoVujlI_1\tbird\nm_25GAJYGHE_1\tcar\nnAO2Y4kF7b8_0\tbicycle\nnBllCINiO-4_0\ttrain\nnF_NlCSUpFo_0\tcat\nnIO0ZNZi6n0_0\tperson\nnIiXsRSLxZI_0\tperson\nnIiXsRSLxZI_1\tperson\nnJO5eQXPS0M_1\thorse\nnKfhxWUyc4I_0\telephant\nnKfhxWUyc4I_2\telephant\nnLUyCQwkCds_1\tmotorcycle\nnMW7WsVKd_E_0\ttruck\nnO14Z3ggnZs_0\ttruck\nnO16C5NBMQQ_0\tperson\nnO16C5NBMQQ_1\tperson\nnPJJOI4j3UQ_0\tperson\nnQAqVHkffhY_6\ttrain\nnQAqVHkffhY_7\ttrain\nnQAqVHkffhY_1\ttrain\nnQAqVHkffhY_5\ttrain\nnQrJJZvmF74_0\tcat\nnRu8IVZXzCU_0\tairplane\nnR1Ng3PnYoU_0\tcow\nnSUBF0RYH1o_1\tbicycle\nnTfgyYqyO_Y_0\tperson\nnTtqkLze7eY_0\thorse\nnTtqkLze7eY_3\thorse\nnTtqkLze7eY_4\thorse\nnW4sAWZ6dHQ_0\tbicycle\nnXYeq3IDOFo_0\ttruck\nnXgq-W7J6ho_0\tperson\nnYGQy8peDYk_0\tperson\nnYHjMb7HoK8_3\tbird\nnYIUSRVmY30_0\tperson\nnaMdRxX0924_0\ttrain\nna6hNW8gSx8_0\tbus\nnbojUStyLvY_1\tperson\nnbojUStyLvY_0\tperson\nncZiTQHehfk_0\tperson\nnefS_k9oFMI_0\tperson\nngE_mlmsaqY_0\tperson\nnh4AR9Mjwmo_0\tbicycle\nniQ2DNNlBSM_0\tperson\nniUnVyYTszc_0\tperson\nnjOQqZ1pBGM_2\tboat\nnjP6uuU-G6o_6\tbear\nnjcuqdNTGfM_0\tperson\nnj8ALe3wC9c_0\thorse\nnki1SdWtdCI_0\tcow\nnk6FezKWYSY_0\tbird\nnmNSM48p094_0\tknife\nnmRZQdp3xRk_0\tperson\nnn8WcALmZ7c_3\tbear\nnoTnh5A2OHo_4\tboat\nnoTnh5A2OHo_1\tboat\nnoWsAcioI8g_0\ttrain\nnoe-qNQfJBo_0\tbird\nno-b9_3kXiQ_1\tdog\nnpAPemisdEI_3\tboat\nnpGL0Kl16f0_0\tperson\nnpGL0Kl16f0_1\tperson\nnqZya6Vk3iY_0\tcat\nPAdHnsQ5png_0\tcat\nPAi_eJ_z59w_0\tskateboard\nPBPViL9vBZQ_0\tmotorcycle\nPBS3-SzLV2A_1\thorse\nPBwR_Jdod_g_0\tknife\nPCJWOz32Js8_0\tperson\nPDmAbS9Afkc_0\ttruck\nPE8yxnkayr0_0\tperson\nPE8yxnkayr0_1\tperson\nPFKrDvQuKII_1\tcar\nPFb83m0smRg_0\tperson\nPHunbTKqKwk_0\ttrain\nPH5VqmGrnXs_0\tcat\nPIG9w10uliw_0\tbus\nPIo5FlB1sf4_3\tbear\nPIzyVPr2kvQ_0\tperson\nPI_spS2t57M_1\thorse\nPI_spS2t57M_0\thorse\nPJK-c0HQksg_0\tbear\nPJUvXC0Eumw_0\tairplane\nPJsCV-lA78A_0\telephant\nPJ0Y1xQ7ZJo_0\thorse\nPJ2kZmkL25Y_0\tperson\nPKGRn71TQGQ_6\tairplane\nPKGRn71TQGQ_1\tairplane\nPKtLlpi00cM_1\tskateboard\nPK_UdRSa36U_0\tmotorcycle\nPMDSUC0_Ytg_0\tbus\nPNxobv7rkRU_0\tperson\nPOWngj1oBhQ_1\ttrain\nPOpePYwyHWY_0\tbus\nPOu1oPwNd4g_0\tumbrella\nPPeaYnqzi9g_0\tperson\nPPjAhD3i-v4_0\tbus\nPPqkkhaUIdE_3\tbus\nPPqkkhaUIdE_0\tbus\nPPqkkhaUIdE_1\tbus\nPRaq5kZmO2A_0\tbus\nPRyc4Vp0s00_0\tbird\nPSyuR_D5C2c_0\tcat\nPTLtv0VJ0_s_0\tperson\nPTM6VrBcP80_0\tdog\nPTewrgfas9o_1\ttr
ain\nPT6u63wHOhs_0\tdog\nPT_tMCTzlSc_0\tperson\nPV_FZhj_0hI_0\tcar\nPWZIO2hdNRU_0\tperson\nPWiyz8b24es_0\tairplane\nPXxs6Hzx7Pk_1\tzebra\nPZ3X20r0oVc_1\tbird\nPZ3X20r0oVc_0\tbird\nPdPZkfHUOq0_0\tperson\nPd9bh2hiWAk_0\tperson\nPeA8729U1jg_0\tboat\nPeJxY7YFBTA_0\tknife\nPgFIqGCjnc0_0\thorse\nPgNvdw3Zges_0\tumbrella\nPgjeF-iHzLk_0\tperson\nPgyVMv-RRL8_0\ttruck\nPiI1e3aKeos_0\tperson\nPkju9RRBRAU_0\tperson\nPn01hUEOICo_0\tbicycle\nPoI-RFl6jqU_0\tbird\nPoI-RFl6jqU_2\tbird\nPpX6lJOP6ng_0\tperson\nPq1kVNudVJo_0\tboat\nPsPMm45bDZA_0\tbird\nPskTcGACgjw_0\tperson\nPsrCCNATJd0_1\telephant\nPs9peKxde4U_0\tdog\nPvCZZzw4FKw_0\tperson\nPvQVqhtqTVk_1\tperson\nPvQVqhtqTVk_0\tperson\nPv3IqqHid-w_0\tairplane\nPv3IqqHid-w_1\tairplane\nPw7zlPV9yh4_0\tmotorcycle\nPytUHdEhipQ_0\tairplane\nP0FylASL6h4_0\tperson\nP06NLpHGLb8_0\ttruck\nP06NLpHGLb8_1\ttruck\nP1FTUN2gJkY_0\tperson\nP3JAtlf2-VA_0\tcat\nP3MhJa_p-dU_1\ttruck\nP5MpdcJgQrI_0\tskateboard\nP5NEco_Rqas_0\tmotorcycle\nP5NEco_Rqas_1\tmotorcycle\nP5v3n_5s-F8_0\thorse\nP7i0pgLo9kg_1\tcar\nP8E7gprJa1s_1\tskateboard\nP8_7-uFl2Go_0\tbicycle\nP9dDbodBY8s_2\tmotorcycle\nP9dDbodBY8s_0\tmotorcycle\nP9dDbodBY8s_1\tmotorcycle\nP91LJh-_E0Y_0\tcow\nP-FrYGR7Bf0_0\tperson\nP-phCIDPeWw_0\thorse\nP-27cmR3CZE_0\tknife\nP-_MzAIxz2E_1\tknife\nQBAxag8dq6Q_0\tcow\nQBfotDmdDkk_1\tskateboard\nQBrAST1Q2iE_0\tperson\nQCCt8ooY4qg_0\tperson\nQCjqG8908mY_0\tcow\nQEDWauqnaSk_0\tskateboard\nQEGY7Dq2x9s_0\thorse\nQE0MjXjSFjU_0\tboat\nQFS35qERdLE_0\tperson\nQFeMKKxurVg_2\thorse\nQFxep-yih-s_0\ttruck\nQFxep-yih-s_1\ttruck\nQGN2-Iqa4QQ_0\tperson\nQHPYpnJSf2s_0\tcat\nQHhkx3CSiWk_0\tperson\nQJ1W4Pajbv0_0\tperson\nQLmFsJCZy_o_3\tknife\nQMRFisCEGQc_0\tperson\nQM9Kddu2XcQ_0\ttrain\nQObG-uf4v68_0\tmotorcycle\nQOjAwmQ_7vA_0\tperson\nQPtMbvxzFuE_2\tbear\nQQC7AIIJg2Y_0\telephant\nQQLrVBS8VSo_0\tperson\nQQLrVBS8VSo_1\tperson\nQSTf92HwJS0_1\tdog\nQSTf92HwJS0_0\tdog\nQTjiYkMuDGI_0\tknife\nQTqvJZS8ZNo_0\telephant\nQUIxOZH8N8c_0\tperson\nQUUgu5YvS1c_0\tperson\nQU7X6RkjKPE_1\tboat\nQVUI5ZkkDsA_0\tperson\nQVnam2Ma6mY_0\tperson\nQY1rz6k86s0_1\tperson\nQZS3V-7xnAA_0\tperson\nQZWqiN4OA_A_0\tperson\nQZk1HSA90KA_0\tknife\nQaUHYb5os4U_0\tperson\nQahBgQXhNfo_0\tcat\nQbOvfWFyPzg_0\tdog\nQbOvfWFyPzg_1\tdog\nQbPvdKEmnrI_0\tperson\nQb4RNeQYfPc_0\tboat\nQcLa-GP2ITc_0\tperson\nQcLa-GP2ITc_1\tperson\nQdeUvHCiXwc_1\thorse\nQd0chk9vUQ0_0\tbear\nQeISQLJERxg_0\tperson\nQfJeJLieLew_0\tcow\nQfJk-eDxmKE_0\tperson\nQfkb-gc72qg_0\tcow\nQgPao5AkXFU_0\tskateboard\nQgiX6-1aN-4_0\tbus\nQhGx_MwYnWs_0\tperson\nQhIp71nr7Vk_0\tdog\nQk_VhG5lt1Q_0\tcat\nQmRFPW81gZc_1\ttruck\nQmfJmQuF1-I_0\tbus\nQmuLT1MpdP8_0\tperson\nQm2yaeiexlI_2\tmotorcycle\nQrd-Q3XrT3A_0\ttrain\nQszBg-eN7F8_0\tcat\nQtBYK8AxWCw_1\tperson\nQtpKcTyf4n4_0\tknife\nQtq2m-MV2q4_0\tcow\nQvY9ysq30EI_3\telephant\nQvY9ysq30EI_5\telephant\nQvY9ysq30EI_0\telephant\nQvY9ysq30EI_2\telephant\nQwJNOYFZ3W8_1\telephant\nQwTIODgGfOM_0\tperson\nQxLFtmn_Igw_2\tbear\nQyyPl-aCFUs_0\tcat\nQzETtzOBUaY_0\tperson\nQ0HpPvC0bKA_0\tperson\nQ0M_Fog02Yw_1\thorse\nQ0UrlXLNioY_1\tumbrella\nQ0tQtb1npx4_0\tcar\nQ0x55aCCNxA_0\tperson\nQ31q8b3CSN8_1\tskateboard\nQ4rAM1058Z4_0\thorse\nQ4rAM1058Z4_1\thorse\nQ5G2n-3zXX8_1\tperson\nQ5G2n-3zXX8_0\tperson\nQ5X1kisU8Qo_0\tperson\nQ6hwtMw2jkU_4\tskateboard\nQ6hwtMw2jkU_3\tskateboard\nQ7SViqj0bEg_0\tdog\nQ83xNK10WK0_0\tbear\nQ-lTGQgTOEg_0\tperson\nQ_rsZh5VqdY_0\tperson\nRANBJV7BN3k_0\tperson\nRAmxGTzr25A_0\tperson\nRBccU2wq7Qs_0\tknife\nRBclSX-7rYQ_0\tperson\nRDiehz1pFVA_0\tknife\nRD7nVPZTGEw_0\tskateboard\nREBfrgEC_3U_0\tknife\nREh7f-__WqU_0\tcat\nRE40E9-qdHE_0\thorse\n
RFO8tA6rfbo_0\ttruck\nRFbhEQ4qN-A_0\tperson\nRIfxXKT-_88_1\tskateboard\nRJZgo3_JEPs_0\tperson\nRJi5ZRGQb-A_0\tperson\nRJxPTuKUKjk_0\thorse\nRKFpQfRSYIc_2\tmotorcycle\nRKFpQfRSYIc_3\tmotorcycle\nRKFpQfRSYIc_4\tmotorcycle\nRKFpQfRSYIc_6\tmotorcycle\nRKFpQfRSYIc_7\tmotorcycle\nRKFpQfRSYIc_8\tmotorcycle\nRKFpQfRSYIc_9\tmotorcycle\nRKFpQfRSYIc_10\tmotorcycle\nRKFpQfRSYIc_11\tmotorcycle\nRKFpQfRSYIc_0\tmotorcycle\nRKFpQfRSYIc_1\tmotorcycle\nRLcZcFP03fA_0\tperson\nRN6TzMbUlyg_0\tairplane\nROdg8e5a0Fk_1\tcow\nRPwZjkygYo4_1\telephant\nRR-fksDmQTU_0\tdog\nRSLwmLbf3No_0\thorse\nRSO2IDZGDus_0\tperson\nRSQ7pHT5sU4_1\tcow\nRSWyviTCTqk_0\tcat\nRTAQO62dbRo_0\thorse\nRTONY5PqRUo_0\tskateboard\nRT0mh9U0YDc_0\tperson\nRUAbb66fW18_0\tbicycle\nRUW8xYh84q4_0\tdog\nRU0u42rf0Hw_2\ttruck\nRU0u42rf0Hw_3\ttruck\nRU_8ryQNxC0_1\tbird\nRWJfJx1nXNQ_0\tbicycle\nRWo2zaceWcc_0\tbird\nRahqzUIhIkc_0\tcow\nRawtpxzAbmM_0\tperson\nRdZGVs8pH40_2\tskateboard\nRdZGVs8pH40_1\tskateboard\nRdge7lmfdc8_0\tperson\nRfVv6ECZ78Y_3\tbear\nRfa2If7RJTY_0\tknife\nRfa2If7RJTY_1\tknife\nRfvNPPjs-bw_0\tboat\nRi3O4rz5S2o_0\tboat\nRoMemRfbKkc_0\tperson\nRoNJ0fP0VUU_0\tperson\nRqAANAYxYz0_0\tperson\nRqqaUsDM-aI_0\tperson\nRrnixlsQyn8_0\tperson\nRr6AsTlUNKQ_0\tperson\nRspILw0UAM8_0\tperson\nRsyjwcMkRrY_1\tknife\nRsyjwcMkRrY_2\tknife\nRt1reRy5GVY_0\tperson\nRuFIanBmYzM_0\tbicycle\nRu9ksAvNYc0_2\tcow\nRwVTAYsyWMo_0\tperson\nRxWOvD9i9Ig_0\tcar\nRxtS3kGOYoc_0\tbicycle\nRxtS3kGOYoc_2\tbicycle\nRxtS3kGOYoc_4\tbicycle\nRxtS3kGOYoc_6\tbicycle\nRxtS3kGOYoc_9\tbicycle\nRxtS3kGOYoc_12\tbicycle\nRx9YjtdgOEI_0\tperson\nRyVdNK-PCyg_0\tperson\nRylJTxUTfF0_0\tskateboard\nRzdsXt87bVE_0\tdog\nR0biK134LTQ_0\tperson\nR0n9cqLQE4E_0\tskateboard\nR3rDAaPE_s4_3\ttruck\nR45uCINxuVY_0\tperson\nR7IE_IohaIk_1\tairplane\nR7IE_IohaIk_6\tairplane\nR7IE_IohaIk_0\tairplane\nR8Zg4uo1QpM_0\tperson\nR9d1vlii7cs_8\ttruck\nR9hRCG8pAHM_0\thorse\nR9hRCG8pAHM_1\thorse\nR_xLhXpHgp0_4\tskateboard\nSAeiSpeFynU_1\tbus\nSBmb0VU07rs_0\tboat\nSCLi5OFtzQk_0\tskateboard\nSCaWHsWzxqY_0\tperson\nSC18zgZ9Diw_0\tbus\nSDCTiDVOdW0_0\tbear\nSFA4mVjImxk_0\tperson\nSFoil_6CvbI_0\tbird\nSGsRwH8YxQg_1\tairplane\nSGsRwH8YxQg_11\tairplane\nSHSsDGmwywY_0\tcow\nSIZ3AYCr7PQ_0\tperson\nSIv3Hcq1ge8_0\telephant\nSIv3Hcq1ge8_1\telephant\nSJkZwyPxUTg_0\tcow\nSJqduSR9h4g_0\telephant\nSJwgIeOkfTM_1\thorse\nSKoDZimqLV0_4\tbus\nSMF8aDGwELI_0\tgiraffe\nSNbBUZtngzM_0\tperson\nSNnofRkUk8w_2\tboat\nSNqtno2pOzc_1\tdog\nSNqtno2pOzc_2\tdog\nSQn8ueHVBWc_4\telephant\nSQn8ueHVBWc_6\telephant\nSQn8ueHVBWc_1\telephant\nSQn8ueHVBWc_3\telephant\nSQ4tDbbdzr8_0\ttrain\nSQ4tDbbdzr8_2\ttrain\nSSjgAjilS8g_0\tperson\nSSwA_nC9rr0_0\tperson\nSThjw6JeBnQ_0\tperson\nSTuEo8vap08_0\tperson\nSUHEgX-8bo0_0\tperson\nSUwLfCebumU_1\tbear\nSUwLfCebumU_2\tbear\nSVUAFI7bHqQ_0\tperson\nSWedQv5UnQo_0\tperson\nSXWo-zKZICs_0\tperson\nSYT4odK3Dwo_1\tbird\nSc_CAareVEI_1\telephant\nSc_CAareVEI_6\telephant\nSc_CAareVEI_7\telephant\nSc_CAareVEI_2\telephant\nSc_CAareVEI_3\telephant\nSdzIWTR-rkc_0\tperson\nSeBOeRzwqrQ_0\tskateboard\nSeU_71ydaeA_0\telephant\nSehCD9wP-Pk_0\tperson\nSf9OdV3i3I4_0\tperson\nSgglaVke5lo_3\tboat\nSgySshdgJrQ_0\tmotorcycle\nShves64RCp4_0\tcat\nSiotcXGUwAs_0\tperson\nSj56u4dFe4k_2\tperson\nSlR9qCk_m9k_0\tmotorcycle\nSlR9qCk_m9k_1\tmotorcycle\nSlZZmtOGyeE_0\tairplane\nSlZZmtOGyeE_1\tairplane\nSndDcPzB8Hc_0\tcat\nSn2SGmheI-Q_0\tperson\nSn9gOBw9bf4_0\tperson\nSoiA6jtejG4_0\tdog\nSpbyBYH0OjI_0\tperson\nSph2g6B-X2M_0\tcat\nSpjssmEyc_o_0\tairplane\nSqHtdCP5Oao_1\thorse\nSqHtdCP5Oao_2\thorse\nSqLiHZHzp9w_0\tperson\nSqoR7vKYzCY_0\thorse\nSq-Xok-ea7U_0
\tperson\nSreiPFJ6vBw_1\tboat\nSsMS0eIy2Ws_0\tperson\nSse7vXMMO6E_0\tperson\nSuaush4Da4s_0\tperson\nSvPL8gOREaU_0\tknife\nSwaILKCtBVA_0\ttruck\nSw4B_VFic3M_0\tskateboard\nSw7L3wImbSA_0\tperson\nSyldRIQbAGU_0\tperson\nSzVyFmQ28Xo_0\tcar\nSzkobSwGTMk_1\tbird\nSz2bTIe9kTo_0\tairplane\nnrEv-Plh45s_0\tbear\nnt_BXwq_xhA_0\tgiraffe\nnuCdww9iIOs_0\thorse\nnuMeNIi1MPY_0\tperson\nnuMeNIi1MPY_1\tperson\nnui8beXjUlU_0\telephant\nnui8beXjUlU_1\telephant\nnvMXQKwroRY_0\tperson\nnvaO13WFhos_0\tperson\nnxBkP48NgKY_0\tmotorcycle\nnxclZ6iCf7o_0\tcow\nnyogtZp3kIk_1\tairplane\nnzf12QyuD4E_0\ttruck\nn0tx4V2rF3I_1\tgiraffe\nn09NxJcTEYQ_0\tperson\nn12ITkwyzvM_0\tcow\nn15n46culQU_0\tperson\nn19nqH4078Y_0\tbear\nn2F8uNrgh1U_1\telephant\nn2daSQR_dTI_0\tmotorcycle\nn3Eb6Cf77Vg_0\tairplane\nn3aHtfCo_aw_0\tperson\nn3fhSGUvtH8_2\tknife\nn5alwWwFPb0_0\tmotorcycle\nn5osSY0_BSo_0\tperson\nn5-RrJI-Lxw_0\tperson\nn6I0k52pV18_0\tbear\nn8xNf-PRHnc_0\ttruck\nn9AUV2KuhLo_0\tcow\nn9zSAZMj2Mk_0\tknife\nn-I-WnLfnqE_0\thorse\nn-QBM6yD7RI_0\tbird\nn-eDiuWYJUc_0\tperson\nn-1FhryZboM_0\tperson\nn_Cv1LzGol0_0\tperson\noBixVhXVcmY_0\tperson\noBjIRWu_BWA_0\ttruck\noCCV0-mP2R4_0\tbus\noDlSzIkDJGM_1\tcar\noDnobYn8maE_0\tperson\noDrYXyIN9xs_2\tdog\noEcyeE0kNFc_0\thorse\noElAgrukyOk_0\tperson\noE0bjG0z-nk_0\tperson\noGDp2b_LvDA_0\tbicycle\noHu9fCIhAjs_0\tperson\noIQuiXJzEUI_0\tperson\noIYCDBqfT6I_1\telephant\noIZHf-r5C3w_2\tbird\noI3ETWYxCi8_2\tperson\noI3ETWYxCi8_1\tperson\noJAivZwYxDE_0\tperson\noLTHGMleOxk_0\tcar\noLTHGMleOxk_1\tcar\noMZczwLgR1Q_0\tboat\noMZczwLgR1Q_3\tboat\noMZczwLgR1Q_1\tboat\noMZczwLgR1Q_2\tboat\noNMf32fzYvo_0\tperson\noOi9E4se4ww_0\tperson\noOp7fTxc8qY_0\tperson\noOp7fTxc8qY_1\tperson\noQcVQukPVdA_0\thorse\noRacxmfNaSM_0\tcat\noSwwku39aC0_0\tskateboard\noXHr2yBfL3Y_0\tcat\noXfOERZ2kMs_0\tcow\noXlK1t1qisA_0\tperson\noYw8UE0VSFk_10\telephant\noYw8UE0VSFk_1\telephant\noYw8UE0VSFk_5\telephant\noYw8UE0VSFk_8\telephant\noY5CyHk-QEo_0\tperson\noaHCd7KI_Fc_0\tairplane\noaK_EfFOb7o_2\tskateboard\noaK_EfFOb7o_0\tskateboard\noa5NT5mX--c_0\tperson\noa838tg7QCk_2\telephant\noa838tg7QCk_3\telephant\nocJUmpBIBOo_0\tperson\noc7XeYj7dOE_0\tskateboard\nodjK5W70JaE_0\tperson\noeYHzAMgoQ4_0\tskateboard\nofynEJHRTz4_1\tperson\nof1ISNDelz4_0\tcat\nogJGxnVqTWY_0\tcow\nogNqc-uHzQ4_0\tumbrella\nohkrDDXUwjY_0\tperson\nohrYGLaImow_0\tcow\nohxeFH800SE_0\tskateboard\noiftoNj28hs_0\telephant\noiwU7UpO9S4_0\tperson\noi4GfdQBxyc_0\tperson\nojiIyU5ibT0_0\tperson\nokPcGR4BRQM_0\tperson\nomsmPSC4u3A_0\tairplane\nonH8ELLteHg_0\tmotorcycle\noo3eTJKpErU_1\telephant\noo3eTJKpErU_2\telephant\nopWm4bW5B9k_2\ttruck\nopYiNVXmySg_0\tskateboard\nopkxXg1s8ZQ_0\thorse\nopkxXg1s8ZQ_2\thorse\nopkxXg1s8ZQ_3\thorse\nopkxXg1s8ZQ_4\thorse\nosYXdQYkiPQ_0\tperson\notKNUa-KgUg_0\tcar\notKNUa-KgUg_1\tcar\notOxAXKskbI_0\tboat\notU4Zd1n65g_0\tbear\notqOLpbz4LQ_0\tairplane\nouK26Crplso_1\tcar\nouSUKHZs1Dc_0\tperson\nousG5WHZq8I_0\telephant\nouwAzKpUG7k_0\ttrain\novQiwCBG8Eg_4\telephant\novZ4In0kLUg_7\tbear\novZ4In0kLUg_2\tbear\novZ4In0kLUg_6\tbear\nowW-da7Tdls_0\tperson\nowtKQFT_gNk_0\tperson\nox0mlEooWI0_0\tskateboard\noyuMudJ9EM8_0\tperson\nozRJI9h3tks_0\thorse\nozRJI9h3tks_1\thorse\nozvxKPrfdo8_0\tdog\noz11xvTIbvM_0\tperson\no0QRA7gPhBI_0\tgiraffe\no02m7tfad28_0\tperson\no02m7tfad28_1\tperson\no09Ks_UmmkY_1\ttrain\no3eHOnTMxnU_0\tairplane\no4PVsZPaxOM_0\ttrain\no4PVsZPaxOM_1\ttrain\no4VOx1SeRKY_0\tbicycle\no4VOx1SeRKY_2\tbicycle\no4VOx1SeRKY_4\tbicycle\no4VOx1SeRKY_5\tbicycle\no4VOx1SeRKY_1\tbicycle\no4VOx1SeRKY_3\tbicycle\no7wb_t8x0D8_0\tperson\no8KS5SYj0GE_6\tbird\no8YfQD0
GA00_0\tperson\no9gD7-MVkJ4_1\tbus\no-IwJTgdr_A_4\tbird\no_sONKO9OMk_0\tperson\no_7RumsdAcE_0\tmotorcycle\npAVwx70oxIc_0\tperson\npAthLZfnXaM_0\tperson\npAthLZfnXaM_1\tperson\npBWgDW8f6II_0\tperson\npB5-haagdS8_2\tbird\npEUCkpfCcaw_0\tboat\npEtOW-iQZCA_0\tperson\npE-OFVB2lzo_0\ttrain\npHsAHiqdb-c_0\tbird\npIHbW9IMV2E_1\tairplane\npIHbW9IMV2E_0\tairplane\npINK56mkS-E_0\tcat\npJBMnX2HBFo_0\ttrain\npJ6wkaE8-iY_3\telephant\npKFd8IXz4K4_0\tboat\npKnRcv--qEI_0\tcat\npLvGIJc0ETk_1\tcat\npMKMeBQzCC8_0\tdog\npMKMeBQzCC8_1\tdog\npMgX9KscZSg_1\ttrain\npNG0qeNr-Vo_0\tperson\npNWXXO380uQ_4\tdog\npNWXXO380uQ_10\tdog\npNWXXO380uQ_1\tdog\npNWXXO380uQ_2\tdog\npNWXXO380uQ_6\tdog\npP84ZurhiFY_0\tumbrella\npQAJTPvkPj4_0\tbird\npRArAdUzaKg_0\tperson\npRVlgxVhtuA_0\tcat\npRy6kU2p41E_0\tcat\npS5AzmSvRPY_0\thorse\npU9s744_T6o_0\ttruck\npVR9b-qG1Ig_0\tgiraffe\npVR9b-qG1Ig_6\tgiraffe\npVR9b-qG1Ig_7\tgiraffe\npVR9b-qG1Ig_1\tgiraffe\npXLbIBluyAQ_0\tbus\npXfO7xO-99w_0\tcat\npYXDml6lcAY_0\tmotorcycle\npZCCPMu42GA_0\tperson\npbFuk0oX6a8_0\tbicycle\npbFuk0oX6a8_1\tbicycle\npbFuk0oX6a8_2\tbicycle\npb3p83fw9bg_0\tperson\npcUV4ja1VRc_0\ttruck\npceUU6aj_ao_0\tcat\npdyhFh6-rCo_0\tbear\npeBxgn7gXlw_1\tmotorcycle\npeHZd4qdOMI_3\tboat\npe00hbvqjDI_0\tperson\npe_73GR1-NI_1\tairplane\npfED6WafVwQ_0\tbear\npfpKoO-GjGI_3\ttruck\npfpKoO-GjGI_1\ttruck\nphXjZ1yxWD0_0\tbus\nphec6_yC2HY_0\tperson\nphjJhuKxT5Y_0\ttrain\npiGT-hRYHHQ_0\thorse\npiN1RiueJhY_0\thorse\npjLei6UAHsE_0\tairplane\npjLei6UAHsE_1\tairplane\npjZqJuEX1ow_0\tairplane\nS2FTgueR-80_0\tperson\nS2FTgueR-80_1\tperson\nS3U383sqlRs_0\tbicycle\nS4UDIyyqmlY_2\tmotorcycle\nS6h6E0IKO6Y_0\tdog\nS73sRU7b2dk_0\tperson\nS9QmlxGGxGM_4\tknife\nS9goDsKFXAg_0\tperson\nS-qgaqzenIE_0\tperson\nTBpnes8Z-3s_0\tperson\nTCtRzPGrwls_0\thorse\nTCycfRWpg0s_0\telephant\nTDKDtLliMhg_0\tperson\nTDlLgW8Fjes_0\tperson\nTFcak4kNd2c_0\tperson\nTGFSBSitWNw_0\tcow\nTISjnLr1r-k_4\tgiraffe\nTISjnLr1r-k_5\tgiraffe\nTISjnLr1r-k_3\tgiraffe\nTJsLSuQcb7E_0\thorse\nTKadOIk-uPI_5\ttruck\nTK61mJMHqTE_0\ttrain\nTK61mJMHqTE_1\ttrain\nTLxcXucOpWw_0\tskateboard\nTMaLrtjFU34_3\tcow\nTNNXwm3Bt5I_0\tbicycle\nTOLyNcTSGPA_0\tperson\nTPglVxQN85I_0\tdog\nTRH4PZkAkiE_0\tperson\nTSl3wSreplo_2\tbird\nTSl3wSreplo_0\tbird\nTVuX76wWzwY_0\tperson\nTW9LBSqxNWo_0\tbicycle\nTW9LBSqxNWo_2\tbicycle\nTW9LBSqxNWo_6\tbicycle\nTXD-idarfhU_0\tperson\nTYsJu2G5WVY_2\tknife\nTZdDUMDyozA_0\tdog\nTZfFEYUY5_0_0\tboat\nTZsigdW7Qfs_0\tairplane\nTaL6ssJD8z4_0\tairplane\nTalhQQ9B7vc_0\tzebra\nTa-JBO0InZk_0\thorse\nTa-JBO0InZk_1\thorse\nTa-JBO0InZk_2\thorse\nTbm_BFLOPic_0\ttrain\nTcRl6wotFw4_0\thorse\nTcR9fR_SWLg_0\tbicycle\nTeiC-tObc4o_0\tbicycle\nTgRRY3Mn0Ro_0\tperson\nTi411VXWtAc_0\tdog\nTjCiDUNoDi0_0\tskateboard\nTkktEeCiSAo_4\tknife\nTkktEeCiSAo_5\tknife\nTlXSJmmN3dc_0\tmotorcycle\nTnB8G7eZm24_0\tperson\nTnY1qP0YQQ8_0\tperson\nTnc7CCuk78Y_0\tperson\nTn4trDBJAqE_0\tperson\nTo8VzjtX70s_1\tperson\nTo-lnvpzIKY_0\tperson\nTqKcS4Cx7wc_0\tbird\nTqvuyyM_x4E_0\tbird\nTqvuyyM_x4E_1\tbird\nTsM45PkaTj0_1\tbird\nTs4iqmKVRy4_0\tknife\nTtI1W2xFQ5k_0\tperson\nTtI1W2xFQ5k_1\tperson\nTtnuIzV01ek_2\ttrain\nTtyfhN-jWcc_0\tperson\nTuEArk4EFWg_0\tperson\nTuEwZSEUe5A_0\tperson\nTuOnAlE6TRs_0\tairplane\nTubHgt_FxYo_0\tperson\nTufSi0uSU8M_0\tperson\nTvUmQi32j08_0\tperson\nTvUmQi32j08_1\tperson\nTvuhORVyaL4_0\tperson\nTvuhORVyaL4_1\tperson\nTwH6hv5zVIU_0\tairplane\nTwSnlq5Kma0_0\tskateboard\nTxV4qpdgJ3Y_0\tairplane\nTxV4qpdgJ3Y_1\tairplane\nTyIzjLHGvjo_0\tperson\nTzUMxAOWWcc_0\tbicycle\nTzVawH7veiM_0\tbicycle\nT0WCoXgklkw_0\tperson\nT0r5yfzMs4g_1\tbicycle\nT24d3EHv2GE_0\tbird\
nT406qi8vIlk_5\tairplane\nT406qi8vIlk_2\tairplane\nT6XxSbeAl6Q_0\tmotorcycle\nT8e9Qi4dcNY_1\tbear\nT95G52MuPFU_0\thorse\nT-PL14w9TV4_0\tcat\nT-cOBQACeAw_1\tbird\nT_2A3L49ah4_0\tdog\nT_2A3L49ah4_2\tdog\nT_2A3L49ah4_3\tdog\nT_2A3L49ah4_5\tdog\nUANkhHNWM-M_0\tperson\nUAnl6TGZhxs_0\tcow\nUA5VCImEZ2Y_0\tdog\nUBdNIuCPaZ4_0\tcar\nUBdNIuCPaZ4_2\tcar\nUBsG3-ocU64_1\tboat\nUE40h6VhUaU_1\tbicycle\nUF8l_MU2rj8_0\tperson\nUGCPxfU7FKM_0\tperson\nUG5FFY29OV0_0\tcat\nUHO129a_p0U_0\tairplane\nUHYwdGF9W-0_1\thorse\nUHYwdGF9W-0_0\thorse\nUIvJPTYu6Hc_0\ttrain\nUI4IvmmFIPQ_0\tperson\nUKExOybWiRM_0\tmotorcycle\nUKExOybWiRM_1\tmotorcycle\nUKkr05PKrb0_0\tbicycle\nUKlB9mDIXss_0\tperson\nULdZGJs5ta8_0\tmotorcycle\nUMsR07JXCYs_0\tcow\nUM446G0Lud4_0\tknife\nUOUaveJ_TWA_0\tperson\nUO_zNFtEt3Q_0\tperson\nUPkEE2dnlkU_0\telephant\nUPkEE2dnlkU_1\telephant\nUQAJPD_gH7g_0\tcat\nUQDXdgIlpDg_0\tknife\nUQibn_ZNp9Y_0\tskateboard\nUQibn_ZNp9Y_1\tskateboard\nUSAjeRaDlJ0_0\tperson\nUTqlz0i9KIo_0\tperson\nUVTPHohbCV0_0\tperson\nUX4dpwv6qWE_0\tdog\nUYAtAlnvVy4_0\tskateboard\nUYc0lVVxayQ_0\tdog\nUcCtmXy5F4g_0\tdog\nUcbWaG8GwRs_3\tairplane\nUcbWaG8GwRs_2\tairplane\nUceYFW8-zZM_0\ttrain\nUcse975FqUA_0\telephant\nUc5PAhXhIzk_0\tumbrella\nUgsSu7wC28w_0\tbird\nUhj0HRMHPXY_0\tperson\nUhsh3JUb_aI_0\tbicycle\nUisVwousE8g_0\tcat\nUi8yPflhqHs_0\tperson\nUjMTd3LCxyQ_0\tperson\nUjMTd3LCxyQ_1\tperson\nUlFA0xDQcS4_0\tskateboard\nUlhZSONgFCI_1\tcow\nUlhZSONgFCI_2\tcow\nUmvp1XgX6Qc_0\tperson\nUm-FzEOyncc_0\tperson\nUnUlhJaHWlA_0\tbear\nUnyyMjT0BCc_0\thorse\nUsCJdEa7tq4_0\tdog\nUsCJdEa7tq4_1\tdog\nUsrv7_ONvi0_0\thorse\nUs6dL_WD7xg_0\ttruck\nUtyaA_QRIrQ_0\ttruck\nUu9k1VohpvA_0\thorse\nUvptsJcl_ms_0\tperson\nUwtHiozuyRs_0\tperson\nUxPh-hnwal4_0\ttruck\nU2LvNquzuZ0_0\tbicycle\nU2LvNquzuZ0_2\tbicycle\nU4LhReaGH70_0\tperson\nU64eMon0R9w_1\tperson\nU74o2HGsFeI_0\tdog\nU853uMV0qAY_0\tperson\nU86p5VtUC6c_0\tknife\nU9YbGyTBb5k_0\tperson\nU99ENpOmVGI_0\tairplane\npmrTy1xQ5kI_0\tperson\nprJIAYsv8bQ_0\ttruck\npramqy_Y1gA_0\tboat\nprlcpxzCoyc_0\tbus\nps-nNC6Equg_0\tcat\nptF2Hqj7DGk_1\tmotorcycle\nptF2Hqj7DGk_0\tmotorcycle\nptPi712LDq0_3\tbear\nptU4EDudgg8_1\tbus\npt6v3JZFi4c_0\tbird\npuifEp7W50E_0\tmotorcycle\npuifEp7W50E_1\tmotorcycle\npu0G99aVryc_2\tcar\npu0G99aVryc_0\tcar\npwFqv42foTM_0\tperson\npye4y8sPr9I_0\tperson\npy0U90-ZTkI_0\tcat\npy2dhJjpOaI_0\tbear\np19EU6tw9oM_0\tperson\np2DntTqvGT4_3\tcar\np2DntTqvGT4_1\tcar\np2QsmFuYxdI_0\ttrain\np2TTKNDiGv0_1\tbicycle\np4pf9W4qt8s_0\tperson\np40Oqh_akS4_3\tbird\np43GludvR_g_0\tbicycle\np5F9hHDkbKc_0\ttrain\np7UAl7_bv4s_0\tbus\np8KQvF1DyLg_0\tperson\np8YhfWsz1JY_1\tperson\np8YhfWsz1JY_0\tperson\np8gE3VpTAR4_0\tperson\np84Z-poVaAw_0\tmotorcycle\np9ixpjYEEag_0\tmotorcycle\np-J_LbVq7CU_0\tperson\np-SJ_Ym5pTA_0\tcow\np-XasPaki0k_0\tcow\np-cJamorAiY_0\tperson\np-2rgSte1DI_1\tbus\np-2rgSte1DI_2\tbus\np-6u3d8YV70_0\tperson\np_YVPahadQ4_0\telephant\np_YVPahadQ4_1\telephant\nqDP6_m4bDRA_0\thorse\nqD8NS4r2Gd8_1\ttrain\nqEjyhyeCIR8_0\tcow\nqEjyhyeCIR8_3\tcow\nqEjyhyeCIR8_1\tcow\nqEjyhyeCIR8_2\tcow\nqGiLjP8-EVQ_0\tperson\nqHYuGyp8_HU_0\tbear\nqHZsnSLmqEY_0\tperson\nqIJo1R3rHmQ_0\tperson\nqJI7mnjOp0A_1\tumbrella\nqJOaXM8s-Yo_0\tknife\nqJOaXM8s-Yo_1\tknife\nqJugj62heF8_0\tairplane\nqKqEqxMZHVg_0\tperson\nqM566R4U4Ug_0\tbird\nqQbEwbtvdRg_0\tperson\nqSR2E4eqjqI_0\tskateboard\nqSiMwC5e5_I_0\tperson\nqUGXSXCXUbw_1\tperson\nqVCH1ozivyk_0\tperson\nqV9Ll-N_rpc_0\tdog\nqWpIdTdBIQU_0\tboat\nqWpIdTdBIQU_2\tboat\nqWpIdTdBIQU_3\tboat\nqXaS7daelL4_0\tperson\nqXfnmaLtO-M_0\tairplane\nqXwXdnrUo5w_0\ttrain\nqXx4Vj-HwkU_2\tbus\nqYf_XBAUa_o_2\tele
phant\nqZFwurCX4DM_0\ttrain\nqZH-IY7bBzg_0\tperson\nqZQcY5PTh10_0\tcat\nqZVUho1xBlo_1\ttruck\nqZVUho1xBlo_2\ttruck\nqZVUho1xBlo_0\ttruck\nqbYjOWN6n70_0\thorse\nqceiUxIt1VE_0\tcar\nqcjVVDAbHUI_0\tperson\nqcmbCgcy3co_0\tperson\nqdNXPwWD9_Q_1\tperson\nqdzu1EFDYUE_0\tcow\nqel4U0nmQOI_1\tperson\nqfp7BvAtQa8_0\tperson\nqgKnno5T6f0_0\tmotorcycle\nqguyMwcAj4M_0\tperson\nqhb1bts1fSM_0\tbear\nqheo-lRVpfk_4\tknife\nqheo-lRVpfk_0\tknife\nqheo-lRVpfk_1\tknife\nqheo-lRVpfk_2\tknife\nqheo-lRVpfk_3\tknife\nqhmscyJC8dM_0\telephant\nqh8xnvGfllE_1\tbird\nqh8xnvGfllE_2\tbird\nqipZi2kaQyA_3\tperson\nqi3hoxEao_g_1\tperson\nqi3hoxEao_g_2\tperson\nqptB3_MZagA_1\thorse\nqp5tJGAi9h0_0\tairplane\nqqL9gnwx87g_0\tcow\nqqL9gnwx87g_1\tcow\nqq4_m1S3AOI_0\tperson\nqt6FFVa8DGM_0\tperson\nquoX4193twY_0\tdog\nqvMRVm660LM_0\tperson\nqvZGFb3CbxA_0\tbird\nqvcNxorHqCc_0\tperson\nqx647iZCsoE_5\tumbrella\nqyQFBM_7mBw_0\tbird\nqywYqT8IzaQ_0\tskateboard\nqz4S2Tn1Jkk_0\tperson\nq2qEXqY43ws_0\tcow\nq2v3AmGBH-M_4\ttrain\nq2v3AmGBH-M_1\ttrain\nq2v3AmGBH-M_5\ttrain\nq2v3AmGBH-M_6\ttrain\nq3TB2Rnymkg_1\ttruck\nq3pYgC4-lrs_0\telephant\nq35X7FnaiGw_2\tbear\nq5BC4AVKV4c_0\tperson\nq6nXZqEmQGQ_0\tperson\nq9MXoyUF-BU_0\tperson\nq9d2hPrip6k_0\tdog\nq_dqx0-AtKk_0\tperson\nrA595TIyUgY_0\tbird\nrBko9NgVOX4_0\tperson\nrB2323YW1iA_0\tcow\nrDQ2hcIWoBY_1\ttrain\nrEXtAqxJj8c_0\tperson\nrGVf1BsLfng_0\tcow\nrHvp_Dghuho_0\tperson\nrH33U6qgd9M_1\tumbrella\nrIqhuv94Zuc_0\tperson\nrKN5E25jozk_1\tperson\nrKN5E25jozk_0\tperson\nrLbBCTSGdzc_0\tperson\nrOoxhMEKcgc_0\tbear\nrPEIT9eAAMY_2\tbicycle\nrPEIT9eAAMY_3\tbicycle\nrPUzTjaLdkk_0\tcat\nrPuPm0ctC3s_11\ttrain\nrQHtu5_Piv4_1\tcat\nrQKV6GBQuag_0\tairplane\nrRH0VLQDJZQ_0\tperson\nrSF1UQ01lZc_0\tperson\nrSSbdX8817Q_3\tdog\nrSu82skaMJQ_2\tskateboard\nrSu82skaMJQ_5\tskateboard\nrTIN784f0CM_0\ttrain\nrTIN784f0CM_1\ttrain\nrTIN784f0CM_3\ttrain\nrTV3ev-xyuk_0\ttrain\nrTYmEM2Lhew_0\tbus\nrT4P9ZJeBG8_0\ttrain\nrT4P9ZJeBG8_1\ttrain\nrT4crgFLycE_5\tbicycle\nrUJ7zeax1zY_0\tperson\nrV1Baq6-C6Q_0\telephant\nrWyf2iqpfng_0\thorse\nrXf2T3VO-kI_1\tcow\nrYkLuW5NLic_0\ttrain\nrZi9k9F8S1w_1\tperson\nrZi9k9F8S1w_0\tperson\nrbIYpEELMQc_3\thorse\nrbIYpEELMQc_2\thorse\nrbMVAO2mJiY_0\tperson\nrbn7_DeuItc_0\telephant\nrcF4-O7o_Qk_0\tperson\nrcF4-O7o_Qk_1\tperson\nrc96rbja6VI_5\tskateboard\nrc-e_NDrZDM_0\tperson\nrdBSfuG2KBA_2\tboat\nrdBSfuG2KBA_0\tboat\nrdQvGZDUDJA_1\tperson\nrdhiEKvYF0w_0\tcar\nrdnDsUHCZSY_1\tcat\nrePM3_x9tqw_7\tperson\nrePM3_x9tqw_4\tperson\nrePM3_x9tqw_5\tperson\nrfL51BZGldc_6\ttruck\nVCkpd_d1z4U_0\tairplane\nVE-3PfVw5-Y_1\tairplane\nVG2QbeXEwec_0\telephant\nVIQGgTWrg00_0\tperson\nVIr_rdbfvQQ_0\thorse\nVJVWk9wyMjI_0\tcow\nVJmgPBopcB4_0\thorse\nVJ0by87MRoI_4\tbicycle\nVJ0by87MRoI_7\tbicycle\nVLSeTnShp54_0\tmotorcycle\nVLSeTnShp54_1\tmotorcycle\nVLSol2tA9WY_0\telephant\nVLcSoFR7qBw_0\tcar\nVMDBBz7G-Pg_0\tmotorcycle\nVMmtrv5OtMQ_0\tboat\nVMxS4op_OBg_0\tperson\nVNCLtdahLmI_0\tbear\nVNCLtdahLmI_3\tbear\nVNHGw5Sj0Qc_0\tperson\nVN8_N7Ceofk_0\tcow\nVP0WD1miM00_0\thorse\nVP20LIiI9S4_3\thorse\nVP20LIiI9S4_7\thorse\nVP20LIiI9S4_1\thorse\nVP20LIiI9S4_2\thorse\nVP20LIiI9S4_5\thorse\nVQWxUc9QOjU_4\tbear\nVRtl4gAWELM_0\tskateboard\nVRt9s3OQPzo_0\tperson\nVSLdNogDia0_0\tbird\nVSrmwgo-veI_1\tboat\nVTqoizpYNeI_0\tcar\nVTqoizpYNeI_1\tcar\nVTqoizpYNeI_2\tcar\nVTqoizpYNeI_3\tcar\nVT11p8szxZY_0\tcow\nVUVAbtGJbuE_0\tperson\nVUh5jCDWj08_0\tcat\nVUl6vkX7PRU_0\tairplane\nVVn3XeSqijk_2\tmotorcycle\nVWTes_MfrOc_0\tknife\nVXNEqQb5C4Y_0\tmotorcycle\nVXT0TH9jfZo_0\telephant\nVXZscyYzxqw_1\tperson\nVYYS45KWEgo_1\tdog\nVYr49ml0uaE_0\tperson\nVZj4RHsnOW
U_0\tperson\nVZqdzb_qI2g_0\tperson\nVa81siK4zeI_0\tumbrella\nVdLqI43E7eY_0\tcow\nVd5pCJuOoDM_0\tcar\nVfBrelUfLFg_0\tcow\nVgpm6fwLIns_0\tmotorcycle\nVhc7DKkRHOo_0\tdog\nViQIgBdCkh8_0\tcar\nVlBlBgxUa-U_0\thorse\nVlq4fYmrr6g_0\tcar\nVmVN4E_qtfM_0\tperson\nVm9-f0pXycc_2\tbicycle\nVngapMBo560_0\tcow\nVou-Sfzlpu8_2\ttrain\nVqdeO4pa_rc_0\telephant\nVqj-Qv5bVyE_0\tperson\nVr1Wqz5_UA0_1\tcow\nVr1Wqz5_UA0_2\tcow\nVr1Wqz5_UA0_0\tcow\nVsAo8VBzDTM_0\tperson\nVsOw_U6hYRY_0\tmotorcycle\nVsOw_U6hYRY_1\tmotorcycle\nVsyd7-_CUA0_0\tperson\nVs2JphYinjk_0\tgiraffe\nVtdrYDJFw-Y_0\tperson\nVtkV11WZWEc_0\tcow\nVuDA6sPAa9U_0\tperson\nVuLf3ZTqniM_0\tdog\nVuW2wDK-uZI_0\tmotorcycle\nVv-z9_l8_ms_0\tbird\nVwdZHZPjlT0_0\tcat\nVwkf0U9PZvI_0\tairplane\nVwppYMiCI1g_0\tumbrella\nVwvER7iR2YI_0\tperson\nVxG5gvk1mfo_2\telephant\nVxH52JoUd0I_0\tperson\nVxyq13mC_uk_0\tperson\nVxyq13mC_uk_1\tperson\nVyf_VJEQ1jE_0\tairplane\nV0CjVa5_1P0_0\thorse\nV0sliERbCxI_0\tperson\nV0sliERbCxI_1\tmotorcycle\nV0w_hBBqe-g_0\tperson\nV1ufPW4ictQ_0\tskateboard\nV25H8smvzbM_0\tdog\nV56RVnEPG54_0\tmotorcycle\nV6rg5et7Q14_0\tcat\nV6rg5et7Q14_1\tcat\nV6_XA2w3sTs_0\tboat\nV7CVQjk9-Xc_0\tskateboard\nV8Pv-I4ovPs_0\tperson\nV9m1dMbXxug_0\ttruck\nV9qvycn1a3E_0\ttrain\nV-ZKLxW5cuM_5\thorse\nV-ZKLxW5cuM_2\thorse\nV-ZKLxW5cuM_4\thorse\nV-iFCgvAuCg_0\tperson\nWBcYTIQ65Ow_0\tperson\nWB6uQ708AxE_0\tbird\nWCNpGdfG8nk_0\tperson\nWCZ4ZQ5ohf4_0\tmotorcycle\nWGw94BtHxYE_0\tbird\nWGw94BtHxYE_1\tbird\nWG1DuTb70bQ_0\tcat\nWItuBm7azO0_0\tcat\nWKpjUNNgKG0_1\tperson\nWLZkZ-4Y9fY_0\tcow\nWN5u1Y1yGkA_0\tairplane\nWP5JXCVRe9g_0\tperson\nWP5JXCVRe9g_1\tperson\nWQ603pEp_1k_5\tairplane\nWTEO_Ywn9AI_0\tumbrella\nWTw46mBWjOw_1\tairplane\nWUvTKLEimNw_2\ttruck\nWWcVr4lbq3E_0\tperson\nWXETP4eMyD0_0\tcow\nWZWh1M3qGAc_0\ttruck\nWbXmf511q4E_0\thorse\nWb9i7jssQsY_0\tmotorcycle\nWcUFxXISmb0_1\tmotorcycle\nWcUFxXISmb0_2\tmotorcycle\nWcgQXl6I-Ks_0\tcar\nWc6RwJ_8yts_0\tperson\nWc_-Q9ba0zs_0\tairplane\nWdh2SMcRQ2M_0\thorse\nWdh2SMcRQ2M_1\thorse\nWfZR-VRmSB0_3\tboat\nWfl0LOShC_I_0\tbus\nWh9avYClECA_0\tperson\nWixZlWbnBdM_0\tperson\nWkvpcaxQTSg_0\tdog\nWlFD1z5akJc_0\tperson\nWlK6sU21od0_1\tdog\nWlP5_pcua1U_1\ttruck\nWl1vbjfAxeA_0\tdog\nWl1vbjfAxeA_1\tdog\nWmNKtcf5iLM_0\tperson\nWpxEmYBfqSU_0\telephant\nWqb84sv1P68_0\tcat\nWrClMyPxaDk_0\tperson\nWrSS3nc07hE_0\tcat\nWsFZj4Bgtwc_0\tbicycle\nWvGCvwHutAc_1\tairplane\nWvUiJ8ZRRfc_0\tbird\nWvUziN47FfY_4\thorse\nWwx2Vce-1oM_0\tcar\nWx0zNFqSUZo_0\thorse\nWx1qid26zsw_0\tdog\nWzCI6AqY7cg_0\tbus\nWzrI82-Ak4I_1\tmotorcycle\nW1juH0nZ8v0_0\tairplane\nW1yEDHYLG1Y_0\ttruck\nW14Nt0_EGQg_0\tperson\nW17CFtB5Oy4_0\ttruck\nW1-9iBLd1lg_0\tperson\nW23FACVBLgI_0\tperson\nW3Bv11o03TQ_0\tcat\nW4cKlmHvXZ4_0\tknife\nW4gR7_z77A0_0\tperson\nW4iSCn6ILJs_0\tmotorcycle\nW7xlWK7cuEI_1\tskateboard\nW8U3FkkaVbc_0\tperson\nW8d2hNOMHpQ_1\thorse\nW8yL4Qnuo4k_0\telephant\nW86rN6nrllQ_0\tperson\nW9lLrNUFQ9M_0\tperson\nW975mcNRX7c_0\tboat\nW-sCMBY47ck_0\thorse\nW_QxijO2VBw_0\tzebra\nrftE7M9tNqI_0\tperson\nrftE7M9tNqI_1\tperson\nrhWLgPl3lt8_0\tperson\nrhjcRHB4crY_1\tbicycle\nriNqBOlFCuw_3\tdog\nriVZCbT4LDE_2\tperson\nrih7ECmHfRs_1\tcat\nrkIzABhjHkA_0\tperson\nrk1ByqQSwtI_1\telephant\nrlWlgyP-3-s_1\tumbrella\nrlWlgyP-3-s_2\tumbrella\nrlWlgyP-3-s_4\tumbrella\nrlqtE0bF9nk_0\tbicycle\nrmVxFro55IQ_0\tskateboard\nrmxx9X1ytcA_0\tairplane\nrm4XeENehOU_0\tskateboard\nrn9-fIMYEkA_2\tmotorcycle\nrn9-fIMYEkA_0\tmotorcycle\nroUwF9YU21U_0\tperson\nrsne3z-CaDw_1\ttrain\nrtjlk_iOmdE_2\ttrain\nrtjlk_iOmdE_0\ttrain\nrt4Qm6HPVTY_1\tboat\nrvBm-SnbjVI_0\tcow\nrwQl_jKPcyM_0\tperson\nrww5DvtCsG4_0\thorse\nrwzjQ
STLmhk_0\tperson\nryUMZWWwJUk_0\tperson\nr0P-2rp1Hpk_1\tbus\nr0vIwhp5RLo_0\tknife\nr03Za0dP0d8_0\tperson\nr09YKBrwa8M_0\thorse\nr3PUq_cy6Mc_0\ttruck\nr3cOrAN6BI8_0\ttrain\nr3cOrAN6BI8_1\ttrain\nr7WW1Fl-s6s_5\tbus\nr7WW1Fl-s6s_4\tbus\nr7WW1Fl-s6s_6\tbus\nr7WW1Fl-s6s_7\tbus\nr7WW1Fl-s6s_1\tbus\nr7xw4qHLKIY_2\thorse\nr7xw4qHLKIY_1\thorse\nr7yOsosLuHI_0\tcow\nr8NwODfEuhI_0\tdog\nr8NwODfEuhI_2\tdog\nr9LAMeOEcsI_0\tperson\nr9jyOtbfWs8_0\tperson\nr9osF8drSbo_0\tperson\nr-Dva6GT-a0_1\tdog\nr-tFy30HVCw_0\tperson\nr-0UD9KQhvY_0\tcar\nr_sRdP_5WaM_0\tskateboard\nsByCUshWhWs_0\tdog\nsB613NHl89g_0\telephant\nsB8zpg-GrRo_0\tperson\nsD_9McrL3UQ_0\tskateboard\nsD_9McrL3UQ_1\tskateboard\nsEzZ3JnSzaM_0\tbird\nsFxTS449nUg_0\tperson\nsG0q9rphsoY_0\tcat\nsIIFHk89TT0_0\tperson\nsI17jkxX6tE_3\tskateboard\nsJyknuUaIOg_0\tskateboard\nsKCW1p03okE_0\tperson\nsKD6TBNqy6s_0\tperson\nsKD6TBNqy6s_1\tperson\nsKJ0JtWZeWw_1\tcow\nsKJ0JtWZeWw_3\tcow\nsLZh8XaxoYw_0\tperson\nsLfyo1VrX3g_3\tknife\nsLfyo1VrX3g_2\tknife\nsLnYAS4LAY8_1\tperson\nsLnYAS4LAY8_2\tperson\nsMVMaH9aWHw_0\thorse\nsNV29dtSqYs_1\tumbrella\nsOfNz788QiQ_2\thorse\nsP4jeoUjHZM_1\tmotorcycle\nsRb7OHsI6s4_0\tbird\nsV9L8gpGDmA_0\tmotorcycle\nsWbk2Sw9Rew_0\tperson\nsWfMpwviOCA_0\tcar\nsXSjs2EV61Y_2\tknife\nsXw73oA1Tq0_0\thorse\nsX5GCwZG8d8_1\tbus\nsbkHA-DWPSI_0\tperson\nscyRfbyCzJU_0\tcat\nsc15m4_lcvw_0\tperson\nsdAAObJErSA_0\tmotorcycle\nsezamC2zGqg_0\tbird\nsf76JIFYKB0_1\tcat\nsgHdQYSWPXg_0\tcar\nsgU4wTZ6k5s_1\tperson\nshXeONsfVmU_0\tperson\nshiIdcOonRs_0\tperson\nsiFucH6jjIs_0\tboat\nsiFucH6jjIs_1\tboat\nsj7NOYq8KBA_0\tperson\nskEWWsL6k9g_0\thorse\nskl1lsZUG4k_0\tperson\nsm346w9J4zA_0\tknife\nsnZjH03fjVk_1\tperson\nsoNDR07vxhQ_1\tperson\nsoNDR07vxhQ_0\tperson\nsofKbpbuX84_0\tperson\nsofKbpbuX84_1\tperson\nspVw0PNXErs_0\tdog\nsqLiQtbkEO4_0\tcow\nsqv-uPhtxwk_0\tairplane\nsq-wqsIw5hw_0\ttrain\nssspgc75B08_0\tgiraffe\nsteKGH-8MZw_0\thorse\nsteKGH-8MZw_2\thorse\nsts2vAv4BQo_0\tperson\nsuERIXWx_z0_1\tperson\nsvCBYM2zl80_0\thorse\nswuFjNkTmQY_0\tdog\nsyZTh043BkQ_0\thorse\ns0YqBVjRDyU_0\tperson\ns1Pd7evRn0U_2\tdog\ns2PyqAoOqrY_0\tcow\ns2x8llFphNY_0\telephant\ns3WiR_wFUBE_0\tcat\ns3ijyNmvxpE_0\tperson\ns4rr5OrSI4k_0\tskateboard\ns5I219neN7c_0\tperson\ns5jmkD6lkbU_0\tdog\ns5n7L55KpWE_1\tskateboard\ns7or9ZhEyXE_0\tperson\ns74eu-v6aqA_0\tperson\ns8W4NK7dWe0_0\tperson\ns83wzR7ySyM_0\tskateboard\ns9G4llLAJiU_0\tskateboard\ns9OmvmQH9hA_0\telephant\ns94ng_sG6Dg_0\tboat\ns-Jnbfjkmak_0\tskateboard\ns-Jnbfjkmak_1\tskateboard\ns-guJTrtfSU_0\tskateboard\ns-yjgHx_YWg_0\ttrain\ntAGvlfgdOsI_0\tskateboard\ntAGvlfgdOsI_2\tskateboard\ntBlPdyu-syw_0\tbird\ntBlPdyu-syw_2\tbird\ntBryhvKADFQ_0\tdog\ntGyP_SbWsVA_0\tperson\ntHA_VdGe90Y_0\tairplane\ntHA_VdGe90Y_1\tairplane\ntHcqw8Cejs8_0\tperson\ntHfOMcj62SY_0\tzebra\ntI2i9_rBdwo_1\tbird\ntI2i9_rBdwo_3\tbird\ntKpbcnqu6bY_0\tbird\ntK0pl2_wbWU_2\telephant\ntLJpuELQgxY_0\tperson\ntLa4F5ekKW0_0\tcat\ntLzUBeOwhyM_1\tbicycle\ntMojfxB-9zA_0\tperson\ntMp5Y1zucfI_1\ttrain\ntMp5Y1zucfI_0\ttrain\ntM3FYC5IVPo_0\tmotorcycle\ntNiu2o7-KPY_1\tcar\ntOK5TnF8eHQ_2\tbird\ntOL0kPV03Uw_0\ttrain\ntOlXErF8Z4o_0\thorse\ntPCRXfE_aGo_0\tbus\ntQj85vHtmeE_0\tbus\ntQnUccPTkck_1\ttruck\ntQ_Vy-9pvoQ_0\tskateboard\ntSlXTInFXss_0\tperson\ntTSVU8IU10c_0\tmotorcycle\ntUdWqmNDeY8_0\tperson\ntUm_oehvEpM_1\tperson\ntVOS6wht6oQ_1\thorse\ntV17SBx-oqE_0\tperson\ntXBDRj1c-Uc_0\tperson\ntXf9xVs5ZGk_0\ttrain\ntYKrjpIMYb0_1\tskateboard\ntYciFvRQuec_1\ttruck\ntYciFvRQuec_0\ttruck\ntY-4fAv_YRU_0\thorse\ntY-4fAv_YRU_1\thorse\nXA65Kh83GmE_0\tcow\nXA65Kh83GmE_1\tcow\nXBNPaOqVqds_0\tbird\nXBUvxtvK
WM0_0\tcat\nXByg_hQRQDM_2\tbird\nXDNVcbDkafM_2\tairplane\nXDNVcbDkafM_3\tairplane\nXDNVcbDkafM_4\tairplane\nXD0ydIAwgGM_0\tcow\nXD_iMe4m2vQ_1\tperson\nXGX6SRd3ZkE_0\tbird\nXHu9PxuBpXg_0\tairplane\nXIzQLXQTsRo_0\tcow\nXI3_0lXrnfY_0\tcow\nXJq9qp3jhq0_0\tmotorcycle\nXJq9qp3jhq0_2\tmotorcycle\nXJq9qp3jhq0_1\tmotorcycle\nXLgI0VgtzEw_0\tcow\nXL50qkg4qdA_2\telephant\nXL50qkg4qdA_0\telephant\nXMIsf8xuMh4_0\ttrain\nXPi83QmsR90_0\tcat\nXQliC40rP9M_0\tperson\nXRKZRwdqhNo_0\tbird\nXSMGAlakHWY_0\tperson\nXS5wfvz6XZI_0\tbird\nXTWeBFPqdh0_0\tperson\nXT0t6ims_FI_2\tskateboard\nXVabRVMuX4Q_0\tmotorcycle\nXVabRVMuX4Q_1\tmotorcycle\nXVabRVMuX4Q_2\tmotorcycle\nXVabRVMuX4Q_3\tmotorcycle\nXVabRVMuX4Q_4\tmotorcycle\nXYA6HKrVVQQ_0\tcow\nXZBFfRl6DkA_0\tperson\nXaVZr4HPh2M_0\tcat\nXalkAzccT5I_0\tperson\nXa6tjMVGH2I_0\tmotorcycle\nXa6tjMVGH2I_2\tmotorcycle\nXd9tLIFo_7E_0\tcow\nXeIssB-JkcU_1\tbicycle\nXeIssB-JkcU_2\tbicycle\nXevq2dskQWo_0\ttruck\nXfUIrHPVj-s_0\tcat\nXf09qM8SYBc_0\ttruck\nXgDJ16iRhxs_0\telephant\nXgDJ16iRhxs_1\telephant\nXgDJ16iRhxs_2\telephant\nXgFaXb7Vb58_0\telephant\nXgxYznR79R0_0\tdog\nXhOx4rgdI-8_0\tbird\nXhTWW9CwFzM_0\tmotorcycle\nXiSjHcHG5IU_1\tbird\nXjXFktrwSOk_0\tbear\nXkpxlUwx4oc_5\ttruck\nXkpxlUwx4oc_1\ttruck\nXkpxlUwx4oc_2\ttruck\nXkr3OHSz_CA_1\tperson\nXkr3OHSz_CA_0\tperson\nXlIxLJTiphI_1\tairplane\nXlSvIczm3JA_0\tperson\nXlcJsAWbsyA_0\tdog\nXmwv-NZZat8_0\tperson\nXm_CKSNQE3E_0\tbird\nXnfAvhHnH6M_0\ttrain\nXnfAvhHnH6M_1\ttrain\nXoWHAeOAXg0_0\tmotorcycle\nXoXMpm6Yxfs_0\tperson\nXoa_dCJDiTE_0\tmotorcycle\nXocaP_gyqJU_0\tperson\nXopbyM2SJbc_0\tbicycle\nXopbyM2SJbc_1\tbicycle\nXr_3UPISgT0_0\tskateboard\nXsK5KxttYBA_0\tperson\nXtTLGRBrm3I_0\tskateboard\nXtVTdegdzvI_0\tmotorcycle\nXu6xzBcJySk_0\tperson\nXu6xzBcJySk_1\tperson\nXvvA9Zc1TMA_0\tperson\nXvwOXlVdehA_1\tperson\nXwqm_wzZDQI_0\tcow\nXxkkXeLqqu8_2\tairplane\nXxkkXeLqqu8_0\tairplane\nXxmNQjB1D_Y_0\tcat\nXyldpxZmUN8_0\tdog\nX0CZDjRqcKg_0\thorse\nX02e7Fj9BLM_0\tumbrella\nX0-n3maCrZU_1\tdog\nX2uXOY9J_UU_0\tperson\nX3HCAEcRaW8_0\tbicycle\nX3qbUW_qT7k_2\tairplane\nX4SbOXRpo0A_1\tdog\nX7xm2nZL7jc_0\tbear\nX79vSvy6SOQ_0\tskateboard\nX9L-jwA6Ozg_1\ttrain\nX9L-jwA6Ozg_0\ttrain\nX9a5wEDFXc8_0\tboat\nX_TnIuY27eM_8\tbird\nYA4-rm-dcsw_0\tperson\nYA-N841dD-0_0\tperson\nYB1trUAUzhg_0\tperson\nYB2wzBLh7MU_0\tzebra\nYCU3daBCWsU_0\tumbrella\nYCXHNoYaQRc_3\tskateboard\nYCXHNoYaQRc_4\tskateboard\nYDd_skWNTMs_0\tskateboard\nYDyc1Yv9j_s_0\tperson\nYEPfw3k3vEw_0\tperson\nYEvBzZ5KBYY_1\thorse\nYEz7v7toUwM_0\ttruck\nYFQlAc3qTBQ_0\tmotorcycle\nYIHcQxH9e1o_0\ttrain\nYIzqB2G1UvY_0\tperson\nYI4lmC3imb4_0\thorse\nYJiqdRcs_gU_1\tperson\nYKlWROFtcxc_1\tskateboard\nYKlWROFtcxc_0\tskateboard\nYKoT-GgRSw0_0\telephant\nYKoT-GgRSw0_1\telephant\nYKrdwZe1vq8_0\tdog\nYL97h6yps6w_1\tknife\nYMbqULxZJpg_1\thorse\nYMbqULxZJpg_2\thorse\nYMkOJNatD88_0\tperson\nYNEDPsAWm5I_0\tperson\nYQXwRsP0zvE_1\tperson\nYQgUV8TrYcw_0\tperson\nYRWC7Tdc5oI_0\tperson\nYTD8j8z44qQ_0\tperson\nYTd8Rxtpt1E_0\ttrain\nYTd8Rxtpt1E_3\ttrain\nYTd8Rxtpt1E_4\ttrain\nYTd8Rxtpt1E_6\ttrain\nYTd8Rxtpt1E_7\ttrain\nYTd8Rxtpt1E_8\ttrain\nYTd8Rxtpt1E_9\ttrain\nYTzuVYGpDhA_0\tmotorcycle\nYUhgrCNuMGQ_3\tbear\nYVDCTyDcjjA_1\tcow\nYWRbi_v93Mo_0\tperson\nYWhwljQ3efA_3\ttrain\nYWhwljQ3efA_4\ttrain\nYXeaiwTZ3ZE_0\tcow\nYXz7CDJ11jY_0\tbird\nYYUo7EkkJeg_0\tbicycle\nYYUo7EkkJeg_1\tbicycle\nYZmhYkqgBi0_0\tskateboard\nYZmhYkqgBi0_1\tskateboard\nYZmhYkqgBi0_2\tskateboard\nYZ3kcrHk4N8_1\thorse\nYZ3kcrHk4N8_0\tbicycle\nYax1xdgRbt4_0\tperson\nYa2zfpe-_ro_0\tbus\nYcjMrWCSRSA_0\tperson\nYdooYDhKq00_0\tperson\nYeTYMiaLkWY_1\tcow\nYfvvO_T8j
8k_0\tskateboard\nYf9jBSXQTLo_0\tcar\nYf9jBSXQTLo_1\tcar\nYf9jBSXQTLo_2\tcar\nYf9jBSXQTLo_6\tcar\nYf-okdUBk9g_1\tbird\nYgM058nmMnQ_0\tperson\nYjZoPTjqDGw_0\tskateboard\nYj6XWsgomO0_0\tcat\nYluDona_474_2\tbus\nYmlQVVQx4SA_0\tperson\nYm3lE2u4vxE_3\tskateboard\nYm3lE2u4vxE_1\tskateboard\nYm37vW7b0U0_0\tcow\nYnZU-Qa6yeI_2\tbus\nYnyd8SBB5Wg_0\tknife\nYoFfsRgrNeY_0\tperson\nYof6XFKNuNY_2\thorse\nYorREGtes1I_0\tperson\nYo9XVrgl_GM_0\tcat\nYpDsXa1kNZU_0\ttruck\nYpb0U6Ga5pk_3\ttrain\nYpb0U6Ga5pk_1\ttrain\nYpb0U6Ga5pk_2\ttrain\nYp1kl6xU-Og_0\tperson\nYqvGb_tDI38_1\tbird\nYrhvCSxifRc_0\tcar\nYtrNZ4mlMw4_0\telephant\nYvAlZo3quqE_0\tperson\nYvwW9T4Qpek_0\tmotorcycle\nYv3YH0nImQI_3\ttruck\nYxRG0JQrpwI_0\tperson\nYxia21K4O6I_3\ttruck\nYy0lIDbLxQ8_0\telephant\nYy0lIDbLxQ8_3\telephant\nYy0lIDbLxQ8_1\telephant\nYy0lIDbLxQ8_2\telephant\nYzTl0Nf0Kpw_0\tcow\nYzT_UsE8Mhs_0\tairplane\nY0Hz5Hw1AiM_0\tperson\nY1lKSppJhdI_0\tcow\nY16c_yGYw1M_0\telephant\nY16c_yGYw1M_1\telephant\nY2jXJzRVhMI_0\tperson\nY2x6ow80IkQ_0\tperson\nY3TtBVfW6gs_0\tperson\nY3ZDfyDvFi4_0\telephant\nY3c_6Zv0dxg_1\tknife\nY3mx4jYyagQ_0\ttrain\nY5Atu2VWemQ_0\ttrain\nY5BEvakwvuM_0\tdog\nY64ky0LNHko_2\telephant\nY-YU80ccuXg_0\telephant\nZBJsNXYIQ4o_0\tperson\ntaPyucc_cOU_0\tperson\ntaPyucc_cOU_1\tperson\ntafdN9GXP0g_2\tskateboard\ntbLnjlX1xF8_2\tbird\ntbuu2U3o02Y_0\tperson\ntcOx8KjmHPo_0\tperson\ntc98WTYT-VI_0\telephant\ntdIWlg4_01E_1\tbird\ntgRYkhC-gJU_0\tperson\nthZqLw7IxVw_0\tknife\ntj2-fSeuMRI_0\tbird\ntmch--OGZhY_0\tgiraffe\ntmsInTqqzHI_0\tzebra\ntof4QiBHPQQ_0\tperson\ntowJyxwm3wE_0\tbird\nto8OyPMfkaI_0\tperson\ntpQv6Sn5z3o_0\tmotorcycle\ntpcuQY4eNaI_1\tbus\ntpeBIe69wr0_1\tbus\ntpeBIe69wr0_3\tbus\ntpwUnqxQYjo_0\ttrain\ntqy3XprB11s_1\thorse\ntqy3XprB11s_2\thorse\ntq9WP-2U1QM_0\tperson\ntsMTiOeM52E_0\tcat\ntsg-S4Hk2go_0\tperson\nttzJbLLAR34_0\tcat\ntvSJKUR21UM_0\ttrain\ntwewRZpG7Fs_0\tcow\ntwxvNeK9FZo_1\tbear\ntxDhTthoXSk_0\tmotorcycle\ntx0mtmimu0k_1\tperson\ntx2PSvwf7FU_1\tcow\ntyem40ZMKGE_0\tperson\ntygG1C5DURU_0\tperson\nty3iURJku9k_0\tperson\ntzH_tvBDeJA_0\tskateboard\ntzPForR9Ejs_1\ttrain\ntzvKjCoHBMI_0\tbird\nt0TW8zZxCWQ_0\tperson\nt1N1ijCr5NE_0\tbicycle\nt1N1ijCr5NE_1\tbicycle\nt4FZmjCINtw_0\tbus\nt4naVz1a0sg_0\ttrain\nt4zuUZQozs8_0\thorse\nt5B7vIbyRNQ_0\tperson\nt5kzdnId2sI_0\thorse\nt5s4Fs07WLM_0\tdog\nt50QLEhcZCE_0\tperson\nt6C6ukC_zEA_1\tbird\nt6C6ukC_zEA_2\tbird\nt6C6ukC_zEA_0\tbird\nt7YFOxuWxtg_0\tumbrella\nt7YFOxuWxtg_3\tumbrella\nt7s424DNznk_0\tcat\nt8MqK7LWqs8_0\tairplane\nt8mVwobdP40_0\tboat\nt_qvtoXbLRI_0\tperson\nuAWXGcWWgSU_0\tperson\nuAZF38u6SOo_0\tumbrella\nuAzws057QjE_0\tskateboard\nuA1sb8QyXuU_0\tskateboard\nuCZi19CC7rk_1\ttrain\nuCZi19CC7rk_2\ttrain\nuCZi19CC7rk_3\ttrain\nuE5rIJoAafE_0\tbird\nuE5rIJoAafE_1\tbird\nuH0jKXHq7Lw_0\thorse\nuH35b2DEXFw_1\tskateboard\nuH9vcwYxL2s_1\tperson\nuIu2jQswp94_0\tperson\nuJcu-YlAtbc_0\tbird\nuKJqU3gtIWM_0\tumbrella\nuLPuf056wH4_0\thorse\nuMAkaCYTDuc_0\ttruck\nuMYGWhLdrlc_0\tboat\nuMiNpG3NcEw_0\tperson\nuMpufBdwRn8_0\tgiraffe\nuNpHGE63PdQ_2\ttruck\nuNpHGE63PdQ_8\ttruck\nuOmCLzEMPGc_0\ttrain\nuRFXE4UfdTE_0\tcow\nuR8MqB3VgSI_0\ttruck\nuS1QmKXc0uY_0\tperson\nuTsfiR5FPdM_0\tperson\nuT9uk3mtt98_0\tbird\nuUU-VpxxSiM_0\tcow\nuVrW8Mm2xGY_0\tperson\nuWyTGtedEqU_1\tperson\nuWyTGtedEqU_0\tperson\nuarSTtaV_Ps_4\tboat\nua6Xyj9aWT4_0\tbear\nua6Xyj9aWT4_1\tbear\nua6Xyj9aWT4_2\tbear\nubHgpaAseuo_1\telephant\nubijaVodfKg_0\tperson\nubijaVodfKg_1\tperson\nubsr27_dQOk_0\telephant\nubsr27_dQOk_2\telephant\nubsr27_dQOk_3\telephant\nubsr27_dQOk_1\telephant\nucUearjcPHk_1\tairplane\nucfXE6fw3go_0\tcow\nudlyGS
CujUU_0\ttruck\nufB4EORClps_1\tknife\nufMXT_CmtK4_0\tairplane\nuhm0JnSA-kQ_0\tperson\nuiLBqX72k4k_7\tboat\nuiM-lDuYaeY_0\tperson\nujoJwRvjEdI_0\tperson\nujz4u55Tp1U_0\tcat\nul47aFS8dQE_1\tmotorcycle\nul47aFS8dQE_2\tmotorcycle\nul47aFS8dQE_3\tmotorcycle\numkNI2_0Lqc_0\tperson\numxZfostBlE_0\ttrain\num22CD4bkqo_0\tcow\nun6QDPagbfo_1\tcow\nun6QDPagbfo_0\tcow\nup6VT6l38-A_1\tskateboard\nuqn85v1WM7A_0\tmotorcycle\nurAYVS5Lz7k_0\tperson\nusAsP-m-qs4_0\tdog\nuuhWeHmlvt4_0\tperson\nuu3KluYuhc0_0\tperson\nuu3pH95cmtk_0\tperson\nuwXhzSsAIJw_0\tperson\nuw9TxuXeiP0_0\ttrain\nuxgUbys1eD8_1\tbus\nuzMFzDPfsws_0\tknife\nuzsdMqrgiL8_0\tperson\nu14Sp3wCQew_0\tcar\nu2BHvsjQGjw_0\tperson\nu25Jazd2yJM_0\tperson\nu4KPFsw5W5c_0\tmotorcycle\nu4oma0FVycA_8\tknife\nu69KRu61wXM_0\tperson\nu7xTeWelI-U_3\tknife\nu8mmwwrdNb0_4\tairplane\nu8mmwwrdNb0_5\tairplane\nu8mmwwrdNb0_9\tairplane\nu80Y4lA5xT0_0\tdog\nu85tUrDgmOQ_0\tbus\nu9HkSfjYpnA_0\tmotorcycle\nu9rfXD33UIM_0\tperson\nu9_P9HFh_NY_0\tdog\nu-_A36Ha04o_0\tcow\nu_D1eyd8AOM_0\tcar\nvAUSfFO5UI4_1\tdog\nvFMzMNDlnBs_0\tperson\nvGIYDcnNTvA_0\tknife\nvHQkxg7kPUk_0\tdog\nvH0ZiiuSQzU_2\tperson\nvH7sKynwjD4_0\tperson\nvJypzwSdyN4_0\ttrain\nvMt5AD41SKM_0\tperson\nvMt5AD41SKM_1\tperson\nvOY2IRNsjYg_1\tperson\nvOY2IRNsjYg_0\tperson\nvQ6eOB8rxUE_0\tperson\nvRjErSbQNNY_0\tperson\nvTa2zdbIyUw_0\tperson\nvT2JpCnT6rg_0\tboat\nvWqexY1OdWg_1\tskateboard\nvXbTARLug3M_0\tperson\nvYN_Gy6fUbI_0\tbus\nvYhPihwivZs_0\tperson\nvaaqJVWoSf0_0\tperson\nvadASNfLl9I_0\tdog\nvas3iNRcsK8_0\telephant\nvas3iNRcsK8_1\telephant\nvbLhfzHqEKc_2\thorse\nvbSnjtc3vIs_0\tcat\nvcALsxetYU4_0\tairplane\nvc-_aAQAXs0_0\tknife\nvdXD-HTzyFM_0\tcat\nvfeKOPKE6l8_0\tperson\nvf7NtV1T5Jc_0\ttrain\nvf7NtV1T5Jc_1\ttrain\nvjb_l1_hEXk_0\tperson\nvjojFy4rPeo_3\tcar\nvjojFy4rPeo_1\tcar\nvj_BAwFKqtQ_0\tumbrella\nvklwqjQis8Y_1\tcat\nvlPgSny76H8_0\tperson\nvlflI5iuszQ_0\tperson\nvnD3gELVAq8_0\tperson\nvnyBVn70QLY_0\tcat\nvnzsKpfAS_M_1\thorse\nvpBxBDjiJxw_1\tdog\nvvamB_-Z0so_0\thorse\nvv3gfxFz2zw_0\tperson\nvwe8ZaV-4z8_0\tbicycle\nvwtokH03eW0_0\tskateboard\nvwxzh1lJ7iw_5\tmotorcycle\nvxmdsyEpU6A_2\tbus\nvx0oKJcOQb0_0\ttrain\nvx0oKJcOQb0_3\ttrain\nvx0oKJcOQb0_4\ttrain\nvyLqolkoVIM_0\tperson\nvzBbUEwED60_0\tperson\nvzBbUEwED60_1\tperson\nvzU0GH4cZM4_0\tcow\nv0tUEeE4RGc_1\ttruck\nv0xTNbrYZY0_0\tgiraffe\nv01IvIxWXTo_0\tperson\nv1iIhTWRjg8_0\tboat\nv1-PGfS1YCY_0\tboat\nv3LIQHdveBA_0\tperson\nv4H5VwQyKEU_0\ttrain\nv4H5VwQyKEU_1\ttrain\nv4QYOX-FHhY_1\tmotorcycle\nv40pc8KBg0I_2\thorse\nv5YzVj25_hs_0\ttruck\nv5lUHsxx0mc_1\tskateboard\nv50Qa_KMCzQ_0\ttruck\nv51CdpETaug_0\tbird\nv6UDfM50GIM_1\ttruck\nv7XVyg16ens_0\tcat\nv8Kp0jhKsKk_0\tperson\nv8ceKkKdqrE_1\tknife\nv8hOOgLXRjg_0\tperson\nv8kyeMoFLqk_0\thorse\nv8rj3jIndSE_0\tdog\nv8tktR3aE38_0\tairplane\nv_yEG5_Qm8Y_0\tperson\nwCu6xsT18qo_0\tperson\nwDHRro9mXuM_0\thorse\nwDcnUJFHguE_0\thorse\nwE8LYkzcq0o_1\thorse\nwE8LYkzcq0o_0\thorse\nwGPW8I8nGmc_0\ttrain\nwGWIrs5ja0Y_0\tbicycle\nwGyJeWBe8VA_0\tumbrella\nwIapUcRvgTM_0\tbear\nwIapUcRvgTM_5\tbear\nwI0a0fzgy3w_0\thorse\nwJdfgWlSY5M_0\tperson\nwJdfgWlSY5M_1\tperson\nwK7yIg1qfZ4_0\tperson\nwLA244rmq6g_0\tcat\nwLHLSvMwmjM_0\tskateboard\nwL0z6-jkCcc_0\tdog\nwL0z6-jkCcc_3\tdog\nwL0z6-jkCcc_1\tdog\nwL9iOnWhckI_1\tskateboard\nwL9iOnWhckI_3\tskateboard\nwMShicf3N_E_0\tperson\nwMyAEfVE_u4_1\telephant\nwNKWZ43SioQ_0\tairplane\nwNKWZ43SioQ_2\tairplane\nwNWW59wDinQ_1\ttrain\nwNcjU9-ck10_0\tperson\nwODzPBxcT0A_0\tmotorcycle\nwODzPBxcT0A_2\tmotorcycle\nwOLrGAo0vFo_0\thorse\nwOSL7OPRBXM_1\tdog\nwPRCf3v0EfI_0\tmotorcycle\nwQtHgysmmFg_1\tboat\nwQvPlByUvB0_1\tknif
e\nwSSTL6uuM9Y_0\ttrain\nwSmVgAahSUw_0\tskateboard\nwSmVgAahSUw_1\tskateboard\nwSmVgAahSUw_2\tskateboard\nwTMj2Gp8wz4_1\tbird\nwTMj2Gp8wz4_0\tbird\nwTtXB0Z2eMk_0\tcar\nwV1VMLQfTYo_0\tskateboard\nwWpNKbsF6q8_0\tbear\nwa1KdARQXXg_0\ttruck\nwa3jVRzsWGo_2\ttruck\nwbmT4LB3lVQ_2\tknife\nwb9x3QDpcYA_0\tperson\nwb9x3QDpcYA_1\tperson\nwcOuc6Y3Gek_0\ttrain\nwcjnFIBHoc8_0\tbear\nwdb2-oX7HqU_0\tboat\nwdhqMpQcsjc_0\tdog\nwdhqMpQcsjc_2\tdog\nweH4PvRo2GU_1\tbear\nwgZbNzu2Mdw_0\tperson\nwguspvl5Ioo_0\tperson\nwg1ZFP15W8U_0\thorse\nwg6XS3q4Vg8_0\ttrain\nwifl75i2zGw_0\tperson\nwiiV9QdYsYM_3\tbus\nwjfHYr4lXU0_0\tcow\nwmfJAE6gu7w_0\tperson\nwmjfHsCs1CE_0\tperson\nwmn4YG9rirU_1\tbird\nwmn4YG9rirU_0\tbird\nwmx0UeWsPyU_0\tperson\nwoEUh2mzEkE_0\thorse\nwqD1WkfidVw_1\tbear\nwr5b8Op3LUM_2\tbear\nwuAwZ_wX7jk_0\tknife\nwuFVuJjgpLk_0\tairplane\nwvadJ-1Ls80_0\tperson\nwymDvXB08SM_0\tperson\nwzBmon2jJxI_2\tbird\nwzlA0qMLDV8_1\tcow\nwzlA0qMLDV8_2\tcow\nwzlA0qMLDV8_3\tcow\nwzuQhwWLllk_2\tbird\nw0JzCkELpj8_0\tcat\nw0bfVrI7CPQ_0\tbear\nw1j-YVcZpfc_0\tperson\nw2WW3bYmA7s_0\ttruck\nw247rqoLoGg_0\tbear\nw3F_8A8kY7o_3\telephant\nw3F_8A8kY7o_5\telephant\nw3F_8A8kY7o_6\telephant\nw3adXMIxupk_0\tcat\nw35-xR0Vn_0_0\tzebra\nw5Pb_ORVLKI_0\tairplane\nw6A2W9VQeZk_0\tcar\nw6JEUZI5Vh8_2\tskateboard\nw6JEUZI5Vh8_0\tskateboard\nw6JEUZI5Vh8_3\tskateboard\nw7IKxGLuaQA_0\thorse\nw7g5pDCGteg_0\tperson\nw8zrFmMpPmc_0\tmotorcycle\nw8-ovxjadNo_0\ttrain\nw93q7lv9In8_0\tperson\nw-eAEp0TUi0_0\thorse\nw-eAEp0TUi0_1\thorse\nw_euwPW5ukA_0\tbicycle\nxAUupk4sGI0_0\tperson\nxAedjC0r5KY_0\tperson\nxAfxJQL2_aY_0\tzebra\nxDgoaE-g50s_2\tbear\nxFnFWM8KXcE_0\tperson\nxFzsK94M68U_1\tperson\nxGbFeCuGypE_0\tperson\nxHOcerZTZxM_0\tperson\nxIUJ8zlr0TU_0\tbear\nxIizuktSVrM_0\ttruck\nxJ_xdRV9lzo_0\tcat\nxKd8dHsveKg_0\tperson\nxMiQuC8eKGU_0\tperson\nxMp4dCjzI08_0\tcat\nxMuQzm__4bo_1\tperson\nxMuQzm__4bo_0\tperson\nxNBT-PZEMH0_0\tbicycle\nxOLvPvBg-8U_1\thorse\nxOtxf0cmHyA_2\thorse\nxPDDIKF9T3A_0\tperson\nxRJNEyms-F8_0\ttrain\nxSIjCyHBypw_0\tumbrella\nxSIjCyHBypw_1\tumbrella\nxSL4NZUmhW4_0\tperson\nxUB3mR57tLE_0\tbicycle\nxUtGzUu5Ryc_0\tumbrella\nxU_2MZdWfxM_0\tcow\nxVuNCF2vbXs_0\tperson\nxWWnn5OWp4I_0\tairplane\nxYVriT4YV0M_0\tperson\nxZLHtt1yjYk_0\ttruck\nxZZ_W6fRi8E_0\tknife\nxbL4hiu8qh0_0\thorse\nxbQZucd8eu0_0\tbicycle\nxbQZucd8eu0_3\tbicycle\nxbQZucd8eu0_2\tbicycle\nxcY11ewiUMM_1\thorse\nxd_raY9PCHM_0\tbus\nxd_raY9PCHM_1\tbus\nxeAkz6Kg108_0\tbird\nxeBhbPbmS8w_0\tperson\nxfzxTuJ85A4_0\tairplane\nxfzxTuJ85A4_1\tairplane\nxitZyv8gMgQ_1\thorse\nxjdEiJ_z4T8_0\tmotorcycle\nxj3FKNXP-cw_0\tbird\nxkKoATbAX0w_0\tdog\nxkeTuOlBIMM_0\tcat\nxlT93OXr3uc_0\tperson\nxlT93OXr3uc_1\tperson\nxlfOatU3OyY_0\tboat\nxljqBqpwIHo_0\tperson\nxl110TqE0kQ_0\tcat\nxmWAmSXnWCY_0\tcar\nxo54E-kQcoA_1\tboat\nxpGDfRYqtSE_0\tcow\nxpcNJG8acpU_0\tdog\nxp_ShmZCoDw_2\tairplane\nxqNQIYHzAGk_0\tperson\nxrGm-1D2Zqk_1\ttrain\nxsrHSco3Zcs_0\tperson\nxsrNtKa0oZg_1\tperson\nxs1kBHxDpxU_0\ttrain\nxs1kBHxDpxU_1\ttrain\nxs1kBHxDpxU_2\ttrain\nxtHE1-GIP_w_0\tperson\nxtXt8Vm3Qps_2\tdog\nxuAm_BWnXRc_1\tmotorcycle\nxuAm_BWnXRc_0\tmotorcycle\nxucBFquWbi8_1\tbear\nxv4fy9zyuNE_0\tperson\nxv6NQvvvIhk_1\tbicycle\nxxEtEzi7YiY_0\tbus\nxxcJJA7hCQY_0\tperson\nxxdOVyEU-c4_0\tperson\nxyg1xFLohGI_0\tcow\nxyyz5QJ7wi8_0\tdog\nxzC5_r9raeY_0\tperson\nxzFcPnglQf4_0\tperson\nx0RxwpR4wIc_0\tbird\nx0RxwpR4wIc_1\tbird\nx0nlchdJVJw_0\tbear\nx0nlchdJVJw_1\tbear\nx0q0JMiiw1A_0\tcat\nx0xsHmQGaB8_0\tdog\nx1RBYEheBRQ_0\tperson\nx2MJ_zDJY3k_0\tperson\nx2Tfa1fMOyE_0\tperson\nx29EcPsdK1Q_0\tdog\nx29EcPsdK1Q_1\tdog\nx4h9pGwdSMU_0\thorse\nx4r2tx9_9wQ_1
\tperson\nx4r2tx9_9wQ_0\tperson\nx4uX_33GiJk_1\ttruck\nx48Ogx7C31g_0\tperson\nx4-I_EckNls_0\tbus\nx4-I_EckNls_1\tbus\nx4-I_EckNls_2\tbus\nx4-I_EckNls_3\tbus\nx5nImw1YH94_0\tperson\nx6sZc4EoI8o_0\tperson\nx6298plJ-7M_0\tcow\nx7jo9uCmWA0_0\tbear\nx8VC2CXIDBI_0\tperson\nx96LXIEQ3SM_1\tcow\nx96LXIEQ3SM_0\tcow\nx-2AUxPCkVM_0\tperson\nx-26Z1zy1-E_1\tperson\nx-26Z1zy1-E_2\tperson\nx-26Z1zy1-E_3\tperson\nx-26Z1zy1-E_0\tperson\nx_CImXdwsg4_0\ttruck\nx_XV2Y3pwDA_1\tbicycle\nx_XV2Y3pwDA_0\tbicycle\nyCYtcDx1zzE_0\tumbrella\nyCaJQKIGAjg_0\tmotorcycle\nyCz3VdCGZMA_0\tperson\nyDw-9GLrYj0_0\tperson\nyF0X9hui-Go_0\tperson\nyGD_BY9mQlM_0\tboat\nyIkwS9Vkq-k_0\telephant\nyJOGbyQ8qs8_0\tperson\nyJZU3h3_06M_1\tcat\nyLFd8GdaqBg_0\tperson\nyLL5Dv2F1rs_1\telephant\nyLL5Dv2F1rs_5\telephant\nyLL5Dv2F1rs_0\telephant\nyLNuhB7I5iI_1\tknife\nyLNuhB7I5iI_2\tknife\nyLkMk9nMaos_0\ttrain\nyLkMk9nMaos_7\ttrain\nyLkMk9nMaos_1\ttrain\nyLkMk9nMaos_2\ttrain\nyM9_GnJpXsM_0\tairplane\nyNnOUMUIIno_0\tbicycle\nyOrqtKYEfNs_0\ttrain\nyOrqtKYEfNs_1\ttrain\nyOrqtKYEfNs_2\ttrain\nyPscRV8ebRg_0\tperson\nyQLGypU_WiY_0\tknife\nyTZekxz2awI_4\tairplane\nyTZekxz2awI_1\tairplane\nyT-tBu_wqEo_0\tcat\nyVO-nlNYxrU_0\tperson\nyV1EsNcE3kY_0\tairplane\nyYIY-K1Hk-0_0\tcat\nyYUnGStTnHE_0\ttrain\nyYUnGStTnHE_1\ttrain\nyYr5tuCEb3w_0\tcat\nyY6S-xTKWGc_1\tperson\nyaNT5d8H3ho_0\tperson\nyahVo8Nqxks_0\tperson\nybCbkJl7tog_0\tperson\nybt9EtMfrdI_0\tperson\nydxMYuiOJAI_0\tperson\nygK39Pz1tKw_1\tmotorcycle\nyhp30idsPKU_0\tboat\nyiCMaealOnQ_0\tcow\nyiujj_fUOg8_0\tperson\nyjOTRS1-3Is_0\tcow\nyjUDTPRe-tg_1\tperson\nyjnR7dP-hxE_1\tbird\nykQnvD35jxs_0\tbus\nymoggco-rpw_1\telephant\nynHMWKjfsNk_0\tcar\nynYz6f5FCOk_0\tmotorcycle\nyoTs9WxR0mI_0\tperson\nyo3wwD8VMLA_0\tperson\nyo9gwC7gpEk_0\tboat\nypC9L5um-ic_0\tperson\nyp9kACFk9KU_0\tcar\nyqWKo_T-YsM_0\tperson\nysb6LLJ0t-c_0\tperson\nyssYMx-tQs4_0\thorse\nyu2v206waMs_0\tperson\nyvDdzmW5jGs_0\tcat\nyxURDHgvWrs_0\ttrain\nyxURDHgvWrs_7\ttrain\nyyMtxTJNnUM_0\tskateboard\nyzE2GgYffew_0\tperson\ny0HZlHGSvHk_0\thorse\ny0ptIotKNVU_1\thorse\ny0qGszhFtUc_0\tbird\ny2BOVk7bg7k_0\tcow\ny2BOVk7bg7k_1\tcow\ny2xzls--cC4_0\tperson\ny2_iaWWx-C0_1\tzebra\ny3VNGZBlDb0_0\tcat\ny3hSeUaVwAY_0\tbus\ny34cSfArQnM_0\tcat\ny6nBJ0OUtDs_0\tperson\ny6nBJ0OUtDs_2\tperson\ny67A9YHKh1U_0\tperson\ny8ib31rVZA0_0\tbicycle\ny8ib31rVZA0_1\tbicycle\ny8r2SJltJ1M_0\tdog\ny9hu6CyRi5s_0\tairplane\ny_O1AiuRLGA_0\tumbrella\ny_5uacneFuc_0\thorse\nzAvoyJ0_PSA_0\tcow\nzBtuA6r8o0M_0\tcat\nzCG95maa310_0\tperson\nzCnZg9VP1xw_0\ttruck\nzDs4lXFLJuM_1\thorse\nzD59UHvdpmY_0\tperson\nzESRFobSQMU_0\ttruck\nzESRFobSQMU_1\ttruck\nzHRsZ9HlcBk_0\tperson\nzIDehNZ1yiM_0\tperson\nzIvzY3cVVbM_0\tperson\nzI5cBWlyAMo_0\tdog\nzI5cBWlyAMo_1\tdog\nzJdOWFEL_CQ_0\tperson\nzLflV_7noSM_1\tairplane\nzMhr8GZ1QeY_1\tairplane\nzMjW-G29IRA_3\tbear\nzMjW-G29IRA_1\tbear\nzMjW-G29IRA_2\tbear\nzMjW-G29IRA_4\tbear\nzNFb--FJ2A4_0\tperson\nzNF5YxfaNTk_0\tcat\nzNfVxQPGrvM_1\telephant\nzN8rF-AchY0_1\tmotorcycle\nzN9Tz6jp7AY_0\tperson\nzOLTybhsJ5s_0\tcat\nzORNq_7nmVQ_1\tgiraffe\nzORNq_7nmVQ_0\tgiraffe\nzOoxYmqzDyc_1\tdog\nzPvrRc94j6s_0\tperson\nzP2DkEcgJFo_0\tperson\nzP8Recx-KgA_0\tboat\nzQbeiOf9ljM_0\tperson\nzU0g6JCyxAs_2\telephant\nzVVQ63dPpe4_2\tbicycle\nzWQQBElMPYI_0\tperson\nzX9OX5I2574_0\tperson\nzYvjN5ShZDI_0\tperson\nzYzASiLjHgY_0\tperson\nzZ8f7oFIg_c_0\tperson\nzbtsVe8RQqI_0\tperson\nzb8-yrB5SlI_1\tbird\nzcgArp_fmjc_5\tskateboard\nzcsREBhC1Rc_0\tdog\nzdWtCunlv1c_0\tcow\nzdqJTtHvwk4_0\tperson\nzd3rNWQ-OUQ_0\tperson\nzgJHKszSf2o_0\tperson\nzgJHKszSf2o_1\tperson\nzgRxry9FvEk_1\thorse\nzgSx8Y5FaPI_1\tk
nife\nzhDC_SqN7lQ_0\tbear\nzhNNahIXxC8_0\tbear\nzjQG5PadkFQ_0\tperson\nzj4cs0_VpTk_0\ttruck\nzkSIG3AE7tY_0\telephant\nzmDkkM7Buuo_0\tcow\nzmEU5n2Dy8Y_0\tdog\nzmdKmfMPuvA_0\tbird\nznTYxWfU2XM_0\ttruck\nzpEtPFxxD5M_0\thorse\nzqE3Jnn6_gw_0\tperson\nzqYLN7vCqcw_0\ttrain\nzqq508NRpOY_0\tperson\nztMFfJj7jb0_0\tknife\nzt3ojCKnIYM_0\tcat\nzwSnaqQ-5UU_0\tperson\nzxiZnbMo3io_0\tmotorcycle\nzxiZnbMo3io_1\tmotorcycle\nzxzApvuo8Lg_0\tperson\nzx0RzA6ts8U_0\tcow\nzyXxWBoTuww_0\tperson\nzyXxWBoTuww_1\tperson\nzyftQz018g0_0\tbus\nzy0lNSoVB0A_0\tcat\nzzRnX2EiOYU_0\tcat\nz0Tl2FDG69g_0\telephant\nz1kOi92oBDI_0\ttruck\nz1kOi92oBDI_1\ttruck\nz1qQ7Ma5C5U_1\ttruck\nz1qQ7Ma5C5U_0\ttruck\nz18s4h6yW2A_0\tbird\nz2M6XJGE1QM_0\tdog\nz2RqakqNnIM_1\tskateboard\nz29ijVd-dvc_0\tairplane\nz3rcLKwHCxM_1\ttruck\nz5-nsuFvaR8_0\tmotorcycle\nz7FTg1R3Hik_0\thorse\nz7mLqljZMP8_0\tperson\nz709zOu3tM8_0\tcar\nz9HO__A5ryw_0\tdog\nz9wpJN1R63w_0\tperson\nz-iM0zVi7a4_0\tbus\nz_CQX_gwU_o_0\tperson\nz_w1gsSfZhQ_0\tperson\n0AroA_SBRtQ_0\tperson\n0BUPQDR99KY_0\tbear\n0DDYOUzExSY_0\tperson\n0DGPzzGhUgI_0\tperson\n0DHLS1VDcnA_1\tbear\n0EeBXB53BQE_0\tairplane\n0EnI7ZqJvqI_1\tcar\n0EnI7ZqJvqI_2\tcar\n0GzrKbW6Reo_0\tperson\n0G0mSrzOZ2M_8\tbus\n0G0mSrzOZ2M_9\tbus\n0G0mSrzOZ2M_10\tbus\n0IHYTCKh8HM_0\tperson\n0KWfi9m1uZg_0\thorse\n0KWfi9m1uZg_2\thorse\n0KWfi9m1uZg_1\thorse\n0L0JFDbAEZg_0\tknife\n0Neg9vT08to_0\tcow\n0NtpuqPU3YI_0\tairplane\n0N7yCdf7DPs_0\ttruck\n0ORpOxJZo-Y_1\tbear\n0OqnKMwSULM_0\tskateboard\n0OqnKMwSULM_1\tskateboard\n0Pk8OLmmqrM_0\tmotorcycle\n0Pu-_5lNYZM_0\tbird\n0QKe3M6GiT4_0\tperson\n0Tu3KWEm4SE_0\tcow\n0Tu3KWEm4SE_1\tcow\n0TwpPpqiVQ8_0\tcow\n0U6SmZC1j40_0\tperson\n0VKozmEWjZ4_0\tperson\n0VaX_g70BaY_0\tmotorcycle\n0ZGdpgF-bGI_0\tbus\n0ZQ_-4ia7z0_0\tperson\n0c-Cwr5rI_A_0\telephant\n0c-Cwr5rI_A_1\telephant\n0fyRjxenSfY_0\tbear\n0fyRjxenSfY_1\tbear\n0f4alYlvEQw_0\tperson\n0gelRcDsNio_0\tairplane\n0ghRNQFgHow_0\tbicycle\n0gl1mPRzCqo_0\tperson\n0h9x35zsnyo_0\tbird\n0iLR3BtDujk_0\ttrain\n0iYm4g4D2wY_0\tperson\n0iv0Xw_u-sc_0\tbicycle\n0i-Nv28lRT0_0\tbicycle\n0kZSWqFOr0c_0\tperson\n0kidYsWSVvc_0\tperson\n0mbZJnNhckg_0\tperson\n0omh-B4giqI_0\tumbrella\n0owf_YERias_0\tskateboard\n0pAMIiK_RDo_0\tperson\n0pm7YRiUKTc_0\thorse\n0qVc1Whb3GA_0\tperson\n0qwRoiWnwmQ_0\tperson\n0rQzfr4WVKc_0\tcat\n0sA23Q_HQr8_2\tzebra\n0sA23Q_HQr8_1\tgiraffe\n0sA23Q_HQr8_0\tgiraffe\n0sfu67JuBFg_0\tperson\n0ss0_Sgy72g_1\tskateboard\n0tNuUAe5sNE_1\tperson\n0tNuUAe5sNE_0\tperson\n0txAuEdZYTI_0\tmotorcycle\n0uJKDzuaiys_0\ttrain\n0urYbdFc55k_0\ttrain\n0utGbb5enqA_2\tdog\n0utGbb5enqA_1\tdog\n0vQFT9tfq40_0\tperson\n0viKlMZRKdk_0\tperson\n0v7GMl2k-Sk_3\ttrain\n0yCCEL3tl24_0\telephant\n0zmzEkQWyps_0\tboat\n0zraBBQY8ew_0\tumbrella\n0zyhohOeIM4_0\ttrain\n00xcm8_ZTBc_0\tperson\n01CYScp2Yc0_1\thorse\n01mkUffAvo8_0\tperson\n02zor_ScZfo_1\tperson\n02zor_ScZfo_0\tperson\n03p9Ao9JvpY_0\ttrain\n03p9Ao9JvpY_2\ttrain\n03u5BWTYiRg_0\ttrain\n04Sh9tJvOAc_0\tairplane\n04UO1jSx2p4_0\tperson\n04gNIg-kFI8_0\tperson\n057f0LfDVoA_1\ttrain\n08Nunz5Qngc_0\tbus\n09jyC-o18uU_3\telephant\n09kq3b7cMwc_0\tcat\n1AcsNm2kiok_0\thorse\n1BfbSv9ZCu4_0\tknife\n1BfbSv9ZCu4_3\tknife\n1BiqFD2BD7Y_0\thorse\n1C3_qaiKlwo_0\ttruck\n1DHXDdSkk0s_0\tbicycle\n1DeIbpIRrAc_0\tknife\n1Dfkbv8bi9k_0\tperson\n1Dz4x50F-RQ_0\tdog\n1EYL4Mm3dfA_0\tbear\n1EiH3PTqhLE_0\tperson\n1ExRnJBXYP4_0\tknife\n1FVN3QOPlR0_0\tperson\n1FVN3QOPlR0_1\tperson\n1GJ0iwyNHIc_0\tairplane\n1JWHb6FAbmI_0\tperson\n1Knz9s55vjc_0\tcar\n1Knz9s55vjc_1\tcar\n1Knz9s55vjc_2\tcar\n1Knz9s55vjc_3\tcar\n1LmCkh8Dd-o_0\tdog\n1MmlnQKtd6g_0\tumbrella\n1M6GhIT94zE_0\
tcow\n1M6GhIT94zE_2\tcow\n1NThnoBEkmc_0\tperson\n1ONRbj8GKJ4_1\tbear\n1ONRbj8GKJ4_2\tbear\n1ONRbj8GKJ4_8\tbear\n1ONRbj8GKJ4_10\tbear\n1ONptqLyHxQ_0\tdog\n1OSa1ptYmzE_0\ttrain\n1OSa1ptYmzE_1\ttrain\n1Ob23hwFaDg_0\tmotorcycle\n1PSIOY62FBg_1\tbear\n1Pe9JpKgjGY_0\tcar\n1P8yUGru9R4_0\tknife\n1RCZCLIZzc4_0\tboat\n1RGxleB_Ezk_0\tperson\n1RKOWfpa5Dc_0\tknife\n1RuPxpqNjBI_0\thorse\n1Tpmsev8onw_0\tcat\n1TsLUvJiluI_1\tperson\n1TsLUvJiluI_0\tperson\n1UhZKsDTuQs_2\tboat\n1V-7ErZ83ZY_0\tbus\n1ZN9xVmQojU_0\tumbrella\n1ZbSl9tPtbA_0\tbird\n1Z7CVnRjVT0_0\tperson\n1as5iG4PPas_0\tbus\n1bFvYEA0U3U_1\telephant\n1bveGPhOKuU_0\tcow\n1cKjzUG0YCQ_0\tbicycle\n1ceprZO-VEU_2\ttrain\n1ecpkwMLabI_0\tperson\n1fOM-kkuRsw_0\tcar\n1ggOn5NDRco_0\tcat\n1hUe5E9cjiU_0\tmotorcycle\n1iQKKup2m3I_0\ttruck\n1iQKKup2m3I_1\ttruck\n1iSjb4IlqfU_0\tperson\n1i7lugA55RU_0\tbicycle\n1i7lugA55RU_1\tbicycle\n1kZMlCvKoe8_0\tskateboard\n1kZMlCvKoe8_1\tskateboard\n1kZMlCvKoe8_2\tskateboard\n1ksBabVqkMY_0\tcar\n1ltK_3kkqfg_4\telephant\n1l7LOpfDmXY_0\tperson\n1ohoCoKJLDU_0\tmotorcycle\n1oyjAtaWDZA_0\ttruck\n1sQ3EL13Vqo_0\tperson\n1tK31PAVNJM_5\telephant\n1tK31PAVNJM_0\telephant\n1tK31PAVNJM_2\telephant\n1tK31PAVNJM_3\telephant\n1v2enBiUcqA_0\tbus\n1wIGd0H1CUo_0\tperson\n1xSI36nguW0_0\tbear\n1xs-ibIaMMU_0\tperson\n1xyKgJUu0lM_0\tskateboard\n1zVWBQWZxV0_0\tperson\n1zVWBQWZxV0_1\tperson\n1zqpqKWhr1Y_0\tperson\n10la9pvd-pk_0\tknife\n11kfBYxzlFA_0\tperson\n12f1R5wMVPs_0\tperson\n12_S_8HkAvA_0\tperson\n1462k8mwVB0_0\telephant\n15Lx-nGngUo_0\tskateboard\n18WxVaz5Ue4_1\tskateboard\n19A2XM5NIWs_0\tperson\n19UmUpkjRbs_0\tperson\n19oZ30mOTkU_0\tboat\n1-p8vd0PFQ4_0\tdog\n1_6ymF7z_iM_0\ttruck\n2ASHEEgYHcU_0\tcat\n2CF0oQ38cBQ_0\tmotorcycle\n2DM1oM4HFjI_0\tmotorcycle\n2FXE_xO8Mb4_0\tbus\n2FvnQne8he8_0\ttrain\n2GTexq12sBY_0\tperson\n2GTtMvLQqio_4\ttruck\n2GZphW1DkS4_0\tperson\n2HvVFwq85n0_0\tperson\n2Hwu-YpHKw0_0\telephant\n2H8AZ00ONQE_0\telephant\n2IJ4H46ZxEE_0\tperson\n2INYBScuPM8_0\tcar\n2IqEaQ0oyQg_0\tairplane\n2JN_uMTDa9I_0\tskateboard\n2KWlj_ZAw94_0\thorse\n2KWlj_ZAw94_1\thorse\n2KWlj_ZAw94_2\thorse\n2K2gLrhP9AU_1\tairplane\n2K2gLrhP9AU_2\tairplane\n2K6iDBPdcHk_0\tmotorcycle\n2LBHZoJ5skk_0\tperson\n2L3uwdhZtV0_0\tcar\n2MJHsLxKUBg_0\tperson\n2MiqTBWBlEc_0\tumbrella\n2NjC1r6v4IQ_0\tperson\n2O-2zfQxbnA_0\tperson\n2PaTs4s2Ybw_1\tbear\n2PaTs4s2Ybw_7\tbear\n2PaTs4s2Ybw_4\tbear\n2Pa1anwpeKE_0\tperson\n2Q3_TaV8vcg_0\tdog\n2Rc-oAwMJBs_0\thorse\n2Tp0YJi7JwQ_0\tgiraffe\n2UpHhiQWzD4_0\ttruck\n2VZlkg5HjME_0\tcow\n2WTwzNufol8_0\tdog\n2WTwzNufol8_1\tdog\n2WtNxQ0RBfc_0\tperson\n2ZXlS-GRWAw_0\tknife\n2Z6wSOr0jLI_1\tperson\n2a5TUccpQ08_0\tdog\n2a_-AyOXTXg_0\tskateboard\n2cFRz-musVA_0\tairplane\n2cFRz-musVA_1\tairplane\n2cFRz-musVA_2\tairplane\n2cFRz-musVA_3\tairplane\n2dZFWL9XGmw_0\tcow\n2fCH7TpvtlM_0\ttrain\n2fCH7TpvtlM_1\ttrain\n2fJ1hPXpiQc_3\tknife\n2fJ1hPXpiQc_0\tknife\n2gGuKs-4t94_0\tboat\n2i45n6p8AT8_0\tperson\n2i_wjgk6DiA_0\thorse\n2lK0mmHTvB8_3\ttrain\n2lK0mmHTvB8_1\ttrain\n2lqlNq6aII0_0\tskateboard\n2lxPwFW5YQo_0\tumbrella\n2l2gnrYWuWQ_0\ttruck\n2l7MPXzF64M_0\tcat\n2l7TuAfDgO8_0\ttruck\n2mO7-ybapaQ_1\tumbrella\n2nqGkC9ebf8_0\tboat\n2oA7J6HSmt8_6\tbicycle\n2oA7J6HSmt8_9\tbicycle\n2tSpb14o7SA_0\tperson\n2vF8Va9DGSM_5\tbicycle\n2vF8Va9DGSM_4\tbicycle\n2vF8Va9DGSM_14\tbicycle\n2vF8Va9DGSM_15\tbicycle\n2vF8Va9DGSM_2\tbicycle\n2vrbssf2sDM_0\ttruck\n2v808Hn8_do_0\tperson\n2v808Hn8_do_1\tperson\n2yEUVUqYMPc_0\tgiraffe\n2ya3SN5pLyU_0\tcar\n2065vf90oIM_0\tperson\n2065vf90oIM_1\tperson\n21GQbN_4k9M_0\tcow\n21Hp5g5RrOc_1\tperson\n21Hp5g5RrOc_0\tperson\n22iFltXYCcQ_0\tcow\n22ztSt
Wwd8g_0\ttrain\n22ztStWwd8g_2\ttrain\n22ztStWwd8g_3\ttrain\n23qU2q5u0OE_6\tbird\n24Zxq5TuxzI_0\tcow\n26kWe8Ikgxk_0\tbird\n28AecePdVok_0\ttruck\n281z-ZLrI3g_7\tbicycle\n281z-ZLrI3g_4\tbicycle\n29bWSLuiEl0_1\tperson\n2_R2wz82ugQ_0\tumbrella\n3A4oCDgMkHw_0\tcow\n3A-dEIjnmyE_1\tskateboard\n3Bag9o-z-Ks_4\tbear\n3DN2iQJzM-k_0\ttrain\n3DaASBRARLQ_0\tcow\n3D8wwibqkYo_0\tcow\n3EtIKWgGaKY_0\tperson\n3FJ4ZWRq_S0_0\tperson\n3GLXlSuXWcs_1\tcow\n3GQxmRKhMMY_1\tairplane\n3GQxmRKhMMY_2\tairplane\n3GQxmRKhMMY_3\tairplane\n3GQxmRKhMMY_4\tairplane\n3GULyU-IOhA_0\tperson\n3HFqP9a97kA_0\tbird\n3IgOwKkKALw_0\tcat\n3LruhG4SULI_1\ttruck\n3LruhG4SULI_2\ttruck\n3LruhG4SULI_7\ttruck\n3LxUuC1C4y8_0\tbird\n3L7LWpMShiw_0\tskateboard\n3L759GhRx6M_0\tperson\n3MiM8HSul5A_0\tcow\n3MiM8HSul5A_2\tcow\n3MiM8HSul5A_4\tcow\n3M9T5RFr_9s_0\tperson\n3OmdALGspY8_0\tperson\n3O4ynxtRIDk_5\ttrain\n3O4ynxtRIDk_2\ttrain\n3RLrjX-XB98_0\tperson\n3RhgYReCxjo_0\tbus\n3S-lQgiUWVU_1\thorse\n3S-lQgiUWVU_0\thorse\n3UDEQElT2yQ_0\ttrain\n3WhmVhG1ZwU_0\tboat\n3WrB7zPpcHU_0\tcow\n3XDvXaNmGpM_0\tdog\n3XDvXaNmGpM_1\tdog\n3X29L9uQCqc_0\ttrain\n3X29L9uQCqc_1\ttrain\n3Y7-acGE4Wc_0\tperson\n3ZBYYBUfT6E_0\ttrain\n3Zwa4XoeZcA_0\tperson\n3bSWlbx1o3I_2\tbear\n3cOMDXFxcOQ_0\tcat\n3dvUlr2yxz4_0\ttrain\n3g4c88ocJ38_0\tskateboard\n3hMszgfh_qA_0\tbicycle\n3hR78-EVNEE_0\ttruck\n3jdK8UPhpO8_1\tskateboard\n3jdK8UPhpO8_0\tskateboard\n3kdpeeQ1Jnc_0\tcar\n3kd_QEZRUWc_1\ttruck\n3kd_QEZRUWc_5\ttruck\n3lHqsoi5cgo_0\tperson\n3liK-2EflUk_0\tcar\n3mIRDwcY1Lg_1\tperson\n3m5eMVv4z6w_1\tbear\n3nD6nhJtxIU_1\tskateboard\n3nbim5nlANI_1\thorse\n3q6LFZBelUs_0\tperson\n3rSUjqH5Wlw_0\ttruck\n3sEpU7UoQP8_0\tperson\n3sg9txiHCp0_0\tbear\n3szPqA1S6P0_0\tperson\n3tv_dUR84cE_1\tairplane\n3tv_dUR84cE_0\tairplane\n3uG4S1gvMxs_0\tbird\n3uVS_DAYfvY_3\tcar\n3vuykX663QA_0\tperson\n3wI_ureHDBY_0\ttrain\n3xLvnY9w5y0_0\tperson\n3xy8Fz8Nsgk_0\tbear\n3zV0wmpiS78_0\tperson\n3zccg30U6vs_0\tperson\n30AwDyYIr7o_0\tskateboard\n325FEWXtOYw_0\tperson\n3293hM-lzx8_0\tperson\n32_1y90B5eQ_0\tperson\n34L4iiCFTXM_0\tairplane\n34Pma_R21A8_2\tperson\n34jFMRay1zg_0\tperson\n35-MplWeZYQ_0\tmotorcycle\n36zopo-HS48_0\tperson\n38fx_nvlYDE_0\ttruck\n39yxd86tGLU_1\tboat\n3-ugxoEDuFY_0\tperson\n3_DeqcBRuwE_1\telephant\n3_DeqcBRuwE_3\telephant\n3_w3NNPGotM_0\tperson\n4ARhlapmEmI_0\tdog\n4Ac5edN3qIA_0\telephant\n4Ac5edN3qIA_1\telephant\n4BItGVIP3_w_0\tcow\n4BItGVIP3_w_1\tcow\n4BO3P7E3NDE_0\ttruck\n4BO3P7E3NDE_1\ttruck\n4BO3P7E3NDE_2\ttruck\n4Bw4gKDBQCM_1\tdog\n4C8rmAORSg8_0\tperson\n4Dcg1W7RRmQ_1\ttrain\n4ENxW7OPynQ_1\tcar\n4ExA1FWRfMM_0\tdog\n4FVfzA07rVs_0\tperson\n4FVfzA07rVs_1\tperson\n4GgzQqhrTmA_0\ttrain\n4GrMZIyjUdo_0\tperson\n4IUjw1DfTd4_0\tcow\n4ItJTYAUV3Q_0\tcat\n4IxmhmTsSRM_0\tperson\n4I72WJJrc1o_0\tperson\n4I72WJJrc1o_1\tperson\n4KFEzxXCjmw_0\tcar\n4KYtNfb0-64_0\tperson\n4KqP6ylUZpI_0\tumbrella\n4LHOLAPnjV8_0\tboat\n4LXlXP1epJE_0\tperson\n4MFPOb36tfo_2\tbear\n4MFPOb36tfo_1\tbear\n4MZrjdSF01s_1\tboat\n4Me3lyNuZ7k_0\tperson\n4M9sKAzevzo_0\ttrain\n4NI5ycFo2TA_0\tairplane\n4NI5ycFo2TA_1\tairplane\n4NKnUR1OMGo_0\thorse\n4NKnUR1OMGo_1\thorse\n4Ng6OxFQ9RY_3\tbear\n4Nx45ho9gSg_0\tperson\n4PNJ3ZV4f8E_0\tairplane\n4PNJ3ZV4f8E_1\tairplane\n4PNvdZPZIdM_0\ttrain\n4PhakAK74GE_1\tmotorcycle\n4PxLGSy75rk_2\tknife\n4QOhfEMrhzU_0\tairplane\n4Q0M6mWNDiU_0\thorse\n4RhaYtFsnGY_0\tperson\n4SrP2aSHoRk_0\tperson\n4TyWpb19rk4_0\tumbrella\n4U9sm_eqKTM_1\tcar\n4U9sm_eqKTM_2\tcar\n4Xd_k2REw4I_3\tbear\n4YRd-9lHLko_0\ttruck\n4ZIgGDQB_R0_0\tairplane\n4ZYWcd-Fdzg_0\tperson\n4Zxsg6aJ9tA_0\tperson\n4aOWHpM7rOM_0\tskateboard\n4avaoLry8L0_2\tskateboa
rd\n4bHGieqZfUk_1\tknife\n4duFrAfYG8k_0\tperson\n4d6P5umc9j0_0\tbird\n4fIznTWAFRw_0\thorse\n4fIznTWAFRw_1\thorse\n4fIznTWAFRw_2\thorse\n4f_X4WbQu4M_0\telephant\n4hCLCX2lLGk_0\tperson\n4iBMfS5mIt8_0\tbird\n4ibKNzoA1tQ_0\ttruck\n4igLFns238c_0\tmotorcycle\n4kGNxHIXcUA_0\tperson\n4kLhVZ9UGDE_0\tskateboard\n4lC7BU1eHxc_0\tbus\n4l683stlRno_0\tknife\n4mv1Nx0j3k4_0\tperson\n4nz8CN4XlBE_0\tdog\n4oWXZIsPnEg_4\telephant\n4ofuHARhFlQ_0\tperson\n4pYH5Cm7Vkg_1\tboat\n4p3JGxvfiNE_4\tbicycle\n4p3JGxvfiNE_8\tbicycle\n4p3JGxvfiNE_10\tbicycle\n4qBYTh0AcfM_0\ttrain\n4qIx-9Qs3Zs_0\tairplane\n4qIx-9Qs3Zs_2\tairplane\n4qRkIra0ARM_0\tperson\n4rhkfDV0QC8_1\ttruck\n4ry_MJjFDUA_0\tcat\n4skAfQd8nX8_0\tperson\n4t79zNxVi0Y_0\telephant\n4t79zNxVi0Y_1\telephant\n4uFHcf-qpkU_0\thorse\n4uwly-P5oxg_0\tperson\n4uwly-P5oxg_1\tperson\n4u7pm-h8fiE_0\tperson\n4wox28JkSKY_1\tperson\n4w3ykGq-Q_E_0\tbicycle\n4w3ykGq-Q_E_2\tbicycle\n4w5q5RdJ5g4_0\thorse\n4w5q5RdJ5g4_2\thorse\n4w5q5RdJ5g4_4\thorse\n4x80RbpjCPM_0\tbear\n4x80RbpjCPM_4\tbear\n4yFIyyevEVY_1\tairplane\n4ycylGSteiU_0\ttruck\n4yjvwunpMKI_0\tcar\n4yjvwunpMKI_1\tcar\n4yjvwunpMKI_2\tcar\n4yw2hFyx47Q_0\tperson\n4y3qJAq5ap0_0\tcar\n40QgDL4dxrc_0\tairplane\n40deMboVqPI_1\tbird\n44FNsfkuWOI_0\telephant\n44hlNbUHL2c_0\tperson\n44672wUoOwM_0\tperson\n46NXMVbpzZw_1\tboat\n468w3XkLHwc_1\tboat\n47Nn3ywWOlU_1\tperson\n47cBD-Sq9mw_1\tperson\n48ujtCaCdX0_0\tperson\n49CwzbRIUpI_1\tbird\n49a6EgDu-ZU_0\ttruck\n4-GpBan9Z8s_0\thorse\n4_A8f6NAa3w_0\tperson\n5BHekdOG9JA_0\telephant\n5Bw22C4nsb4_0\ttrain\n5CPZUe4hn0M_0\tairplane\n5DS23LkFit8_0\tcow\n5DVU9wTDzN8_0\tskateboard\n5DjSsYt5N4Q_0\tskateboard\n5FAbvaslTQE_0\tmotorcycle\n5FXOzzaKrcw_0\tairplane\n5Fro7Bo628Y_0\tboat\n5FxLl3jd7I0_0\tskateboard\n5F5fgLUXow8_3\tcar\n5F5fgLUXow8_7\tcar\n5F5fgLUXow8_8\tcar\n5F5fgLUXow8_0\tcar\n5F5fgLUXow8_1\tcar\n5F5fgLUXow8_2\tcar\n5F5fgLUXow8_4\tcar\n5GMISyAZA9o_0\thorse\n5GpziDmwRTc_0\tcow\n5JPqrGj3CgM_0\tgiraffe\n5Ko6ZHOz4IY_0\tperson\n5Lbguv7FGLM_1\tbird\n5M7Wx_HJ_XQ_0\tperson\n5Nz4g-YykuI_0\tperson\n5O41yfenxMM_1\tcow\n5PeDI6XI7is_3\thorse\n5Qd986abGHo_0\tperson\n5Tza7UHp3xE_0\ttrain\n5WTw98UVUCo_1\thorse\n5WpjuP9uJrI_2\tbird\n5W8Hg8uhxgQ_0\tcar\n5W8Hg8uhxgQ_1\tcar\n5XEAIdyb_ng_0\tperson\n5XcopMzRch4_0\tskateboard\n5YbA5Uw-5xQ_0\tperson\n5YbA5Uw-5xQ_1\tperson\n5bIO0Gl25u0_1\tboat\n5bIO0Gl25u0_0\tboat\n5dGbxAkTDPM_1\tcow\n5dRnssv_jug_0\tcow\n5eRQh3Rv1Lk_0\thorse\n5eak0nLYZC0_0\tairplane\n5enKNMe1Dpg_0\tperson\n5eq6WBGMyME_0\tgiraffe\n5eum6r7kxbw_1\tgiraffe\n5eum6r7kxbw_4\tgiraffe\n5e84K5OEIj4_0\tperson\n5fXoyIBk_gI_0\tperson\n5gNgZQ0nDW8_4\tknife\n5gNgZQ0nDW8_5\tknife\n5gNhZJMFmis_0\tbear\n5gNhZJMFmis_1\tbear\n5gbLo2hItTs_0\tperson\n5geZjQ9qAJU_0\tmotorcycle\n5iDhgUX1kdc_0\tperson\n5iwoWJK4GGo_0\tcar\n5ll8fjNhIzg_0\tperson\n5lv2GCs3_E0_0\tperson\n5l9rlcuS7pE_0\tbus\n5mocfP3c3JE_0\tbear\n5mqvNWXtMCU_0\tcat\n5nAuDbKmWLY_0\telephant\n5nC2ZXfE-sg_0\ttrain\n5nkh3PK6lBs_0\tcow\n5of5t38DQL4_0\tcow\n5okxoIw3cJI_0\tskateboard\n5ovlgihl130_0\tknife\n5phhj08_8hI_0\tdog\n5psIBlFu-yQ_0\tperson\n5rh7nf5z_O0_1\tcow\n5rkM4mLsQoU_0\tknife\n5sIj93XnVc0_1\tmotorcycle\n5sjUnvABkko_0\tairplane\n5s4kqURLLo4_0\tperson\n5toRpAYrY_4_0\tperson\n5uYObEyAbCQ_0\thorse\n5ukcjpXOopg_0\tperson\n5vPXxAEGTrw_0\tairplane\n5vUtusnPXXs_0\tbird\n5vaBUAh4HkU_0\tairplane\n5yMeqHPiJgY_1\thorse\n5yMeqHPiJgY_2\thorse\n5yMeqHPiJgY_3\thorse\n5yeSANffSRk_0\tperson\n5yeSANffSRk_1\tperson\n5zJuhMtO1F8_0\tbird\n5zKtWxffw-0_0\tboat\n51rDJW0FO8w_0\thorse\n51yQTVmaMXw_1\tmotorcycle\n52UjkVxSSHg_0\tperson\n52VFNDCXUHg_0\tperson\n52pNzl4wrxs_0\tperson\n52wdqvY
rGv4_0\tperson\n522wkm19sH0_0\tbus\n54icMYqqx_w_1\tbus\n55H1IVgQj3E_0\tboat\n56BI7lH0z1g_0\tperson\n56bgv0J-cXw_1\tknife\n56bgv0J-cXw_4\tknife\n56r2wDCnuQQ_0\thorse\n57BY7QjcYbQ_0\tperson\n574FA_5qp-s_0\tbus\n58K_ZPS7U8M_0\tperson\n58gdyHWU6do_1\ttruck\n5802XdQdAkU_0\tcow\n59JJGcB2jRE_0\thorse\n59JJGcB2jRE_4\thorse\n59JJGcB2jRE_2\thorse\n59cXOQc39JI_1\tzebra\n5928Zhy26yI_1\tgiraffe\n5-Oeo8tmauc_0\tbus\n5-Oeo8tmauc_1\tbus\n5-Oeo8tmauc_2\tbus\n5-O2xma48Tw_0\tbird\n5-y_Rrr8shw_2\tperson\n5_njhyGAXdE_0\ttruck\n5_njhyGAXdE_1\ttruck\n5_njhyGAXdE_2\ttruck\n5_2sGSrZblY_0\tperson\n6AD9GHHEVkE_1\tboat\n6AYkCla5Oak_0\tcar\n6A2LC4_gts4_0\tperson\n6A2LC4_gts4_1\tperson\n6BB65BA-pS0_1\tknife\n6CKS3WJRpHI_0\tperson\n6C1C-L7L6CE_0\tperson\n6DQ-H73b62Y_0\tperson\n6EHcwJiML3g_2\tperson\n6GlBa-DUEqc_0\tperson\n6HlTwF1ZDkc_0\tperson\n6HrWOx9GfzI_0\tperson\n6JrhpITR8po_1\tcow\n6JrhpITR8po_0\tcow\n6KpKxtwB1Ww_0\tperson\n6LiW0KF3fME_0\tperson\n6Meaw8zK8sU_0\tperson\n6M3wDWZDZJ8_0\tcar\n6M4oJG9NsRM_0\tperson\n6Nc1z3BVzlI_0\tbear\n6OlxDr5vZuI_2\thorse\n6Ona04rOyZk_0\tcat\n6PBKPTCkWOo_0\tperson\n6PH-mFChsi0_0\tairplane\n6PwE6q6pebc_1\tperson\n6QFs4uNsSt4_0\tperson\n6RIFox7kLqY_0\tcat\n6SBj14dkVPM_0\tcow\n6SdX0oE9Qm8_0\tcat\n6SizSdOT9_k_0\thorse\n6TEQ098RfzE_0\tcow\n6TQ8X9G4BAY_0\tdog\n6UQbOOWv_ws_0\tcow\n6UQbOOWv_ws_2\tcow\n6XUe2u2YWkQ_2\tumbrella\n6bJPo4tzJvQ_0\tperson\n6bco275PcUs_0\ttruck\n6bco275PcUs_1\ttruck\n6gwBOlfJ34I_1\tskateboard\n6gww5ltOLQY_0\tbird\n6gww5ltOLQY_1\tbird\n6hAG7632JjA_0\tcat\n6htKDjHsXPQ_0\tcow\n6id5A0aiJbE_0\ttrain\n6jwTUZocHXY_0\thorse\n6j07-PcNv70_0\ttruck\n6kjb3q8EygI_0\telephant\n6lAxaY4AYB8_0\tperson\n6lPPfWdeBvU_0\tcat\n6l3SpVgqJY0_0\tperson\n6mYi-vXre4Q_0\ttruck\n6med3JZ2k40_0\tperson\n6miVJWDTBCY_1\ttrain\n6n6fVeWD_m0_0\tknife\n6o61j0KZ9cA_0\tperson\n6pPjKIlVlfY_0\tbicycle\n6pnenPlFGIc_0\tmotorcycle\n6pnenPlFGIc_1\tmotorcycle\n6pny8Td3Lvs_0\thorse\n6qRIuIHqJco_0\ttrain\n6qSDUh2ES7Q_0\tperson\n6qVpY1VC2hU_1\tcat\n6qhp1FiVbBQ_0\tknife\n6rlBtCRp25g_0\tcat\n6r0rYZCL4Qc_0\tperson\n6r0rYZCL4Qc_1\tperson\n6uMmknjq0mg_0\tbicycle\n6uSZqFsKMGI_0\tcow\n6um2PoiKfT4_0\tmotorcycle\n6vAGEaKFuyY_1\tbus\n6vAGEaKFuyY_2\tbus\n6vafM_LKdhA_0\tumbrella\n6vc8u4MPWkY_0\tbird\n6v_NKAM10sA_5\tbicycle\n6v_NKAM10sA_9\tbicycle\n6v_NKAM10sA_10\tbicycle\n6v_NKAM10sA_11\tbicycle\n6v_NKAM10sA_12\tbicycle\n6v_NKAM10sA_0\tbicycle\n6v_NKAM10sA_1\tbicycle\n6w-nwNFVYm8_0\tmotorcycle\n6y78kiGuIAk_0\tperson\n6zPET0HFVaM_3\ttrain\n6zPgsocp4bY_1\tbicycle\n6zPgsocp4bY_2\tbicycle\n6zPgsocp4bY_3\tbicycle\n6zPgsocp4bY_7\tbicycle\n6zPgsocp4bY_9\tbicycle\n6zW1omjPFRs_0\telephant\n6zW1omjPFRs_1\telephant\n62MEsd3U1aQ_0\tperson\n62PpG0cOcbU_0\tperson\n63vKOQ-SCBw_0\tairplane\n63_kFJCm2pQ_0\tperson\n64yGcACuF0g_0\tcat\n64yZxDGH92I_0\tperson\n64-njkqyF7k_0\tbus\n65u4BXZ10RY_0\tdog\n65u4BXZ10RY_1\tdog\n654ylXfWndU_0\tboat\n66HPgc7Up3o_6\thorse\n66HPgc7Up3o_3\thorse\n66HPgc7Up3o_4\thorse\n66HPgc7Up3o_7\thorse\n66N_Ju8hg2U_0\tknife\n665JKK-JrTc_0\tperson\n67kix34dj7A_0\ttruck\n67wgEifQYpg_0\tperson\n68KnEa1hVf8_0\tbicycle\n6-Z9S0qy8ys_1\tdog\n6-7x1BQGuQE_0\tperson\n6_nq4o_21CY_0\telephant\n7BBHz6wfABM_0\tperson\n7CYm8WQftfw_0\tbus\n7DIXCjEBWLw_0\tairplane\n7D-ypPzaTDI_0\tperson\n7GvsFRhnxWc_1\tbird\n7G2sXxpbA-0_0\tmotorcycle\n7HXox1j1X2A_0\tperson\n7Hthj7LhsoI_1\telephant\n7H1AhHiyip0_0\tperson\n7JXhfaNTsUQ_2\tbird\n7K61aiu3UsM_0\tperson\n7K61aiu3UsM_1\tperson\n7LKG4ReUlZA_0\tperson\n7LTKFUY3Xo8_0\tbird\n7MQZWaHzUOo_0\tcow\n7Mb_dcvNENM_7\tbicycle\n7Mb_dcvNENM_3\tbicycle\n7Mb_dcvNENM_4\tbicycle\n7Mb_dcvNENM_5\tbicycle\n7Mb_dcvNENM_6\tbi
cycle\n7NDhXBp57BY_0\tperson\n7NFMDZwqdw4_0\tperson\n7Ng49Wed4Y4_0\tcow\n7Ng49Wed4Y4_2\tcow\n7NxvW5DSQrI_0\tcat\n7O8grUKQopY_0\tperson\n7PeZgsBNi5g_0\tcar\n7QauV6mvt98_0\tcar\n7RxzfGFIxSg_0\tcat\n7Strg7qJtW0_0\telephant\n7Strg7qJtW0_7\telephant\n7Strg7qJtW0_1\telephant\n7Strg7qJtW0_2\telephant\n7Strg7qJtW0_3\telephant\n7VQ8QZRnxD8_0\tcow\n7Vcfkjk--Fc_1\tdog\n7V5Q7Te4KNI_0\tbus\n7WZRhdW3Ysw_0\telephant\n7XQ-ufhX7gc_0\tcow\n7XQ-ufhX7gc_1\tcow\n7YCox5adS-U_0\tperson\n7YQM-nFSHW4_0\tknife\n7Ya_jh9VO9U_0\tperson\n7aTla4KAK_U_1\tknife\n7bqlApH5GwI_1\tbicycle\n7dFEYp-1Hgo_0\tperson\n7e8WNmzDHUQ_0\tperson\n7fF7heSCMTw_0\tmotorcycle\n7fRxyCT-Wao_0\tgiraffe\n7fRxyCT-Wao_2\tgiraffe\n7fSMUG5W8vk_2\tbicycle\n7g8SI9aAn70_1\tumbrella\n7hIJP5KExbE_1\telephant\n7hjOcuaQm7I_0\telephant\n7kPsaqRQBCk_0\tknife\n7kl1hNW3aVs_0\tmotorcycle\n7k7H9RKhOF8_1\tskateboard\n7k7H9RKhOF8_3\tskateboard\n7ledBa3nuVs_0\ttrain\n7ledBa3nuVs_2\ttrain\n7m98zjjFHbU_0\tperson\n7ntsSm-LFZA_0\tperson\n7ntsSm-LFZA_1\tperson\n7nzY38tPTM0_0\tperson\n7nzY38tPTM0_1\tperson\n7n8C_td0Th8_0\thorse\n7p4RxRFB_Eg_0\thorse\n7rE5dIroJwQ_0\tperson\n7rifGM-TuPA_0\thorse\n7trl2U6nLPc_0\thorse\n7vyHv7_GxbQ_0\tperson\n7wte1pPBwQ0_1\tbear\n7w616uMnI_8_0\telephant\n7w616uMnI_8_1\telephant\n7x8K4JervhE_0\tbus\n7y0joj813H0_3\tbus\n7zRaB-2B7B0_0\ttrain\n72RzEHZFYtM_2\tairplane\n72RzEHZFYtM_1\tairplane\n73Wonc3xnLI_0\tperson\n73Z4KnnAMlU_0\tperson\n74gRlu6vJLY_0\tperson\n747bRdBUPSw_0\tperson\n76LU6w1a7UA_1\tairplane\n76PIBEC3WVo_0\tskateboard\n77GychcVDRI_0\tperson\n77dvi_3OU4M_0\tperson\n79MY0qku9uc_1\thorse\n8AgZqrCi9no_0\thorse\n8BK44tI3ACo_0\tperson\n8BQJVHpHFsU_1\tdog\n8BQJVHpHFsU_2\tdog\n8B3bbakza_Q_0\tperson\n8CJRCoA1Rps_0\tperson\n8ClOgfNAjXs_0\tgiraffe\n8DlXcc1IXlw_0\tcar\n8EwDzFi34nA_0\tcow\n8FEp5ORJ27g_0\ttruck\n8FyuS809d24_0\tdog\n8FyuS809d24_1\tdog\n8GGi0BXLCaM_0\tperson\n8G_vBzM-Ws4_1\tumbrella\n8HcyzPUv5ag_0\tperson\n8JIpa6tfWzo_0\tairplane\n8JKJnuN_UTI_0\tcow\n8JhHIO_7m-0_0\tcow\n8LGnOH6nDbc_0\tdog\n8LGnOH6nDbc_1\tdog\n8Lx004yCltY_6\telephant\n8Lx004yCltY_12\telephant\n8Lx004yCltY_18\telephant\n8MO_kng7L-s_0\tperson\n8MO_kng7L-s_1\tperson\n8NlznvdsNJQ_2\tboat\n8N8hB2Au4JE_0\tperson\n8Pbd3dd3v5E_0\tperson\n8Pz3xq3KFo0_6\telephant\n8Pz3xq3KFo0_4\telephant\n8Qr-5_567tI_1\ttruck\n8Q8g9z-DNF8_0\tmotorcycle\n8RZsKbffdqI_0\tcat\n8Sbz2MGzhp4_0\tperson\n8UcqXCLmq-M_1\telephant\n8UcqXCLmq-M_3\telephant\n8UcqXCLmq-M_6\telephant\n8UcqXCLmq-M_7\telephant\n8Ul_lS0g_RU_0\tskateboard\n8UmKRVMR08g_2\tbird\n8U7BmrkcgcU_2\ttruck\n8VkbfdMQrR8_0\tperson\n8VzjERSpeS4_1\telephant\n8VzjERSpeS4_0\telephant\n8WcBoYh-IMg_0\tbird\n8X27eyH-tx0_0\tcar\n8Zi2bsTpMeY_0\tperson\n8ZmfZDMaVhg_0\tcat\n8Z1GvAHPEnU_0\tcat\n8a1bD-UgfKE_0\ttruck\n8bD-aqWPxwM_0\tmotorcycle\n8bE_FhrjBuM_2\tskateboard\n8bE_FhrjBuM_0\tskateboard\n8bE_FhrjBuM_1\tskateboard\n8bypIjdKgEI_0\tperson\n8b5fedIr-WQ_0\tperson\n8cNzCe26dSM_0\tperson\n8cSOpd9gaPE_0\tcow\n8c8TJ_Jzngk_0\thorse\n8d6950aGpD8_0\tdog\n8eK3ktD9j5o_0\thorse\n8eK3ktD9j5o_1\thorse\n8ewNcrMhg-w_0\tperson\n8gsiG2Wu3YM_0\tgiraffe\n8hFEJz0GvfU_0\telephant\n8hwa44VMdLs_0\tperson\n8h8Cpkugo-Y_0\telephant\n8h_eY7zEIqk_3\ttruck\n8iBiHoA_OJk_0\tperson\n8jRFQ8RKZ0s_1\tcar\n8kTREwiI1-8_0\tcow\n8kn6PJbtsyA_0\tbicycle\n8kn6PJbtsyA_1\tbicycle\n8kn6PJbtsyA_2\tbicycle\n8kn6PJbtsyA_3\tbicycle\n8kn6PJbtsyA_4\tbicycle\n8lKXEr2W3yM_0\tknife\n8lMRKCKyBwk_0\tperson\n8lonNtE99PI_1\tperson\n8l7UmXXnAJs_0\ttruck\n8mlHevSC8cc_0\tcar\n8m-GtOBjbzY_1\tbicycle\n8nWSGwlJyPQ_0\tcat\n8nsl-r_i0AI_0\tperson\n8n3A8io4GNU_0\tperson\n8okfUuO0Pvc_1\tbird\n8poWB-6q4xk_1\tbicycl
e\n8p2saqn2kiQ_0\tperson\n8qFJg_AoKeY_0\tcow\n8qulLm8MYrM_0\tbus\n8rBxRMDJEFY_0\tperson\n8sOWPIfWpCM_0\thorse\n8tKto2zQWUg_0\telephant\n8uoYlmdJlAo_1\tknife\n8wdvLn40CTk_5\tbus\n8wdvLn40CTk_0\tbus\n8wdvLn40CTk_1\tbus\n8wv3WJBJmog_1\tdog\n8yFZUTSjpos_0\tmotorcycle\n8zBx-nHUqBY_0\tperson\n8zUAF30Hu6c_1\ttrain\n8zUAF30Hu6c_2\ttrain\n8zftjn0I9TQ_0\ttruck\n8zftjn0I9TQ_2\ttruck\n8zjgYuK3nVY_0\tperson\n8z-YLOzAxb4_2\tbicycle\n8z-YLOzAxb4_4\tbicycle\n8z-sTr28AWk_0\tskateboard\n80CcMFD-Rcw_1\tperson\n80CcMFD-Rcw_0\tperson\n81cNVk8boEM_0\tperson\n82lK9rB-e08_1\tmotorcycle\n84P6L_HrN48_0\tbird\n88N5__h7Zdo_0\tbicycle\n89a461_gh2o_0\tbicycle\n89mGhzBokZ8_1\tbear\n89qfsC77BYk_0\tperson\n8_oUj2cuPdo_0\tdog\n9A-VO1zCZJ4_1\tmotorcycle\n9BVgbNz-bi8_0\tperson\n9BVgbNz-bi8_1\tperson\n9BpvtvUGG5g_0\tperson\n9DGpFjuUVBk_0\tperson\n9DY0dTRH5xI_0\tbird\n9D5ORdC7BuQ_6\tbus\n9ELQq5BMR1U_0\tperson\n9E8VBIYmTGY_1\tcow\n9E8VBIYmTGY_0\tcow\n9FAB9BrcQls_0\tperson\n9FTOvdcnzDQ_0\tairplane\n9GdhKEBm0pA_6\tbicycle\n9GdhKEBm0pA_1\tbicycle\n9GdhKEBm0pA_3\tbicycle\n9HqapwdLVzk_4\tknife\n9KfdTsjy53o_0\ttruck\n9LHbQA-pT0U_2\thorse\n9LJRUmW_AII_0\tboat\n9LOpNoTFWKg_0\ttruck\n9LOpNoTFWKg_4\ttruck\n9LOpNoTFWKg_1\ttruck\n9LOpNoTFWKg_2\ttruck\n9LqExSHe9y8_0\tknife\n9Ls7gSZQt1w_2\tbear\n9NsmnTdRiik_0\tairplane\n9PsezNNV0Jc_1\tairplane\n9PsezNNV0Jc_2\tairplane\n9PsezNNV0Jc_0\tairplane\n9Q3srzApSJU_0\tperson\n9RGlWjTKvE0_0\tbus\n9RZCK24Shec_0\tcat\n9ScZtgWAJZA_1\tperson\n9SgrA5Q1d94_0\tperson\n9ShZpsmuvc4_2\tskateboard\n9ShZpsmuvc4_1\tskateboard\n9UU2h6M8DJk_2\ttruck\n9UwLiWKOIGY_0\tperson\n9U-tccGetsk_0\tknife\n9VwSYjCCRYk_1\ttruck\n9VwSYjCCRYk_2\ttruck\n9WDPvYpnrfU_1\ttruck\n9WDt0JjOFIA_0\tperson\n9YVkZ7QxD5E_0\tperson\n9Y6XZFO31JU_0\tcow\n9ZpZZoTtySo_1\tbear\n9Z0Jz1tesQ4_4\tcow\n9Z0Jz1tesQ4_1\tcow\n9Z0Jz1tesQ4_2\tcow\n9Z0Jz1tesQ4_3\tcow\n9aQOAnspXGo_1\tbird\n9bYPYgMQVjU_0\tperson\n9bzmQFGK8m8_0\tperson\n9dOPPvgyMqk_0\tperson\n9eI_0DoOE08_0\tperson\n9eI_0DoOE08_1\tperson\n9g8o260G10k_0\tbird\n9hAU80xKWy0_0\ttruck\n9jS5MThAtmo_0\tperson\n9kGuuCx39JA_0\tmotorcycle\n9lsXenPJ-X8_1\tbird\n9ltdzlYXfp8_0\tcow\n9ltdzlYXfp8_3\tcow\n9muklrcigJY_0\tdog\n9nqU8e9IUPU_0\tskateboard\n9pEB8cjvPSQ_1\thorse\n9qamzN9bwxw_0\tperson\n9rvVWyyuud0_0\tperson\n9r1FvK19XV8_0\tperson\n9uhZRDsQKnc_0\tperson\n9yt1if13PHk_0\telephant\n9y5txKR57mc_0\tbird\n9zBCjCtH3Eg_0\thorse\n9zqk5w8Qx1Q_1\tbicycle\n9zroWMwZHGI_1\tperson\n907A5I4-LpA_0\tmotorcycle\n91SWvU-5TcI_0\tperson\n92MaWPuO8PI_0\tboat\n92560YiwSP0_0\tperson\n93gyPa_dPGU_0\ttruck\n946wiAK4Seg_1\tperson\n95CV_olHtcI_0\tperson\n96WWGXa4QrI_0\tcar\n96akJFw5SPU_0\ttruck\n96iqXHgOXKY_0\tperson\n98XiF-Z__aI_0\tcat\n99Tb7HSFn3I_0\tperson\n9_bFE0FUq_c_1\tknife\n-A-tBuMjU8s_0\tcat\n-B4YQQLrOfI_2\tskateboard\n-C0rYHhL_x4_0\tmotorcycle\n-DYf49hlRSE_0\tperson\n-Ebcfmg0-eE_0\tperson\n-E05a-eQSwY_0\tumbrella\n-FMaVn21dYU_1\thorse\n-Fu9coX9J-A_0\tperson\n-Fu9coX9J-A_1\tperson\n-Gk4iMiEMCc_0\tperson\n-LVtIbelA3M_0\thorse\n-LXr7LdXtrk_0\tboat\n-LjAFTF5WP4_0\tbicycle\n-LjAFTF5WP4_1\tbicycle\n-LjAFTF5WP4_3\tbicycle\n-MpLPuviQ00_0\tperson\n-M_jT3EYgcc_0\tperson\n-NWvB2g952Q_2\tbird\n-OZt785bbpY_0\tairplane\n-P37Y1G6oHk_2\tairplane\n-P37Y1G6oHk_3\tairplane\n-P37Y1G6oHk_0\tairplane\n-QBeUV_OkJg_1\tdog\n-QQCINzsXpw_0\tperson\n-Q6g2xZ0PxY_1\tairplane\n-RjxMfaV-Vo_1\tknife\n-RjxMfaV-Vo_2\tknife\n-SPHavKGd3M_0\tskateboard\n-S8L2HACCPE_12\telephant\n-S8L2HACCPE_1\telephant\n-S8L2HACCPE_10\telephant\n-TKKOo1FfAI_0\tbird\n-VgWHKeRRjs_0\tairplane\n-VgWHKeRRjs_1\tairplane\n-WyEyKxdZOQ_0\tperson\n-XWeGpACKwc_0\tskateboard\n-Xj6MiGVWt0_0\t
person\n-XwZnoNm0FU_0\tdog\n-ZDO95E0pl8_0\tperson\n-anX-ad_gHQ_0\tperson\n-avz2OsPIq4_2\tbicycle\n-bJkl4q5f-A_0\tbird\n-c1b7nHzGn4_0\tairplane\n-dQnNlBQp3o_0\tperson\n-db_SToBhkg_2\tmotorcycle\n-eZUdm8ERQQ_0\tperson\n-e42Pb0YeOY_0\tcat\n-fnhznKC3CU_0\tperson\n-f0JLwuyuTM_0\tperson\n-jL0HOXwYls_0\tperson\n-kLIF2a7yeU_0\tperson\n-k1TxEpOgnA_0\tperson\n-l9NS6DuRPI_0\tperson\n-mgNwLW3ODc_0\tperson\n-mwDgqLpu-k_0\tskateboard\n-nOfuA8B7As_1\tbicycle\n-nzXunuZac4_0\tcat\n-oG6YVPhC_I_0\thorse\n-o28rb1UnYA_0\tcar\n-sJOJNjOCBI_0\tmotorcycle\n-sJOJNjOCBI_1\tmotorcycle\n-sWch1rnO10_0\tperson\n-th9NS9hl6s_0\tcow\n-uP01llwXFY_5\tboat\n-uP01llwXFY_1\tboat\n-u5MNR-9ClU_0\tperson\n-vkMKVuweFA_0\tperson\n-v7FXEhgwtE_0\tperson\n-y652b4w3Ss_0\tbird\n-zqHD6Jthqg_0\tperson\n-0U1vm6LIi8_0\tperson\n-1je1K1ihbk_2\tskateboard\n-2iw3MzUP2Y_0\tmotorcycle\n-3OvKcu5P2U_0\tcar\n-3fzr21Ov5w_0\tperson\n-6vJDV8XnWE_0\tboat\n-7Im8MyvaXU_0\tcat\n--8shIp3t0I_0\tknife\n-_iBuJTwjw8_1\thorse\n-_xag4X_Do0_0\tbird\n_ATEx5gbBEQ_0\tknife\n_ATEx5gbBEQ_1\tknife\n_AcvI8VF5ig_0\tcow\n_Ae4vmwt8uA_0\tperson\n_Auvs-o5Pck_0\ttruck\n_A8nA25Tq8c_1\tperson\n_C_yvxdjVGA_2\thorse\n_C_yvxdjVGA_0\thorse\n_DXAxnPIiBU_0\tcow\n_D-9w3aSX50_0\tperson\n_GyE3cPQ6U8_0\tcar\n_HN1_MjnjWo_2\telephant\n_HYaLoOKE84_1\tcow\n_IhkqtAQHBw_0\ttrain\n_InrHPE8Umw_0\tmotorcycle\n_IpUnYit3Pg_0\tdog\n_JNG6qK6INs_3\tbear\n_KzDIvt0cCk_0\tperson\n_K6jYgDC1JU_0\tairplane\n_NZ4o-omJLE_0\tumbrella\n_NtOMcyVAp4_1\tdog\n_OmnjH4t-IY_0\tperson\n_QF0A9B-xB8_0\tperson\n_QRy9nd4kcg_0\tairplane\n_Q9M8QAjSMk_0\tperson\n_Rd-wEO2r10_0\tperson\n_R6nlDzh6Tc_0\tperson\n_R6nlDzh6Tc_2\tperson\n_T0O1BlYjaU_1\tbear\n_VegkTdhrQE_0\tmotorcycle\n_WKJaPPBz8Q_0\tumbrella\n_WcqTpLKkww_1\ttruck\n_Y6_E1l4blQ_1\tknife\n_ZDU4qi4lcI_2\tcow\n_ZDU4qi4lcI_0\tcow\n_ZDU4qi4lcI_1\tcow\n_ZHmkH59bCQ_0\tperson\n_ZXqLyRe4n0_0\telephant\n_ZsogS9uPJQ_0\tperson\n_akq_DieEWE_0\tperson\n_akq_DieEWE_1\tperson\n_bO2sdIelLY_0\tperson\n_dC_upYbxWI_0\tknife\n_eCb7mFYyIg_0\tmotorcycle\n_egWujmdZtw_0\tperson\n_epdfuB0qRM_0\tcar\n_e5Vvy9DJ9E_4\tbear\n_e5Vvy9DJ9E_0\tbear\n_foK5Dvj1As_0\tbird\n_hryEVGKNuw_0\thorse\n_iY4AnGfq0Y_0\ttrain\n_jBzwdg0QRA_1\tbus\n_jci9tIBIB4_5\ttruck\n_kdhlRke8uI_0\tperson\n_kfdh_5bI-Q_0\tperson\n_lmD-useijU_0\tperson\n_mJBwuCegJ0_12\ttruck\n_mJBwuCegJ0_1\ttruck\n_mJBwuCegJ0_2\ttruck\n_mJBwuCegJ0_8\ttruck\n_mJBwuCegJ0_9\ttruck\n_oRtPVRmtwo_0\tdog\n_pEHwWe2seA_5\telephant\n_sV1Jd1uiYg_0\tperson\n_tZU1XTOML4_0\tboat\n_usyDpllGBo_0\thorse\n_vBAv8cBoqE_0\tskateboard\n_vV0wdWq0cU_0\tperson\n_xMVx44FbT4_0\thorse\n_xQn3TupjYs_0\tcat\n_xy58m6yCko_0\tmotorcycle\n_yQQjARqD1s_0\tboat\n_yfoe4GCA0Q_4\tairplane\n_yfoe4GCA0Q_2\tairplane\n_yv5Cwbm9EA_0\tperson\n_zIDofZkgS4_1\ttruck\n_zQt1CSSKyA_1\tbicycle\n_0eR2vQAEqE_0\telephant\n_0eR2vQAEqE_1\telephant\n_17u-cPTYt0_0\tcar\n_17u-cPTYt0_1\tcar\n_2mIWIhbDPY_0\tbus\n_37U5Elgnck_0\tperson\n_5fE6dP48FM_0\tcow\n_5sIT4l5izM_0\tknife\n_6qUuUUYvUQ_0\tperson\n_7zbbqEa3nw_1\ttrain\n_7zbbqEa3nw_4\ttrain\n_8VTthFkvS0_0\tbird\n_8iyumFI4sQ_1\telephant\n_8iyumFI4sQ_2\telephant\n_8iyumFI4sQ_3\telephant\n_81FImml2gk_0\tdog\n_9bypka_Q4c_0\tbus\n_-CvwC7H730_0\tperson\n_-XcxnQLKPM_0\tdog\n__Q5A7gExpI_0\tperson\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/dataset/COCO_tool.py",
    "content": "__author__ = 'tylin'\n__version__ = '2.0'\n# Interface for accessing the Microsoft COCO dataset.\n\n# Microsoft COCO is a large image dataset designed for object detection,\n# segmentation, and caption generation. pycocotools is a Python API that\n# assists in loading, parsing and visualizing the annotations in COCO.\n# Please visit http://mscoco.org/ for more information on COCO, including\n# for the data, paper, and tutorials. The exact format of the annotations\n# is also described on the COCO website. For example usage of the pycocotools\n# please see pycocotools_demo.ipynb. In addition to this API, please download both\n# the COCO images and annotations in order to run the demo.\n\n# An alternative to using the API is to load the annotations directly\n# into Python dictionary\n# Using the API provides additional utility functions. Note that this API\n# supports both *instance* and *caption* annotations. In the case of\n# captions not all functions are defined (e.g. categories are undefined).\n\n# The following API functions are defined:\n#  COCO       - COCO api class that loads COCO annotation file and prepare data structures.\n#  decodeMask - Decode binary mask M encoded via run-length encoding.\n#  encodeMask - Encode binary mask M using run-length encoding.\n#  getAnnIds  - Get ann ids that satisfy given filter conditions.\n#  getCatIds  - Get cat ids that satisfy given filter conditions.\n#  getImgIds  - Get img ids that satisfy given filter conditions.\n#  loadAnns   - Load anns with the specified ids.\n#  loadCats   - Load cats with the specified ids.\n#  loadImgs   - Load imgs with the specified ids.\n#  annToMask  - Convert segmentation in an annotation to binary mask.\n#  showAnns   - Display the specified annotations.\n#  loadRes    - Load algorithm results and create API for accessing them.\n#  download   - Download COCO images from mscoco.org server.\n# Throughout the API \"ann\"=annotation, \"cat\"=category, and \"img\"=image.\n# Help on each functions can be accessed by: \"help COCO>function\".\n\n# See also COCO>decodeMask,\n# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,\n# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,\n# COCO>loadImgs, COCO>annToMask, COCO>showAnns\n\n# Microsoft COCO Toolbox.      
version 2.0\n# Data, paper, and tutorials available at:  http://mscoco.org/\n# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.\n# Licensed under the Simplified BSD License [see bsd.txt]\n\nimport json\nimport time\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Polygon\nimport numpy as np\nimport copy\nimport itertools\nfrom pycocotools import mask as maskUtils\nimport os\nfrom collections import defaultdict\nimport sys\nPYTHON_VERSION = sys.version_info[0]\nif PYTHON_VERSION == 2:\n    from urllib import urlretrieve\nelif PYTHON_VERSION == 3:\n    from urllib.request import urlretrieve\n\n\ndef _isArrayLike(obj):\n    return hasattr(obj, '__iter__') and hasattr(obj, '__len__')\n\n\nclass COCO:\n    def __init__(self, dataset):\n        \"\"\"\n        Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n        :param annotation_file (str): location of annotation file\n        :param image_folder (str): location to the folder that hosts images.\n        :return:\n        \"\"\"\n        # load dataset\n        self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()\n        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)\n        assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))\n        self.dataset = dataset\n        self.createIndex()\n\n    def createIndex(self):\n        # create index\n        print('creating index...')\n        anns, cats, imgs = {}, {}, {}\n        imgToAnns,catToImgs = defaultdict(list),defaultdict(list)\n        if 'annotations' in self.dataset:\n            for ann in self.dataset['annotations']:\n                imgToAnns[ann['image_id']].append(ann)\n                anns[ann['id']] = ann\n\n        if 'images' in self.dataset:\n            for img in self.dataset['images']:\n                imgs[img['id']] = img\n\n        if 'categories' in self.dataset:\n            for cat in self.dataset['categories']:\n                cats[cat['id']] = cat\n\n        if 'annotations' in self.dataset and 'categories' in self.dataset:\n            for ann in self.dataset['annotations']:\n                catToImgs[ann['category_id']].append(ann['image_id'])\n\n        print('index created!')\n\n        # create class members\n        self.anns = anns\n        self.imgToAnns = imgToAnns\n        self.catToImgs = catToImgs\n        self.imgs = imgs\n        self.cats = cats\n\n    def info(self):\n        \"\"\"\n        Print information about the annotation file.\n        :return:\n        \"\"\"\n        for key, value in self.dataset['info'].items():\n            print('{}: {}'.format(key, value))\n\n    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):\n        \"\"\"\n        Get ann ids that satisfy given filter conditions. default skips that filter\n        :param imgIds  (int array)     : get anns for given imgs\n               catIds  (int array)     : get anns for given cats\n               areaRng (float array)   : get anns for given area range (e.g. 
[0 inf])\n               iscrowd (boolean)       : get anns for given crowd label (False or True)\n        :return: ids (int array)       : integer array of ann ids\n        \"\"\"\n        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]\n        catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n        if len(imgIds) == len(catIds) == len(areaRng) == 0:\n            anns = self.dataset['annotations']\n        else:\n            if not len(imgIds) == 0:\n                lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]\n                anns = list(itertools.chain.from_iterable(lists))\n            else:\n                anns = self.dataset['annotations']\n            anns = anns if len(catIds)  == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n            anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]\n        if not iscrowd == None:\n            ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n        else:\n            ids = [ann['id'] for ann in anns]\n        return ids\n\n    def getCatIds(self, catNms=[], supNms=[], catIds=[]):\n        \"\"\"\n        filtering parameters. default skips that filter.\n        :param catNms (str array)  : get cats for given cat names\n        :param supNms (str array)  : get cats for given supercategory names\n        :param catIds (int array)  : get cats for given cat ids\n        :return: ids (int array)   : integer array of cat ids\n        \"\"\"\n        catNms = catNms if _isArrayLike(catNms) else [catNms]\n        supNms = supNms if _isArrayLike(supNms) else [supNms]\n        catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n        if len(catNms) == len(supNms) == len(catIds) == 0:\n            cats = self.dataset['categories']\n        else:\n            cats = self.dataset['categories']\n            cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name']          in catNms]\n            cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]\n            cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id']            in catIds]\n        ids = [cat['id'] for cat in cats]\n        return ids\n\n    def getImgIds(self, imgIds=[], catIds=[]):\n        '''\n        Get img ids that satisfy given filter conditions.\n        :param imgIds (int array) : get imgs for given ids\n        :param catIds (int array) : get imgs with all given cats\n        :return: ids (int array)  : integer array of img ids\n        '''\n        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]\n        catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n        if len(imgIds) == len(catIds) == 0:\n            ids = self.imgs.keys()\n        else:\n            ids = set(imgIds)\n            for i, catId in enumerate(catIds):\n                if i == 0 and len(ids) == 0:\n                    ids = set(self.catToImgs[catId])\n                else:\n                    ids &= set(self.catToImgs[catId])\n        return list(ids)\n\n    def loadAnns(self, ids=[]):\n        \"\"\"\n        Load anns with the specified ids.\n        :param ids (int array)       : integer ids specifying anns\n        :return: anns (object array) : loaded ann objects\n        \"\"\"\n        if _isArrayLike(ids):\n            return [self.anns[id] for id in ids]\n        elif type(ids) == int:\n            return [self.anns[ids]]\n\n    def loadCats(self, ids=[]):\n       
 \"\"\"\n        Load cats with the specified ids.\n        :param ids (int array)       : integer ids specifying cats\n        :return: cats (object array) : loaded cat objects\n        \"\"\"\n        if _isArrayLike(ids):\n            return [self.cats[id] for id in ids]\n        elif type(ids) == int:\n            return [self.cats[ids]]\n\n    def loadImgs(self, ids=[]):\n        \"\"\"\n        Load anns with the specified ids.\n        :param ids (int array)       : integer ids specifying img\n        :return: imgs (object array) : loaded img objects\n        \"\"\"\n        if _isArrayLike(ids):\n            return [self.imgs[id] for id in ids]\n        elif type(ids) == int:\n            return [self.imgs[ids]]\n\n    def showAnns(self, anns, draw_bbox=False):\n        \"\"\"\n        Display the specified annotations.\n        :param anns (array of object): annotations to display\n        :return: None\n        \"\"\"\n        if len(anns) == 0:\n            return 0\n        if 'segmentation' in anns[0] or 'keypoints' in anns[0]:\n            datasetType = 'instances'\n        elif 'caption' in anns[0]:\n            datasetType = 'captions'\n        else:\n            raise Exception('datasetType not supported')\n        if datasetType == 'instances':\n            ax = plt.gca()\n            ax.set_autoscale_on(False)\n            polygons = []\n            color = []\n            for ann in anns:\n                c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]\n                if 'segmentation' in ann:\n                    if type(ann['segmentation']) == list:\n                        # polygon\n                        for seg in ann['segmentation']:\n                            poly = np.array(seg).reshape((int(len(seg)/2), 2))\n                            polygons.append(Polygon(poly))\n                            color.append(c)\n                    else:\n                        # mask\n                        t = self.imgs[ann['image_id']]\n                        if type(ann['segmentation']['counts']) == list:\n                            rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])\n                        else:\n                            rle = [ann['segmentation']]\n                        m = maskUtils.decode(rle)\n                        img = np.ones( (m.shape[0], m.shape[1], 3) )\n                        if ann['iscrowd'] == 1:\n                            color_mask = np.array([2.0,166.0,101.0])/255\n                        if ann['iscrowd'] == 0:\n                            color_mask = np.random.random((1, 3)).tolist()[0]\n                        for i in range(3):\n                            img[:,:,i] = color_mask[i]\n                        ax.imshow(np.dstack( (img, m*0.5) ))\n                if 'keypoints' in ann and type(ann['keypoints']) == list:\n                    # turn skeleton into zero-based index\n                    sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1\n                    kp = np.array(ann['keypoints'])\n                    x = kp[0::3]\n                    y = kp[1::3]\n                    v = kp[2::3]\n                    for sk in sks:\n                        if np.all(v[sk]>0):\n                            plt.plot(x[sk],y[sk], linewidth=3, color=c)\n                    plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)\n                    plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, 
markeredgewidth=2)\n\n                if draw_bbox:\n                    [bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox']\n                    poly = [[bbox_x, bbox_y], [bbox_x, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y]]\n                    np_poly = np.array(poly).reshape((4,2))\n                    polygons.append(Polygon(np_poly))\n                    color.append(c)\n\n            p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)\n            ax.add_collection(p)\n            p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)\n            ax.add_collection(p)\n        elif datasetType == 'captions':\n            for ann in anns:\n                print(ann['caption'])\n\n    def loadRes(self, resFile):\n        \"\"\"\n        Load result file and return a result api object.\n        :param   resFile (str)     : file name of result file\n        :return: res (obj)         : result api object\n        \"\"\"\n        res = COCO()\n        res.dataset['images'] = [img for img in self.dataset['images']]\n\n        print('Loading and preparing results...')\n        tic = time.time()\n        if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):\n            with open(resFile) as f:\n                anns = json.load(f)\n        elif type(resFile) == np.ndarray:\n            anns = self.loadNumpyAnnotations(resFile)\n        else:\n            anns = resFile\n        assert type(anns) == list, 'results in not an array of objects'\n        annsImgIds = [ann['image_id'] for ann in anns]\n        assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n               'Results do not correspond to current coco set'\n        if 'caption' in anns[0]:\n            imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\n            res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\n            for id, ann in enumerate(anns):\n                ann['id'] = id+1\n        elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n            for id, ann in enumerate(anns):\n                bb = ann['bbox']\n                x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]\n                if not 'segmentation' in ann:\n                    ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n                ann['area'] = bb[2]*bb[3]\n                ann['id'] = id+1\n                ann['iscrowd'] = 0\n        elif 'segmentation' in anns[0]:\n            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n            for id, ann in enumerate(anns):\n                # now only support compressed RLE format as segmentation results\n                ann['area'] = maskUtils.area(ann['segmentation'])\n                if not 'bbox' in ann:\n                    ann['bbox'] = maskUtils.toBbox(ann['segmentation'])\n                ann['id'] = id+1\n                ann['iscrowd'] = 0\n        elif 'keypoints' in anns[0]:\n            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n            for id, ann in enumerate(anns):\n                s = ann['keypoints']\n                x = s[0::3]\n                y = s[1::3]\n                x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)\n                ann['area'] = (x1-x0)*(y1-y0)\n                ann['id'] = id + 1\n               
                ann['bbox'] = [x0,y0,x1-x0,y1-y0]\n        print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n        res.dataset['annotations'] = anns\n        res.createIndex()\n        return res\n\n    def download(self, tarDir = None, imgIds = [] ):\n        '''\n        Download COCO images from mscoco.org server.\n        :param tarDir (str): directory to save the downloaded images\n               imgIds (list): images to be downloaded\n        :return:\n        '''\n        if tarDir is None:\n            print('Please specify target directory')\n            return -1\n        if len(imgIds) == 0:\n            imgs = self.imgs.values()\n        else:\n            imgs = self.loadImgs(imgIds)\n        N = len(imgs)\n        if not os.path.exists(tarDir):\n            os.makedirs(tarDir)\n        for i, img in enumerate(imgs):\n            tic = time.time()\n            fname = os.path.join(tarDir, img['file_name'])\n            if not os.path.exists(fname):\n                urlretrieve(img['coco_url'], fname)\n            print('downloaded {}/{} images (t={:0.1f}s)'.format(i+1, N, time.time()- tic))\n\n    def loadNumpyAnnotations(self, data):\n        \"\"\"\n        Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}\n        :param  data (numpy.ndarray)\n        :return: annotations (python nested list)\n        \"\"\"\n        print('Converting ndarray to lists...')\n        assert(type(data) == np.ndarray)\n        print(data.shape)\n        assert(data.shape[1] == 7)\n        N = data.shape[0]\n        ann = []\n        for i in range(N):\n            if i % 1000000 == 0:\n                print('{}/{}'.format(i,N))\n            ann += [{\n                'image_id'  : int(data[i, 0]),\n                'bbox'  : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],\n                'score' : data[i, 5],\n                'category_id': int(data[i, 6]),\n                }]\n        return ann\n\n    def annToRLE(self, ann):\n        \"\"\"\n        Convert annotation which can be polygons or uncompressed RLE to RLE.\n        :return: rle (dict) : run-length encoded mask\n        \"\"\"\n        t = self.imgs[ann['image_id']]\n        h, w = t['height'], t['width']\n        segm = ann['segmentation']\n        if type(segm) == list:\n            # polygon -- a single object might consist of multiple parts\n            # we merge all parts into one mask rle code\n            rles = maskUtils.frPyObjects(segm, h, w)\n            rle = maskUtils.merge(rles)\n        elif type(segm['counts']) == list:\n            # uncompressed RLE\n            rle = maskUtils.frPyObjects(segm, h, w)\n        else:\n            # rle\n            rle = ann['segmentation']\n        return rle\n\n    def annToMask(self, ann):\n        \"\"\"\n        Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.\n        :return: binary mask (numpy 2D array)\n        \"\"\"\n        rle = self.annToRLE(ann)\n        m = maskUtils.decode(rle)\n        return m\n"
  },
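  {
    "path": "artrackv2_mindspore/lib/train/dataset/example_coco_tool.py",
    "content": "\"\"\"Illustrative sketch only (hypothetical file; nothing in the training\npipeline imports it). It shows how COCO_tool's loadRes turns raw detections\ninto a result api object whose entries can be rasterized with annToMask.\nThe annotation-file path and the fake detections are assumptions.\n\"\"\"\nimport json\n\nfrom lib.train.dataset.COCO_tool import COCO\n\n# COCO_tool's COCO takes an already-decoded annotation dict (this is how\n# coco_seq_lmdb.py constructs it).\nwith open('annotations/instances_train2017.json') as f:\n    coco_gt = COCO(json.load(f))\n\n# loadRes accepts a result file name, a list of result dicts, or an Nx7\n# ndarray of [imageID, x1, y1, w, h, score, class]; here we pass a list.\nresults = [{'image_id': img_id, 'bbox': [10.0, 20.0, 30.0, 40.0],\n            'score': 0.9, 'category_id': 1}\n           for img_id in sorted(coco_gt.getImgIds())[:2]]\ncoco_dt = coco_gt.loadRes(results)\n\n# loadRes filled in 'id', 'area', 'iscrowd' and a rectangular 'segmentation',\n# so result annotations behave like ground-truth ones.\nann = coco_dt.loadAnns(coco_dt.getAnnIds(imgIds=[results[0]['image_id']]))[0]\nprint(coco_dt.annToMask(ann).shape)\n"
  },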
  {
    "path": "artrackv2_mindspore/lib/train/dataset/__init__.py",
    "content": "from .lasot import Lasot\nfrom .got10k import Got10k\nfrom .tracking_net import TrackingNet\nfrom .imagenetvid import ImagenetVID\nfrom .coco import MSCOCO\nfrom .coco_seq import MSCOCOSeq\nfrom .got10k_lmdb import Got10k_lmdb\nfrom .lasot_lmdb import Lasot_lmdb\nfrom .imagenetvid_lmdb import ImagenetVID_lmdb\nfrom .coco_seq_lmdb import MSCOCOSeq_lmdb\nfrom .tracking_net_lmdb import TrackingNet_lmdb\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/dataset/base_image_dataset.py",
    "content": "import torch.utils.data\nfrom lib.train.data.image_loader import jpeg4py_loader\n\n\nclass BaseImageDataset(torch.utils.data.Dataset):\n    \"\"\" Base class for image datasets \"\"\"\n\n    def __init__(self, name, root, image_loader=jpeg4py_loader):\n        \"\"\"\n        args:\n            root - The root path to the dataset\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n        \"\"\"\n        self.name = name\n        self.root = root\n        self.image_loader = image_loader\n\n        self.image_list = []     # Contains the list of sequences.\n        self.class_list = []\n\n    def __len__(self):\n        \"\"\" Returns size of the dataset\n        returns:\n            int - number of samples in the dataset\n        \"\"\"\n        return self.get_num_images()\n\n    def __getitem__(self, index):\n        \"\"\" Not to be used! Check get_frames() instead.\n        \"\"\"\n        return None\n\n    def get_name(self):\n        \"\"\" Name of the dataset\n\n        returns:\n            string - Name of the dataset\n        \"\"\"\n        raise NotImplementedError\n\n    def get_num_images(self):\n        \"\"\" Number of sequences in a dataset\n\n        returns:\n            int - number of sequences in the dataset.\"\"\"\n        return len(self.image_list)\n\n    def has_class_info(self):\n        return False\n\n    def get_class_name(self, image_id):\n        return None\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_class_list(self):\n        return self.class_list\n\n    def get_images_in_class(self, class_name):\n        raise NotImplementedError\n\n    def has_segmentation_info(self):\n        return False\n\n    def get_image_info(self, seq_id):\n        \"\"\" Returns information about a particular image,\n\n        args:\n            seq_id - index of the image\n\n        returns:\n            Dict\n            \"\"\"\n        raise NotImplementedError\n\n    def get_image(self, image_id, anno=None):\n        \"\"\" Get a image\n\n        args:\n            image_id      - index of image\n            anno(None)  - The annotation for the sequence (see get_sequence_info). If None, they will be loaded.\n\n        returns:\n            image -\n            anno -\n            dict - A dict containing meta information about the sequence, e.g. class of the target object.\n\n        \"\"\"\n        raise NotImplementedError\n\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/dataset/base_video_dataset.py",
    "content": "import torch.utils.data\n# 2021.1.5 use jpeg4py_loader_w_failsafe as default\nfrom lib.train.data.image_loader import jpeg4py_loader_w_failsafe\n\n\nclass BaseVideoDataset(torch.utils.data.Dataset):\n    \"\"\" Base class for video datasets \"\"\"\n\n    def __init__(self, name, root, image_loader=jpeg4py_loader_w_failsafe):\n        \"\"\"\n        args:\n            root - The root path to the dataset\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n        \"\"\"\n        self.name = name\n        self.root = root\n        self.image_loader = image_loader\n\n        self.sequence_list = []     # Contains the list of sequences.\n        self.class_list = []\n\n    def __len__(self):\n        \"\"\" Returns size of the dataset\n        returns:\n            int - number of samples in the dataset\n        \"\"\"\n        return self.get_num_sequences()\n\n    def __getitem__(self, index):\n        \"\"\" Not to be used! Check get_frames() instead.\n        \"\"\"\n        return None\n\n    def is_video_sequence(self):\n        \"\"\" Returns whether the dataset is a video dataset or an image dataset\n\n        returns:\n            bool - True if a video dataset\n        \"\"\"\n        return True\n\n    def is_synthetic_video_dataset(self):\n        \"\"\" Returns whether the dataset contains real videos or synthetic\n\n        returns:\n            bool - True if a video dataset\n        \"\"\"\n        return False\n\n    def get_name(self):\n        \"\"\" Name of the dataset\n\n        returns:\n            string - Name of the dataset\n        \"\"\"\n        raise NotImplementedError\n\n    def get_num_sequences(self):\n        \"\"\" Number of sequences in a dataset\n\n        returns:\n            int - number of sequences in the dataset.\"\"\"\n        return len(self.sequence_list)\n\n    def has_class_info(self):\n        return False\n\n    def has_occlusion_info(self):\n        return False\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_class_list(self):\n        return self.class_list\n\n    def get_sequences_in_class(self, class_name):\n        raise NotImplementedError\n\n    def has_segmentation_info(self):\n        return False\n\n    def get_sequence_info(self, seq_id):\n        \"\"\" Returns information about a particular sequences,\n\n        args:\n            seq_id - index of the sequence\n\n        returns:\n            Dict\n            \"\"\"\n        raise NotImplementedError\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        \"\"\" Get a set of frames from a particular sequence\n\n        args:\n            seq_id      - index of sequence\n            frame_ids   - a list of frame numbers\n            anno(None)  - The annotation for the sequence (see get_sequence_info). If None, they will be loaded.\n\n        returns:\n            list - List of frames corresponding to frame_ids\n            list - List of dicts for each frame\n            dict - A dict containing meta information about the sequence, e.g. class of the target object.\n\n        \"\"\"\n        raise NotImplementedError\n\n"
  },
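  {
    "path": "artrackv2_mindspore/lib/train/dataset/example_base_video_dataset.py",
    "content": "\"\"\"Illustrative sketch only (hypothetical file; nothing imports it): the\nminimal contract a concrete dataset must fulfil for BaseVideoDataset. See\ngot10k.py for a complete implementation.\n\"\"\"\nimport torch\n\nfrom lib.train.dataset.base_video_dataset import BaseVideoDataset\n\n\nclass ToyVideoDataset(BaseVideoDataset):\n    \"\"\" A one-sequence dataset with a hard-coded annotation. \"\"\"\n\n    def __init__(self, root):\n        super().__init__('toy', root)\n        self.sequence_list = ['seq_000']\n\n    def get_name(self):\n        return 'toy'\n\n    def get_sequence_info(self, seq_id):\n        # A single frame with a fixed (x, y, w, h) box; 'valid' and 'visible'\n        # follow the conventions used by the real datasets in this package.\n        bbox = torch.tensor([[10.0, 10.0, 20.0, 20.0]])\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        return {'bbox': bbox, 'valid': valid, 'visible': valid.byte()}\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        raise NotImplementedError  # a real dataset would load the images here\n"
  },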
  {
    "path": "artrackv2_mindspore/lib/train/dataset/coco.py",
    "content": "import os\nfrom .base_image_dataset import BaseImageDataset\nimport torch\nimport random\nfrom collections import OrderedDict\nfrom lib.train.data import jpeg4py_loader\nfrom lib.train.admin import env_settings\nfrom pycocotools.coco import COCO\n\n\nclass MSCOCO(BaseImageDataset):\n    \"\"\" The COCO object detection dataset.\n\n    Publication:\n        Microsoft COCO: Common Objects in Context.\n        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n        ECCV, 2014\n        https://arxiv.org/pdf/1405.0312.pdf\n\n    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n    organized as follows.\n        - coco_root\n            - annotations\n                - instances_train2014.json\n                - instances_train2017.json\n            - images\n                - train2014\n                - train2017\n\n    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None,\n                 split=\"train\", version=\"2014\"):\n        \"\"\"\n        args:\n            root - path to coco root folder\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            min_area - Objects with area less than min_area are filtered out. Default is 0.0\n            split - 'train' or 'val'.\n            version - version of coco dataset (2014 or 2017)\n        \"\"\"\n\n        root = env_settings().coco_dir if root is None else root\n        super().__init__('COCO', root, image_loader)\n\n        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n        self.coco_set = COCO(self.anno_path)\n\n        self.cats = self.coco_set.cats\n\n        self.class_list = self.get_class_list()  # the parent class thing would happen in the sampler\n\n        self.image_list = self._get_image_list(min_area=min_area)\n\n        if data_fraction is not None:\n            self.image_list = random.sample(self.image_list, int(len(self.image_list) * data_fraction))\n        self.im_per_class = self._build_im_per_class()\n\n    def _get_image_list(self, min_area=None):\n        ann_list = list(self.coco_set.anns.keys())\n        image_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n        if min_area is not None:\n            image_list = [a for a in image_list if self.coco_set.anns[a]['area'] > min_area]\n\n        return image_list\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_name(self):\n        return 'coco'\n\n    def has_class_info(self):\n        return True\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_class_list(self):\n        class_list = []\n        for cat_id in self.cats.keys():\n            class_list.append(self.cats[cat_id]['name'])\n        return class_list\n\n    def _build_im_per_class(self):\n        im_per_class = {}\n        for i, im in enumerate(self.image_list):\n            class_name = 
self.cats[self.coco_set.anns[im]['category_id']]['name']\n            if class_name not in im_per_class:\n                im_per_class[class_name] = [i]\n            else:\n                im_per_class[class_name].append(i)\n\n        return im_per_class\n\n    def get_images_in_class(self, class_name):\n        return self.im_per_class[class_name]\n\n    def get_image_info(self, im_id):\n        anno = self._get_anno(im_id)\n\n        bbox = torch.Tensor(anno['bbox']).view(4,)\n\n        mask = torch.Tensor(self.coco_set.annToMask(anno))\n\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_anno(self, im_id):\n        anno = self.coco_set.anns[self.image_list[im_id]]\n\n        return anno\n\n    def _get_image(self, im_id):\n        path = self.coco_set.loadImgs([self.coco_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name']\n        img = self.image_loader(os.path.join(self.img_pth, path))\n        return img\n\n    def get_meta_info(self, im_id):\n        try:\n            cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']]\n            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n                                       'motion_class': None,\n                                       'major_class': cat_dict_current['supercategory'],\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n    def get_class_name(self, im_id):\n        cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_image(self, image_id, anno=None):\n        frame = self._get_image(image_id)\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return frame, anno, object_meta\n"
  },
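  {
    "path": "artrackv2_mindspore/lib/train/dataset/example_coco.py",
    "content": "\"\"\"Illustrative sketch only (hypothetical file; the '/data/coco' root is an\nassumption): constructing the MSCOCO image dataset from coco.py. The root\nmust follow the layout documented in the class docstring, i.e.\nannotations/instances_<split><version>.json and images/<split><version>/.\n\"\"\"\nfrom lib.train.dataset import MSCOCO\n\ndataset = MSCOCO(root='/data/coco', split='train', version='2017', min_area=250)\nprint(dataset.get_name(), len(dataset))  # __len__ delegates to get_num_images()\n\n# get_image returns the decoded image, its annotation dict and meta info.\nimg, anno, meta = dataset.get_image(0)\nprint(anno['bbox'], anno['mask'].shape, meta['object_class_name'])\n"
  },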
  {
    "path": "artrackv2_mindspore/lib/train/dataset/coco_seq.py",
    "content": "import os\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nimport torch\nimport random\nfrom pycocotools.coco import COCO\nfrom collections import OrderedDict\nfrom lib.train.admin import env_settings\n\n\nclass MSCOCOSeq(BaseVideoDataset):\n    \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n    Publication:\n        Microsoft COCO: Common Objects in Context.\n        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n        ECCV, 2014\n        https://arxiv.org/pdf/1405.0312.pdf\n\n    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n    organized as follows.\n        - coco_root\n            - annotations\n                - instances_train2014.json\n                - instances_train2017.json\n            - images\n                - train2014\n                - train2017\n\n    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n        \"\"\"\n        args:\n            root - path to the coco dataset.\n            image_loader (default_image_loader) -  The function to read the images. If installed,\n                                                   jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n                                                   opencv's imread is used.\n            data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n                                  images  will be used\n            split - 'train' or 'val'.\n            version - version of coco dataset (2014 or 2017)\n        \"\"\"\n        root = env_settings().coco_dir if root is None else root\n        super().__init__('COCO', root, image_loader)\n\n        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n        # Load the COCO set.\n        self.coco_set = COCO(self.anno_path)\n\n        self.cats = self.coco_set.cats\n\n        self.class_list = self.get_class_list()\n\n        self.sequence_list = self._get_sequence_list()\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n        self.seq_per_class = self._build_seq_per_class()\n\n    def _get_sequence_list(self):\n        ann_list = list(self.coco_set.anns.keys())\n        seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n        return seq_list\n\n    def is_video_sequence(self):\n        return False\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_name(self):\n        return 'coco'\n\n    def has_class_info(self):\n        return True\n\n    def get_class_list(self):\n        class_list = []\n        for cat_id in self.cats.keys():\n            class_list.append(self.cats[cat_id]['name'])\n        return class_list\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def _build_seq_per_class(self):\n        seq_per_class = {}\n        for i, seq in enumerate(self.sequence_list):\n            class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n            if class_name not in seq_per_class:\n                seq_per_class[class_name] = [i]\n            else:\n                seq_per_class[class_name].append(i)\n\n        return seq_per_class\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def get_sequence_info(self, seq_id):\n        anno = self._get_anno(seq_id)\n\n        bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n        mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n        '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\n        valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_anno(self, seq_id):\n        anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n        return anno\n\n    def _get_frames(self, seq_id):\n        path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n        img = self.image_loader(os.path.join(self.img_pth, path))\n        return img\n\n    def get_meta_info(self, seq_id):\n        try:\n            cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n                                       'motion_class': None,\n                                       'major_class': cat_dict_current['supercategory'],\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n\n    def get_class_name(self, seq_id):\n        cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n        # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n        # list containing these replicated images.\n        frame = self._get_frames(seq_id)\n\n        frame_list = [frame.copy() for _ in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n        object_meta = self.get_meta_info(seq_id)\n\n        return frame_list, anno_frames, object_meta\n"
  },
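  {
    "path": "artrackv2_mindspore/lib/train/dataset/example_coco_seq.py",
    "content": "\"\"\"Illustrative sketch only (hypothetical file and root): coco_seq.py treats\neach COCO image as a sequence of length 1, so get_frames simply replicates\nthe image once per requested frame id.\n\"\"\"\nfrom lib.train.dataset import MSCOCOSeq\n\ndataset = MSCOCOSeq(root='/data/coco', split='train', version='2017')\nassert not dataset.is_video_sequence()  # an image dataset wrapped as sequences\n\nframes, annos, meta = dataset.get_frames(seq_id=0, frame_ids=[0, 0, 0])\nassert len(frames) == 3 and len(annos['bbox']) == 3  # same image, copied\nprint(annos['bbox'][0], meta['object_class_name'])\n"
  },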
  {
    "path": "artrackv2_mindspore/lib/train/dataset/coco_seq_lmdb.py",
    "content": "import os\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nimport torch\nimport random\nfrom collections import OrderedDict\nfrom lib.train.admin import env_settings\nfrom lib.train.dataset.COCO_tool import COCO\nfrom lib.utils.lmdb_utils import decode_img, decode_json\nimport time\n\nclass MSCOCOSeq_lmdb(BaseVideoDataset):\n    \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n    Publication:\n        Microsoft COCO: Common Objects in Context.\n        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n        ECCV, 2014\n        https://arxiv.org/pdf/1405.0312.pdf\n\n    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n    organized as follows.\n        - coco_root\n            - annotations\n                - instances_train2014.json\n                - instances_train2017.json\n            - images\n                - train2014\n                - train2017\n\n    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n        \"\"\"\n        args:\n            root - path to the coco dataset.\n            image_loader (default_image_loader) -  The function to read the images. If installed,\n                                                   jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n                                                   opencv's imread is used.\n            data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n                                  images  will be used\n            split - 'train' or 'val'.\n            version - version of coco dataset (2014 or 2017)\n        \"\"\"\n        root = env_settings().coco_dir if root is None else root\n        super().__init__('COCO_lmdb', root, image_loader)\n        self.root = root\n        self.img_pth = 'images/{}{}/'.format(split, version)\n        self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)\n\n        # Load the COCO set.\n        print('loading annotations into memory...')\n        tic = time.time()\n        coco_json = decode_json(root, self.anno_path)\n        print('Done (t={:0.2f}s)'.format(time.time() - tic))\n\n        self.coco_set = COCO(coco_json)\n\n        self.cats = self.coco_set.cats\n\n        self.class_list = self.get_class_list()\n\n        self.sequence_list = self._get_sequence_list()\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n        self.seq_per_class = self._build_seq_per_class()\n\n    def _get_sequence_list(self):\n        ann_list = list(self.coco_set.anns.keys())\n        seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n        return seq_list\n\n    def is_video_sequence(self):\n        return False\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_name(self):\n        return 'coco_lmdb'\n\n    def has_class_info(self):\n        return True\n\n    def get_class_list(self):\n        class_list = []\n        for cat_id in self.cats.keys():\n            class_list.append(self.cats[cat_id]['name'])\n        return class_list\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def _build_seq_per_class(self):\n        seq_per_class = {}\n        for i, seq in enumerate(self.sequence_list):\n            class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n            if class_name not in seq_per_class:\n                seq_per_class[class_name] = [i]\n            else:\n                seq_per_class[class_name].append(i)\n\n        return seq_per_class\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def get_sequence_info(self, seq_id):\n        anno = self._get_anno(seq_id)\n\n        bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n        mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n        '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\n        valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_anno(self, seq_id):\n        anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n        return anno\n\n    def _get_frames(self, seq_id):\n        path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n        # img = self.image_loader(os.path.join(self.img_pth, path))\n        img = decode_img(self.root, os.path.join(self.img_pth, path))\n        return img\n\n    def get_meta_info(self, seq_id):\n        try:\n            cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n                                       'motion_class': None,\n                                       'major_class': cat_dict_current['supercategory'],\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n\n    def get_class_name(self, seq_id):\n        cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n        # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n        # list containing these replicated images.\n        frame = self._get_frames(seq_id)\n\n        frame_list = [frame.copy() for _ in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n        object_meta = self.get_meta_info(seq_id)\n\n        return frame_list, anno_frames, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/dataset/got10k.py",
    "content": "import os\nimport os.path\nimport numpy as np\nimport torch\nimport csv\nimport pandas\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nfrom lib.train.admin import env_settings\n\n\nclass Got10k(BaseVideoDataset):\n    \"\"\" GOT-10k dataset.\n\n    Publication:\n        GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n        Lianghua Huang, Xin Zhao, and Kaiqi Huang\n        arXiv:1810.11981, 2018\n        https://arxiv.org/pdf/1810.11981.pdf\n\n    Download dataset from http://got-10k.aitestunion.com/downloads\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n                    not NOT the official got-10k validation split. To use the official validation split, provide that as\n                    the root folder instead.\n            seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n                        options can be used at the same time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().got10k_dir if root is None else root\n        super().__init__('GOT10k', root, image_loader)\n\n        # all folders inside the root\n        self.sequence_list = self._get_sequence_list()\n\n        # seq_id is the index of the folder inside the got10k root path\n        if split is not None:\n            if seq_ids is not None:\n                raise ValueError('Cannot set both split_name and seq_ids.')\n            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\n            elif split == 'val':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\n            elif split == 'train_full':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt')\n            elif split == 'vottrain':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\n            elif split == 'votval':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n            seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze(\"columns\").values.tolist()\n        elif seq_ids is None:\n            seq_ids = list(range(0, len(self.sequence_list)))\n\n        self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        
self.sequence_meta_info = self._load_meta_info()\n        self.seq_per_class = self._build_seq_per_class()\n\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def get_name(self):\n        return 'got10k'\n\n    def has_class_info(self):\n        return True\n\n    def has_occlusion_info(self):\n        return True\n\n    def _load_meta_info(self):\n        sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\n        return sequence_meta_info\n\n    def _read_meta(self, seq_path):\n        try:\n            with open(os.path.join(seq_path, 'meta_info.ini')) as f:\n                meta_info = f.readlines()\n            object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\n                                       'motion_class': meta_info[6].split(': ')[-1][:-1],\n                                       'major_class': meta_info[7].split(': ')[-1][:-1],\n                                       'root_class': meta_info[8].split(': ')[-1][:-1],\n                                       'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n    def _build_seq_per_class(self):\n        seq_per_class = {}\n\n        for i, s in enumerate(self.sequence_list):\n            object_class = self.sequence_meta_info[s]['object_class_name']\n            if object_class in seq_per_class:\n                seq_per_class[object_class].append(i)\n            else:\n                seq_per_class[object_class] = [i]\n\n        return seq_per_class\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _get_sequence_list(self):\n        with open(os.path.join(self.root, 'list.txt')) as f:\n            dir_list = list(csv.reader(f))\n        dir_list = [dir_name[0] for dir_name in dir_list]\n        return dir_list\n\n    def _read_bb_anno(self, seq_path):\n        bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n        return torch.tensor(gt)\n\n    def _read_target_visible(self, seq_path):\n        # Read full occlusion and out_of_view\n        occlusion_file = os.path.join(seq_path, \"absence.label\")\n        cover_file = os.path.join(seq_path, \"cover.label\")\n\n        with open(occlusion_file, 'r', newline='') as f:\n            occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n        with open(cover_file, 'r', newline='') as f:\n            cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n\n        target_visible = ~occlusion & (cover>0).byte()\n\n        visible_ratio = cover.float() / 8\n        return target_visible, visible_ratio\n\n    def _get_sequence_path(self, seq_id):\n        return os.path.join(self.root, self.sequence_list[seq_id])\n\n    def get_sequence_info(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        bbox = self._read_bb_anno(seq_path)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible, visible_ratio = self._read_target_visible(seq_path)\n       
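 # a frame counts as visible only when its box is valid as well\n       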
 visible = visible & valid.byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n    def _get_frame_path(self, seq_path, frame_id):\n        return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1))    # frames start from 1\n\n    def _get_frame(self, seq_path, frame_id):\n        return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n    def get_class_name(self, seq_id):\n        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n        return obj_meta['object_class_name']\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        seq_path = self._get_sequence_path(seq_id)\n        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        return frame_list, anno_frames, obj_meta\n"
  },
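  {
    "path": "artrackv2_mindspore/lib/train/dataset/example_got10k.py",
    "content": "\"\"\"Illustrative sketch only (hypothetical file and root): got10k.py resolves\nits 'split' argument to an id file under lib/train/data_specs, so 'val' and\n'votval' are held-out subsets of the official GOT-10k train set, not the\nofficial validation split.\n\"\"\"\nfrom lib.train.dataset import Got10k\n\ntrain_set = Got10k(root='/data/got10k/train', split='vottrain')\nval_set = Got10k(root='/data/got10k/train', split='votval')\n\ninfo = train_set.get_sequence_info(0)\n# 'visible' combines the absence/cover labels with box validity, and\n# 'visible_ratio' maps the 0-8 cover label into [0, 1].\nprint(info['bbox'].shape, info['visible'][:5], info['visible_ratio'][:5])\n"
  },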
  {
    "path": "artrackv2_mindspore/lib/train/dataset/got10k_lmdb.py",
    "content": "import os\nimport os.path\nimport numpy as np\nimport torch\nimport csv\nimport pandas\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nfrom lib.train.admin import env_settings\n\n'''2021.1.16 Gok10k for loading lmdb dataset'''\nfrom lib.utils.lmdb_utils import *\n\n\nclass Got10k_lmdb(BaseVideoDataset):\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n                    not NOT the official got-10k validation split. To use the official validation split, provide that as\n                    the root folder instead.\n            seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n                        options can be used at the same time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            use_lmdb - whether the dataset is stored in lmdb format\n        \"\"\"\n        root = env_settings().got10k_lmdb_dir if root is None else root\n        super().__init__('GOT10k_lmdb', root, image_loader)\n\n        # all folders inside the root\n        self.sequence_list = self._get_sequence_list()\n\n        # seq_id is the index of the folder inside the got10k root path\n        if split is not None:\n            if seq_ids is not None:\n                raise ValueError('Cannot set both split_name and seq_ids.')\n            train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt')\n            elif split == 'val':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt')\n            elif split == 'train_full':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt')\n            elif split == 'vottrain':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt')\n            elif split == 'votval':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n        elif seq_ids is None:\n            seq_ids = list(range(0, len(self.sequence_list)))\n\n        self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        self.sequence_meta_info = self._load_meta_info()\n        self.seq_per_class = self._build_seq_per_class()\n\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def get_name(self):\n        return 'got10k_lmdb'\n\n    def 
has_class_info(self):\n        return True\n\n    def has_occlusion_info(self):\n        return True\n\n    def _load_meta_info(self):\n        def _read_meta(meta_info):\n\n            object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1],\n                                       'motion_class': meta_info[6].split(': ')[-1],\n                                       'major_class': meta_info[7].split(': ')[-1],\n                                       'root_class': meta_info[8].split(': ')[-1],\n                                       'motion_adverb': meta_info[9].split(': ')[-1]})\n\n            return object_meta\n        sequence_meta_info = {}\n        for s in self.sequence_list:\n            try:\n                meta_str = decode_str(self.root, \"train/%s/meta_info.ini\" %s)\n                sequence_meta_info[s] = _read_meta(meta_str.split('\\n'))\n            except:\n                sequence_meta_info[s] = OrderedDict({'object_class_name': None,\n                                                     'motion_class': None,\n                                                     'major_class': None,\n                                                     'root_class': None,\n                                                     'motion_adverb': None})\n        return sequence_meta_info\n\n    def _build_seq_per_class(self):\n        seq_per_class = {}\n\n        for i, s in enumerate(self.sequence_list):\n            object_class = self.sequence_meta_info[s]['object_class_name']\n            if object_class in seq_per_class:\n                seq_per_class[object_class].append(i)\n            else:\n                seq_per_class[object_class] = [i]\n\n        return seq_per_class\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _get_sequence_list(self):\n        dir_str = decode_str(self.root, 'train/list.txt')\n        dir_list = dir_str.split('\\n')\n        return dir_list\n\n    def _read_bb_anno(self, seq_path):\n        bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n        gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1]  # the last line in got10k is empty\n        gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n        gt_arr = np.array(gt_list).astype(np.float32)\n\n        return torch.tensor(gt_arr)\n\n    def _read_target_visible(self, seq_path):\n        # full occlusion and out_of_view files\n        occlusion_file = os.path.join(seq_path, \"absence.label\")\n        cover_file = os.path.join(seq_path, \"cover.label\")\n        # Read these files\n        occ_list = list(map(int, decode_str(self.root, occlusion_file).split('\\n')[:-1]))  # the last line in got10k is empty\n        occlusion = torch.ByteTensor(occ_list)\n        cover_list = list(map(int, decode_str(self.root, cover_file).split('\\n')[:-1]))  # the last line in got10k is empty\n        cover = torch.ByteTensor(cover_list)\n\n        target_visible = ~occlusion & (cover>0).byte()\n\n        visible_ratio = cover.float() / 8\n        return target_visible, visible_ratio\n\n    def _get_sequence_path(self, seq_id):\n        return os.path.join(\"train\", self.sequence_list[seq_id])\n\n    def get_sequence_info(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        bbox = self._read_bb_anno(seq_path)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible, visible_ratio = self._read_target_visible(seq_path)\n        visible = visible & 
valid.byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n    def _get_frame_path(self, seq_path, frame_id):\n        return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1))    # frames start from 1\n\n    def _get_frame(self, seq_path, frame_id):\n        return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n    def get_class_name(self, seq_id):\n        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n        return obj_meta['object_class_name']\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        seq_path = self._get_sequence_path(seq_id)\n        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        return frame_list, anno_frames, obj_meta\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/dataset/imagenetvid.py",
    "content": "import os\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nimport xml.etree.ElementTree as ET\nimport json\nimport torch\nfrom collections import OrderedDict\nfrom lib.train.admin import env_settings\n\n\ndef get_target_to_image_ratio(seq):\n    anno = torch.Tensor(seq['anno'])\n    img_sz = torch.Tensor(seq['image_size'])\n    return (anno[0, 2:4].prod() / (img_sz.prod())).sqrt()\n\n\nclass ImagenetVID(BaseVideoDataset):\n    \"\"\" Imagenet VID dataset.\n\n    Publication:\n        ImageNet Large Scale Visual Recognition Challenge\n        Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n        Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\n        IJCV, 2015\n        https://arxiv.org/pdf/1409.0575.pdf\n\n    Download the dataset from http://image-net.org/\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n        \"\"\"\n        args:\n            root - path to the imagenet vid dataset.\n            image_loader (default_image_loader) -  The function to read the images. If installed,\n                                                   jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n                                                   opencv's imread is used.\n            min_length - Minimum allowed sequence length.\n            max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\n                                which cover complete image.\n        \"\"\"\n        root = env_settings().imagenet_dir if root is None else root\n        super().__init__(\"imagenetvid\", root, image_loader)\n\n        cache_file = os.path.join(root, 'cache.json')\n        if os.path.isfile(cache_file):\n            # If available, load the pre-processed cache file containing meta-info for each sequence\n            with open(cache_file, 'r') as f:\n                sequence_list_dict = json.load(f)\n\n            self.sequence_list = sequence_list_dict\n        else:\n            # Else process the imagenet annotations and generate the cache file\n            self.sequence_list = self._process_anno(root)\n\n            with open(cache_file, 'w') as f:\n                json.dump(self.sequence_list, f)\n\n        # Filter the sequences based on min_length and max_target_area in the first frame\n        self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n                              get_target_to_image_ratio(x) < max_target_area]\n\n    def get_name(self):\n        return 'imagenetvid'\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def get_sequence_info(self, seq_id):\n        bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n        valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n        visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n        return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n    def _get_frame(self, sequence, frame_id):\n        set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n        vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n        frame_number = frame_id + sequence['start_frame']\n        frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\n                                  
'{:06d}.JPEG'.format(frame_number))\n        return self.image_loader(frame_path)\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        sequence = self.sequence_list[seq_id]\n\n        frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        # Create anno dict\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        # added the class info to the meta info\n        object_meta = OrderedDict({'object_class': sequence['class_name'],\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n\n    def _process_anno(self, root):\n        # Builds individual tracklets\n        base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train')\n\n        all_sequences = []\n        for set in sorted(os.listdir(base_vid_anno_path)):\n            set_id = int(set.split('_')[-1])\n            for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\n\n                vid_id = int(vid.split('_')[-1])\n                anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\n\n                frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\n                image_size = [int(frame1_anno.find('size/width').text), int(frame1_anno.find('size/height').text)]\n\n                objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\n                           for f in anno_files]\n\n                tracklets = {}\n\n                # Find all tracklets along with start frame\n                for f_id, all_targets in enumerate(objects):\n                    for target in all_targets:\n                        tracklet_id = target.find('trackid').text\n                        if tracklet_id not in tracklets:\n                            tracklets[tracklet_id] = f_id\n\n                for tracklet_id, tracklet_start in tracklets.items():\n                    tracklet_anno = []\n                    target_visible = []\n                    class_name_id = None\n\n                    for f_id in range(tracklet_start, len(objects)):\n                        found = False\n                        for target in objects[f_id]:\n                            if target.find('trackid').text == tracklet_id:\n                                if not class_name_id:\n                                    class_name_id = target.find('name').text\n                                x1 = int(target.find('bndbox/xmin').text)\n                                y1 = int(target.find('bndbox/ymin').text)\n                                x2 = int(target.find('bndbox/xmax').text)\n                                y2 = int(target.find('bndbox/ymax').text)\n\n                                tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\n                                target_visible.append(target.find('occluded').text == '0')\n\n                                found = True\n                                break\n                        if not found:\n                            break\n\n                    new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\n                  
                  'start_frame': tracklet_start, 'anno': tracklet_anno,\n                                    'target_visible': target_visible, 'image_size': image_size}\n                    all_sequences.append(new_sequence)\n\n        return all_sequences\n"
  },
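  {
    "path": "artrackv2_mindspore/lib/train/dataset/example_imagenetvid.py",
    "content": "\"\"\"Illustrative sketch only (hypothetical file and root): on first\nconstruction imagenetvid.py parses Annotations/VID/train into per-tracklet\nentries and caches them to <root>/cache.json; later runs reload the cache.\n\"\"\"\nfrom lib.train.dataset import ImagenetVID\n\n# Keep tracklets with at least 10 annotated frames whose first-frame box\n# covers less than 25% of the image (the ratio is sqrt(box_area/img_area)).\ndataset = ImagenetVID(root='/data/ilsvrc2015', min_length=10, max_target_area=0.5)\n\nframes, annos, meta = dataset.get_frames(0, [0, 1, 2])\nprint(len(dataset.sequence_list), meta['object_class'])\n"
  },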
  {
    "path": "artrackv2_mindspore/lib/train/dataset/imagenetvid_lmdb.py",
    "content": "import os\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nimport torch\nfrom collections import OrderedDict\nfrom lib.train.admin import env_settings\nfrom lib.utils.lmdb_utils import decode_img, decode_json\n\n\ndef get_target_to_image_ratio(seq):\n    anno = torch.Tensor(seq['anno'])\n    img_sz = torch.Tensor(seq['image_size'])\n    return (anno[0, 2:4].prod() / (img_sz.prod())).sqrt()\n\n\nclass ImagenetVID_lmdb(BaseVideoDataset):\n    \"\"\" Imagenet VID dataset.\n\n    Publication:\n        ImageNet Large Scale Visual Recognition Challenge\n        Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n        Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\n        IJCV, 2015\n        https://arxiv.org/pdf/1409.0575.pdf\n\n    Download the dataset from http://image-net.org/\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n        \"\"\"\n        args:\n            root - path to the imagenet vid dataset.\n            image_loader (default_image_loader) -  The function to read the images. If installed,\n                                                   jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n                                                   opencv's imread is used.\n            min_length - Minimum allowed sequence length.\n            max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\n                                which cover complete image.\n        \"\"\"\n        root = env_settings().imagenet_dir if root is None else root\n        super().__init__(\"imagenetvid_lmdb\", root, image_loader)\n\n        sequence_list_dict = decode_json(root, \"cache.json\")\n        self.sequence_list = sequence_list_dict\n\n        # Filter the sequences based on min_length and max_target_area in the first frame\n        self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n                              get_target_to_image_ratio(x) < max_target_area]\n\n    def get_name(self):\n        return 'imagenetvid_lmdb'\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def get_sequence_info(self, seq_id):\n        bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n        valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n        visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n        return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n    def _get_frame(self, sequence, frame_id):\n        set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n        vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n        frame_number = frame_id + sequence['start_frame']\n        frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name,\n                                  '{:06d}.JPEG'.format(frame_number))\n        return decode_img(self.root, frame_path)\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        sequence = self.sequence_list[seq_id]\n\n        frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        # Create anno dict\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for 
f_id in frame_ids]\n\n        # added the class info to the meta info\n        object_meta = OrderedDict({'object_class': sequence['class_name'],\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n\n"
  },
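  {
    "path": "artrackv2_mindspore/docs/sketches/target_ratio_sketch.py",
    "content": "# NOTE: illustrative sketch only -- this file is not part of the original repository.\n# It works through get_target_to_image_ratio from imagenet_vid_lmdb.py above: the value is\n# sqrt(target_area / image_area) for the first annotated frame, and ImagenetVID_lmdb keeps a\n# sequence only while this ratio stays below max_target_area. All numbers here are made up.\nimport torch\n\nseq = {'anno': [[10.0, 20.0, 100.0, 50.0]],  # first-frame box as [x, y, w, h]\n       'image_size': [1280.0, 720.0]}\n\nanno = torch.Tensor(seq['anno'])\nimg_sz = torch.Tensor(seq['image_size'])\nratio = (anno[0, 2:4].prod() / img_sz.prod()).sqrt()\nprint(ratio.item())  # sqrt((100 * 50) / (1280 * 720)) ~= 0.0737, so kept for max_target_area=1\n"
  },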
  {
    "path": "artrackv2_mindspore/lib/train/dataset/lasot.py",
    "content": "import os\nimport os.path\nimport torch\nimport numpy as np\nimport pandas\nimport csv\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nfrom lib.train.admin import env_settings\n\n\nclass Lasot(BaseVideoDataset):\n    \"\"\" LaSOT dataset.\n\n    Publication:\n        LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n        Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n        CVPR, 2019\n        https://arxiv.org/pdf/1809.07845.pdf\n\n    Download the dataset from https://cis.temple.edu/lasot/download.html\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the lasot dataset.\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n                    videos with subscripts -1, -3, and -5 from each class will be used for training.\n            split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n                    vid_ids or split option can be used at a time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().lasot_dir if root is None else root\n        super().__init__('LaSOT', root, image_loader)\n\n        # Keep a list of all classes\n        self.class_list = [f for f in os.listdir(self.root)]\n        self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n        self.sequence_list = self._build_sequence_list(vid_ids, split)\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        self.seq_per_class = self._build_class_list()\n\n    def _build_sequence_list(self, vid_ids=None, split=None):\n        if split is not None:\n            if vid_ids is not None:\n                raise ValueError('Cannot set both split_name and vid_ids.')\n            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            # sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n            sequence_list = pandas.read_csv(file_path, header=None).squeeze(\"columns\").values.tolist()\n        elif vid_ids is not None:\n            sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n        else:\n            raise ValueError('Set either split_name or vid_ids.')\n\n        return sequence_list\n\n    def _build_class_list(self):\n        seq_per_class = {}\n        for seq_id, seq_name in enumerate(self.sequence_list):\n            class_name = seq_name.split('-')[0]\n            if class_name in seq_per_class:\n                seq_per_class[class_name].append(seq_id)\n            else:\n                seq_per_class[class_name] = [seq_id]\n\n        return 
seq_per_class\n\n    def get_name(self):\n        return 'lasot'\n\n    def has_class_info(self):\n        return True\n\n    def has_occlusion_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _read_bb_anno(self, seq_path):\n        bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n        return torch.tensor(gt)\n\n    def _read_target_visible(self, seq_path):\n        # Read full occlusion and out_of_view\n        occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\n        out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n        with open(occlusion_file, 'r', newline='') as f:\n            occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n        with open(out_of_view_file, 'r') as f:\n            out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n\n        target_visible = ~occlusion & ~out_of_view\n\n        return target_visible\n\n    def _get_sequence_path(self, seq_id):\n        seq_name = self.sequence_list[seq_id]\n        class_name = seq_name.split('-')[0]\n        vid_id = seq_name.split('-')[1]\n\n        return os.path.join(self.root, class_name, class_name + '-' + vid_id)\n\n    def get_sequence_info(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        bbox = self._read_bb_anno(seq_path)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = self._read_target_visible(seq_path) & valid.byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def _get_frame_path(self, seq_path, frame_id):\n        return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1))    # frames start from 1\n\n    def _get_frame(self, seq_path, frame_id):\n        return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n    def _get_class(self, seq_path):\n        raw_class = seq_path.split('/')[-2]\n        return raw_class\n\n    def get_class_name(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        obj_class = self._get_class(seq_path)\n\n        return obj_class\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        seq_path = self._get_sequence_path(seq_id)\n\n        obj_class = self._get_class(seq_path)\n        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        object_meta = OrderedDict({'object_class_name': obj_class,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n"
  },
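  {
    "path": "artrackv2_mindspore/docs/sketches/lasot_usage_sketch.py",
    "content": "# NOTE: illustrative sketch only -- this file is not part of the original repository.\n# It shows one way to drive the Lasot dataset class above: build it, query per-sequence\n# annotations via get_sequence_info, then fetch frames plus per-frame annotations via\n# get_frames. The LaSOT root path is a placeholder assumption.\nfrom lib.train.dataset import Lasot\nfrom lib.train.data import opencv_loader\n\ndataset = Lasot(root='/path/to/LaSOT', split='train', image_loader=opencv_loader)\nprint(dataset.get_name(), dataset.get_num_sequences(), dataset.get_num_classes())\n\nseq_id = 0\ninfo = dataset.get_sequence_info(seq_id)  # {'bbox': Nx4 tensor, 'valid': N, 'visible': N}\n\n# pick the first three visible frames and load them together with their boxes\nvisible_ids = info['visible'].nonzero(as_tuple=True)[0][:3].tolist()\nframes, anno, meta = dataset.get_frames(seq_id, visible_ids, info)\nprint(meta['object_class_name'], [b.tolist() for b in anno['bbox']])\n"
  },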
  {
    "path": "artrackv2_mindspore/lib/train/dataset/lasot_lmdb.py",
    "content": "import os\nimport os.path\nimport torch\nimport numpy as np\nimport pandas\nimport csv\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nfrom lib.train.admin import env_settings\n'''2021.1.16 Lasot for loading lmdb dataset'''\nfrom lib.utils.lmdb_utils import *\n\n\nclass Lasot_lmdb(BaseVideoDataset):\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the lasot dataset.\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n                    videos with subscripts -1, -3, and -5 from each class will be used for training.\n            split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n                    vid_ids or split option can be used at a time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().lasot_lmdb_dir if root is None else root\n        super().__init__('LaSOT_lmdb', root, image_loader)\n\n        self.sequence_list = self._build_sequence_list(vid_ids, split)\n        class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]\n        self.class_list = []\n        for ele in class_list:\n            if ele not in self.class_list:\n                self.class_list.append(ele)\n        # Keep a list of all classes\n        self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        self.seq_per_class = self._build_class_list()\n\n    def _build_sequence_list(self, vid_ids=None, split=None):\n        if split is not None:\n            if vid_ids is not None:\n                raise ValueError('Cannot set both split_name and vid_ids.')\n            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n        elif vid_ids is not None:\n            sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n        else:\n            raise ValueError('Set either split_name or vid_ids.')\n\n        return sequence_list\n\n    def _build_class_list(self):\n        seq_per_class = {}\n        for seq_id, seq_name in enumerate(self.sequence_list):\n            class_name = seq_name.split('-')[0]\n            if class_name in seq_per_class:\n                seq_per_class[class_name].append(seq_id)\n            else:\n                seq_per_class[class_name] = [seq_id]\n\n        return seq_per_class\n\n    def get_name(self):\n        return 'lasot_lmdb'\n\n    def has_class_info(self):\n        return True\n\n    def has_occlusion_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return 
len(self.sequence_list)\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _read_bb_anno(self, seq_path):\n        bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n        gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1]  # the last line is empty\n        gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n        gt_arr = np.array(gt_list).astype(np.float32)\n        return torch.tensor(gt_arr)\n\n    def _read_target_visible(self, seq_path):\n        # Read full occlusion and out_of_view\n        occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\n        out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n        occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))\n        occlusion = torch.ByteTensor(occ_list)\n        out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))\n        out_of_view = torch.ByteTensor(out_view_list)\n\n        target_visible = ~occlusion & ~out_of_view\n\n        return target_visible\n\n    def _get_sequence_path(self, seq_id):\n        seq_name = self.sequence_list[seq_id]\n        class_name = seq_name.split('-')[0]\n        vid_id = seq_name.split('-')[1]\n\n        return os.path.join(class_name, class_name + '-' + vid_id)\n\n    def get_sequence_info(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        bbox = self._read_bb_anno(seq_path)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = self._read_target_visible(seq_path) & valid.byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def _get_frame_path(self, seq_path, frame_id):\n        return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1))    # frames start from 1\n\n    def _get_frame(self, seq_path, frame_id):\n        return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n    def _get_class(self, seq_path):\n        raw_class = seq_path.split('/')[-2]\n        return raw_class\n\n    def get_class_name(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        obj_class = self._get_class(seq_path)\n\n        return obj_class\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        seq_path = self._get_sequence_path(seq_id)\n\n        obj_class = self._get_class(seq_path)\n        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        object_meta = OrderedDict({'object_class_name': obj_class,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/dataset/tracking_net.py",
    "content": "import torch\nimport os\nimport os.path\nimport numpy as np\nimport pandas\nimport random\nfrom collections import OrderedDict\n\nfrom lib.train.data import jpeg4py_loader\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.admin import env_settings\n\n\ndef list_sequences(root, set_ids):\n    \"\"\" Lists all the videos in the input set_ids. Returns a list of tuples (set_id, video_name)\n\n    args:\n        root: Root directory to TrackingNet\n        set_ids: Sets (0-11) which are to be used\n\n    returns:\n        list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence\n    \"\"\"\n    sequence_list = []\n\n    for s in set_ids:\n        anno_dir = os.path.join(root, \"TRAIN_\" + str(s), \"anno\")\n\n        sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')]\n        sequence_list += sequences_cur_set\n\n    return sequence_list\n\n\nclass TrackingNet(BaseVideoDataset):\n    \"\"\" TrackingNet dataset.\n\n    Publication:\n        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n        Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n        ECCV, 2018\n        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root        - The path to the TrackingNet folder, containing the training sets.\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n                            sets (0 - 11) will be used.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().trackingnet_dir if root is None else root\n        super().__init__('TrackingNet', root, image_loader)\n\n        if set_ids is None:\n            set_ids = [i for i in range(12)]\n\n        self.set_ids = set_ids\n\n        # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n        # video_name for each sequence\n        self.sequence_list = list_sequences(self.root, self.set_ids)\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n        self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n        # we do not have the class_lists for the tracking net\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def _load_class_info(self):\n        ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n        class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n        with open(class_map_path, 'r') as f:\n            seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n        seq_per_class = {}\n        for i, seq in enumerate(self.sequence_list):\n            class_name = seq_to_class_map.get(seq[1], 'Unknown')\n            if class_name not in seq_per_class:\n                seq_per_class[class_name] = [i]\n            else:\n                seq_per_class[class_name].append(i)\n\n        return seq_to_class_map, seq_per_class\n\n    def get_name(self):\n        return 'trackingnet'\n\n    def has_class_info(self):\n        return True\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _read_bb_anno(self, seq_id):\n        set_id = self.sequence_list[seq_id][0]\n        vid_name = self.sequence_list[seq_id][1]\n        bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\n        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\n                             low_memory=False).values\n        return torch.tensor(gt)\n\n    def get_sequence_info(self, seq_id):\n        bbox = self._read_bb_anno(seq_id)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = valid.clone().byte()\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def _get_frame(self, seq_id, frame_id):\n        set_id = self.sequence_list[seq_id][0]\n        vid_name = self.sequence_list[seq_id][1]\n        frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\n        return self.image_loader(frame_path)\n\n    def _get_class(self, seq_id):\n        seq_name = self.sequence_list[seq_id][1]\n        return self.seq_to_class_map[seq_name]\n\n    def get_class_name(self, seq_id):\n        obj_class = self._get_class(seq_id)\n\n        return obj_class\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        obj_class = self._get_class(seq_id)\n\n        object_meta = OrderedDict({'object_class_name': obj_class,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/dataset/tracking_net_lmdb.py",
    "content": "import torch\nimport os\nimport os.path\nimport numpy as np\nimport random\nfrom collections import OrderedDict\n\nfrom lib.train.data import jpeg4py_loader\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.admin import env_settings\nimport json\nfrom lib.utils.lmdb_utils import decode_img, decode_str\n\n\ndef list_sequences(root):\n    \"\"\" Lists all the videos in the input set_ids. Returns a list of tuples (set_id, video_name)\n\n    args:\n        root: Root directory to TrackingNet\n\n    returns:\n        list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence\n    \"\"\"\n    fname = os.path.join(root, \"seq_list.json\")\n    with open(fname, \"r\") as f:\n        sequence_list = json.loads(f.read())\n    return sequence_list\n\n\nclass TrackingNet_lmdb(BaseVideoDataset):\n    \"\"\" TrackingNet dataset.\n\n    Publication:\n        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n        Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n        ECCV, 2018\n        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root        - The path to the TrackingNet folder, containing the training sets.\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n                            sets (0 - 11) will be used.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().trackingnet_lmdb_dir if root is None else root\n        super().__init__('TrackingNet_lmdb', root, image_loader)\n\n        if set_ids is None:\n            set_ids = [i for i in range(12)]\n\n        self.set_ids = set_ids\n\n        # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n        # video_name for each sequence\n        self.sequence_list = list_sequences(self.root)\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n        self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n        # we do not have the class_lists for the tracking net\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def _load_class_info(self):\n        ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n        class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n        with open(class_map_path, 'r') as f:\n            seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n        seq_per_class = {}\n        for i, seq in enumerate(self.sequence_list):\n            class_name = seq_to_class_map.get(seq[1], 'Unknown')\n            if class_name not in seq_per_class:\n                seq_per_class[class_name] = [i]\n            else:\n                seq_per_class[class_name].append(i)\n\n        return seq_to_class_map, seq_per_class\n\n    def get_name(self):\n        return 'trackingnet_lmdb'\n\n    def has_class_info(self):\n        return True\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _read_bb_anno(self, seq_id):\n        set_id = self.sequence_list[seq_id][0]\n        vid_name = self.sequence_list[seq_id][1]\n        gt_str_list = decode_str(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n                                 os.path.join(\"anno\", vid_name + \".txt\")).split('\\n')[:-1]\n        gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n        gt_arr = np.array(gt_list).astype(np.float32)\n        return torch.tensor(gt_arr)\n\n    def get_sequence_info(self, seq_id):\n        bbox = self._read_bb_anno(seq_id)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = valid.clone().byte()\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def _get_frame(self, seq_id, frame_id):\n        set_id = self.sequence_list[seq_id][0]\n        vid_name = self.sequence_list[seq_id][1]\n        return decode_img(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n                          os.path.join(\"frames\", vid_name, str(frame_id) + \".jpg\"))\n\n    def _get_class(self, seq_id):\n        seq_name = self.sequence_list[seq_id][1]\n        return self.seq_to_class_map[seq_name]\n\n    def get_class_name(self, seq_id):\n        obj_class = self._get_class(seq_id)\n\n        return obj_class\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        obj_class = self._get_class(seq_id)\n\n        object_meta = OrderedDict({'object_class_name': obj_class,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return 
frame_list, anno_frames, object_meta\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/run_training.py",
    "content": "import os\nimport sys\nimport argparse\nimport importlib\nimport cv2 as cv\nimport torch.backends.cudnn\nimport torch.distributed as dist\nimport torch\nimport random\nimport numpy as np\ntorch.backends.cudnn.benchmark = False\n\nimport _init_paths\nimport lib.train.admin.settings as ws_settings\n\n\ndef init_seeds(seed):\n    random.seed(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed(seed)\n    torch.backends.cudnn.deterministic = True\n    torch.backends.cudnn.benchmark = False\n    torch.set_num_threads(4)\n    cv.setNumThreads(1)\n    cv.ocl.setUseOpenCL(False)\n\n\ndef run_training(script_name, config_name, cudnn_benchmark=True, local_rank=-1, save_dir=None, base_seed=None,\n                 use_lmdb=False, script_name_prv=None, config_name_prv=None, use_wandb=False,\n                 distill=None, script_teacher=None, config_teacher=None):\n    \"\"\"Run the train script.\n    args:\n        script_name: Name of emperiment in the \"experiments/\" folder.\n        config_name: Name of the yaml file in the \"experiments/<script_name>\".\n        cudnn_benchmark: Use cudnn benchmark or not (default is True).\n    \"\"\"\n    if save_dir is None:\n        print(\"save_dir dir is not given. Use the default dir instead.\")\n    # This is needed to avoid strange crashes related to opencv\n    torch.set_num_threads(4)\n    cv.setNumThreads(4)\n\n    torch.backends.cudnn.benchmark = cudnn_benchmark\n\n    print('script_name: {}.py  config_name: {}.yaml'.format(script_name, config_name))\n\n    '''2021.1.5 set seed for different process'''\n    if base_seed is not None:\n        if local_rank != -1:\n            init_seeds(base_seed + local_rank)\n        else:\n            init_seeds(base_seed)\n\n    settings = ws_settings.Settings()\n    settings.script_name = script_name\n    settings.config_name = config_name\n    settings.project_path = 'train/{}/{}'.format(script_name, config_name)\n    if script_name_prv is not None and config_name_prv is not None:\n        settings.project_path_prv = 'train/{}/{}'.format(script_name_prv, config_name_prv)\n    settings.local_rank = local_rank\n    settings.save_dir = os.path.abspath(save_dir)\n    settings.use_lmdb = use_lmdb\n    prj_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../..\"))\n    settings.cfg_file = os.path.join(prj_dir, 'experiments/%s/%s.yaml' % (script_name, config_name))\n    settings.use_wandb = use_wandb\n    if distill:\n        settings.distill = distill\n        settings.script_teacher = script_teacher\n        settings.config_teacher = config_teacher\n        if script_teacher is not None and config_teacher is not None:\n            settings.project_path_teacher = 'train/{}/{}'.format(script_teacher, config_teacher)\n        settings.cfg_file_teacher = os.path.join(prj_dir, 'experiments/%s/%s.yaml' % (script_teacher, config_teacher))\n        expr_module = importlib.import_module('lib.train.train_script_distill')\n    else:\n        expr_module = importlib.import_module('lib.train.train_script')\n    expr_func = getattr(expr_module, 'run')\n\n    expr_func(settings)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Run a train scripts in train_settings.')\n    parser.add_argument('--script', type=str, required=True, help='Name of the train script.')\n    parser.add_argument('--config', type=str, required=True, help=\"Name of the config file.\")\n    parser.add_argument('--cudnn_benchmark', type=bool, default=False, help='Set cudnn 
benchmark on (1) or off (0) (default is on).')\n    parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed training')\n    parser.add_argument('--save_dir', type=str, help='the directory to save checkpoints and logs')\n    parser.add_argument('--seed', type=int, default=42, help='seed for random numbers')\n    parser.add_argument('--use_lmdb', type=int, choices=[0, 1], default=0)  # whether datasets are in lmdb format\n    parser.add_argument('--script_prv', type=str, default=None, help='Name of the train script of previous model.')\n    parser.add_argument('--config_prv', type=str, default=None, help=\"Name of the config file of previous model.\")\n    parser.add_argument('--use_wandb', type=int, choices=[0, 1], default=0)  # whether to use wandb\n    # for knowledge distillation\n    parser.add_argument('--distill', type=int, choices=[0, 1], default=0)  # whether to use knowledge distillation\n    parser.add_argument('--script_teacher', type=str, help='teacher script name')\n    parser.add_argument('--config_teacher', type=str, help='teacher yaml configure file name')\n\n    args = parser.parse_args()\n    if args.local_rank != -1:\n        dist.init_process_group(backend='nccl')\n        torch.cuda.set_device(args.local_rank)\n    else:\n        torch.cuda.set_device(0)\n    run_training(args.script, args.config, cudnn_benchmark=args.cudnn_benchmark,\n                 local_rank=args.local_rank, save_dir=args.save_dir, base_seed=args.seed,\n                 use_lmdb=args.use_lmdb, script_name_prv=args.script_prv, config_name_prv=args.config_prv,\n                 use_wandb=args.use_wandb,\n                 distill=args.distill, script_teacher=args.script_teacher, config_teacher=args.config_teacher)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
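  {
    "path": "artrackv2_mindspore/docs/sketches/launch_training_sketch.py",
    "content": "# NOTE: illustrative sketch only -- this file is not part of the original repository.\n# run_training.py above is normally launched from the shell at the repository root; the flag\n# names below come from its argparse setup, while the script/config names and save_dir are\n# placeholder assumptions:\n#\n#   python lib/train/run_training.py --script ostrack --config baseline \\\n#       --save_dir ./output --seed 42 --use_lmdb 0 --cudnn_benchmark 0\n#\n# For multi-GPU training it is launched once per process with --local_rank set, so that\n# dist.init_process_group(backend='nccl') runs before training starts.\n#\n# The same entry point can also be called programmatically:\nfrom lib.train.run_training import run_training\n\nrun_training('ostrack', 'baseline', cudnn_benchmark=False, local_rank=-1,\n             save_dir='./output', base_seed=42, use_lmdb=False)\n"
  },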
  {
    "path": "artrackv2_mindspore/lib/train/train_script.py",
    "content": "import os\n# loss function related\nfrom lib.utils.box_ops import giou_loss\nfrom torch.nn.functional import l1_loss\nfrom torch.nn import BCEWithLogitsLoss\n# train pipeline related\nfrom lib.train.trainers import LTRTrainer\nfrom lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet\nfrom lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb\nfrom lib.train.data import sampler, opencv_loader, processing, LTRLoader, sequence_sampler\n# distributed training related\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.nn.parallel import DataParallel as DP\n# some more advanced functions\nfrom .base_functions import *\n# network related\nfrom lib.models.ostrack import build_ostrack\n# forward propagation related\nfrom lib.train.actors import OSTrackActor\n# for import modules\nimport importlib\n\nfrom ..utils.focal_loss import FocalLoss\n\n\ndef names2datasets(name_list: list, settings, image_loader):\n    assert isinstance(name_list, list)\n    datasets = []\n    #settings.use_lmdb = True\n    for name in name_list:\n        assert name in [\"LASOT\", \"GOT10K_vottrain\", \"GOT10K_votval\", \"GOT10K_train_full\", \"GOT10K_official_val\",\n                        \"COCO17\", \"VID\", \"TRACKINGNET\"]\n        if name == \"LASOT\":\n            if settings.use_lmdb:\n                print(\"Building lasot dataset from lmdb\")\n                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))\n            else:\n                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))\n        if name == \"GOT10K_vottrain\":\n            if settings.use_lmdb:\n                print(\"Building got10k from lmdb\")\n                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))\n            else:\n                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))\n        if name == \"GOT10K_train_full\":\n            if settings.use_lmdb:\n                print(\"Building got10k_train_full from lmdb\")\n                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader))\n            else:\n                datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader))\n        if name == \"GOT10K_votval\":\n            if settings.use_lmdb:\n                print(\"Building got10k from lmdb\")\n                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader))\n            else:\n                datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader))\n        if name == \"GOT10K_official_val\":\n            if settings.use_lmdb:\n                raise ValueError(\"Not implement\")\n            else:\n                datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader))\n        if name == \"COCO17\":\n            if settings.use_lmdb:\n                print(\"Building COCO2017 from lmdb\")\n                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version=\"2017\", image_loader=image_loader))\n            else:\n                datasets.append(MSCOCOSeq(settings.env.coco_dir, version=\"2017\", image_loader=image_loader))\n        if name == \"VID\":\n            if settings.use_lmdb:\n       
         print(\"Building VID from lmdb\")\n                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader))\n            else:\n                datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader))\n        if name == \"TRACKINGNET\":\n            if settings.use_lmdb:\n                print(\"Building TrackingNet from lmdb\")\n                datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader))\n            else:\n                # raise ValueError(\"NOW WE CAN ONLY USE TRACKINGNET FROM LMDB\")\n                datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader))\n    return datasets\n\ndef slt_collate(batch):\n    ret = {}\n    for k in batch[0].keys():\n        here_list = []\n        for ex in batch:\n            here_list.append(ex[k])\n        ret[k] = here_list\n    return ret\n\n\nclass SLTLoader(torch.utils.data.dataloader.DataLoader):\n    \"\"\"\n    Data loader. Combines a dataset and a sampler, and provides\n    single- or multi-process iterators over the dataset.\n    \"\"\"\n\n    __initialized = False\n\n    def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n                 num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\n                 timeout=0, worker_init_fn=None):\n\n        if collate_fn is None:\n            collate_fn = slt_collate\n\n        super(SLTLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n                 num_workers, collate_fn, pin_memory, drop_last,\n                 timeout, worker_init_fn)\n\n        self.name = name\n        self.training = training\n        self.epoch_interval = epoch_interval\n        self.stack_dim = stack_dim\n\ndef run(settings):\n    settings.description = 'Training script for STARK-S, STARK-ST stage1, and STARK-ST stage2'\n\n    # update the default configs with config file\n    if not os.path.exists(settings.cfg_file):\n        raise ValueError(\"%s doesn't exist.\" % settings.cfg_file)\n    config_module = importlib.import_module(\"lib.config.%s.config\" % settings.script_name)\n    cfg = config_module.cfg\n    config_module.update_config_from_file(settings.cfg_file)\n    if settings.local_rank in [-1, 0]:\n        print(\"New configuration is shown below.\")\n        for key in cfg.keys():\n            print(\"%s configuration:\" % key, cfg[key])\n            print('\\n')\n\n    # update settings based on cfg\n    update_settings(settings, cfg)\n\n    # Record the training log\n    log_dir = os.path.join(settings.save_dir, 'logs')\n    if settings.local_rank in [-1, 0]:\n        if not os.path.exists(log_dir):\n            os.makedirs(log_dir)\n    settings.log_file = os.path.join(log_dir, \"%s-%s.log\" % (settings.script_name, settings.config_name))\n\n    # Build dataloaders\n    dataset_train = sequence_sampler.SequenceSampler(datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader),\n                                                     p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO,\n                                                     samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH,\n                                                     max_gap=cfg.DATA.MAX_GAP, max_interval=cfg.DATA.MAX_INTERVAL,\n                                                     num_search_frames=cfg.DATA.SEARCH.NUMBER, 
num_template_frames=cfg.DATA.TEMPLATE.NUMBER,\n                                                     frame_sample_mode='random_interval',\n                                                     prob=cfg.DATA.INTERVAL_PROB)\n    loader_train = SLTLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE,\n                             num_workers=cfg.TRAIN.NUM_WORKER,\n                             shuffle=False, drop_last=True, pin_memory=True)\n\n    if \"RepVGG\" in cfg.MODEL.BACKBONE.TYPE or \"swin\" in cfg.MODEL.BACKBONE.TYPE or \"LightTrack\" in cfg.MODEL.BACKBONE.TYPE:\n        cfg.ckpt_dir = settings.save_dir\n    bins = cfg.MODEL.BINS\n    search_size = cfg.DATA.SEARCH.SIZE\n    # Create network\n    if settings.script_name == \"ostrack\":\n        net = build_ostrack(cfg)\n    else:\n        raise ValueError(\"illegal script name\")\n\n    # wrap networks to distributed one\n    net.cuda()\n    if settings.local_rank != -1:\n        import torch.distributed as dist\n        # dist.get_rank() is only valid after init_process_group, i.e. in the DDP branch\n        rank = dist.get_rank()\n        device_id = rank % torch.cuda.device_count()\n        net = net.to(device_id)\n        print(f\"Start running basic DDP example on rank {rank}.\")\n        # net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)  # add syncBN converter\n        net = DDP(net, find_unused_parameters=True, broadcast_buffers=False)\n        print(f\"Finish running basic DDP example on rank {rank}.\")\n        # net = DDP(net, device_ids=[settings.local_rank], find_unused_parameters=True, broadcast_buffers=False)\n        #net = DP(net)\n        settings.device = torch.device(\"cuda:%d\" % settings.local_rank)\n    else:\n        settings.device = torch.device(\"cuda:0\")\n    settings.deep_sup = getattr(cfg.TRAIN, \"DEEP_SUPERVISION\", False)\n    settings.distill = getattr(cfg.TRAIN, \"DISTILL\", False)\n    settings.distill_loss_type = getattr(cfg.TRAIN, \"DISTILL_LOSS_TYPE\", \"KL\")\n    # Loss functions and Actors\n    if settings.script_name == \"ostrack\":\n        focal_loss = FocalLoss()\n        objective = {'giou': giou_loss, 'l1': l1_loss, 'focal': focal_loss, 'cls': BCEWithLogitsLoss()}\n        loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT, 'focal': 2., 'cls': 2.0}\n        actor = OSTrackActor(net=net, objective=objective, loss_weight=loss_weight, settings=settings, cfg=cfg, bins=bins, search_size=search_size)\n    else:\n        raise ValueError(\"illegal script name\")\n\n    # if cfg.TRAIN.DEEP_SUPERVISION:\n    #     raise ValueError(\"Deep supervision is not supported now.\")\n\n    # Optimizer, parameters, and learning rates\n    optimizer, lr_scheduler = get_optimizer_scheduler(net, cfg)\n    use_amp = getattr(cfg.TRAIN, \"AMP\", False)\n    trainer = LTRTrainer(actor, [loader_train], optimizer, settings, lr_scheduler, use_amp=use_amp)\n\n    # train process\n    trainer.train(cfg.TRAIN.EPOCH, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/train_script_distill.py",
    "content": "import os\n# loss function related\nfrom lib.utils.box_ops import giou_loss\nfrom torch.nn.functional import l1_loss\nfrom torch.nn import BCEWithLogitsLoss\n# train pipeline related\nfrom lib.train.trainers import LTRTrainer\n# distributed training related\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n# some more advanced functions\nfrom .base_functions import *\n# network related\nfrom lib.models.stark import build_starks, build_starkst\nfrom lib.models.stark import build_stark_lightning_x_trt\n# forward propagation related\nfrom lib.train.actors import STARKLightningXtrtdistillActor\n# for import modules\nimport importlib\n\n\ndef build_network(script_name, cfg):\n    # Create network\n    if script_name == \"stark_s\":\n        net = build_starks(cfg)\n    elif script_name == \"stark_st1\" or script_name == \"stark_st2\":\n        net = build_starkst(cfg)\n    elif script_name == \"stark_lightning_X_trt\":\n        net = build_stark_lightning_x_trt(cfg, phase=\"train\")\n    else:\n        raise ValueError(\"illegal script name\")\n    return net\n\n\ndef run(settings):\n    settings.description = 'Training script for STARK-S, STARK-ST stage1, and STARK-ST stage2'\n\n    # update the default configs with config file\n    if not os.path.exists(settings.cfg_file):\n        raise ValueError(\"%s doesn't exist.\" % settings.cfg_file)\n    config_module = importlib.import_module(\"lib.config.%s.config\" % settings.script_name)\n    cfg = config_module.cfg\n    config_module.update_config_from_file(settings.cfg_file)\n    if settings.local_rank in [-1, 0]:\n        print(\"New configuration is shown below.\")\n        for key in cfg.keys():\n            print(\"%s configuration:\" % key, cfg[key])\n            print('\\n')\n\n    # update the default teacher configs with teacher config file\n    if not os.path.exists(settings.cfg_file_teacher):\n        raise ValueError(\"%s doesn't exist.\" % settings.cfg_file_teacher)\n    config_module_teacher = importlib.import_module(\"lib.config.%s.config\" % settings.script_teacher)\n    cfg_teacher = config_module_teacher.cfg\n    config_module_teacher.update_config_from_file(settings.cfg_file_teacher)\n    if settings.local_rank in [-1, 0]:\n        print(\"New teacher configuration is shown below.\")\n        for key in cfg_teacher.keys():\n            print(\"%s configuration:\" % key, cfg_teacher[key])\n            print('\\n')\n\n    # update settings based on cfg\n    update_settings(settings, cfg)\n\n    # Record the training log\n    log_dir = os.path.join(settings.save_dir, 'logs')\n    if settings.local_rank in [-1, 0]:\n        if not os.path.exists(log_dir):\n            os.makedirs(log_dir)\n    settings.log_file = os.path.join(log_dir, \"%s-%s.log\" % (settings.script_name, settings.config_name))\n\n    # Build dataloaders\n    loader_train, loader_val = build_dataloaders(cfg, settings)\n\n    if \"RepVGG\" in cfg.MODEL.BACKBONE.TYPE or \"swin\" in cfg.MODEL.BACKBONE.TYPE:\n        cfg.ckpt_dir = settings.save_dir\n    \"\"\"turn on the distillation mode\"\"\"\n    cfg.TRAIN.DISTILL = True\n    cfg_teacher.TRAIN.DISTILL = True\n    net = build_network(settings.script_name, cfg)\n    net_teacher = build_network(settings.script_teacher, cfg_teacher)\n\n    # wrap networks to distributed one\n    net.cuda()\n    net_teacher.cuda()\n    net_teacher.eval()\n\n    if settings.local_rank != -1:\n        net = DDP(net, device_ids=[settings.local_rank], find_unused_parameters=True)\n        net_teacher = 
DDP(net_teacher, device_ids=[settings.local_rank], find_unused_parameters=True)\n        settings.device = torch.device(\"cuda:%d\" % settings.local_rank)\n    else:\n        settings.device = torch.device(\"cuda:0\")\n    # settings.deep_sup = getattr(cfg.TRAIN, \"DEEP_SUPERVISION\", False)\n    # settings.distill = getattr(cfg.TRAIN, \"DISTILL\", False)\n    settings.distill_loss_type = getattr(cfg.TRAIN, \"DISTILL_LOSS_TYPE\", \"L1\")\n    # Loss functions and Actors\n    if settings.script_name == \"stark_lightning_X_trt\":\n        objective = {'giou': giou_loss, 'l1': l1_loss}\n        loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT}\n        actor = STARKLightningXtrtdistillActor(net=net, objective=objective, loss_weight=loss_weight, settings=settings,\n                                               net_teacher=net_teacher)\n    else:\n        raise ValueError(\"illegal script name\")\n\n    # Optimizer, parameters, and learning rates\n    optimizer, lr_scheduler = get_optimizer_scheduler(net, cfg)\n    use_amp = getattr(cfg.TRAIN, \"AMP\", False)\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler, use_amp=use_amp)\n\n    # train process\n    trainer.train(cfg.TRAIN.EPOCH, load_latest=True, fail_safe=True, distill=True)\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/trainers/__init__.py",
    "content": "from .base_trainer import BaseTrainer\nfrom .ltr_trainer import LTRTrainer\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/trainers/base_trainer.py",
    "content": "import os\nimport glob\nimport torch\nimport traceback\nfrom lib.train.admin import multigpu\nfrom torch.utils.data.distributed import DistributedSampler\n\n\nclass BaseTrainer:\n    \"\"\"Base trainer class. Contains functions for training and saving/loading checkpoints.\n    Trainer classes should inherit from this one and overload the train_epoch function.\"\"\"\n\n    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):\n        \"\"\"\n        args:\n            actor - The actor for training the network\n            loaders - list of dataset loaders, e.g. [train_loader, val_loader]. In each epoch, the trainer runs one\n                        epoch for each loader.\n            optimizer - The optimizer used for training, e.g. Adam\n            settings - Training settings\n            lr_scheduler - Learning rate scheduler\n        \"\"\"\n        self.actor = actor\n        self.optimizer = optimizer\n        self.lr_scheduler = lr_scheduler\n        self.loaders = loaders\n\n        self.update_settings(settings)\n\n        self.epoch = 0\n        self.stats = {}\n\n        self.device = getattr(settings, 'device', None)\n        if self.device is None:\n            self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() and settings.use_gpu else \"cpu\")\n\n        self.actor.to(self.device)\n        self.settings = settings\n\n    def update_settings(self, settings=None):\n        \"\"\"Updates the trainer settings. Must be called to update internal settings.\"\"\"\n        if settings is not None:\n            self.settings = settings\n\n        if self.settings.env.workspace_dir is not None:\n            self.settings.env.workspace_dir = os.path.expanduser(self.settings.env.workspace_dir)\n            '''2021.1.4 New function: specify checkpoint dir'''\n            if self.settings.save_dir is None:\n                self._checkpoint_dir = os.path.join(self.settings.env.workspace_dir, 'checkpoints')\n            else:\n                self._checkpoint_dir = os.path.join(self.settings.save_dir, 'checkpoints')\n            print(\"checkpoints will be saved to %s\" % self._checkpoint_dir)\n\n            if self.settings.local_rank in [-1, 0]:\n                if not os.path.exists(self._checkpoint_dir):\n                    print(\"Training with multiple GPUs. checkpoints directory doesn't exist. 
\"\n                          \"Create checkpoints directory\")\n                    os.makedirs(self._checkpoint_dir)\n        else:\n            self._checkpoint_dir = None\n\n    def train(self, max_epochs, load_latest=False, fail_safe=True, load_previous_ckpt=False, distill=False):\n        \"\"\"Do training for the given number of epochs.\n        args:\n            max_epochs - Max number of training epochs,\n            load_latest - Bool indicating whether to resume from latest epoch.\n            fail_safe - Bool indicating whether the training to automatically restart in case of any crashes.\n        \"\"\"\n\n        epoch = -1\n        num_tries = 1\n        for i in range(num_tries):\n            try:\n                if load_latest:\n                    self.load_checkpoint()\n                if load_previous_ckpt:\n                    directory = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path_prv)\n                    self.load_state_dict(directory)\n                if distill:\n                    directory_teacher = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path_teacher)\n                    self.load_state_dict(directory_teacher, distill=True)\n                for epoch in range(self.epoch+1, max_epochs+1):\n                    self.epoch = epoch\n\n                    self.train_epoch()\n\n                    if self.lr_scheduler is not None:\n                        if self.settings.scheduler_type != 'cosine':\n                            self.lr_scheduler.step()\n                        else:\n                            self.lr_scheduler.step(epoch - 1)\n                    # only save the last 10 checkpoints\n                    save_every_epoch = getattr(self.settings, \"save_every_epoch\", False)\n                    save_epochs = [79, 159, 239]\n                    if epoch > (max_epochs - 1) or save_every_epoch or epoch % 1 == 0 or epoch in save_epochs or epoch > (max_epochs - 5):\n                    # if epoch > (max_epochs - 10) or save_every_epoch or epoch % 100 == 0:\n                        if self._checkpoint_dir:\n                            if self.settings.local_rank in [-1, 0]:\n                                self.save_checkpoint()\n            except:\n                print('Training crashed at epoch {}'.format(epoch))\n                if fail_safe:\n                    self.epoch -= 1\n                    load_latest = True\n                    print('Traceback for the error!')\n                    print(traceback.format_exc())\n                    print('Restarting training from last epoch ...')\n                else:\n                    raise\n\n        print('Finished training!')\n\n    def train_epoch(self):\n        raise NotImplementedError\n\n    def save_checkpoint(self):\n        \"\"\"Saves a checkpoint of the network and other variables.\"\"\"\n\n        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net\n\n        actor_type = type(self.actor).__name__\n        net_type = type(net).__name__\n        state = {\n            'epoch': self.epoch,\n            'actor_type': actor_type,\n            'net_type': net_type,\n            'net': net.state_dict(),\n            'net_info': getattr(net, 'info', None),\n            'constructor': getattr(net, 'constructor', None),\n            'optimizer': self.optimizer.state_dict(),\n            'stats': self.stats,\n            'settings': self.settings\n        }\n\n        directory = '{}/{}'.format(self._checkpoint_dir, 
self.settings.project_path)\n        print(directory)\n        if not os.path.exists(directory):\n            print(\"directory doesn't exist. creating...\")\n            os.makedirs(directory)\n\n        # First save as a tmp file\n        tmp_file_path = '{}/{}_ep{:04d}.tmp'.format(directory, net_type, self.epoch)\n        torch.save(state, tmp_file_path)\n\n        file_path = '{}/{}_ep{:04d}.pth.tar'.format(directory, net_type, self.epoch)\n\n        # Now rename to actual checkpoint. os.rename seems to be atomic if files are on same filesystem. Not 100% sure\n        os.rename(tmp_file_path, file_path)\n\n    def load_checkpoint(self, checkpoint = None, fields = None, ignore_fields = None, load_constructor = False):\n        \"\"\"Loads a network checkpoint file.\n\n        Can be called in three different ways:\n            load_checkpoint():\n                Loads the latest epoch from the workspace. Use this to continue training.\n            load_checkpoint(epoch_num):\n                Loads the network at the given epoch number (int).\n            load_checkpoint(path_to_checkpoint):\n                Loads the file from the given absolute path (str).\n        \"\"\"\n\n        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net\n\n        actor_type = type(self.actor).__name__\n        net_type = type(net).__name__\n\n        if checkpoint is None:\n            # Load most recent checkpoint\n            checkpoint_list = sorted(glob.glob('{}/{}/{}_ep*.pth.tar'.format(self._checkpoint_dir,\n                                                                             self.settings.project_path, net_type)))\n            if checkpoint_list:\n                checkpoint_path = checkpoint_list[-1]\n            else:\n                print('No matching checkpoint file found')\n                return\n        elif isinstance(checkpoint, int):\n            # Checkpoint is the epoch number\n            checkpoint_path = '{}/{}/{}_ep{:04d}.pth.tar'.format(self._checkpoint_dir, self.settings.project_path,\n                                                                 net_type, checkpoint)\n        elif isinstance(checkpoint, str):\n            # checkpoint is the path\n            if os.path.isdir(checkpoint):\n                checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint)))\n                if checkpoint_list:\n                    checkpoint_path = checkpoint_list[-1]\n                else:\n                    raise Exception('No checkpoint found')\n            else:\n                checkpoint_path = os.path.expanduser(checkpoint)\n        else:\n            raise TypeError\n\n        # Load network\n        checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n\n        assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'\n\n        if fields is None:\n            fields = checkpoint_dict.keys()\n        if ignore_fields is None:\n            ignore_fields = ['settings']\n\n            # Never load the scheduler. 
It exists in older checkpoints.\n        ignore_fields.extend(['lr_scheduler', 'constructor', 'net_type', 'actor_type', 'net_info'])\n\n        # Load all fields\n        for key in fields:\n            if key in ignore_fields:\n                continue\n            if key == 'net':\n                net.load_state_dict(checkpoint_dict[key])\n            elif key == 'optimizer':\n                self.optimizer.load_state_dict(checkpoint_dict[key])\n            else:\n                setattr(self, key, checkpoint_dict[key])\n\n        # Set the net info\n        if load_constructor and 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None:\n            net.constructor = checkpoint_dict['constructor']\n        if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:\n            net.info = checkpoint_dict['net_info']\n\n        # Update the epoch in lr scheduler\n        if 'epoch' in fields:\n            self.lr_scheduler.last_epoch = self.epoch\n        # 2021.1.10 Update the epoch in data_samplers\n            for loader in self.loaders:\n                if isinstance(loader.sampler, DistributedSampler):\n                    loader.sampler.set_epoch(self.epoch)\n        return True\n\n    def load_state_dict(self, checkpoint=None, distill=False):\n        \"\"\"Loads a network checkpoint file.\n\n        Can be called in three different ways:\n            load_checkpoint():\n                Loads the latest epoch from the workspace. Use this to continue training.\n            load_checkpoint(epoch_num):\n                Loads the network at the given epoch number (int).\n            load_checkpoint(path_to_checkpoint):\n                Loads the file from the given absolute path (str).\n        \"\"\"\n        if distill:\n            net = self.actor.net_teacher.module if multigpu.is_multi_gpu(self.actor.net_teacher) \\\n                else self.actor.net_teacher\n        else:\n            net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net\n\n        net_type = type(net).__name__\n\n        if isinstance(checkpoint, str):\n            # checkpoint is the path\n            if os.path.isdir(checkpoint):\n                checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint)))\n                if checkpoint_list:\n                    checkpoint_path = checkpoint_list[-1]\n                else:\n                    raise Exception('No checkpoint found')\n            else:\n                checkpoint_path = os.path.expanduser(checkpoint)\n        else:\n            raise TypeError\n\n        # Load network\n        print(\"Loading pretrained model from \", checkpoint_path)\n        checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n\n        assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'\n\n        missing_k, unexpected_k = net.load_state_dict(checkpoint_dict[\"net\"], strict=False)\n        print(\"previous checkpoint is loaded.\")\n        print(\"missing keys: \", missing_k)\n        print(\"unexpected keys:\", unexpected_k)\n\n        return True\n"
  },
  {
    "path": "artrackv2_mindspore/lib/train/trainers/ltr_trainer.py",
    "content": "import os\nimport datetime\nfrom collections import OrderedDict\nfrom torch.nn.utils import clip_grad_norm_\n#from lib.train.data.wandb_logger import WandbWriter\nfrom lib.train.trainers import BaseTrainer\nfrom lib.train.admin import AverageMeter, StatValue\nfrom memory_profiler import profile\n#from lib.train.admin import TensorboardWriter\nimport torch\nimport time\nimport numpy as np\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.cuda.amp import autocast\nfrom torch.cuda.amp import GradScaler\n\nfrom lib.utils.misc import get_world_size\n\n\nclass LTRTrainer(BaseTrainer):\n    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None, use_amp=False):\n        \"\"\"\n        args:\n            actor - The actor for training the network\n            loaders - list of dataset loaders, e.g. [train_loader, val_loader]. In each epoch, the trainer runs one\n                        epoch for each loader.\n            optimizer - The optimizer used for training, e.g. Adam\n            settings - Training settings\n            lr_scheduler - Learning rate scheduler\n        \"\"\"\n        super().__init__(actor, loaders, optimizer, settings, lr_scheduler)\n\n        self._set_default_settings()\n\n        # Initialize statistics variables\n        self.stats = OrderedDict({loader.name: None for loader in self.loaders})\n\n        # Initialize tensorboard and wandb\n        #self.wandb_writer = None\n        #if settings.local_rank in [-1, 0]:\n        #    tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path)\n        #    if not os.path.exists(tensorboard_writer_dir):\n        #        os.makedirs(tensorboard_writer_dir)\n        #    self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders])\n\n        #    if settings.use_wandb:\n        #        world_size = get_world_size()\n        #        cur_train_samples = self.loaders[0].dataset.samples_per_epoch * max(0, self.epoch - 1)\n        #        interval = (world_size * settings.batchsize)  # * interval\n        #        self.wandb_writer = WandbWriter(settings.project_path[6:], {}, tensorboard_writer_dir, cur_train_samples, interval)\n\n        self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True)\n        print(\"move_data\", self.move_data_to_gpu)\n        self.settings = settings\n        self.use_amp = use_amp\n        if use_amp:\n            self.scaler = GradScaler()\n\n    def _set_default_settings(self):\n        # Dict of all default values\n        default = {'print_interval': 10,\n                   'print_stats': None,\n                   'description': ''}\n\n        for param, default_value in default.items():\n            if getattr(self.settings, param, None) is None:\n                setattr(self.settings, param, default_value)\n        \n        self.miou_list = []\n    def cycle_dataset(self, loader):\n        \"\"\"Do a cycle of training or validation.\"\"\"\n        torch.autograd.set_detect_anomaly(True)\n        self.actor.train(loader.training)\n        torch.set_grad_enabled(loader.training)\n\n        self._init_timing()\n\n        for i, data in enumerate(loader, 1):\n            self.actor.eval()\n            self.data_read_done_time = time.time()\n            with torch.no_grad():\n                explore_result = self.actor.explore(data)\n            if explore_result == None:\n                print(\"this time i skip\")\n                continue\n         
   # get inputs\n            #print(data)\n\n            self.data_to_gpu_time = time.time()\n\n            data['epoch'] = self.epoch\n            data['settings'] = self.settings\n\n            stats = {}\n            reward_record = []\n            miou_record = []\n            e_miou_record = []\n            num_seq = len(data['num_frames'])\n\n\n            # Calculate reward tensor\n            #reward_tensor = torch.zeros(explore_result['baseline_iou'].size())\n            baseline_iou = explore_result['baseline_iou']\n            #explore_iou = explore_result['explore_iou']\n            for seq_idx in range(num_seq):\n                num_frames = data['num_frames'][seq_idx] - 1\n                b_miou = torch.mean(baseline_iou[:num_frames, seq_idx])\n            #    e_miou = torch.mean(explore_iou[:num_frames, seq_idx])\n                miou_record.append(b_miou.item())\n            #    e_miou_record.append(e_miou.item())\n\n                b_reward = b_miou.item()\n            #    e_reward = e_miou.item()\n            #    iou_gap = e_reward - b_reward\n            #    reward_record.append(iou_gap)\n            #    reward_tensor[:num_frames, seq_idx] = iou_gap\n\n            # Training mode\n            cursor = 0\n            bs_backward = 1\n\n            #print(self.actor.net.module.box_head.decoder.layers[2].mlpx.fc1.weight)\n            self.optimizer.zero_grad()\n            self.actor.train()\n            cover = torch.Tensor(data['visible_ratio']).permute(1, 0)\n            reverse_cover = torch.flip(cover, dims=[0])\n            cover = cover[1:, :]\n            reverse_cover = reverse_cover[1:, :]\n            cover_real = torch.cat((cover, reverse_cover), dim=1)\n            while cursor < num_seq*2:\n                #print(\"now is \", cursor , \"and all is \", num_seq)\n                model_inputs = {}\n                model_inputs['slt_loss_weight'] = 15\n                #if cursor < num_seq:\n                #    model_inputs['template_images'] = explore_result['template_images'][cursor:cursor + bs_backward].cuda()\n                #else:\n                #    model_inputs['template_images'] = explore_result['template_images_reverse'][cursor - num_seq:cursor - num_seq + bs_backward].cuda()\n                model_inputs['search_images'] = explore_result['search_images'][:, cursor:cursor + bs_backward].cuda()\n                model_inputs['search_anno'] = explore_result['search_anno'][:, cursor:cursor + bs_backward].cuda()\n                model_inputs['pre_seq'] = explore_result['pre_seq'][:, cursor:cursor + bs_backward].cuda()\n                model_inputs['x_feat'] = explore_result['x_feat'].squeeze(1)[:, cursor:cursor + bs_backward].cuda()\n                model_inputs['template_images_z0'] = explore_result['template_images_z0'][:, cursor:cursor + bs_backward].cuda()\n                model_inputs['dz_feat_update'] = explore_result['dz_feat_update'][:, cursor:cursor + bs_backward].cuda()\n                model_inputs['target_in_search'] = explore_result['target_in_search'][:, cursor:cursor + bs_backward].cuda()\n                model_inputs['cover'] = cover_real[:, cursor:cursor + bs_backward].cuda()\n                model_inputs['epoch'] = self.epoch\n                #print(\"this is cursor\")\n                #print(explore_result['pre_seq'].shape)\n                #print(explore_result['x_feat'].squeeze(1).shape)\n                #model_inputs['action_tensor'] = explore_result['action_tensor'][:, cursor:cursor + bs_backward].cuda()\n                
#model_inputs['reward_tensor'] = reward_tensor[:, cursor:cursor + bs_backward].cuda()\n\n                loss, stats_cur = self.actor.compute_sequence_losses(model_inputs)\n                #for name, param in self.actor.net.named_parameters():\n                #    shape, c = (param.grad.shape, param.grad.sum()) if param.grad is not None else (None, None)\n                #    print(f'{name}: {param.shape} \\n\\t grad: {shape} \\n\\t {c}')\n                #print(\"i make this!\")\n                loss.backward()\n                #print(\"i made that?\")\n\n                for key, val in stats_cur.items():\n                    if key in stats:\n                        stats[key] += val*(bs_backward / num_seq)\n                    else:\n                        stats[key] = val*(bs_backward / num_seq)\n                cursor += bs_backward\n            grad_norm = clip_grad_norm_(self.actor.net.parameters(), 100)\n            stats['grad_norm'] = grad_norm\n            #print(self.actor.net.module.backbone.blocks[8].mlp.fc1.weight)\n            self.optimizer.step()\n            \n            miou = np.mean(miou_record)\n            self.miou_list.append(miou)\n            #stats['reward'] = np.mean(reward_record)\n            #stats['e_mIoU'] = np.mean(e_miou_record)\n            stats['mIoU'] = miou\n            stats['mIoU10'] = np.mean(self.miou_list[-10:])\n            stats['mIoU100'] = np.mean(self.miou_list[-100:])\n\n            batch_size = num_seq * np.max(data['num_frames'])\n            self._update_stats(stats, batch_size, loader)\n            self._print_stats(i, loader, batch_size)\n            torch.cuda.empty_cache()\n\n            # # forward pass\n            # if not self.use_amp:\n            #     loss, stats = self.actor(data)\n            # else:\n            #     with autocast():\n            #         loss, stats = self.actor(data)\n            #\n            # # backward pass and update weights\n            # if loader.training:\n            #     self.optimizer.zero_grad()\n            #     if not self.use_amp:\n            #         loss.backward()\n            #         if self.settings.grad_clip_norm > 0:\n            #             torch.nn.utils.clip_grad_norm_(self.actor.net.parameters(), self.settings.grad_clip_norm)\n            #         self.optimizer.step()\n            #     else:\n            #         self.scaler.scale(loss).backward()\n            #         self.scaler.step(self.optimizer)\n            #         self.scaler.update()\n\n            # update statistics\n            # batch_size = data['template_images'].shape[loader.stack_dim]\n            # self._update_stats(stats, batch_size, loader)\n\n            # print statistics\n            # self._print_stats(i, loader, batch_size)\n\n            # update wandb status\n            #if self.wandb_writer is not None and i % self.settings.print_interval == 0:\n            #    if self.settings.local_rank in [-1, 0]:\n            #        self.wandb_writer.write_log(self.stats, self.epoch)\n\n        # calculate ETA after every epoch\n        # epoch_time = self.prev_time - self.start_time\n        # print(\"Epoch Time: \" + str(datetime.timedelta(seconds=epoch_time)))\n        # print(\"Avg Data Time: %.5f\" % (self.avg_date_time / self.num_frames * batch_size))\n        # print(\"Avg GPU Trans Time: %.5f\" % (self.avg_gpu_trans_time / self.num_frames * batch_size))\n        # print(\"Avg Forward Time: %.5f\" % (self.avg_forward_time / self.num_frames * batch_size))\n\n    def 
train_epoch(self):\n        \"\"\"Do one epoch for each loader.\"\"\"\n        for loader in self.loaders:\n            if self.epoch % loader.epoch_interval == 0:\n                # 2021.1.10 Set epoch\n                if isinstance(loader.sampler, DistributedSampler):\n                    loader.sampler.set_epoch(self.epoch)\n                self.cycle_dataset(loader)\n\n
        self._stats_new_epoch()\n        #if self.settings.local_rank in [-1, 0]:\n        #    self._write_tensorboard()\n\n
    def _init_timing(self):\n        self.num_frames = 0\n        self.start_time = time.time()\n        self.prev_time = self.start_time\n        self.avg_data_time = 0\n        self.avg_gpu_trans_time = 0\n        self.avg_forward_time = 0\n\n
    def _update_stats(self, new_stats: OrderedDict, batch_size, loader):\n        # Initialize stats if not initialized yet\n        if loader.name not in self.stats.keys() or self.stats[loader.name] is None:\n            self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()})\n\n
        # add lr state\n        if loader.training:\n            lr_list = self.lr_scheduler.get_last_lr()\n            for i, lr in enumerate(lr_list):\n                var_name = 'LearningRate/group{}'.format(i)\n                if var_name not in self.stats[loader.name].keys():\n                    self.stats[loader.name][var_name] = StatValue()\n                self.stats[loader.name][var_name].update(lr)\n\n
        for name, val in new_stats.items():\n            if name not in self.stats[loader.name].keys():\n                self.stats[loader.name][name] = AverageMeter()\n            self.stats[loader.name][name].update(val, batch_size)\n\n
    def _print_stats(self, i, loader, batch_size):\n        self.num_frames += batch_size\n        current_time = time.time()\n        batch_fps = batch_size / (current_time - self.prev_time)\n        average_fps = self.num_frames / (current_time - self.start_time)\n        prev_frame_time_backup = self.prev_time\n        self.prev_time = current_time\n\n
        self.avg_data_time += (self.data_read_done_time - prev_frame_time_backup)\n        self.avg_gpu_trans_time += (self.data_to_gpu_time - self.data_read_done_time)\n        self.avg_forward_time += current_time - self.data_to_gpu_time\n\n
        if i % self.settings.print_interval == 0 or i == len(loader):\n            print_str = '[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, len(loader))\n            print_str += 'FPS: %.1f (%.1f)  ,  ' % (average_fps, batch_fps)\n\n
            # 2021.12.14 add data time print\n            print_str += 'DataTime: %.3f (%.3f)  ,  ' % (self.avg_data_time / self.num_frames * batch_size, self.avg_gpu_trans_time / self.num_frames * batch_size)\n            print_str += 'ForwardTime: %.3f  ,  ' % (self.avg_forward_time / self.num_frames * batch_size)\n            print_str += 'TotalTime: %.3f  ,  ' % ((current_time - self.start_time) / self.num_frames * batch_size)\n            # print_str += 'DataTime: %.3f (%.3f)  ,  ' % (self.data_read_done_time - prev_frame_time_backup, self.data_to_gpu_time - self.data_read_done_time)\n            # print_str += 'ForwardTime: %.3f  ,  ' % (current_time - self.data_to_gpu_time)\n            # print_str += 'TotalTime: %.3f  ,  ' % (current_time - prev_frame_time_backup)\n\n
            for name, val in self.stats[loader.name].items():\n                if (self.settings.print_stats is None or name in self.settings.print_stats):\n                    if hasattr(val, 'avg'):\n                        print_str += '%s: %.5f  ,  ' % (name, val.avg)\n                    # else:\n                    #     print_str += '%s: %r  ,  ' % (name, val)\n\n
            print(print_str[:-5])\n            log_str = print_str[:-5] + '\\n'\n            with open(self.settings.log_file, 'a') as f:\n                f.write(log_str)\n\n
    def _stats_new_epoch(self):\n        # Record learning rate\n        for loader in self.loaders:\n            if loader.training:\n                try:\n                    lr_list = self.lr_scheduler.get_last_lr()\n                except Exception:\n                    lr_list = self.lr_scheduler._get_lr(self.epoch)\n                for i, lr in enumerate(lr_list):\n                    var_name = 'LearningRate/group{}'.format(i)\n                    if var_name not in self.stats[loader.name].keys():\n                        self.stats[loader.name][var_name] = StatValue()\n                    self.stats[loader.name][var_name].update(lr)\n\n
        for loader_stats in self.stats.values():\n            if loader_stats is None:\n                continue\n            for stat_value in loader_stats.values():\n                if hasattr(stat_value, 'new_epoch'):\n                    stat_value.new_epoch()\n\n
    #def _write_tensorboard(self):\n    #    if self.epoch == 1:\n    #        self.tensorboard_writer.write_info(self.settings.script_name, self.settings.description)\n\n    #    self.tensorboard_writer.write_epoch(self.stats, self.epoch)\n"
  },
  {
    "path": "artrackv2_mindspore/lib/utils/__init__.py",
    "content": "# from .tensor import TensorDict, TensorList\n"
  },
  {
    "path": "artrackv2_mindspore/lib/utils/box_ops.py",
    "content": "import numpy as np\n\n\ndef box_xywh_to_cxywh(x):\n\tx1, y1, w, h = x.unbind(-1)\n\tb = [x1+0.5*w, y1+0.5*h, w, h]\n\treturn torch.stack(b, dim=-1)\n\ndef box_cxcywh_to_xyxy(x):\n    x_c, y_c, w, h = x.unbind(-1)\n    b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n         (x_c + 0.5 * w), (y_c + 0.5 * h)]\n    return torch.stack(b, dim=-1)\n\n\ndef box_xywh_to_xyxy(x):\n    x1, y1, w, h = x.unbind(-1)\n    b = [x1, y1, x1 + w, y1 + h]\n    return torch.stack(b, dim=-1)\n\n\ndef box_xyxy_to_xywh(x):\n    x1, y1, x2, y2 = x.unbind(-1)\n    b = [x1, y1, x2 - x1, y2 - y1]\n    return torch.stack(b, dim=-1)\n\n\ndef box_xyxy_to_cxcywh(x):\n    x0, y0, x1, y1 = x.unbind(-1)\n    b = [(x0 + x1) / 2, (y0 + y1) / 2,\n         (x1 - x0), (y1 - y0)]\n    return torch.stack(b, dim=-1)\n\n\n# modified from torchvision to also return the union\n'''Note that this function only supports shape (N,4)'''\n\n\ndef box_iou(boxes1, boxes2):\n    \"\"\"\n\n    :param boxes1: (N, 4) (x1,y1,x2,y2)\n    :param boxes2: (N, 4) (x1,y1,x2,y2)\n    :return:\n    \"\"\"\n    area1 = box_area(boxes1) # (N,)\n    area2 = box_area(boxes2) # (N,)\n\n    lt = torch.max(boxes1[:, :2], boxes2[:, :2])  # (N,2)\n    rb = torch.min(boxes1[:, 2:], boxes2[:, 2:])  # (N,2)\n\n    wh = (rb - lt).clamp(min=0)  # (N,2)\n    inter = wh[:, 0] * wh[:, 1]  # (N,)\n\n    union = area1 + area2 - inter\n\n    iou = inter / union\n    return iou, union\n\n\n'''Note that this implementation is different from DETR's'''\n\n\ndef generalized_box_iou(boxes1, boxes2):\n    \"\"\"\n    Generalized IoU from https://giou.stanford.edu/\n\n    The boxes should be in [x0, y0, x1, y1] format\n\n    boxes1: (N, 4)\n    boxes2: (N, 4)\n    \"\"\"\n    # degenerate boxes gives inf / nan results\n    # so do an early check\n    # try:\n    #assert (boxes1[:, 2:] >= boxes1[:, :2]).all()\n    # assert (boxes2[:, 2:] >= boxes2[:, :2]).all()\n    iou, union = box_iou(boxes1, boxes2) # (N,)\n\n    lt = torch.min(boxes1[:, :2], boxes2[:, :2])\n    rb = torch.max(boxes1[:, 2:], boxes2[:, 2:])\n\n    wh = (rb - lt).clamp(min=0)  # (N,2)\n    area = wh[:, 0] * wh[:, 1] # (N,)\n\n    return iou - (area - union) / area, iou\n\n\ndef giou_loss(boxes1, boxes2):\n    \"\"\"\n\n    :param boxes1: (N, 4) (x1,y1,x2,y2)\n    :param boxes2: (N, 4) (x1,y1,x2,y2)\n    :return:\n    \"\"\"\n    giou, iou = generalized_box_iou(boxes1, boxes2)\n    return (1 - giou).mean(), iou\n\n\ndef clip_box(box: list, H, W, margin=0):\n    x1, y1, w, h = box\n    x2, y2 = x1 + w, y1 + h\n    x1 = min(max(0, x1), W-margin)\n    x2 = min(max(margin, x2), W)\n    y1 = min(max(0, y1), H-margin)\n    y2 = min(max(margin, y2), H)\n    w = max(margin, x2-x1)\n    h = max(margin, y2-y1)\n    return [x1, y1, w, h]\n"
  },
  {
    "path": "artrackv2_mindspore/lib/utils/ce_utils.py",
    "content": "def generate_bbox_mask(bbox_mask, bbox):\r\n    b, h, w = bbox_mask.shape\r\n    for i in range(b):\r\n        bbox_i = bbox[i].cpu().tolist()\r\n        bbox_mask[i, int(bbox_i[1]):int(bbox_i[1] + bbox_i[3] - 1), int(bbox_i[0]):int(bbox_i[0] + bbox_i[2] - 1)] = 1\r\n    return bbox_mask\r\n\r\n\r\ndef generate_mask_cond(cfg, bs, device, gt_bbox):\r\n    template_size = cfg.DATA.TEMPLATE.SIZE\r\n    stride = cfg.MODEL.BACKBONE.STRIDE\r\n    template_feat_size = template_size // stride\r\n\r\n    if cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'ALL':\r\n        box_mask_z = None\r\n    elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'CTR_POINT':\r\n        if template_feat_size == 8:\r\n            index = slice(3, 4)\r\n        elif template_feat_size == 12:\r\n            index = slice(5, 6)\r\n        elif template_feat_size == 7:\r\n            index = slice(3, 4)\r\n        elif template_feat_size == 14:\r\n            index = slice(6, 7)\r\n        else:\r\n            raise NotImplementedError\r\n        box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device)\r\n        box_mask_z[:, index, index] = 1\r\n        box_mask_z = box_mask_z.flatten(1).to(torch.bool)\r\n    elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'CTR_REC':\r\n        # use fixed 4x4 region, 3:5 for 8x8\r\n        # use fixed 4x4 region 5:6 for 12x12\r\n        if template_feat_size == 8:\r\n            index = slice(3, 5)\r\n        elif template_feat_size == 12:\r\n            index = slice(5, 7)\r\n        elif template_feat_size == 7:\r\n            index = slice(3, 4)\r\n        else:\r\n            raise NotImplementedError\r\n        box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device)\r\n        box_mask_z[:, index, index] = 1\r\n        box_mask_z = box_mask_z.flatten(1).to(torch.bool)\r\n\r\n    elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'GT_BOX':\r\n        box_mask_z = torch.zeros([bs, template_size, template_size], device=device)\r\n        # box_mask_z_ori = data['template_seg'][0].view(-1, 1, *data['template_seg'].shape[2:])  # (batch, 1, 128, 128)\r\n        box_mask_z = generate_bbox_mask(box_mask_z, gt_bbox * template_size).unsqueeze(1).to(\r\n            torch.float)  # (batch, 1, 128, 128)\r\n        # box_mask_z_vis = box_mask_z.cpu().numpy()\r\n        box_mask_z = F.interpolate(box_mask_z, scale_factor=1. / cfg.MODEL.BACKBONE.STRIDE, mode='bilinear',\r\n                                   align_corners=False)\r\n        box_mask_z = box_mask_z.flatten(1).to(torch.bool)\r\n        # box_mask_z_vis = box_mask_z[:, 0, ...].cpu().numpy()\r\n        # gaussian_maps_vis = generate_heatmap(data['template_anno'], self.cfg.DATA.TEMPLATE.SIZE, self.cfg.MODEL.STRIDE)[0].cpu().numpy()\r\n    else:\r\n        raise NotImplementedError\r\n\r\n    return box_mask_z\r\n\r\n\r\ndef adjust_keep_rate(epoch, warmup_epochs, total_epochs, ITERS_PER_EPOCH, base_keep_rate=0.5, max_keep_rate=1, iters=-1):\r\n    if epoch < warmup_epochs:\r\n        return 1\r\n    if epoch >= total_epochs:\r\n        return base_keep_rate\r\n    if iters == -1:\r\n        iters = epoch * ITERS_PER_EPOCH\r\n    total_iters = ITERS_PER_EPOCH * (total_epochs - warmup_epochs)\r\n    iters = iters - ITERS_PER_EPOCH * warmup_epochs\r\n    keep_rate = base_keep_rate + (max_keep_rate - base_keep_rate) \\\r\n        * (math.cos(iters / total_iters * math.pi) + 1) * 0.5\r\n\r\n    return keep_rate\r\n"
  },
  {
    "path": "artrackv2_mindspore/lib/utils/focal_loss.py",
    "content": "from abc import ABC\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass FocalLoss(nn.Module, ABC):\r\n    def __init__(self, alpha=2, beta=4):\r\n        super(FocalLoss, self).__init__()\r\n        self.alpha = alpha\r\n        self.beta = beta\r\n\r\n    def forward(self, prediction, target):\r\n        positive_index = target.eq(1).float()\r\n        negative_index = target.lt(1).float()\r\n\r\n        negative_weights = torch.pow(1 - target, self.beta)\r\n        # clamp min value is set to 1e-12 to maintain the numerical stability\r\n        prediction = torch.clamp(prediction, 1e-12)\r\n\r\n        positive_loss = torch.log(prediction) * torch.pow(1 - prediction, self.alpha) * positive_index\r\n        negative_loss = torch.log(1 - prediction) * torch.pow(prediction,\r\n                                                              self.alpha) * negative_weights * negative_index\r\n\r\n        num_positive = positive_index.float().sum()\r\n        positive_loss = positive_loss.sum()\r\n        negative_loss = negative_loss.sum()\r\n\r\n        if num_positive == 0:\r\n            loss = -negative_loss\r\n        else:\r\n            loss = -(positive_loss + negative_loss) / num_positive\r\n\r\n        return loss\r\n\r\n\r\nclass LBHinge(nn.Module):\r\n    \"\"\"Loss that uses a 'hinge' on the lower bound.\r\n    This means that for samples with a label value smaller than the threshold, the loss is zero if the prediction is\r\n    also smaller than that threshold.\r\n    args:\r\n        error_matric:  What base loss to use (MSE by default).\r\n        threshold:  Threshold to use for the hinge.\r\n        clip:  Clip the loss if it is above this value.\r\n    \"\"\"\r\n    def __init__(self, error_metric=nn.MSELoss(), threshold=None, clip=None):\r\n        super().__init__()\r\n        self.error_metric = error_metric\r\n        self.threshold = threshold if threshold is not None else -100\r\n        self.clip = clip\r\n\r\n    def forward(self, prediction, label, target_bb=None):\r\n        negative_mask = (label < self.threshold).float()\r\n        positive_mask = (1.0 - negative_mask)\r\n\r\n        prediction = negative_mask * F.relu(prediction) + positive_mask * prediction\r\n\r\n        loss = self.error_metric(prediction, positive_mask * label)\r\n\r\n        if self.clip is not None:\r\n            loss = torch.min(loss, torch.tensor([self.clip], device=loss.device))\r\n        return loss"
  },
  {
    "path": "artrackv2_mindspore/lib/utils/heapmap_utils.py",
    "content": "import numpy as np\r\nimport torch\r\n\r\n\r\ndef generate_heatmap(bboxes, patch_size=320, stride=16):\r\n    \"\"\"\r\n    Generate ground truth heatmap same as CenterNet\r\n    Args:\r\n        bboxes (torch.Tensor): shape of [num_search, bs, 4]\r\n\r\n    Returns:\r\n        gaussian_maps: list of generated heatmap\r\n\r\n    \"\"\"\r\n    gaussian_maps = []\r\n    heatmap_size = patch_size // stride\r\n    for single_patch_bboxes in bboxes:\r\n        bs = single_patch_bboxes.shape[0]\r\n        gt_scoremap = torch.zeros(bs, heatmap_size, heatmap_size)\r\n        classes = torch.arange(bs).to(torch.long)\r\n        bbox = single_patch_bboxes * heatmap_size\r\n        wh = bbox[:, 2:]\r\n        centers_int = (bbox[:, :2] + wh / 2).round()\r\n        CenterNetHeatMap.generate_score_map(gt_scoremap, classes, wh, centers_int, 0.7)\r\n        gaussian_maps.append(gt_scoremap.to(bbox.device))\r\n    return gaussian_maps\r\n\r\n\r\nclass CenterNetHeatMap(object):\r\n    @staticmethod\r\n    def generate_score_map(fmap, gt_class, gt_wh, centers_int, min_overlap):\r\n        radius = CenterNetHeatMap.get_gaussian_radius(gt_wh, min_overlap)\r\n        radius = torch.clamp_min(radius, 0)\r\n        radius = radius.type(torch.int).cpu().numpy()\r\n        for i in range(gt_class.shape[0]):\r\n            channel_index = gt_class[i]\r\n            CenterNetHeatMap.draw_gaussian(fmap[channel_index], centers_int[i], radius[i])\r\n\r\n    @staticmethod\r\n    def get_gaussian_radius(box_size, min_overlap):\r\n        \"\"\"\r\n        copyed from CornerNet\r\n        box_size (w, h), it could be a torch.Tensor, numpy.ndarray, list or tuple\r\n        notice: we are using a bug-version, please refer to fix bug version in CornerNet\r\n        \"\"\"\r\n        # box_tensor = torch.Tensor(box_size)\r\n        box_tensor = box_size\r\n        width, height = box_tensor[..., 0], box_tensor[..., 1]\r\n\r\n        a1 = 1\r\n        b1 = height + width\r\n        c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\r\n        sq1 = torch.sqrt(b1 ** 2 - 4 * a1 * c1)\r\n        r1 = (b1 + sq1) / 2\r\n\r\n        a2 = 4\r\n        b2 = 2 * (height + width)\r\n        c2 = (1 - min_overlap) * width * height\r\n        sq2 = torch.sqrt(b2 ** 2 - 4 * a2 * c2)\r\n        r2 = (b2 + sq2) / 2\r\n\r\n        a3 = 4 * min_overlap\r\n        b3 = -2 * min_overlap * (height + width)\r\n        c3 = (min_overlap - 1) * width * height\r\n        sq3 = torch.sqrt(b3 ** 2 - 4 * a3 * c3)\r\n        r3 = (b3 + sq3) / 2\r\n\r\n        return torch.min(r1, torch.min(r2, r3))\r\n\r\n    @staticmethod\r\n    def gaussian2D(radius, sigma=1):\r\n        # m, n = [(s - 1.) / 2. 
for s in shape]\r\n        m, n = radius\r\n        y, x = np.ogrid[-m: m + 1, -n: n + 1]\r\n\r\n        gauss = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\r\n        gauss[gauss < np.finfo(gauss.dtype).eps * gauss.max()] = 0\r\n        return gauss\r\n\r\n    @staticmethod\r\n    def draw_gaussian(fmap, center, radius, k=1):\r\n        diameter = 2 * radius + 1\r\n        gaussian = CenterNetHeatMap.gaussian2D((radius, radius), sigma=diameter / 6)\r\n        gaussian = torch.Tensor(gaussian)\r\n        x, y = int(center[0]), int(center[1])\r\n        height, width = fmap.shape[:2]\r\n\r\n        left, right = min(x, radius), min(width - x, radius + 1)\r\n        top, bottom = min(y, radius), min(height - y, radius + 1)\r\n\r\n        masked_fmap = fmap[y - top: y + bottom, x - left: x + right]\r\n        masked_gaussian = gaussian[radius - top: radius + bottom, radius - left: radius + right]\r\n        if min(masked_gaussian.shape) > 0 and min(masked_fmap.shape) > 0:\r\n            masked_fmap = torch.max(masked_fmap, masked_gaussian * k)\r\n            fmap[y - top: y + bottom, x - left: x + right] = masked_fmap\r\n        # return fmap\r\n\r\n\r\ndef compute_grids(features, strides):\r\n    \"\"\"\r\n    grids regret to the input image size\r\n    \"\"\"\r\n    grids = []\r\n    for level, feature in enumerate(features):\r\n        h, w = feature.size()[-2:]\r\n        shifts_x = torch.arange(\r\n            0, w * strides[level],\r\n            step=strides[level],\r\n            dtype=torch.float32, device=feature.device)\r\n        shifts_y = torch.arange(\r\n            0, h * strides[level],\r\n            step=strides[level],\r\n            dtype=torch.float32, device=feature.device)\r\n        shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)\r\n        shift_x = shift_x.reshape(-1)\r\n        shift_y = shift_y.reshape(-1)\r\n        grids_per_level = torch.stack((shift_x, shift_y), dim=1) + \\\r\n                          strides[level] // 2\r\n        grids.append(grids_per_level)\r\n    return grids\r\n\r\n\r\ndef get_center3x3(locations, centers, strides, range=3):\r\n    '''\r\n    Inputs:\r\n        locations: M x 2\r\n        centers: N x 2\r\n        strides: M\r\n    '''\r\n    range = (range - 1) / 2\r\n    M, N = locations.shape[0], centers.shape[0]\r\n    locations_expanded = locations.view(M, 1, 2).expand(M, N, 2)  # M x N x 2\r\n    centers_expanded = centers.view(1, N, 2).expand(M, N, 2)  # M x N x 2\r\n    strides_expanded = strides.view(M, 1, 1).expand(M, N, 2)  # M x N\r\n    centers_discret = ((centers_expanded / strides_expanded).int() * strides_expanded).float() + \\\r\n                      strides_expanded / 2  # M x N x 2\r\n    dist_x = (locations_expanded[:, :, 0] - centers_discret[:, :, 0]).abs()\r\n    dist_y = (locations_expanded[:, :, 1] - centers_discret[:, :, 1]).abs()\r\n    return (dist_x <= strides_expanded[:, :, 0] * range) & \\\r\n           (dist_y <= strides_expanded[:, :, 0] * range)\r\n\r\n\r\ndef get_pred(score_map_ctr, size_map, offset_map, feat_size):\r\n    max_score, idx = torch.max(score_map_ctr.flatten(1), dim=1, keepdim=True)\r\n\r\n    idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)\r\n    size = size_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)\r\n    offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)\r\n\r\n    return size * feat_size, offset\r\n"
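\r\n\r\n# Illustrative shape note (toy sizes, not from any config in this repo): for bboxes of shape\r\n# [num_search=2, bs=3, 4] with patch_size=320 and stride=16, generate_heatmap returns a list\r\n# of two tensors, each of shape (3, 20, 20), with one Gaussian peak per sequence.\r\n"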
  },
  {
    "path": "artrackv2_mindspore/lib/utils/image.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/lib/utils/lmdb_utils.py",
    "content": "import lmdb\nimport numpy as np\nimport cv2\nimport json\n\nLMDB_ENVS = dict()\nLMDB_HANDLES = dict()\nLMDB_FILELISTS = dict()\n\n\ndef get_lmdb_handle(name):\n    global LMDB_HANDLES, LMDB_FILELISTS\n    item = LMDB_HANDLES.get(name, None)\n    if item is None:\n        env = lmdb.open(name, readonly=True, lock=False, readahead=False, meminit=False)\n        LMDB_ENVS[name] = env\n        item = env.begin(write=False)\n        LMDB_HANDLES[name] = item\n\n    return item\n\n\ndef decode_img(lmdb_fname, key_name):\n    handle = get_lmdb_handle(lmdb_fname)\n    binfile = handle.get(key_name.encode())\n    if binfile is None:\n        print(\"Illegal data detected. %s %s\" % (lmdb_fname, key_name))\n    s = np.frombuffer(binfile, np.uint8)\n    x = cv2.cvtColor(cv2.imdecode(s, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)\n    return x\n\n\ndef decode_str(lmdb_fname, key_name):\n    handle = get_lmdb_handle(lmdb_fname)\n    binfile = handle.get(key_name.encode())\n    string = binfile.decode()\n    return string\n\n\ndef decode_json(lmdb_fname, key_name):\n    return json.loads(decode_str(lmdb_fname, key_name))\n\n\nif __name__ == \"__main__\":\n    lmdb_fname = \"/data/sda/v-yanbi/iccv21/LittleBoy_clean/data/got10k_lmdb\"\n    '''Decode image'''\n    # key_name = \"test/GOT-10k_Test_000001/00000001.jpg\"\n    # img = decode_img(lmdb_fname, key_name)\n    # cv2.imwrite(\"001.jpg\", img)\n    '''Decode str'''\n    # key_name = \"test/list.txt\"\n    # key_name = \"train/GOT-10k_Train_000001/groundtruth.txt\"\n    key_name = \"train/GOT-10k_Train_000001/absence.label\"\n    str_ = decode_str(lmdb_fname, key_name)\n    print(str_)\n"
  },
  {
    "path": "artrackv2_mindspore/lib/utils/merge.py",
    "content": "import torch\n\n\ndef merge_template_search(inp_list, return_search=False, return_template=False):\n    \"\"\"NOTICE: search region related features must be in the last place\"\"\"\n    seq_dict = {\"feat\": torch.cat([x[\"feat\"] for x in inp_list], dim=0),\n                \"mask\": torch.cat([x[\"mask\"] for x in inp_list], dim=1),\n                \"pos\": torch.cat([x[\"pos\"] for x in inp_list], dim=0)}\n    if return_search:\n        x = inp_list[-1]\n        seq_dict.update({\"feat_x\": x[\"feat\"], \"mask_x\": x[\"mask\"], \"pos_x\": x[\"pos\"]})\n    if return_template:\n        z = inp_list[0]\n        seq_dict.update({\"feat_z\": z[\"feat\"], \"mask_z\": z[\"mask\"], \"pos_z\": z[\"pos\"]})\n    return seq_dict\n\n\ndef get_qkv(inp_list):\n    \"\"\"The 1st element of the inp_list is about the template,\n    the 2nd (the last) element is about the search region\"\"\"\n    dict_x = inp_list[-1]\n    dict_c = {\"feat\": torch.cat([x[\"feat\"] for x in inp_list], dim=0),\n              \"mask\": torch.cat([x[\"mask\"] for x in inp_list], dim=1),\n              \"pos\": torch.cat([x[\"pos\"] for x in inp_list], dim=0)}  # concatenated dict\n    q = dict_x[\"feat\"] + dict_x[\"pos\"]\n    k = dict_c[\"feat\"] + dict_c[\"pos\"]\n    v = dict_c[\"feat\"]\n    key_padding_mask = dict_c[\"mask\"]\n    return q, k, v, key_padding_mask\n"
  },
  {
    "path": "artrackv2_mindspore/lib/utils/misc.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nMisc functions, including distributed helpers.\n\nMostly copy-paste from torchvision references.\n\"\"\"\nimport os\nimport subprocess\nimport time\nfrom collections import defaultdict, deque\nimport datetime\nimport pickle\nfrom typing import Optional, List\n\nimport torch\nimport torch.distributed as dist\nfrom mindspore import Tensor\n\n# needed due to empty tensor bug in pytorch and torchvision 0.5\nimport torchvision\nvers = torchvision.__version__.split('.')\nif int(vers[0]) <= 0 and int(vers[1]) < 7:\n    from torchvision.ops import _new_empty_tensor\n    from torchvision.ops.misc import _output_size\n\n\nclass SmoothedValue(object):\n    \"\"\"Track a series of values and provide access to smoothed values over a\n    window or the global series average.\n    \"\"\"\n\n    def __init__(self, window_size=20, fmt=None):\n        if fmt is None:\n            fmt = \"{median:.4f} ({global_avg:.4f})\"\n        self.deque = deque(maxlen=window_size)\n        self.total = 0.0\n        self.count = 0\n        self.fmt = fmt\n\n    def update(self, value, n=1):\n        self.deque.append(value)\n        self.count += n\n        self.total += value * n\n\n    def synchronize_between_processes(self):\n        \"\"\"\n        Warning: does not synchronize the deque!\n        \"\"\"\n        if not is_dist_avail_and_initialized():\n            return\n        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')\n        dist.barrier()\n        dist.all_reduce(t)\n        t = t.tolist()\n        self.count = int(t[0])\n        self.total = t[1]\n\n    @property\n    def median(self):\n        d = torch.tensor(list(self.deque))\n        return d.median().item()\n\n    @property\n    def avg(self):\n        d = torch.tensor(list(self.deque), dtype=torch.float32)\n        return d.mean().item()\n\n    @property\n    def global_avg(self):\n        return self.total / self.count\n\n    @property\n    def max(self):\n        return max(self.deque)\n\n    @property\n    def value(self):\n        return self.deque[-1]\n\n    def __str__(self):\n        return self.fmt.format(\n            median=self.median,\n            avg=self.avg,\n            global_avg=self.global_avg,\n            max=self.max,\n            value=self.value)\n\n\ndef all_gather(data):\n    \"\"\"\n    Run all_gather on arbitrary picklable data (not necessarily tensors)\n    Args:\n        data: any picklable object\n    Returns:\n        list[data]: list of data gathered from each rank\n    \"\"\"\n    world_size = get_world_size()\n    if world_size == 1:\n        return [data]\n\n    # serialized to a Tensor\n    buffer = pickle.dumps(data)\n    storage = torch.ByteStorage.from_buffer(buffer)\n    tensor = torch.ByteTensor(storage).to(\"cuda\")\n\n    # obtain Tensor size of each rank\n    local_size = torch.tensor([tensor.numel()], device=\"cuda\")\n    size_list = [torch.tensor([0], device=\"cuda\") for _ in range(world_size)]\n    dist.all_gather(size_list, local_size)\n    size_list = [int(size.item()) for size in size_list]\n    max_size = max(size_list)\n\n    # receiving Tensor from all ranks\n    # we pad the tensor because torch all_gather does not support\n    # gathering tensors of different shapes\n    tensor_list = []\n    for _ in size_list:\n        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=\"cuda\"))\n    if local_size != max_size:\n        padding = 
torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=\"cuda\")\n        tensor = torch.cat((tensor, padding), dim=0)\n    dist.all_gather(tensor_list, tensor)\n\n    data_list = []\n    for size, tensor in zip(size_list, tensor_list):\n        buffer = tensor.cpu().numpy().tobytes()[:size]\n        data_list.append(pickle.loads(buffer))\n\n    return data_list\n\n\ndef reduce_dict(input_dict, average=True):\n    \"\"\"\n    Args:\n        input_dict (dict): all the values will be reduced\n        average (bool): whether to do average or sum\n    Reduce the values in the dictionary from all processes so that all processes\n    have the averaged results. Returns a dict with the same fields as\n    input_dict, after reduction.\n    \"\"\"\n    world_size = get_world_size()\n    if world_size < 2:\n        return input_dict\n    with torch.no_grad():\n        names = []\n        values = []\n        # sort the keys so that they are consistent across processes\n        for k in sorted(input_dict.keys()):\n            names.append(k)\n            values.append(input_dict[k])\n        values = torch.stack(values, dim=0)\n        dist.all_reduce(values)\n        if average:\n            values /= world_size\n        reduced_dict = {k: v for k, v in zip(names, values)}\n    return reduced_dict\n\n\nclass MetricLogger(object):\n    def __init__(self, delimiter=\"\\t\"):\n        self.meters = defaultdict(SmoothedValue)\n        self.delimiter = delimiter\n\n    def update(self, **kwargs):\n        for k, v in kwargs.items():\n            if isinstance(v, torch.Tensor):\n                v = v.item()\n            assert isinstance(v, (float, int))\n            self.meters[k].update(v)\n\n    def __getattr__(self, attr):\n        if attr in self.meters:\n            return self.meters[attr]\n        if attr in self.__dict__:\n            return self.__dict__[attr]\n        raise AttributeError(\"'{}' object has no attribute '{}'\".format(\n            type(self).__name__, attr))\n\n    def __str__(self):\n        loss_str = []\n        for name, meter in self.meters.items():\n            loss_str.append(\n                \"{}: {}\".format(name, str(meter))\n            )\n        return self.delimiter.join(loss_str)\n\n    def synchronize_between_processes(self):\n        for meter in self.meters.values():\n            meter.synchronize_between_processes()\n\n    def add_meter(self, name, meter):\n        self.meters[name] = meter\n\n    def log_every(self, iterable, print_freq, header=None):\n        i = 0\n        if not header:\n            header = ''\n        start_time = time.time()\n        end = time.time()\n        iter_time = SmoothedValue(fmt='{avg:.4f}')\n        data_time = SmoothedValue(fmt='{avg:.4f}')\n        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'\n        if torch.cuda.is_available():\n            log_msg = self.delimiter.join([\n                header,\n                '[{0' + space_fmt + '}/{1}]',\n                'eta: {eta}',\n                '{meters}',\n                'time: {time}',\n                'data: {data}',\n                'max mem: {memory:.0f}'\n            ])\n        else:\n            log_msg = self.delimiter.join([\n                header,\n                '[{0' + space_fmt + '}/{1}]',\n                'eta: {eta}',\n                '{meters}',\n                'time: {time}',\n                'data: {data}'\n            ])\n        MB = 1024.0 * 1024.0\n        for obj in iterable:\n            data_time.update(time.time() - end)\n 
           yield obj\n            iter_time.update(time.time() - end)\n            if i % print_freq == 0 or i == len(iterable) - 1:\n                eta_seconds = iter_time.global_avg * (len(iterable) - i)\n                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n                if torch.cuda.is_available():\n                    print(log_msg.format(\n                        i, len(iterable), eta=eta_string,\n                        meters=str(self),\n                        time=str(iter_time), data=str(data_time),\n                        memory=torch.cuda.max_memory_allocated() / MB))\n                else:\n                    print(log_msg.format(\n                        i, len(iterable), eta=eta_string,\n                        meters=str(self),\n                        time=str(iter_time), data=str(data_time)))\n            i += 1\n            end = time.time()\n        total_time = time.time() - start_time\n        total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n        print('{} Total time: {} ({:.4f} s / it)'.format(\n            header, total_time_str, total_time / len(iterable)))\n\n\ndef get_sha():\n    cwd = os.path.dirname(os.path.abspath(__file__))\n\n    def _run(command):\n        return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()\n    sha = 'N/A'\n    diff = \"clean\"\n    branch = 'N/A'\n    try:\n        sha = _run(['git', 'rev-parse', 'HEAD'])\n        subprocess.check_output(['git', 'diff'], cwd=cwd)\n        diff = _run(['git', 'diff-index', 'HEAD'])\n        diff = \"has uncommited changes\" if diff else \"clean\"\n        branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n    except Exception:\n        pass\n    message = f\"sha: {sha}, status: {diff}, branch: {branch}\"\n    return message\n\n\ndef collate_fn(batch):\n    batch = list(zip(*batch))\n    batch[0] = nested_tensor_from_tensor_list(batch[0])\n    return tuple(batch)\n\n\ndef _max_by_axis(the_list):\n    # type: (List[List[int]]) -> List[int]\n    maxes = the_list[0] # get the first one\n    for sublist in the_list[1:]: # [h,w,3]\n        for index, item in enumerate(sublist): # index: 0,1,2\n            maxes[index] = max(maxes[index], item) # compare current max with the other elements in the whole\n    return maxes\n\n\nclass NestedTensor(object):\n    def __init__(self, tensors, mask: Optional[Tensor]):\n        self.tensors = tensors\n        self.mask = mask\n\n    def to(self, device):\n        # type: (Device) -> NestedTensor # noqa\n        cast_tensor = self.tensors.to(device)\n        mask = self.mask\n        if mask is not None:\n            assert mask is not None\n            cast_mask = mask.to(device)\n        else:\n            cast_mask = None\n        return NestedTensor(cast_tensor, cast_mask)\n\n    def decompose(self):\n        return self.tensors, self.mask\n\n    def __repr__(self):\n        return str(self.tensors)\n\n\ndef nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n    # TODO make this more general\n    if tensor_list[0].ndim == 3:\n        if torchvision._is_tracing():\n            # nested_tensor_from_tensor_list() does not export well to ONNX\n            # call _onnx_nested_tensor_from_tensor_list() instead\n            return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n        # TODO make it support different-sized images\n        max_size = _max_by_axis([list(img.shape) for img in tensor_list]) # [[3,h1,w1], [3,h2,w2], [3,h3,w3], ...]\n        # min_size = tuple(min(s) 
for s in zip(*[img.shape for img in tensor_list]))\n        batch_shape = [len(tensor_list)] + max_size # ()\n        b, c, h, w = batch_shape\n        dtype = tensor_list[0].dtype\n        device = tensor_list[0].device\n        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n        for img, pad_img, m in zip(tensor_list, tensor, mask):\n            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) # copy valid regions of the images to the largest padded base.\n            m[: img.shape[1], :img.shape[2]] = False\n    else:\n        raise ValueError('not supported')\n    return NestedTensor(tensor, mask)\n\n\n# _onnx_nested_tensor_from_tensor_list() is an implementation of\n# nested_tensor_from_tensor_list() that is supported by ONNX tracing.\n@torch.jit.unused\ndef _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:\n    max_size = []\n    for i in range(tensor_list[0].dim()):\n        max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)\n        max_size.append(max_size_i)\n    max_size = tuple(max_size)\n\n    # work around for\n    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n    # m[: img.shape[1], :img.shape[2]] = False\n    # which is not yet supported in onnx\n    padded_imgs = []\n    padded_masks = []\n    for img in tensor_list:\n        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]\n        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))\n        padded_imgs.append(padded_img)\n\n        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)\n        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), \"constant\", 1)\n        padded_masks.append(padded_mask.to(torch.bool))\n\n    tensor = torch.stack(padded_imgs)\n    mask = torch.stack(padded_masks)\n\n    return NestedTensor(tensor, mask=mask)\n\n\ndef setup_for_distributed(is_master):\n    \"\"\"\n    This function disables printing when not in master process\n    \"\"\"\n    import builtins as __builtin__\n    builtin_print = __builtin__.print\n\n    def print(*args, **kwargs):\n        force = kwargs.pop('force', False)\n        if is_master or force:\n            builtin_print(*args, **kwargs)\n\n    __builtin__.print = print\n\n\ndef is_dist_avail_and_initialized():\n    if not dist.is_available():\n        return False\n    if not dist.is_initialized():\n        return False\n    return True\n\n\ndef get_world_size():\n    if not is_dist_avail_and_initialized():\n        return 1\n    return dist.get_world_size()\n\n\ndef get_rank():\n    if not is_dist_avail_and_initialized():\n        return 0\n    return dist.get_rank()\n\n\ndef is_main_process():\n    return get_rank() == 0\n\n\ndef save_on_master(*args, **kwargs):\n    if is_main_process():\n        torch.save(*args, **kwargs)\n\n\ndef init_distributed_mode(args):\n    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:\n        args.rank = int(os.environ[\"RANK\"])\n        args.world_size = int(os.environ['WORLD_SIZE'])\n        args.gpu = int(os.environ['LOCAL_RANK'])\n    elif 'SLURM_PROCID' in os.environ:\n        args.rank = int(os.environ['SLURM_PROCID'])\n        args.gpu = args.rank % torch.cuda.device_count()\n    else:\n        print('Not using distributed mode')\n        args.distributed = False\n        return\n\n    args.distributed = True\n\n 
   torch.cuda.set_device(args.gpu)\n    args.dist_backend = 'nccl'\n    print('| distributed init (rank {}): {}'.format(\n        args.rank, args.dist_url), flush=True)\n    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n                                         world_size=args.world_size, rank=args.rank)\n    torch.distributed.barrier()\n    setup_for_distributed(args.rank == 0)\n\n\n
@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    if target.numel() == 0:\n        return [torch.zeros([], device=output.device)]\n    maxk = max(topk)\n    batch_size = target.size(0)\n\n
    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0)\n        res.append(correct_k.mul_(100.0 / batch_size))\n    return res\n\n\n
def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n    \"\"\"\n    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n    This will eventually be supported natively by PyTorch, and this\n    class can go away.\n    \"\"\"\n    # Compare parsed version parts: float(torchvision.__version__[:3]) misreads versions >= 0.10.\n    if int(vers[0]) <= 0 and int(vers[1]) < 7:\n        if input.numel() > 0:\n            return torch.nn.functional.interpolate(\n                input, size, scale_factor, mode, align_corners\n            )\n\n        output_shape = _output_size(2, input, size, scale_factor)\n        output_shape = list(input.shape[:-2]) + list(output_shape)\n        return _new_empty_tensor(input, output_shape)\n    else:\n        return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)\n"
  },
  {
    "path": "artrackv2_mindspore/lib/utils/tensor.py",
    "content": "import functools\nimport torch\nimport copy\nfrom collections import OrderedDict\n\n\nclass TensorDict(OrderedDict):\n    \"\"\"Container mainly used for dicts of torch tensors. Extends OrderedDict with pytorch functionality.\"\"\"\n\n    def concat(self, other):\n        \"\"\"Concatenates two dicts without copying internal data.\"\"\"\n        return TensorDict(self, **other)\n\n    def copy(self):\n        return TensorDict(super(TensorDict, self).copy())\n\n    def __deepcopy__(self, memodict={}):\n        return TensorDict(copy.deepcopy(list(self), memodict))\n\n    def __getattr__(self, name):\n        if not hasattr(torch.Tensor, name):\n            raise AttributeError('\\'TensorDict\\' object has not attribute \\'{}\\''.format(name))\n\n        def apply_attr(*args, **kwargs):\n            return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()})\n        return apply_attr\n\n    def attribute(self, attr: str, *args):\n        return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()})\n\n    def apply(self, fn, *args, **kwargs):\n        return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()})\n\n    @staticmethod\n    def _iterable(a):\n        return isinstance(a, (TensorDict, list))\n\n\nclass TensorList(list):\n    \"\"\"Container mainly used for lists of torch tensors. Extends lists with pytorch functionality.\"\"\"\n\n    def __init__(self, list_of_tensors = None):\n        if list_of_tensors is None:\n            list_of_tensors = list()\n        super(TensorList, self).__init__(list_of_tensors)\n\n    def __deepcopy__(self, memodict={}):\n        return TensorList(copy.deepcopy(list(self), memodict))\n\n    def __getitem__(self, item):\n        if isinstance(item, int):\n            return super(TensorList, self).__getitem__(item)\n        elif isinstance(item, (tuple, list)):\n            return TensorList([super(TensorList, self).__getitem__(i) for i in item])\n        else:\n            return TensorList(super(TensorList, self).__getitem__(item))\n\n    def __add__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 + e2 for e1, e2 in zip(self, other)])\n        return TensorList([e + other for e in self])\n\n    def __radd__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 + e1 for e1, e2 in zip(self, other)])\n        return TensorList([other + e for e in self])\n\n    def __iadd__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] += e2\n        else:\n            for i in range(len(self)):\n                self[i] += other\n        return self\n\n    def __sub__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 - e2 for e1, e2 in zip(self, other)])\n        return TensorList([e - other for e in self])\n\n    def __rsub__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 - e1 for e1, e2 in zip(self, other)])\n        return TensorList([other - e for e in self])\n\n    def __isub__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] -= e2\n        else:\n            for i in range(len(self)):\n                self[i] -= other\n        return self\n\n    def __mul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 * e2 for e1, e2 in 
zip(self, other)])\n        return TensorList([e * other for e in self])\n\n    def __rmul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 * e1 for e1, e2 in zip(self, other)])\n        return TensorList([other * e for e in self])\n\n    def __imul__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] *= e2\n        else:\n            for i in range(len(self)):\n                self[i] *= other\n        return self\n\n    def __truediv__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 / e2 for e1, e2 in zip(self, other)])\n        return TensorList([e / other for e in self])\n\n    def __rtruediv__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 / e1 for e1, e2 in zip(self, other)])\n        return TensorList([other / e for e in self])\n\n    def __itruediv__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] /= e2\n        else:\n            for i in range(len(self)):\n                self[i] /= other\n        return self\n\n    def __matmul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 @ e2 for e1, e2 in zip(self, other)])\n        return TensorList([e @ other for e in self])\n\n    def __rmatmul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 @ e1 for e1, e2 in zip(self, other)])\n        return TensorList([other @ e for e in self])\n\n    def __imatmul__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] @= e2\n        else:\n            for i in range(len(self)):\n                self[i] @= other\n        return self\n\n    def __mod__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 % e2 for e1, e2 in zip(self, other)])\n        return TensorList([e % other for e in self])\n\n    def __rmod__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 % e1 for e1, e2 in zip(self, other)])\n        return TensorList([other % e for e in self])\n\n    def __pos__(self):\n        return TensorList([+e for e in self])\n\n    def __neg__(self):\n        return TensorList([-e for e in self])\n\n    def __le__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 <= e2 for e1, e2 in zip(self, other)])\n        return TensorList([e <= other for e in self])\n\n    def __ge__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 >= e2 for e1, e2 in zip(self, other)])\n        return TensorList([e >= other for e in self])\n\n    def concat(self, other):\n        return TensorList(super(TensorList, self).__add__(other))\n\n    def copy(self):\n        return TensorList(super(TensorList, self).copy())\n\n    def unroll(self):\n        if not any(isinstance(t, TensorList) for t in self):\n            return self\n\n        new_list = TensorList()\n        for t in self:\n            if isinstance(t, TensorList):\n                new_list.extend(t.unroll())\n            else:\n                new_list.append(t)\n        return new_list\n\n    def list(self):\n        return list(self)\n\n    def attribute(self, attr: str, *args):\n        return TensorList([getattr(e, attr, *args) for e in self])\n\n    def apply(self, fn):\n       
 return TensorList([fn(e) for e in self])\n\n    def __getattr__(self, name):\n        if not hasattr(torch.Tensor, name):\n            raise AttributeError('\\'TensorList\\' object has no attribute \\'{}\\''.format(name))\n\n        def apply_attr(*args, **kwargs):\n            return TensorList([getattr(e, name)(*args, **kwargs) for e in self])\n\n        return apply_attr\n\n    @staticmethod\n    def _iterable(a):\n        return isinstance(a, (TensorList, list))\n\n\ndef tensor_operation(op):\n    def islist(a):\n        return isinstance(a, TensorList)\n\n    @functools.wraps(op)\n    def oplist(*args, **kwargs):\n        if len(args) == 0:\n            raise ValueError('There must be at least one positional argument (the operand).')\n\n        if len(args) == 1:\n            if islist(args[0]):\n                return TensorList([op(a, **kwargs) for a in args[0]])\n        else:\n            # Multiple operands: only the first two may be TensorLists\n            if islist(args[0]) and islist(args[1]):\n                return TensorList([op(a, b, *args[2:], **kwargs) for a, b in zip(*args[:2])])\n            if islist(args[0]):\n                return TensorList([op(a, *args[1:], **kwargs) for a in args[0]])\n            if islist(args[1]):\n                return TensorList([op(args[0], b, *args[2:], **kwargs) for b in args[1]])\n\n        # None of the operands are lists\n        return op(*args, **kwargs)\n\n    return oplist\n"
  },
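  {
    "path": "artrackv2_mindspore/lib/utils/tensorlist_example.py",
    "content": "# Illustrative usage sketch for TensorList (editor-added example, not part of\n# the original repo). The import path below is an assumption; adjust it to\n# wherever TensorList and tensor_operation live in this codebase.\nimport torch\n\nfrom lib.utils.tensor import TensorList, tensor_operation  # hypothetical module path\n\n# Arithmetic broadcasts elementwise over the wrapped tensors...\na = TensorList([torch.ones(2), torch.ones(3)])\nb = TensorList([torch.full((2,), 2.0), torch.full((3,), 3.0)])\nprint((a + b)[0])  # tensor([3., 3.])\nprint((a * 2)[1])  # tensor([2., 2., 2.])\n\n# ...and unknown attributes are delegated to torch.Tensor per element.\nprint(a.sum())  # [tensor(2.), tensor(3.)]\n\n\n# tensor_operation lifts an ordinary tensor function to TensorList operands.\n@tensor_operation\ndef squared_norm(x: torch.Tensor):\n    return (x * x).sum()\n\n\nprint(squared_norm(b))  # [tensor(8.), tensor(27.)]\n"
  },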
  {
    "path": "artrackv2_mindspore/lib/utils/variable_hook.py",
    "content": "import torch\r\nfrom bytecode import Bytecode, Instr\r\n\r\n\r\nclass get_local(object):\r\n    cache = {}\r\n    is_activate = False\r\n\r\n    def __init__(self, varname):\r\n        self.varname = varname\r\n\r\n    def __call__(self, func):\r\n        if not type(self).is_activate:\r\n            return func\r\n\r\n        type(self).cache[func.__qualname__] = []\r\n        c = Bytecode.from_code(func.__code__)\r\n        extra_code = [\r\n            Instr('STORE_FAST', '_res'),\r\n            Instr('LOAD_FAST', self.varname),\r\n            Instr('STORE_FAST', '_value'),\r\n            Instr('LOAD_FAST', '_res'),\r\n            Instr('LOAD_FAST', '_value'),\r\n            Instr('BUILD_TUPLE', 2),\r\n            Instr('STORE_FAST', '_result_tuple'),\r\n            Instr('LOAD_FAST', '_result_tuple'),\r\n        ]\r\n        c[-1:-1] = extra_code\r\n        func.__code__ = c.to_code()\r\n\r\n        def wrapper(*args, **kwargs):\r\n            res, values = func(*args, **kwargs)\r\n            if isinstance(values, torch.Tensor):\r\n                type(self).cache[func.__qualname__].append(values.detach().cpu().numpy())\r\n            elif isinstance(values, list):  # list of Tensor\r\n                type(self).cache[func.__qualname__].append([value.detach().cpu().numpy() for value in values])\r\n            else:\r\n                raise NotImplementedError\r\n            return res\r\n\r\n        return wrapper\r\n\r\n    @classmethod\r\n    def clear(cls):\r\n        for key in cls.cache.keys():\r\n            cls.cache[key] = []\r\n\r\n    @classmethod\r\n    def activate(cls):\r\n        cls.is_activate = True\r\n"
  },
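  {
    "path": "artrackv2_mindspore/lib/utils/variable_hook_example.py",
    "content": "# Illustrative usage sketch for the get_local hook (editor-added example, not\n# part of the original repo). get_local.activate() must run before the\n# decorated function is defined, because the bytecode rewrite happens at\n# decoration time.\nimport torch\n\nfrom lib.utils.variable_hook import get_local\n\nget_local.activate()\n\n\n@get_local('attn')\ndef toy_attention(x):\n    attn = torch.softmax(x @ x.transpose(0, 1), dim=-1)  # the local being captured\n    out = attn @ x\n    return out\n\n\n_ = toy_attention(torch.randn(4, 8))\n# Captured values are cached as numpy arrays under the function qualname.\nprint(get_local.cache['toy_attention'][0].shape)  # (4, 4)\nget_local.clear()\n"
  },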
  {
    "path": "artrackv2_mindspore/lib/vis/__init__.py",
    "content": ""
  },
  {
    "path": "artrackv2_mindspore/lib/vis/plotting.py",
    "content": "import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport cv2\n\n\ndef draw_figure(fig):\n    fig.canvas.draw()\n    fig.canvas.flush_events()\n    plt.pause(0.001)\n\n\ndef show_tensor(a: torch.Tensor, fig_num = None, title = None, range=(None, None), ax=None):\n    \"\"\"Display a 2D tensor.\n    args:\n        fig_num: Figure number.\n        title: Title of figure.\n    \"\"\"\n    a_np = a.squeeze().cpu().clone().detach().numpy()\n    if a_np.ndim == 3:\n        a_np = np.transpose(a_np, (1, 2, 0))\n\n    if ax is None:\n        fig = plt.figure(fig_num)\n        plt.tight_layout()\n        plt.cla()\n        plt.imshow(a_np, vmin=range[0], vmax=range[1])\n        plt.axis('off')\n        plt.axis('equal')\n        if title is not None:\n            plt.title(title)\n        draw_figure(fig)\n    else:\n        ax.cla()\n        ax.imshow(a_np, vmin=range[0], vmax=range[1])\n        ax.set_axis_off()\n        ax.axis('equal')\n        if title is not None:\n            ax.set_title(title)\n        draw_figure(plt.gcf())\n\n\ndef plot_graph(a: torch.Tensor, fig_num = None, title = None):\n    \"\"\"Plot graph. Data is a 1D tensor.\n    args:\n        fig_num: Figure number.\n        title: Title of figure.\n    \"\"\"\n    a_np = a.squeeze().cpu().clone().detach().numpy()\n    if a_np.ndim > 1:\n        raise ValueError\n    fig = plt.figure(fig_num)\n    # plt.tight_layout()\n    plt.cla()\n    plt.plot(a_np)\n    if title is not None:\n        plt.title(title)\n    draw_figure(fig)\n\n\ndef show_image_with_boxes(im, boxes, iou_pred=None, disp_ids=None):\n    im_np = im.clone().cpu().squeeze().numpy()\n    im_np = np.ascontiguousarray(im_np.transpose(1, 2, 0).astype(np.uint8))\n\n    boxes = boxes.view(-1, 4).cpu().numpy().round().astype(int)\n\n    # Draw proposals\n    for i_ in range(boxes.shape[0]):\n        if disp_ids is None or disp_ids[i_]:\n            bb = boxes[i_, :]\n            disp_color = (i_*38 % 256, (255 - i_*97) % 256, (123 + i_*66) % 256)\n            cv2.rectangle(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]),\n                          disp_color, 1)\n\n            if iou_pred is not None:\n                text_pos = (bb[0], bb[1] - 5)\n                cv2.putText(im_np, 'ID={} IOU = {:3.2f}'.format(i_, iou_pred[i_]), text_pos,\n                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, bottomLeftOrigin=False)\n\n    im_tensor = torch.from_numpy(im_np.transpose(2, 0, 1)).float()\n\n    return im_tensor\n\n\n\ndef _pascal_color_map(N=256, normalized=False):\n    \"\"\"\n    Python implementation of the color map function for the PASCAL VOC data set.\n    Official Matlab version can be found in the PASCAL VOC devkit\n    http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit\n    \"\"\"\n\n    def bitget(byteval, idx):\n        return (byteval & (1 << idx)) != 0\n\n    dtype = 'float32' if normalized else 'uint8'\n    cmap = np.zeros((N, 3), dtype=dtype)\n    for i in range(N):\n        r = g = b = 0\n        c = i\n        for j in range(8):\n            r = r | (bitget(c, 0) << 7 - j)\n            g = g | (bitget(c, 1) << 7 - j)\n            b = b | (bitget(c, 2) << 7 - j)\n            c = c >> 3\n\n        cmap[i] = np.array([r, g, b])\n\n    cmap = cmap / 255 if normalized else cmap\n    return cmap\n\n\ndef overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None):\n    \"\"\" Overlay mask over image.\n    Source: 
https://github.com/albertomontesg/davis-interactive/blob/master/davisinteractive/utils/visualization.py\n    This function allows you to overlay a mask over an image with some\n    transparency.\n    # Arguments\n        im: Numpy Array. Array with the image. The shape must be (H, W, 3) and\n            the pixels must be represented as `np.uint8` data type.\n        ann: Numpy Array. Array with the mask. The shape must be (H, W) and the\n            values must be integers\n        alpha: Float. Proportion of alpha to apply at the overlaid mask.\n        colors: Numpy Array. Optional custom colormap. It must have shape (N, 3)\n            being N the maximum number of colors to represent.\n        contour_thickness: Integer. Thickness of each object index contour draw\n            over the overlay. This function requires to have installed the\n            package `opencv-python`.\n    # Returns\n        Numpy Array: Image of the overlay with shape (H, W, 3) and data type\n            `np.uint8`.\n    \"\"\"\n    im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=int)\n    if im.shape[:-1] != ann.shape:\n        raise ValueError('First two dimensions of `im` and `ann` must match')\n    if im.shape[-1] != 3:\n        raise ValueError('`im` must have three channels in the last dimension')\n\n    colors = _pascal_color_map() if colors is None else colors\n    colors = np.asarray(colors, dtype=np.uint8)\n\n    mask = colors[ann]\n    fg = im * alpha + (1 - alpha) * mask\n\n    img = im.copy()\n    img[ann > 0] = fg[ann > 0]\n\n    if contour_thickness:  # pragma: no cover\n        import cv2\n        for obj_id in np.unique(ann[ann > 0]):\n            contours = cv2.findContours((ann == obj_id).astype(\n                np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n            cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(),\n                             contour_thickness)\n    return img\n"
  },
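  {
    "path": "artrackv2_mindspore/lib/vis/plotting_example.py",
    "content": "# Illustrative usage sketch for overlay_mask (editor-added example, not part\n# of the original repo).\nimport numpy as np\n\nfrom lib.vis.plotting import overlay_mask\n\n# A dummy 64x64 RGB image and an integer mask with two object ids (0 = background).\nim = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)\nann = np.zeros((64, 64), dtype=int)\nann[8:32, 8:32] = 1\nann[40:60, 40:60] = 2\n\n# Each object id is blended into the image with its PASCAL colormap color.\noverlay = overlay_mask(im, ann, alpha=0.5)\nprint(overlay.shape, overlay.dtype)  # (64, 64, 3) uint8\n"
  },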
  {
    "path": "artrackv2_mindspore/lib/vis/utils.py",
    "content": "import torch\r\nimport numpy as np\r\n\r\n\r\ndef numpy_to_torch(a: np.ndarray):\r\n    return torch.from_numpy(a).float().permute(2, 0, 1).unsqueeze(0)"
  },
  {
    "path": "artrackv2_mindspore/tracking/_init_paths.py",
    "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path as osp\nimport sys\n\n\ndef add_path(path):\n    if path not in sys.path:\n        sys.path.insert(0, path)\n\n\nthis_dir = osp.dirname(__file__)\n\nprj_path = osp.join(this_dir, '..')\nadd_path(prj_path)\n"
  },
  {
    "path": "artrackv2_mindspore/tracking/analysis_results.ipynb",
    "content": "{\r\n \"cells\": [\r\n  {\r\n   \"cell_type\": \"code\",\r\n   \"execution_count\": null,\r\n   \"outputs\": [],\r\n   \"source\": [\r\n    \"%load_ext autoreload\\n\",\r\n    \"%autoreload 2\\n\",\r\n    \"%matplotlib inline\\n\",\r\n    \"import os\\n\",\r\n    \"import sys\\n\",\r\n    \"import matplotlib.pyplot as plt\\n\",\r\n    \"plt.rcParams['figure.figsize'] = [14, 8]\\n\",\r\n    \"\\n\",\r\n    \"sys.path.append('/home/yebotao/OSTrack')\\n\",\r\n    \"from lib.test.analysis.plot_results import plot_results, print_results, print_per_sequence_results, print_results_per_video\\n\",\r\n    \"from lib.test.evaluation import get_dataset, trackerlist\"\r\n   ],\r\n   \"metadata\": {\r\n    \"collapsed\": false,\r\n    \"pycharm\": {\r\n     \"name\": \"#%%\\n\"\r\n    }\r\n   }\r\n  },\r\n  {\r\n   \"cell_type\": \"code\",\r\n   \"execution_count\": null,\r\n   \"outputs\": [],\r\n   \"source\": [\r\n    \"dataset_name = 'lasot'\\n\",\r\n    \"\\n\",\r\n    \"trackers = []\\n\",\r\n    \"trackers.extend(trackerlist(name='ostrack', parameter_name='vitb_256_mae_ce_32x4_ep300', dataset_name=dataset_name,\\n\",\r\n    \"                            run_ids=None, display_name='OSTrack256'))\\n\",\r\n    \"trackers.extend(trackerlist(name='ostrack', parameter_name='vitb_384_mae_ce_32x4_ep300', dataset_name=dataset_name,\\n\",\r\n    \"                            run_ids=None, display_name='OSTrack384'))\\n\",\r\n    \"\\n\",\r\n    \"dataset = get_dataset(dataset_name)\\n\",\r\n    \"# plot_results(trackers, dataset, dataset_name, merge_results=True, plot_types=('success', 'prec'),\\n\",\r\n    \"#              skip_missing_seq=False, force_evaluation=True, plot_bin_gap=0.05, exclude_invalid_frames=False)\\n\",\r\n    \"print_results(trackers, dataset, dataset_name, merge_results=True, plot_types=('success', 'prec', 'norm_prec'))\\n\",\r\n    \"# print_results_per_video(trackers, dataset, dataset_name, merge_results=True, plot_types=('success', 'prec', 'norm_prec'),\\n\",\r\n    \"#                         per_video=True, force_evaluation=True)\\n\",\r\n    \"# print_per_sequence_results(trackers, dataset, dataset_name, merge_results=True, plot_types=('success', 'prec', 'norm_prec'))\"\r\n   ],\r\n   \"metadata\": {\r\n    \"collapsed\": false,\r\n    \"pycharm\": {\r\n     \"name\": \"#%%\\n\"\r\n    }\r\n   }\r\n  }\r\n ],\r\n \"metadata\": {\r\n  \"kernelspec\": {\r\n   \"display_name\": \"Python 3\",\r\n   \"language\": \"python\",\r\n   \"name\": \"python3\"\r\n  },\r\n  \"language_info\": {\r\n   \"codemirror_mode\": {\r\n    \"name\": \"ipython\",\r\n    \"version\": 2\r\n   },\r\n   \"file_extension\": \".py\",\r\n   \"mimetype\": \"text/x-python\",\r\n   \"name\": \"python\",\r\n   \"nbconvert_exporter\": \"python\",\r\n   \"pygments_lexer\": \"ipython2\",\r\n   \"version\": \"2.7.6\"\r\n  }\r\n },\r\n \"nbformat\": 4,\r\n \"nbformat_minor\": 0\r\n}"
  },
  {
    "path": "artrackv2_mindspore/tracking/analysis_results.py",
    "content": "import _init_paths\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = [8, 8]\n\nfrom lib.test.analysis.plot_results import plot_results, print_results, print_per_sequence_results\nfrom lib.test.evaluation import get_dataset, trackerlist\n\ntrackers = []\n#dataset_name = 'lasot_extension_subset'\ndataset_name = 'lasot'\n\"\"\"stark\"\"\"\n# trackers.extend(trackerlist(name='stark_s', parameter_name='baseline', dataset_name=dataset_name,\n#                             run_ids=None, display_name='STARK-S50'))\n# trackers.extend(trackerlist(name='stark_st', parameter_name='baseline', dataset_name=dataset_name,\n#                             run_ids=None, display_name='STARK-ST50'))\n# trackers.extend(trackerlist(name='stark_st', parameter_name='baseline_R101', dataset_name=dataset_name,\n#                             run_ids=None, display_name='STARK-ST101'))\n\"\"\"TransT\"\"\"\n# trackers.extend(trackerlist(name='TransT_N2', parameter_name=None, dataset_name=None,\n#                             run_ids=None, display_name='TransT_N2', result_only=True))\n# trackers.extend(trackerlist(name='TransT_N4', parameter_name=None, dataset_name=None,\n#                             run_ids=None, display_name='TransT_N4', result_only=True))\n\"\"\"pytracking\"\"\"\n# trackers.extend(trackerlist('atom', 'default', None, range(0,5), 'ATOM'))\n# trackers.extend(trackerlist('dimp', 'dimp18', None, range(0,5), 'DiMP18'))\n# trackers.extend(trackerlist('dimp', 'dimp50', None, range(0,5), 'DiMP50'))\n# trackers.extend(trackerlist('dimp', 'prdimp18', None, range(0,5), 'PrDiMP18'))\n# trackers.extend(trackerlist('dimp', 'prdimp50', None, range(0,5), 'PrDiMP50'))\n\"\"\"ostrack\"\"\"\ntrackers.extend(trackerlist(name='ostrack', parameter_name='finetune_384', dataset_name=dataset_name,\n                            run_ids=None, display_name='OSTrack256'))\n#trackers.extend(trackerlist(name='ostrack', parameter_name='vitb_384_mae_ce_32x4_ep300', dataset_name=dataset_name,\n#.                            run_ids=None, display_name='OSTrack384'))\n\n\ndataset = get_dataset(dataset_name)\n# dataset = get_dataset('otb', 'nfs', 'uav', 'tc128ce')\n# plot_results(trackers, dataset, 'OTB2015', merge_results=True, plot_types=('success', 'norm_prec'),\n#              skip_missing_seq=False, force_evaluation=True, plot_bin_gap=0.05)\nprint_results(trackers, dataset, dataset_name, merge_results=True, plot_types=('success', 'norm_prec', 'prec'))\n# print_results(trackers, dataset, 'UNO', merge_results=True, plot_types=('success', 'prec'))\n"
  },
  {
    "path": "artrackv2_mindspore/tracking/analysis_results_ITP.py",
    "content": "import _init_paths\nimport argparse\nfrom lib.test.analysis.plot_results import print_results\nfrom lib.test.evaluation import get_dataset, trackerlist\n\n\ndef parse_args():\n    \"\"\"\n    args for evaluation.\n    \"\"\"\n    parser = argparse.ArgumentParser(description='Parse args for training')\n    # for train\n    parser.add_argument('--script', type=str, help='training script name')\n    parser.add_argument('--config', type=str, default='baseline', help='yaml configure file name')\n\n    args = parser.parse_args()\n\n    return args\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    trackers = []\n    trackers.extend(trackerlist(args.script, args.config, \"None\", None, args.config))\n\n    dataset = get_dataset('lasot')\n\n    print_results(trackers, dataset, 'LaSOT', merge_results=True, plot_types=('success', 'prec', 'norm_prec'))"
  },
  {
    "path": "artrackv2_mindspore/tracking/convert_transt.py",
    "content": "import _init_paths\nimport os\nfrom lib.test.evaluation import get_dataset\nimport shutil\n\ntrackers = []\n# dataset_name = 'uav'\ndataset_name = 'nfs'\n\n\nroot_dir = \"/data/sda/v-yanbi/iccv21/STARK_Latest/Stark\"\nbase_dir = os.path.join(root_dir, \"test/tracking_results/TransT_N2\")\ndataset = get_dataset(dataset_name)\nfor x in dataset:\n    seq_name = x.name\n    file_name = \"%s.txt\" % (seq_name.replace(\"nfs_\", \"\"))\n    file_path = os.path.join(base_dir, file_name)\n    file_path_new = os.path.join(base_dir, \"%s.txt\" % seq_name)\n    if os.path.exists(file_path):\n        shutil.move(file_path, file_path_new)\n\n"
  },
  {
    "path": "artrackv2_mindspore/tracking/create_default_local_file.py",
    "content": "import argparse\nimport os\nimport _init_paths\nfrom lib.train.admin import create_default_local_file_ITP_train\nfrom lib.test.evaluation import create_default_local_file_ITP_test\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Create default local file on ITP or PAI')\n    parser.add_argument(\"--workspace_dir\", type=str, required=True)  # workspace dir\n    parser.add_argument(\"--data_dir\", type=str, required=True)\n    parser.add_argument(\"--save_dir\", type=str, required=True)\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    workspace_dir = os.path.realpath(args.workspace_dir)\n    data_dir = os.path.realpath(args.data_dir)\n    save_dir = os.path.realpath(args.save_dir)\n    create_default_local_file_ITP_train(workspace_dir, data_dir)\n    create_default_local_file_ITP_test(workspace_dir, data_dir, save_dir)\n"
  },
  {
    "path": "artrackv2_mindspore/tracking/download_pytracking_results.py",
    "content": "import os\nimport sys\nimport gdown\nimport re\nimport shutil\nimport argparse\nimport tempfile\nimport _init_paths\n\nfrom lib.test.evaluation.environment import env_settings\n\npytracking_results_link_dict = {\n    \"dimp\": {\n        \"prdimp50_003.zip\": \"1p13j3iwcOCubBi3ms0hLwqnP6-x0J8Mc\",\n        \"prdimp50_002.zip\": \"1PPKgrAepbuyM2kjfzYAozQKTL6AjcQOz\",\n        \"prdimp50_001.zip\": \"17NFBObEDeK6mW4Mk2vN5Ekk1SGbFvxRS\",\n        \"prdimp50_000.zip\": \"1r3Efq7AumML2yGQ_KV4zmf4ATKVE1bo6\",\n        \"prdimp18_004.zip\": \"1DF4ZJQAa4CwvN_OiT4te33AV0kpsO7JM\",\n        \"prdimp18_003.zip\": \"1RgwJAN4TxnzgVgsfvrHIg1OUXD1EBZkO\",\n        \"prdimp18_002.zip\": \"17lMllYhygCqgE81DoHX4BZar3xc3auzM\",\n        \"prdimp18_001.zip\": \"1Yg7DmGYOnn2k0MYtSjjKlGyzO1Uimj4G\",\n        \"prdimp18_000.zip\": \"1DuZJSBJ-23WJBQTOWSAaoPYSbGAJJN2Z\",\n        \"prdimp50_004.zip\": \"1f9bx9-dtx3B5_IvIJhjjJyp-cnXciqLO\",\n        \"dimp50_004.zip\": \"1Lj3p8mYCoIqxzdQXZkWFTw-MA8c6eeLa\",\n        \"dimp50_000.zip\": \"1LCgf5sg453Z4bY37A_W5mbXeG68U1fET\",\n        \"dimp18_000.zip\": \"17M7dJZ1oKrIY4-O5lL_mlQPEubUn034g\",\n        \"dimp18_001.zip\": \"1AsiliVgISyDTouYOQYVOXA0srj3YskhJ\",\n        \"dimp18_002.zip\": \"1I0GrBaPnySOyPWSvItHhXH8182tFCi_Y\",\n        \"dimp50_001.zip\": \"1XfPvwAcymW88J1rq7RlhyKmqsawJDK-K\",\n        \"dimp18_004.zip\": \"1EztF6bpROFwZ1PSJWgMB7bQ4G_Z08YIg\",\n        \"dimp18_003.zip\": \"1iuiFLv04WE7GfBjm8UkZXFq4gheG2Ru8\",\n        \"dimp50_003.zip\": \"1rLsgeQXyKpD6ryl9BjlIVdO3vd27ekwy\",\n        \"dimp50_002.zip\": \"1wj2jUwlpHgsP1hAcuxXAVriUPuEspsu4\",\n    },\n    \"atom\": {\n        \"default_004.zip\": \"1BapnQh_8iRM44DXj862eOZV4q8zQLdmT\",\n        \"default_003.zip\": \"1YpfOBLBEUQQiX0fWMPA5pnW3dm0NG3E5\",\n        \"default_000.zip\": \"1x6fKGZk3V839mX99Gl_pw7JUaiMaTxc5\",\n        \"default_002.zip\": \"1QIlQFv3p6MBTwsYdIMYmzUDBDQGxGsUC\",\n        \"default_001.zip\": \"1-K2--GNCURDKEgUuiEF18K4DcCLvDEVt\",\n    },\n    \"kys\": {\n        \"default_004.zip\": \"1QdfkA3d4MzKwdDiBOM1ZhDJWk9NmALxD\",\n        \"default_000.zip\": \"1SCs79_ePTc8zxPDzRAgAmbbRlnmE89SN\",\n        \"default_003.zip\": \"1TCzq38QW4YiMrgU5VR6NAEefJ85gwzfT\",\n        \"default_002.zip\": \"1_9u1ybCFxHu0yJmW5ZzDR4-isJMEUsDf\",\n        \"default_001.zip\": \"1utJhdosNj6vlI75dfzUxGM3Vy8OjWslT\",\n    },\n}\n\n\ndef _download_file(file_id, path):\n    link = 'https://drive.google.com/uc?id=' + file_id\n    gdown.download(link, path, quiet=True)\n\n\ndef download_results(download_path, trackers='pytracking'):\n    \"\"\"\n    Script to automatically download tracker results for PyTracking.\n    args:\n        download_path - Directory where the zipped results are downloaded\n        trackers - Tracker results which are to be downloaded.\n                   If set to 'pytracking', results for all pytracking based trackers will be downloaded.\n                   If set to 'external', results for available external trackers will be downloaded.\n                   If set to 'all', all available results are downloaded.\n                   If set to a name of a tracker (e.g. atom), all results for that tracker are downloaded.\n                   Otherwise, it can be set to a dict, where the keys are the names of the trackers for which results are\n                   downloaded. The value can be set to either 'all', in which case all available results for the\n                    tracker are downloaded. 
Else the value should be a list of parameter file names.\n    \"\"\"\n    print(\"Using download path '{}'\".format(download_path))\n\n    os.makedirs(download_path, exist_ok=True)\n\n    # NOTE: only links for pytracking-based trackers are bundled in this copy\n    # of the script, so the external-tracker link table is treated as empty.\n    external_results_link_dict = {}\n\n    if isinstance(trackers, str):\n        if trackers == 'all':\n            all_trackers = list(pytracking_results_link_dict.keys()) + list(external_results_link_dict.keys())\n            trackers = {k: 'all' for k in all_trackers}\n        elif trackers == 'pytracking':\n            trackers = {k: 'all' for k in pytracking_results_link_dict.keys()}\n        elif trackers == 'external':\n            trackers = {k: 'all' for k in external_results_link_dict.keys()}\n        elif trackers in pytracking_results_link_dict or trackers in external_results_link_dict:\n            trackers = {trackers: 'all'}\n        else:\n            raise Exception(\"tracker_list must be set to 'all', a tracker name, or be a dict\")\n    elif isinstance(trackers, dict):\n        pass\n    else:\n        raise Exception(\"tracker_list must be set to 'all', or be a dict\")\n\n    common_link_dict = pytracking_results_link_dict\n    # for k, v in external_results_link_dict.items():\n    #     common_link_dict[k] = v\n\n    for trk, runfiles in trackers.items():\n        trk_path = os.path.join(download_path, trk)\n        if not os.path.exists(trk_path):\n            os.makedirs(trk_path)\n\n        if runfiles == 'all':\n            for params, fileid in common_link_dict[trk].items():\n                print('Downloading: {}/{}'.format(trk, params))\n                _download_file(fileid, os.path.join(trk_path, params))\n        elif isinstance(runfiles, (list, tuple)):\n            for p in runfiles:\n                for params, fileid in common_link_dict[trk].items():\n                    if re.match(r'{}(|_(\\d\\d\\d)).zip'.format(p), params) is not None:\n                        print('Downloading: {}/{}'.format(trk, params))\n                        _download_file(fileid, os.path.join(trk_path, params))\n\n        else:\n            raise Exception(\"tracker_list values must either be set to 'all', or be a list of param names\")\n\n\ndef unpack_tracking_results(download_path, output_path=None):\n    \"\"\"\n    Unpacks zipped benchmark results. The directory 'download_path' should have the following structure\n    - root\n        - tracker1\n            - param1.zip\n            - param2.zip\n            .\n            .\n        - tracker2\n            - param1.zip\n            - param2.zip\n        .\n        .\n    args:\n        download_path - Path to the directory where the zipped results are stored\n        output_path - Path to the directory where the results will be unpacked. 
Set to env_settings().results_path\n                      by default\n    \"\"\"\n\n    if output_path is None:\n        output_path = env_settings().results_path\n\n    if not os.path.exists(output_path):\n        os.makedirs(output_path)\n\n    trackers = os.listdir(download_path)\n\n    for t in trackers:\n        runfiles = os.listdir(os.path.join(download_path, t))\n\n        for r in runfiles:\n            save_path = os.path.join(output_path, t)\n            if not os.path.exists(save_path):\n                os.makedirs(save_path)\n            shutil.unpack_archive(os.path.join(download_path, t, r), os.path.join(save_path, r[:-4]), 'zip')\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Download and unpack zipped results')\n    parser.add_argument('--tracker', type=str, default='pytracking',\n                        help='Name of tracker results to download, \"pytracking\" (results for all pytracking-based '\n                             'trackers), \"external\" (results for external trackers) or \"all\"')\n    parser.add_argument('--output_path', type=str, default=None,\n                        help='Path to the directory where the results will be unpacked.')\n    parser.add_argument('--temp_download_path', type=str, default=None,\n                        help='Temporary path used for downloading the Zip files.')\n    parser.add_argument('--download', type=int, choices=[0, 1], default=1,\n                        help='Whether to download results (1) or only unpack previously downloaded files (0).')\n    args = parser.parse_args()\n\n    download_path = args.temp_download_path\n    if download_path is None:\n        download_path = '{}/pytracking_results/'.format(tempfile.gettempdir())\n\n    if args.download:\n        download_results(download_path, args.tracker)\n\n    unpack_tracking_results(download_path, args.output_path)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
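  {
    "path": "artrackv2_mindspore/tracking/download_results_example.py",
    "content": "# Illustrative usage sketch for download_pytracking_results.py (editor-added\n# example, not part of the original repo). Paths are placeholders.\nfrom download_pytracking_results import download_results, unpack_tracking_results\n\n# Download every DiMP run but only the 'default' runs of ATOM, then unpack\n# them into the directory configured as env_settings().results_path.\ndownload_results('/tmp/pytracking_results', trackers={'dimp': 'all', 'atom': ['default']})\nunpack_tracking_results('/tmp/pytracking_results')\n"
  },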
  {
    "path": "artrackv2_mindspore/tracking/pre_read_datasets.py",
    "content": "import _init_paths\nimport multiprocessing as mp\nimport argparse\nimport os\nfrom lib.utils.lmdb_utils import decode_str\nimport time\nimport json\n\n\ndef parse_args():\n    \"\"\"\n    args for training.\n    \"\"\"\n    parser = argparse.ArgumentParser(description='Parse args for training')\n    parser.add_argument('--data_dir', type=str, help='directory where lmdb data is located')\n    parser.add_argument('--dataset_str', type=str, help=\"which datasets to use\")\n    args = parser.parse_args()\n\n    return args\n\n\ndef get_trknet_dict(trknet_dir):\n    with open(os.path.join(trknet_dir, \"seq_list.json\"), \"r\") as f:\n        seq_list = json.loads(f.read())\n    res_dict = {}\n    set_idx_pre = -1\n    for set_idx, seq_name in seq_list:\n        if set_idx != set_idx_pre:\n            res_dict[set_idx] = \"anno/%s.txt\" % seq_name\n            set_idx_pre = set_idx\n    return res_dict\n\n\ndef target(lmdb_dir, key_name):\n    _ = decode_str(lmdb_dir, key_name)\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    data_dir = args.data_dir\n    dataset_str = args.dataset_str\n    key_dict = {\"got10k_lmdb\": \"train/list.txt\",\n                \"lasot_lmdb\": \"LaSOTBenchmark.json\",\n                \"coco_lmdb\": \"annotations/instances_train2017.json\",\n                \"vid_lmdb\": \"cache.json\"}\n    print(\"Ready to pre load datasets\")\n    start = time.time()\n    ps = []\n    datasets = []\n    if 'g' in dataset_str:\n        datasets.append(\"got10k_lmdb\")\n    if 'l' in dataset_str:\n        datasets.append(\"lasot_lmdb\")\n    if 'c' in dataset_str:\n        datasets.append(\"coco_lmdb\")\n    if 'v' in dataset_str:\n        datasets.append(\"vid_lmdb\")\n    for dataset in datasets:\n        lmdb_dir = os.path.join(data_dir, dataset)\n        p = mp.Process(target=target, args=(lmdb_dir, key_dict[dataset]))\n        print(\"add %s %s to job queue\" % (lmdb_dir, key_dict[dataset]))\n        ps.append(p)\n    # deal with trackingnet\n    if 't' in dataset_str:\n        trknet_dict = get_trknet_dict(os.path.join(data_dir, \"trackingnet_lmdb\"))\n        for set_idx, seq_path in trknet_dict.items():\n            lmdb_dir = os.path.join(data_dir, \"trackingnet_lmdb\", \"TRAIN_%d_lmdb\" % set_idx)\n            p = mp.Process(target=target, args=(lmdb_dir, seq_path))\n            print(\"add %s %s to job queue\" % (lmdb_dir, seq_path))\n            ps.append(p)\n    for p in ps:\n        p.start()\n    for p in ps:\n        p.join()\n\n    print(\"Pre read over\")\n    end = time.time()\n    hour = (end - start) / 3600\n    print(\"it takes %.2f hours to pre-read data\" % hour)\n"
  },
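  {
    "path": "artrackv2_mindspore/tracking/pre_read_datasets_example.py",
    "content": "# Illustrative launch sketch for pre_read_datasets.py (editor-added example,\n# not part of the original repo). --dataset_str is a string of letters\n# selecting datasets: g = GOT-10k, l = LaSOT, c = COCO, v = ImageNet-VID,\n# t = TrackingNet; each selected dataset spawns one process that touches a\n# key file so the lmdb data is pulled into the OS page cache before training.\nimport subprocess\n\nsubprocess.run(['python', 'tracking/pre_read_datasets.py',\n                '--data_dir', '/path/to/lmdb_root',\n                '--dataset_str', 'glt'], check=True)\n"
  },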
  {
    "path": "artrackv2_mindspore/tracking/profile_model.py",
    "content": "import os\nimport sys\n\nprj_path = os.path.join(os.path.dirname(__file__), '..')\nif prj_path not in sys.path:\n    sys.path.append(prj_path)\n\nimport argparse\nimport torch\nfrom lib.utils.misc import NestedTensor\nfrom thop import profile\nfrom thop.utils import clever_format\nimport time\nimport importlib\n\n\ndef parse_args():\n    \"\"\"\n    args for training.\n    \"\"\"\n    parser = argparse.ArgumentParser(description='Parse args for training')\n    # for train\n    parser.add_argument('--script', type=str, default='ostrack', choices=['ostrack'],\n                        help='training script name')\n    parser.add_argument('--config', type=str, default='2stage_256_got', help='yaml configure file name')\n    args = parser.parse_args()\n\n    return args\n\n\ndef evaluate_vit(model, template, search, seq_input, stage):\n    '''Speed Test'''\n    #macs1, params1 = profile(model, inputs=(template, search),\n    #                         custom_ops=None, verbose=False)\n    #macs, params = clever_format([macs1, params1], \"%.3f\")\n    #print('overall macs is ', macs)\n    #print('overall params is ', params)\n\n    T_w = 500\n    T_t = 1000\n    print(\"testing speed ...\")\n    torch.cuda.synchronize()\n    with torch.no_grad():\n        # overall\n        for i in range(T_w):\n            _ = model(template, search, seq_input=seq_input, stage=stage)\n        start = time.time()\n        for i in range(T_t):\n            _ = model(template, search, seq_input=seq_input, stage=stage)\n        torch.cuda.synchronize()\n        end = time.time()\n        avg_lat = (end - start) / T_t\n        print(\"The average overall latency is %.2f ms\" % (avg_lat * 1000))\n        print(\"FPS is %.2f fps\" % (1. / avg_lat))\n        # for i in range(T_w):\n        #     _ = model(template, search)\n        # start = time.time()\n        # for i in range(T_t):\n        #     _ = model(template, search)\n        # end = time.time()\n        # avg_lat = (end - start) / T_t\n        # print(\"The average backbone latency is %.2f ms\" % (avg_lat * 1000))\n\n\ndef evaluate_vit_separate(model, template, search):\n    '''Speed Test'''\n    T_w = 50\n    T_t = 1000\n    print(\"testing speed ...\")\n    z = model.forward_backbone(template, image_type='template')\n    x = model.forward_backbone(search, image_type='search')\n    with torch.no_grad():\n        # overall\n        for i in range(T_w):\n            _ = model.forward_backbone(search, image_type='search')\n            _ = model.forward_cat(z, x)\n        start = time.time()\n        for i in range(T_t):\n            _ = model.forward_backbone(search, image_type='search')\n            _ = model.forward_cat(z, x)\n        end = time.time()\n        avg_lat = (end - start) / T_t\n        print(\"The average overall latency is %.2f ms\" % (avg_lat * 1000))\n\n\ndef get_data(bs, sz):\n    img_patch = torch.randn(bs, 3, sz, sz)\n    att_mask = torch.rand(bs, sz, sz) > 0.5\n    return NestedTensor(img_patch, att_mask)\n\n\nif __name__ == \"__main__\":\n    device = \"cuda:1\"\n    torch.cuda.set_device(device)\n    # Compute the Flops and Params of our STARK-S model\n    args = parse_args()\n    '''update cfg'''\n    yaml_fname = 'experiments/%s/%s.yaml' % (args.script, args.config)\n    config_module = importlib.import_module('lib.config.%s.config' % args.script)\n    cfg = config_module.cfg\n    config_module.update_config_from_file(yaml_fname)\n    print(cfg)\n    '''set some values'''\n    bs = 1\n    z_sz = cfg.TEST.TEMPLATE_SIZE\n  
  x_sz = cfg.TEST.SEARCH_SIZE\n    print(x_sz)\n    print(z_sz)\n\n    if args.script == \"ostrack\":\n        model_module = importlib.import_module('lib.models')\n        model_constructor = model_module.build_ostrack\n        model = model_constructor(cfg, training=False)\n        # get the template and search\n        template = torch.randn(bs, 2, 3, z_sz, z_sz)\n        search = torch.randn(bs, 3, x_sz, x_sz)\n        # transfer to device\n        model = model.to(device)\n        model = model.eval()\n        template = template.to(device)\n        search = search.to(device)\n\n        merge_layer = cfg.MODEL.BACKBONE.MERGE_LAYER\n        #seq_input = torch.Tensor([[1]])\n        #seq_input = torch.Tensor([[1,2,3,4,5]])\n        #seq_input = torch.Tensor([[5,6,7,8,9,10,11,12,13,14,15,16,17,1,2,3]])\n        seq_input = torch.Tensor([[5,6,7,8,9,10,11,12,13,14,15,16,1,2,3,4,17,18,19,20]]).to(device).repeat(bs, 1)\n        stage = \"doit\"\n        #stage = None\n        if merge_layer <= 0:\n            evaluate_vit(model, template, search, seq_input, stage)\n        else:\n            evaluate_vit_separate(model, template, search)\n\n    else:\n        raise NotImplementedError\n"
  },
  {
    "path": "artrackv2_mindspore/tracking/test.py",
    "content": "import os\nimport sys\nimport argparse\nimport mindspore as ms\nfrom mindspore import context\nimport mindspore\n\nprj_path = os.path.join(os.path.dirname(__file__), '..')\nif prj_path not in sys.path:\n    sys.path.append(prj_path)\n\nfrom lib.test.evaluation import get_dataset\nfrom lib.test.evaluation.running import run_dataset\nfrom lib.test.evaluation.tracker import Tracker\n\n\ndef run_tracker(tracker_name, tracker_param, run_id=None, dataset_name='otb', sequence=None, debug=0, threads=0,\n                num_gpus=8):\n    \"\"\"Run tracker on sequence or dataset.\n    args:\n        tracker_name: Name of tracking method.\n        tracker_param: Name of parameter file.\n        run_id: The run id.\n        dataset_name: Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).\n        sequence: Sequence number or name.\n        debug: Debug level.\n        threads: Number of threads.\n    \"\"\"\n    dataset = get_dataset(dataset_name)\n\n    if sequence is not None:\n        dataset = [dataset[sequence]]\n\n    trackers = [Tracker(tracker_name, tracker_param, dataset_name, run_id)]\n\n    run_dataset(dataset, trackers, debug, threads, num_gpus=num_gpus)\n\n\ndef main():\n    context.set_context(device_target=\"GPU\")\n    mindspore.run_check()\n\n    parser = argparse.ArgumentParser(description='Run tracker on sequence or dataset.')\n    parser.add_argument('tracker_name', type=str, help='Name of tracking method.')\n    parser.add_argument('tracker_param', type=str, help='Name of config file.')\n    parser.add_argument('--runid', type=int, default=None, help='The run id.')\n    parser.add_argument('--dataset_name', type=str, default='otb', help='Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).')\n    parser.add_argument('--sequence', type=str, default=None, help='Sequence number or name.')\n    parser.add_argument('--debug', type=int, default=0, help='Debug level.')\n    parser.add_argument('--threads', type=int, default=0, help='Number of threads.')\n    parser.add_argument('--num_gpus', type=int, default=8)\n\n    args = parser.parse_args()\n\n    try:\n        seq_name = int(args.sequence)\n    except:\n        seq_name = args.sequence\n\n    run_tracker(args.tracker_name, args.tracker_param, args.runid, args.dataset_name, seq_name, args.debug,\n                args.threads, num_gpus=args.num_gpus)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
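  {
    "path": "artrackv2_mindspore/tracking/test_example.py",
    "content": "# Illustrative usage sketch for tracking/test.py (editor-added example, not\n# part of the original repo). The tracker and parameter names below are\n# placeholders; use whichever configs exist under experiments/. Run from the\n# tracking/ directory so the local test.py module is imported.\nfrom test import run_tracker\n\n# Evaluate one tracker configuration on LaSOT, spreading sequences over\n# 4 worker threads and 2 GPUs.\nrun_tracker('artrack_seq', 'artrack_seq_256_full', run_id=None,\n            dataset_name='lasot', sequence=None, threads=4, num_gpus=2)\n"
  },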
  {
    "path": "artrackv2_mindspore/tracking/test_exp.py",
    "content": "import os\nimport sys\nimport argparse\n\nprj_path = os.path.join(os.path.dirname(__file__), '..')\nif prj_path not in sys.path:\n    sys.path.append(prj_path)\n\nfrom lib.test.evaluation import get_dataset\nfrom lib.test.evaluation.running import run_dataset\nfrom lib.test.evaluation.tracker import Tracker\n\n\ndef run_tracker(tracker_name, tracker_param, run_id=None, dataset_name='otb', sequence=None, debug=0, threads=0,\n                num_gpus=8):\n    \"\"\"Run tracker on sequence or dataset.\n    args:\n        tracker_name: Name of tracking method.\n        tracker_param: Name of parameter file.\n        run_id: The run id.\n        dataset_name: Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).\n        sequence: Sequence number or name.\n        debug: Debug level.\n        threads: Number of threads.\n    \"\"\"\n\n    dataset = get_dataset(*dataset_name)\n\n    if sequence is not None:\n        dataset = [dataset[sequence]]\n\n    trackers = [Tracker(tracker_name, tracker_param, dataset_name, run_id)]\n\n    run_dataset(dataset, trackers, debug, threads, num_gpus=num_gpus)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Run tracker on sequence or dataset.')\n    parser.add_argument('tracker_name', type=str, help='Name of tracking method.')\n    parser.add_argument('tracker_param', type=str, help='Name of config file.')\n    parser.add_argument('--runid', type=int, default=None, help='The run id.')\n    parser.add_argument('--dataset_name', type=str, default='otb', help='Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).')\n    parser.add_argument('--sequence', type=str, default=None, help='Sequence number or name.')\n    parser.add_argument('--debug', type=int, default=0, help='Debug level.')\n    parser.add_argument('--threads', type=int, default=0, help='Number of threads.')\n    parser.add_argument('--num_gpus', type=int, default=8)\n\n    args = parser.parse_args()\n\n    try:\n        seq_name = int(args.sequence)\n    except:\n        seq_name = args.sequence\n\n    args.dataset_name = ['trackingnet', 'got10k_test', 'lasot']\n\n    run_tracker(args.tracker_name, args.tracker_param, args.runid, args.dataset_name, seq_name, args.debug,\n                args.threads, num_gpus=args.num_gpus)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "artrackv2_mindspore/tracking/train.py",
    "content": "import os\nimport argparse\nimport random\nimport torch\n\n\ndef parse_args():\n    \"\"\"\n    args for training.\n    \"\"\"\n    parser = argparse.ArgumentParser(description='Parse args for training')\n    # for train\n    parser.add_argument('--script', type=str, help='training script name')\n    parser.add_argument('--config', type=str, default='baseline', help='yaml configure file name')\n    parser.add_argument('--save_dir', type=str, help='root directory to save checkpoints, logs, and tensorboard')\n    parser.add_argument('--mode', type=str, choices=[\"single\", \"multiple\", \"multi_node\"], default=\"multiple\",\n                        help=\"train on single gpu or multiple gpus\")\n    parser.add_argument('--nproc_per_node', type=int, help=\"number of GPUs per node\")  # specify when mode is multiple\n    parser.add_argument('--use_lmdb', type=int, choices=[0, 1], default=0)  # whether datasets are in lmdb format\n    parser.add_argument('--script_prv', type=str, help='training script name')\n    parser.add_argument('--config_prv', type=str, default='baseline', help='yaml configure file name')\n    parser.add_argument('--use_wandb', type=int, choices=[0, 1], default=0)  # whether to use wandb\n    # for knowledge distillation\n    parser.add_argument('--distill', type=int, choices=[0, 1], default=0)  # whether to use knowledge distillation\n    parser.add_argument('--script_teacher', type=str, help='teacher script name')\n    parser.add_argument('--config_teacher', type=str, help='teacher yaml configure file name')\n\n    # for multiple machines\n    parser.add_argument('--rank', type=int, help='Rank of the current process.')\n    parser.add_argument('--world-size', type=int, help='Number of processes participating in the job.')\n    parser.add_argument('--ip', type=str, default='127.0.0.1', help='IP of the current rank 0.')\n    parser.add_argument('--port', type=int, default='20000', help='Port of the current rank 0.')\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    torch.set_num_threads(8)\n    args = parse_args()\n    if args.mode == \"single\":\n        train_cmd = \"python lib/train/run_training.py --script %s --config %s --save_dir %s --use_lmdb %d \" \\\n                    \"--script_prv %s --config_prv %s --distill %d --script_teacher %s --config_teacher %s --use_wandb %d\"\\\n                    % (args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv,\n                       args.distill, args.script_teacher, args.config_teacher, args.use_wandb)\n    elif args.mode == \"multiple\":\n        train_cmd = \"python -m torch.distributed.launch --nproc_per_node %d --master_port %d lib/train/run_training.py \" \\\n                    \"--script %s --config %s --save_dir %s --use_lmdb %d --script_prv %s --config_prv %s --use_wandb %d \" \\\n                    \"--distill %d --script_teacher %s --config_teacher %s\" \\\n                    % (args.nproc_per_node, random.randint(10000, 50000), args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.use_wandb,\n                       args.distill, args.script_teacher, args.config_teacher)\n    elif args.mode == \"multi_node\":\n        train_cmd = \"python -m torch.distributed.launch --nproc_per_node %d --master_addr %s --master_port %d --nnodes %d --node_rank %d lib/train/run_training.py \" \\\n                    \"--script %s --config %s --save_dir %s --use_lmdb %d --script_prv %s --config_prv %s 
--use_wandb %d \" \\\n                    \"--distill %d --script_teacher %s --config_teacher %s\" \\\n                    % (args.nproc_per_node, args.ip, args.port, args.world_size, args.rank, args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.use_wandb,\n                       args.distill, args.script_teacher, args.config_teacher)\n    else:\n        raise ValueError(\"mode should be 'single', 'multiple' or 'multi_node'.\")\n    os.system(train_cmd)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
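  {
    "path": "artrackv2_mindspore/tracking/train_example.py",
    "content": "# Illustrative launch sketch for tracking/train.py (editor-added example, not\n# part of the original repo). --script/--config must name an existing\n# experiments/<script>/<config>.yaml, e.g. experiments/artrack/artrack_256_got.yaml.\nimport subprocess\n\n# Single-machine multi-GPU launch: train.py assembles a\n# torch.distributed.launch command with a random master port and shells out.\nsubprocess.run(['python', 'tracking/train.py',\n                '--script', 'artrack', '--config', 'artrack_256_got',\n                '--save_dir', './output', '--mode', 'multiple',\n                '--nproc_per_node', '2'], check=True)\n"
  },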
  {
    "path": "artrackv2_mindspore/tracking/video_demo.py",
    "content": "import os\nimport sys\nimport argparse\n\nprj_path = os.path.join(os.path.dirname(__file__), '..')\nif prj_path not in sys.path:\n    sys.path.append(prj_path)\n\nfrom lib.test.evaluation import Tracker\n\n\ndef run_video(tracker_name, tracker_param, videofile, optional_box=None, debug=None, save_results=False):\n    \"\"\"Run the tracker on your webcam.\n    args:\n        tracker_name: Name of tracking method.\n        tracker_param: Name of parameter file.\n        debug: Debug level.\n    \"\"\"\n    tracker = Tracker(tracker_name, tracker_param, \"video\")\n    tracker.run_video(videofilepath=videofile, optional_box=optional_box, debug=debug, save_results=save_results)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Run the tracker on your webcam.')\n    parser.add_argument('tracker_name', type=str, help='Name of tracking method.')\n    parser.add_argument('tracker_param', type=str, help='Name of parameter file.')\n    parser.add_argument('videofile', type=str, help='path to a video file.')\n    parser.add_argument('--optional_box', type=float, default=None, nargs=\"+\", help='optional_box with format x y w h.')\n    parser.add_argument('--debug', type=int, default=0, help='Debug level.')\n    parser.add_argument('--save_results', dest='save_results', action='store_true', help='Save bounding boxes')\n    parser.set_defaults(save_results=False)\n\n    args = parser.parse_args()\n\n    run_video(args.tracker_name, args.tracker_param, args.videofile, args.optional_box, args.debug, args.save_results)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "artrackv2_mindspore/tracking/vis_results.py",
    "content": "import os\r\nimport sys\r\nimport time\r\n\r\nimport torch\r\nimport numpy as np\r\nimport cv2 as cv\r\nfrom tqdm import tqdm\r\n\r\nfrom lib.vis.visdom_cus import Visdom\r\n\r\nenv_path = os.path.join(os.path.dirname(__file__), '../lib')\r\nif env_path not in sys.path:\r\n    sys.path.append(env_path)\r\n\r\nfrom lib.test.evaluation import trackerlist, get_dataset\r\nfrom lib.test.utils.load_text import load_text\r\n\r\n\r\nclass VisResults(object):\r\n    def __init__(self):\r\n        self._init_visdom()\r\n\r\n    def vis_dataset(self, dataset, trackers, skip_missing_seq=False, seq_list=[]):\r\n        for seq_id, seq in enumerate(tqdm(dataset)):\r\n            # Load anno\r\n            seq_name = seq.name\r\n            if seq_list:\r\n                if seq_name not in seq_list:\r\n                    continue\r\n\r\n            anno_bb = torch.tensor(seq.ground_truth_rect)\r\n            target_visible = torch.tensor(seq.target_visible,\r\n                                          dtype=torch.uint8) if seq.target_visible is not None else None\r\n\r\n            all_pred_boxes = []\r\n\r\n            for trk_id, trk in enumerate(trackers):\r\n                # Load results\r\n                base_results_path = '{}/{}'.format(trk.results_dir, seq.name)\r\n                results_path = '{}.txt'.format(base_results_path)\r\n\r\n                if os.path.isfile(results_path):\r\n                    pred_bb = torch.tensor(load_text(str(results_path), delimiter=('\\t', ','), dtype=np.float64))\r\n                    all_pred_boxes.append(pred_bb)\r\n                else:\r\n                    if skip_missing_seq:\r\n                        break\r\n                    else:\r\n                        raise Exception('Result not found. 
{}'.format(results_path))\r\n\r\n            frame_list = seq.frames\r\n            for i in range(len(anno_bb)):\r\n                data = []\r\n                frame = frame_list[i]\r\n                im = cv.imread(frame)\r\n                im = cv.cvtColor(im, cv.COLOR_BGR2RGB)\r\n                # im = torch.from_numpy(im).float().permute(2, 0, 1)\r\n                # im = im.numpy()\r\n                data.append(im)\r\n\r\n                gt_box = anno_bb[i]\r\n                data.append(gt_box)\r\n                for tracker_result in all_pred_boxes:\r\n                    data.append(tracker_result[i])\r\n\r\n                while self.pause_mode:\r\n                    if self.step:\r\n                        self.step = False\r\n                        break\r\n\r\n                if self.next_seq:\r\n                    self.next_seq = False\r\n                    break\r\n\r\n                self.update_boxes(data, seq_name + '-' + str(i).zfill(3))\r\n                # self.update_seg_result(im, frame)\r\n\r\n    def update_boxes(self, data, caption):\r\n        caption = 'Green: GT, Red: stark_s, Yellow: stark_motion  _' + caption\r\n        self.visdom.register(data, 'Tracking', 1, 'Tracking', caption=caption)\r\n\r\n    def update_seg_result(self, frame_img, frame_path):\r\n        seg_mask_path = os.path.join(os.path.dirname(frame_path), 'seg_mask',\r\n                                     os.path.basename(frame_path).replace('jpg', 'png'))\r\n        seg_mask = cv.imread(seg_mask_path)\r\n        alpha = 0.5\r\n        out_img = (alpha * frame_img) + ((1 - alpha) * seg_mask)\r\n\r\n        if max(out_img.shape) > 480:\r\n            resize_factor = 480.0 / float(max(out_img.shape))\r\n            out_img = cv.resize(out_img, None, fx=resize_factor, fy=resize_factor)\r\n\r\n        out_img = torch.from_numpy(out_img).float().permute(2, 0, 1)\r\n        self.visdom.register(out_img, 'image', 1, 'Segmentation Result')\r\n\r\n    def _init_visdom(self, visdom_info=None):\r\n        visdom_info = {} if visdom_info is None else visdom_info\r\n        self.pause_mode = False\r\n        self.step = False\r\n        self.next_seq = False\r\n\r\n        try:\r\n            self.visdom = Visdom(1, {'handler': self._visdom_ui_handler, 'win_id': 'Tracking'},\r\n                                 visdom_info=visdom_info, env='vis_results')\r\n\r\n            # Show help\r\n            help_text = \"You can pause/unpause the tracker by pressing 'space' with the 'Tracking' window \" \\\r\n                        \"selected. During paused mode, you can track for one frame by pressing the right arrow key. \" \\\r\n                        \"To enable/disable plotting of a data block, tick/untick the corresponding entry in \" \\\r\n                        \"block list.\"\r\n            self.visdom.register(help_text, 'text', 1, 'Help')\r\n        except Exception:\r\n            time.sleep(0.5)\r\n            print('!!! WARNING: Visdom could not start, so using matplotlib visualization instead !!!\\n'\r\n                  '!!! 
Start Visdom in a separate terminal window by typing \\'visdom\\' !!!')\r\n\r\n    def _visdom_ui_handler(self, data):\r\n        if data['event_type'] == 'KeyPress':\r\n            if data['key'] == ' ':\r\n                self.pause_mode = not self.pause_mode\r\n\r\n            elif data['key'] == 'n':\r\n                self.next_seq = True\r\n\r\n            elif data['key'] == 'ArrowRight' and self.pause_mode:\r\n                self.step = True\r\n\r\n\r\nif __name__ == '__main__':\r\n    viser = VisResults()\r\n    dataset_name = 'lasot'\r\n\r\n    trackers = []\r\n    # trackers.extend(trackerlist('defor_stark_s', 'baseline_got10k_only', None, 'defor_stark'))\r\n    # trackers.extend(trackerlist('stark_s', 'baseline_got10k_only', None, 'stark'))\r\n    # trackers.extend(trackerlist('dimp', 'dimp50', dataset_name, None, 'DiMP50'))\r\n    # trackers.extend(trackerlist('sa', 'attn_direct', None, 'SA')) # ori\r\n    # trackers.extend(trackerlist('sa', 'attn1', None, 'SA')) # extrat conv to conver backbone feature from 1024 to 32\r\n    # trackers.extend(trackerlist('sa', 'attn_segaddlabel', None, 'SA')) # add label to seg mask\r\n\r\n    # trackers.extend(trackerlist('stark_s', 'baseline_got10k_only_encoder_only_ep500', None, 'STARK-S-Encoder-EP500'))\r\n    # trackers.extend(trackerlist('stark_motion', 'baseline_got10k_only_ep100_002', None, 'STARK-motion-EP100'))\r\n    # trackers.extend(\r\n    #     trackerlist('stark_motion', 'baseline_got10k_only_offset_ep100_001', None, 'STARK-motion-offset-EP100'))\r\n\r\n    # trackers.extend(trackerlist('stark_s', 'baseline', dataset_name, None, 'stark_s'))\r\n    # trackers.extend(trackerlist('stark_st', 'baseline', dataset_name, None, 'stark_st'))\r\n\r\n    # trackers.extend(trackerlist('stark_mem', 'baseline_roi', dataset_name, None, 'baseline'))\r\n    # trackers.extend(trackerlist('stark_mem', 'baseline_roi', dataset_name, 17, 'update_template'))\r\n    # trackers.extend(trackerlist('stark_mem', 'baseline_roi', dataset_name, 32, 'update_template'))\r\n    # trackers.extend(trackerlist('stark_mem', 'baseline_roi', dataset_name, 36, 'update_template'))\r\n    # trackers.extend(trackerlist('stark_mem', 'baseline_roi_ep300', dataset_name, None, 'update_template'))\r\n    # trackers.extend(trackerlist('stark_mem', 'baseline_roi_ep300', dataset_name, 6, 'update_template'))\r\n    # trackers.extend(trackerlist('stark_mem', 'baseline_roi_ep300', dataset_name, 55, 'update_template'))\r\n    # trackers.extend(trackerlist('stark_mem', 'baseline_roi_ep300', dataset_name, 57, 'update_template'))\r\n\r\n    # trackers.extend(trackerlist('stark_mem', 'baseline_roi_upsample_syncbn', dataset_name, None, 'baseline_roi_upsample_syncbn'))\r\n    # trackers.extend(trackerlist('stark_mem', 'baseline_roi', dataset, 9, 'update_template1'))\r\n    # trackers.extend(trackerlist('stark_mem', 'baseline_roi_ciou', dataset_name, None, 'update_template'))\r\n    # trackers.extend(trackerlist('stark_mem', 'baseline_roi_ciou', dataset_name, 1, 'update_template'))\r\n\r\n    trackers.extend(trackerlist('stark_mem', 'baseline_roi_ep500', dataset_name, None, 'baseline_roi_ep500'))\r\n\r\n    trackers.extend(trackerlist('vit_tracker', 'cait_small_224_ep300', dataset_name, None, 'cait_small_224_ep300'))\r\n    trackers.extend(trackerlist('vit_tracker', 'cait_small_224_fcos_new_32x4_ep300', dataset_name, None, 'cait_small_224_fcos_new_32x4_ep300'))\r\n    trackers.extend(trackerlist('vit_tracker', 'cait_small_224_fcos_cn_32x4_ep300_test_cn', dataset_name, None, 
'cait_small_224_fcos_cn_32x4_ep300_test_cn'))\r\n    # trackers.extend(trackerlist('vit_tracker', 'cait_small_224_vfloss_64x2_ep100', dataset_name, None, 'cait_small_224_vfloss_64x2_ep100'))\r\n    # trackers.extend(trackerlist('vit_tracker', 'cait_small_224_fcos_cn_32x4_ep300_retest', dataset_name, None, 'retest'))\r\n\r\n    dataset = get_dataset(dataset_name)\r\n\r\n    # trackers.extend(trackerlist('dimp', 'dimp50', None, 'DiMP50'))\r\n    # trackers.extend(trackerlist('sa', 'attn_direct', None, 'SA'))\r\n    # dataset = get_dataset('vot')\r\n    # 'GOT-10k_Train_001350'\r\n    viser.vis_dataset(dataset, trackers, seq_list=[])\r\n    # viser.vis_dataset(dataset, trackers, seq_list=['GOT-10k_Train_007446'])\r\n"
  },
  {
    "path": "experiments/artrack/artrack_256_full.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 3\n    FACTOR: 4.0\n    SCALE_JITTER: 0.25\n    SIZE: 256\n    NUMBER: 1\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 128\n  TRAIN:\n    DATASETS_NAME:\n    - LASOT\n    - GOT10K_vottrain\n    - COCO17\n    - TRACKINGNET\n    DATASETS_RATIO:\n    - 1\n    - 1\n    - 1\n    - 1\n    SAMPLE_PER_EPOCH: 60000\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_official_val\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  BINS: 400\n  RANGE: 2\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  BACKBONE:\n    TYPE: vit_base_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: PIX\n    NUM_CHANNELS: 768\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 50\n  EPOCH: 240\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 0.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.00008\n  LR_DROP_EPOCH: 192\n  NUM_WORKER: 4\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 10\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.1\n  VAL_EPOCH_INTERVAL: 20\n  WEIGHT_DECAY: 0.0001\n  AMP: False\nTEST:\n  EPOCH: 240\n  SEARCH_FACTOR: 4.0\n  SEARCH_SIZE: 256\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 128\n"
  },
  {
    "path": "experiments/artrack/artrack_256_got.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 3.0\n    FACTOR: 4.0\n    SCALE_JITTER: 0.25\n    SIZE: 256\n    NUMBER: 1\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 128\n  TRAIN:\n    DATASETS_NAME:\n      - GOT10K_train_full\n    DATASETS_RATIO:\n      - 1\n    SAMPLE_PER_EPOCH: 60000\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_official_val\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  BINS: 400\n  RANGE: 2\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  BACKBONE:\n    TYPE: vit_base_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: PIX\n    NUM_CHANNELS: 768\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 50\n  EPOCH: 120\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 0.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.00008\n  LR_DROP_EPOCH: 96\n  NUM_WORKER: 4\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 10\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.1\n  VAL_EPOCH_INTERVAL: 20\n  WEIGHT_DECAY: 0.0001\n  AMP: False\nTEST:\n  EPOCH: 120\n  SEARCH_FACTOR: 4.0\n  SEARCH_SIZE: 256\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 128\n"
  },
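  {
    "path": "experiments/artrack/load_config_example.py",
    "content": "# Illustrative sketch (editor-added, not part of the original repo) of how an\n# experiment yaml overrides the default config node, mirroring the pattern in\n# tracking/profile_model.py; it assumes a lib.config.artrack.config module\n# exists analogous to the ostrack one used there.\nimport importlib\n\nscript, config = 'artrack', 'artrack_256_got'\nconfig_module = importlib.import_module('lib.config.%s.config' % script)\ncfg = config_module.cfg\nconfig_module.update_config_from_file('experiments/%s/%s.yaml' % (script, config))\n\n# Values from the yaml are now visible on the shared cfg node.\nprint(cfg.TEST.SEARCH_SIZE)   # 256 for artrack_256_got\nprint(cfg.TRAIN.BATCH_SIZE)   # 50\n"
  },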
  {
    "path": "experiments/artrack/artrack_384_full.yaml",
    "content": "DATA:\r\n  MAX_SAMPLE_INTERVAL: 200\r\n  MEAN:\r\n  - 0.485\r\n  - 0.456\r\n  - 0.406\r\n  SEARCH:\r\n    CENTER_JITTER: 4.5\r\n    FACTOR: 5.0\r\n    SCALE_JITTER: 0.5\r\n    SIZE: 384\r\n    NUMBER: 1\r\n  STD:\r\n  - 0.229\r\n  - 0.224\r\n  - 0.225\r\n  TEMPLATE:\r\n    CENTER_JITTER: 0\r\n    FACTOR: 2.0\r\n    SCALE_JITTER: 0\r\n    SIZE: 192\r\n  TRAIN:\r\n    DATASETS_NAME:\r\n    - LASOT\r\n    - GOT10K_vottrain\r\n    - COCO17\r\n    - TRACKINGNET\r\n    DATASETS_RATIO:\r\n    - 1\r\n    - 1\r\n    - 1\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 60000\r\n  VAL:\r\n    DATASETS_NAME:\r\n    - GOT10K_official_val\r\n    DATASETS_RATIO:\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 10000\r\nMODEL:\r\n  BINS: 600\r\n  RANGE: 2\r\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\r\n  BACKBONE:\r\n    TYPE: vit_base_patch16_224\r\n    STRIDE: 16\r\n  HEAD:\r\n    TYPE: PIX\r\n    NUM_CHANNELS: 768\r\nTRAIN:\r\n  BACKBONE_MULTIPLIER: 0.1\r\n  DROP_PATH_RATE: 0.1\r\n  BATCH_SIZE: 48\r\n  EPOCH: 240\r\n  GIOU_WEIGHT: 2.0\r\n  L1_WEIGHT: 0.0\r\n  GRAD_CLIP_NORM: 0.1\r\n  LR: 0.00008\r\n  LR_DROP_EPOCH: 192\r\n  NUM_WORKER: 4\r\n  OPTIMIZER: ADAMW\r\n  PRINT_INTERVAL: 10\r\n  SCHEDULER:\r\n    TYPE: step\r\n    DECAY_RATE: 0.1\r\n  VAL_EPOCH_INTERVAL: 20\r\n  WEIGHT_DECAY: 0.0001\r\n  AMP: False\r\nTEST:\r\n  EPOCH: 240\r\n  SEARCH_FACTOR: 5.0\r\n  SEARCH_SIZE: 384\r\n  TEMPLATE_FACTOR: 2.0\r\n  TEMPLATE_SIZE: 192\r\n"
  },
  {
    "path": "experiments/artrack/artrack_large_384_full.yaml",
    "content": "DATA:\r\n  MAX_SAMPLE_INTERVAL: 200\r\n  MEAN:\r\n  - 0.485\r\n  - 0.456\r\n  - 0.406\r\n  SEARCH:\r\n    CENTER_JITTER: 4.5\r\n    FACTOR: 5.0\r\n    SCALE_JITTER: 0.5\r\n    SIZE: 384\r\n    NUMBER: 1\r\n  STD:\r\n  - 0.229\r\n  - 0.224\r\n  - 0.225\r\n  TEMPLATE:\r\n    CENTER_JITTER: 0\r\n    FACTOR: 2.0\r\n    SCALE_JITTER: 0\r\n    SIZE: 192\r\n  TRAIN:\r\n    DATASETS_NAME:\r\n    - LASOT\r\n    - GOT10K_vottrain\r\n    - COCO17\r\n    - TRACKINGNET\r\n    DATASETS_RATIO:\r\n    - 1\r\n    - 1\r\n    - 1\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 60000\r\n  VAL:\r\n    DATASETS_NAME:\r\n    - GOT10K_official_val\r\n    DATASETS_RATIO:\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 10000\r\nMODEL:\r\n  BINS: 600\r\n  RANGE: 2\r\n  PRETRAIN_FILE: \"mae_pretrain_vit_large.pth\"\r\n  BACKBONE:\r\n    TYPE: vit_large_patch16_224\r\n    STRIDE: 16\r\n  HEAD:\r\n    TYPE: PIX\r\n    NUM_CHANNELS: 1024\r\nTRAIN:\r\n  BACKBONE_MULTIPLIER: 0.1\r\n  DROP_PATH_RATE: 0.1\r\n  BATCH_SIZE: 15\r\n  EPOCH: 120\r\n  GIOU_WEIGHT: 2.0\r\n  L1_WEIGHT: 0.0\r\n  GRAD_CLIP_NORM: 0.1\r\n  LR: 0.00008\r\n  LR_DROP_EPOCH: 96\r\n  NUM_WORKER: 4\r\n  OPTIMIZER: ADAMW\r\n  PRINT_INTERVAL: 10\r\n  SCHEDULER:\r\n    TYPE: step\r\n    DECAY_RATE: 0.1\r\n  VAL_EPOCH_INTERVAL: 20\r\n  WEIGHT_DECAY: 0.0001\r\n  AMP: False\r\nTEST:\r\n  EPOCH: 120\r\n  SEARCH_FACTOR: 5.0\r\n  SEARCH_SIZE: 384\r\n  TEMPLATE_FACTOR: 2.0\r\n  TEMPLATE_SIZE: 192\r\n"
  },
  {
    "path": "experiments/artrack_seq/artrack_seq_256_full.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 3\n    FACTOR: 4.0\n    SCALE_JITTER: 0.25\n    SIZE: 256\n    NUMBER: 36\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 128\n  TRAIN:\n    DATASETS_NAME:\n    - LASOT\n    - GOT10K_vottrain\n    - TRACKINGNET\n    DATASETS_RATIO:\n    - 1\n    - 1\n    - 1\n    SAMPLE_PER_EPOCH: 1000\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_official_val\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  BINS: 400\n  RANGE: 2\n  PRENUM: 7\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  PRETRAIN_PTH: \"\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  BACKBONE:\n    TYPE: vit_base_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: PIX\n    NUM_CHANNELS: 768\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 8\n  EPOCH: 60\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 0.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.000004\n  LR_DROP_EPOCH: 999\n  NUM_WORKER: 4\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 1\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.1\n  VAL_EPOCH_INTERVAL: 10\n  WEIGHT_DECAY: 0.05\n  AMP: False\nTEST:\n  EPOCH: 60\n  SEARCH_FACTOR: 4.0\n  SEARCH_SIZE: 256\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 128\n"
  },
  {
    "path": "experiments/artrack_seq/artrack_seq_256_got.yaml",
    "content": "DATA:\r\n  MAX_SAMPLE_INTERVAL: 200\r\n  MEAN:\r\n  - 0.485\r\n  - 0.456\r\n  - 0.406\r\n  SEARCH:\r\n    CENTER_JITTER: 3\r\n    FACTOR: 4.0\r\n    SCALE_JITTER: 0.25\r\n    SIZE: 256\r\n    NUMBER: 36\r\n  STD:\r\n  - 0.229\r\n  - 0.224\r\n  - 0.225\r\n  TEMPLATE:\r\n    CENTER_JITTER: 0\r\n    FACTOR: 2.0\r\n    SCALE_JITTER: 0\r\n    SIZE: 128\r\n  TRAIN:\r\n    DATASETS_NAME:\r\n    - GOT10K_train_full\r\n    DATASETS_RATIO:\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 1000\r\n  VAL:\r\n    DATASETS_NAME:\r\n    - GOT10K_official_val\r\n    DATASETS_RATIO:\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 10000\r\nMODEL:\r\n  BINS: 400\r\n  RANGE: 2\r\n  PRENUM: 7\r\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\r\n  PRETRAIN_PTH: \"\"\r\n  EXTRA_MERGER: False\r\n  RETURN_INTER: False\r\n  BACKBONE:\r\n    TYPE: vit_base_patch16_224\r\n    STRIDE: 16\r\n  HEAD:\r\n    TYPE: PIX\r\n    NUM_CHANNELS: 768\r\nTRAIN:\r\n  BACKBONE_MULTIPLIER: 0.1\r\n  DROP_PATH_RATE: 0.1\r\n  BATCH_SIZE: 8\r\n  EPOCH: 30\r\n  GIOU_WEIGHT: 2.0\r\n  L1_WEIGHT: 0.0\r\n  GRAD_CLIP_NORM: 0.1\r\n  LR: 0.000004\r\n  LR_DROP_EPOCH: 999\r\n  NUM_WORKER: 4\r\n  OPTIMIZER: ADAMW\r\n  PRINT_INTERVAL: 1\r\n  SCHEDULER:\r\n    TYPE: step\r\n    DECAY_RATE: 0.1\r\n  VAL_EPOCH_INTERVAL: 10\r\n  WEIGHT_DECAY: 0.05\r\n  AMP: False\r\nTEST:\r\n  EPOCH: 30\r\n  SEARCH_FACTOR: 4.0\r\n  SEARCH_SIZE: 256\r\n  TEMPLATE_FACTOR: 2.0\r\n  TEMPLATE_SIZE: 128\r\n"
  },
  {
    "path": "experiments/artrack_seq/artrack_seq_384_full.yaml",
    "content": "DATA:\r\n  MAX_SAMPLE_INTERVAL: 200\r\n  MEAN:\r\n  - 0.485\r\n  - 0.456\r\n  - 0.406\r\n  SEARCH:\r\n    CENTER_JITTER: 4.5\r\n    FACTOR: 5.0\r\n    SCALE_JITTER: 0.5\r\n    SIZE: 384\r\n    NUMBER: 32\r\n  STD:\r\n  - 0.229\r\n  - 0.224\r\n  - 0.225\r\n  TEMPLATE:\r\n    CENTER_JITTER: 0\r\n    FACTOR: 2.0\r\n    SCALE_JITTER: 0\r\n    SIZE: 192\r\n  TRAIN:\r\n    DATASETS_NAME:\r\n    - LASOT\r\n    - GOT10K_vottrain\r\n    - TRACKINGNET\r\n    DATASETS_RATIO:\r\n    - 1\r\n    - 1\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 1000\r\n  VAL:\r\n    DATASETS_NAME:\r\n    - GOT10K_official_val\r\n    DATASETS_RATIO:\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 10000\r\nMODEL:\r\n  BINS: 600\r\n  RANGE: 2\r\n  PRENUM: 7\r\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\r\n  PRETRAIN_PTH: \"\"\r\n  EXTRA_MERGER: False\r\n  RETURN_INTER: False\r\n  BACKBONE:\r\n    TYPE: vit_base_patch16_224\r\n    STRIDE: 16\r\n  HEAD:\r\n    TYPE: PIX\r\n    NUM_CHANNELS: 768\r\nTRAIN:\r\n  BACKBONE_MULTIPLIER: 0.1\r\n  DROP_PATH_RATE: 0.1\r\n  BATCH_SIZE: 8\r\n  EPOCH: 60\r\n  GIOU_WEIGHT: 2.0\r\n  L1_WEIGHT: 0.0\r\n  GRAD_CLIP_NORM: 0.1\r\n  LR: 0.000004\r\n  LR_DROP_EPOCH: 999\r\n  NUM_WORKER: 4\r\n  OPTIMIZER: ADAMW\r\n  PRINT_INTERVAL: 1\r\n  SCHEDULER:\r\n    TYPE: step\r\n    DECAY_RATE: 0.1\r\n  VAL_EPOCH_INTERVAL: 10\r\n  WEIGHT_DECAY: 0.05\r\n  AMP: False\r\nTEST:\r\n  EPOCH: 60\r\n  SEARCH_FACTOR: 5.0\r\n  SEARCH_SIZE: 384\r\n  TEMPLATE_FACTOR: 2.0\r\n  TEMPLATE_SIZE: 192\r\n"
  },
  {
    "path": "experiments/artrack_seq/artrack_seq_large_384_full.yaml",
    "content": "DATA:\r\n  MAX_SAMPLE_INTERVAL: 200\r\n  MEAN:\r\n  - 0.485\r\n  - 0.456\r\n  - 0.406\r\n  SEARCH:\r\n    CENTER_JITTER: 4.5\r\n    FACTOR: 5.0\r\n    SCALE_JITTER: 0.5\r\n    SIZE: 384\r\n    NUMBER: 15\r\n  STD:\r\n  - 0.229\r\n  - 0.224\r\n  - 0.225\r\n  TEMPLATE:\r\n    CENTER_JITTER: 0\r\n    FACTOR: 2.0\r\n    SCALE_JITTER: 0\r\n    SIZE: 192\r\n  TRAIN:\r\n    DATASETS_NAME:\r\n    - LASOT\r\n    - GOT10K_vottrain\r\n    - TRACKINGNET\r\n    DATASETS_RATIO:\r\n    - 1\r\n    - 1\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 1000\r\n  VAL:\r\n    DATASETS_NAME:\r\n    - GOT10K_official_val\r\n    DATASETS_RATIO:\r\n    - 1\r\n    SAMPLE_PER_EPOCH: 10000\r\nMODEL:\r\n  BINS: 600\r\n  RANGE: 2\r\n  PRENUM: 7\r\n  PRETRAIN_FILE: \"mae_pretrain_vit_large.pth\"\r\n  PRETRAIN_PTH: \"\"\r\n  EXTRA_MERGER: False\r\n  RETURN_INTER: False\r\n  BACKBONE:\r\n    TYPE: vit_large_patch16_224\r\n    STRIDE: 16\r\n  HEAD:\r\n    TYPE: PIX\r\n    NUM_CHANNELS: 1024\r\nTRAIN:\r\n  BACKBONE_MULTIPLIER: 0.1\r\n  DROP_PATH_RATE: 0.1\r\n  BATCH_SIZE: 8\r\n  EPOCH: 60\r\n  GIOU_WEIGHT: 2.0\r\n  L1_WEIGHT: 0.0\r\n  GRAD_CLIP_NORM: 0.1\r\n  LR: 0.000004\r\n  LR_DROP_EPOCH: 999\r\n  NUM_WORKER: 4\r\n  OPTIMIZER: ADAMW\r\n  PRINT_INTERVAL: 1\r\n  SCHEDULER:\r\n    TYPE: step\r\n    DECAY_RATE: 0.1\r\n  VAL_EPOCH_INTERVAL: 10\r\n  WEIGHT_DECAY: 0.05\r\n  AMP: False\r\nTEST:\r\n  EPOCH: 60\r\n  SEARCH_FACTOR: 5.0\r\n  SEARCH_SIZE: 384\r\n  TEMPLATE_FACTOR: 2.0\r\n  TEMPLATE_SIZE: 192\r\n"
  },
  {
    "path": "experiments/artrackv2/artrackv2_256_full.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 3\n    FACTOR: 4.0\n    SCALE_JITTER: 0.25\n    SIZE: 256\n    NUMBER: 1\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 128\n    NUMBER: 2\n  TRAIN:\n    DATASETS_NAME:\n    - LASOT\n    - GOT10K_vottrain\n    - COCO17\n    - TRACKINGNET\n    DATASETS_RATIO:\n    - 1\n    - 1\n    - 1\n    - 1\n    SAMPLE_PER_EPOCH: 76800\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_votval\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  BINS: 400\n  RANGE: 2\n  EXTENSION: 3\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  BACKBONE:\n    TYPE: vit_base_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: PIX\n    NUM_CHANNELS: 768\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 32\n  EPOCH: 300\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 0.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.00008\n  LR_DROP_EPOCH: 240\n  NUM_WORKER: 6\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 10\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.1\n  VAL_EPOCH_INTERVAL: 20\n  WEIGHT_DECAY: 0.0001\n  AMP: False\nTEST:\n  EPOCH: 118\n  SEARCH_FACTOR: 4.0\n  SEARCH_SIZE: 256\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 128\n"
  },
  {
    "path": "experiments/artrackv2/artrackv2_256_got.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 3\n    FACTOR: 4.0\n    SCALE_JITTER: 0.25\n    SIZE: 256\n    NUMBER: 1\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 128\n    NUMBER: 2\n  TRAIN:\n    DATASETS_NAME:\n      - GOT10K_train_full\n    DATASETS_RATIO:\n      - 1\n    SAMPLE_PER_EPOCH: 76800\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_votval\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  BINS: 400\n  RANGE: 2\n  EXTENSION: 3\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  BACKBONE:\n    TYPE: vit_base_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: PIX\n    NUM_CHANNELS: 768\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 32\n  EPOCH: 120\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 0.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.00008\n  LR_DROP_EPOCH: 96\n  NUM_WORKER: 6\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 10\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.1\n  VAL_EPOCH_INTERVAL: 20\n  WEIGHT_DECAY: 0.0001\n  AMP: False\nTEST:\n  EPOCH: 118\n  SEARCH_FACTOR: 4.0\n  SEARCH_SIZE: 256\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 128\n"
  },
  {
    "path": "experiments/artrackv2/artrackv2_large_384_got.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 4.5\n    FACTOR: 5.0\n    SCALE_JITTER: 0.5\n    SIZE: 384\n    NUMBER: 1\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 192\n    NUMBER: 2\n  TRAIN:\n    DATASETS_NAME:\n    - GOT10K_train_full\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 57600\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_votval\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  BINS: 600\n  RANGE: 2\n  EXTENSION: 6\n  PRETRAIN_FILE: \"mae_pretrain_vit_large.pth\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  BACKBONE:\n    TYPE: vit_large_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: PIX\n    NUM_CHANNELS: 1024\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 12\n  EPOCH: 100\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 0.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.000008\n  LR_DROP_EPOCH: 80\n  NUM_WORKER: 8\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 10\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.1\n  VAL_EPOCH_INTERVAL: 20\n  WEIGHT_DECAY: 0.0001\n  AMP: False\nTEST:\n  EPOCH: 100\n  SEARCH_FACTOR: 5.0\n  SEARCH_SIZE: 384\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 192\n"
  },
  {
    "path": "experiments/artrackv2_seq/artrackv2_seq_256_full.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 3\n    FACTOR: 4.0\n    SCALE_JITTER: 0.25\n    SIZE: 256\n    NUMBER: 24\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 128\n    NUMBER: 2\n  TRAIN:\n    DATASETS_NAME:\n    - LASOT\n    - GOT10K_vottrain\n    - TRACKINGNET\n    DATASETS_RATIO:\n    - 1\n    - 1\n    - 1\n    SAMPLE_PER_EPOCH: 1000\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_official_val\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  BINS: 400\n  RANGE: 2\n  EXTENSION: 3\n  PRENUM: 7\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  PRETRAIN_PTH: \"\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  DECODER:\n    TYPE: \"mask\"\n    MASK_RATIO: 0.75\n    EMBEDDIM: 512\n    DEPTH: 8\n    NUMHEADS: 16\n    MLPRATIO: 4\n  BACKBONE:\n    TYPE: vit_base_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: PIX\n    NUM_CHANNELS: 768\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 8\n  EPOCH: 40\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 0.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.00008\n  LR_DROP_EPOCH: 999\n  NUM_WORKER: 6\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 1\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.05\n  VAL_EPOCH_INTERVAL: 10\n  WEIGHT_DECAY: 0.05\n  AMP: False\nTEST:\n  EPOCH: 40\n  SEARCH_FACTOR: 4.0\n  SEARCH_SIZE: 256\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 128\n"
  },
  {
    "path": "experiments/artrackv2_seq/artrackv2_seq_256_got.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 3\n    FACTOR: 4.0\n    SCALE_JITTER: 0.25\n    SIZE: 256\n    NUMBER: 24\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 128\n    NUMBER: 2\n  TRAIN:\n    DATASETS_NAME:\n    - GOT10K_train_full\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 1000\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_official_val\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  BINS: 400\n  RANGE: 2\n  EXTENSION: 3\n  PRENUM: 7\n  PRETRAIN_FILE: \"mae_pretrain_vit_base.pth\"\n  PRETRAIN_PTH: \"\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  DECODER:\n    TYPE: \"mask\"\n    MASK_RATIO: 0.75\n    EMBEDDIM: 512\n    DEPTH: 8\n    NUMHEADS: 16\n    MLPRATIO: 4\n  BACKBONE:\n    TYPE: vit_base_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: PIX\n    NUM_CHANNELS: 768\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 8\n  EPOCH: 30\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 0.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.00008\n  LR_DROP_EPOCH: 999\n  NUM_WORKER: 6\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 1\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.05\n  VAL_EPOCH_INTERVAL: 10\n  WEIGHT_DECAY: 0.05\n  AMP: False\nTEST:\n  EPOCH: 30\n  SEARCH_FACTOR: 3.95\n  SEARCH_SIZE: 256\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 128\n"
  },
  {
    "path": "experiments/artrackv2_seq/artrackv2_seq_large_384_got.yaml",
    "content": "DATA:\n  MAX_SAMPLE_INTERVAL: 200\n  MEAN:\n  - 0.485\n  - 0.456\n  - 0.406\n  SEARCH:\n    CENTER_JITTER: 4.5\n    FACTOR: 5.0\n    SCALE_JITTER: 0.5\n    SIZE: 384\n    NUMBER: 12\n  STD:\n  - 0.229\n  - 0.224\n  - 0.225\n  TEMPLATE:\n    CENTER_JITTER: 0\n    FACTOR: 2.0\n    SCALE_JITTER: 0\n    SIZE: 192\n    NUMBER: 2\n  TRAIN:\n    - LASOT\n    - GOT10K_vottrain\n    - TRACKINGNET\n    - SAV\n    DATASETS_RATIO:\n    - 1\n    - 1\n    - 1\n    - 4\n    SAMPLE_PER_EPOCH: 1000\n  VAL:\n    DATASETS_NAME:\n    - GOT10K_official_val\n    DATASETS_RATIO:\n    - 1\n    SAMPLE_PER_EPOCH: 10000\nMODEL:\n  BINS: 600\n  RANGE: 2\n  EXTENSION: 6\n  PRENUM: 7\n  PRETRAIN_FILE: \"mae_pretrain_vit_large.pth\"\n  PRETRAIN_PTH: \"\"\n  EXTRA_MERGER: False\n  RETURN_INTER: False\n  DECODER:\n    TYPE: \"mask\"\n    MASK_RATIO: 0.75\n    EMBEDDIM: 512\n    DEPTH: 8\n    NUMHEADS: 16\n    MLPRATIO: 4\n  BACKBONE:\n    TYPE: vit_base_patch16_224\n    STRIDE: 16\n  HEAD:\n    TYPE: PIX\n    NUM_CHANNELS: 1024\nTRAIN:\n  BACKBONE_MULTIPLIER: 0.1\n  DROP_PATH_RATE: 0.1\n  BATCH_SIZE: 8\n  EPOCH: 120\n  GIOU_WEIGHT: 2.0\n  L1_WEIGHT: 0.0\n  GRAD_CLIP_NORM: 0.1\n  LR: 0.00008\n  LR_DROP_EPOCH: 60\n  NUM_WORKER: 6\n  OPTIMIZER: ADAMW\n  PRINT_INTERVAL: 1\n  SCHEDULER:\n    TYPE: step\n    DECAY_RATE: 0.05\n  VAL_EPOCH_INTERVAL: 10\n  WEIGHT_DECAY: 0.05\n  AMP: False\nTEST:\n  EPOCH: 30\n  SEARCH_FACTOR: 4.55\n  SEARCH_SIZE: 384\n  TEMPLATE_FACTOR: 2.0\n  TEMPLATE_SIZE: 192\n"
  },
  {
    "path": "external/AR/README.md",
    "content": "# Alpha-Refine\n## Introduction\nAlpha-Refine is the winner of the VOT Real-Time Challenge 2020, which has great ability to predict high-quality masks. \nIn this work, we combine the STARK tracker with Alpha-Refine to test on the VOT2020 benchamark.\n\n## Installation\nAfter the environment has been installed according to the README.md of STARK, you only need to install a few more packages as shown below.\n\n* Install ninja-build for Precise ROI pooling  \n```bash\nsudo apt-get install ninja-build\n```\nIn case of issues, we refer to https://github.com/vacancy/PreciseRoIPooling.\n\n* Install the Precise ROI pooling\n```\ncd ltr/external\ngit clone https://github.com/vacancy/PreciseRoIPooling.git\ncd ../..\n```\n* Add the project path to environment variables\n```\nexport PYTHONPATH=<absolute_path_of_AR>:$PYTHONPATH\n```\n\n* Setup the environment  \n\nCreate the default environment setting files. \n```bash\n# Environment settings for pytracking. Saved at pytracking/evaluation/local.py\npython -c \"from pytracking.evaluation.environment import create_default_local_file; create_default_local_file()\"\n\n# Environment settings for ltr. Saved at ltr/admin/local.py\npython -c \"from ltr.admin.environment import create_default_local_file; create_default_local_file()\"\n```\n\nYou can modify these files to set the paths to datasets, results paths etc.  \n\n* Download the pre-trained Alpha-Refine network  \nDownload the network for [Alpha-Refine](https://drive.google.com/open?id=1qOQRfaRMbQ2nmgX1NFjoQHfXOAn609QM) \nand put it under the ltr/checkpoints/ltr/ARcm_seg/ARcm_coco_seg_only_mask_384 dir.\n\n"
  },
  {
    "path": "external/AR/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/ltr/README.md",
    "content": "# LTR\n\nA general PyTorch based framework for learning tracking representations. \n## Table of Contents\n\n* [Quick Start](#quick-start)\n* [Overview](#overview)\n* [Trackers](#trackers)\n   * [PrDiMP](#PrDiMP)\n   * [DiMP](#DiMP)\n   * [ATOM](#ATOM)\n* [Training your own networks](#training-your-own-networks)\n\n## Quick Start\nThe installation script will automatically generate a local configuration file  \"admin/local.py\". In case the file was not generated, run ```admin.environment.create_default_local_file()``` to generate it. Next, set the paths to the training workspace, \ni.e. the directory where the checkpoints will be saved. Also set the paths to the datasets you want to use. If all the dependencies have been correctly installed, you can train a network using the run_training.py script in the correct conda environment.  \n```bash\nconda activate pytracking\npython run_training.py train_module train_name\n```\nHere, ```train_module``` is the sub-module inside ```train_settings``` and ```train_name``` is the name of the train setting file to be used.\n\nFor example, you can train using the included default ATOM settings by running:\n```bash\npython run_training bbreg atom_default\n```\n\n\n## Overview\nThe framework consists of the following sub-modules.  \n - [actors](actors): Contains the actor classes for different trainings. The actor class is responsible for passing the input data through the network can calculating losses.  \n - [admin](admin): Includes functions for loading networks, tensorboard etc. and also contains environment settings.  \n - [dataset](dataset): Contains integration of a number of training datasets, namely [TrackingNet](https://tracking-net.org/), [GOT-10k](http://got-10k.aitestunion.com/), [LaSOT](https://cis.temple.edu/lasot/), \n [ImageNet-VID](http://image-net.org/), [DAVIS](https://davischallenge.org), [YouTube-VOS](https://youtube-vos.org), [MS-COCO](http://cocodataset.org/#home), [SBD](http://home.bharathh.info/pubs/codes/SBD), [LVIS](https://www.lvisdataset.org), [ECSSD](http://www.cse.cuhk.edu.hk/leojia/projects/hsaliency/dataset.html), [MSRA10k](https://mmcheng.net/msra10k), and [HKU-IS](https://sites.google.com/site/ligb86/hkuis). Additionally, it includes modules to generate synthetic videos from image datasets. \n - [data_specs](data_specs): Information about train/val splits of different datasets.   \n - [data](data): Contains functions for processing data, e.g. loading images, data augmentations, sampling frames from videos.  \n - [external](external): External libraries needed for training. Added as submodules.  \n - [models](models): Contains different layers and network definitions.  \n - [trainers](trainers): The main class which runs the training.  \n - [train_settings](train_settings): Contains settings files, specifying the training of a network.   \n \n## Trackers\n The framework currently contains the training code for the following trackers.\n\n### PrDiMP\n The following setting files can be used train the DiMP networks, or to know the exact training details. \n - [dimp.prdimp18](train_settings/dimp/prdimp18.py): The default settings used for training the PrDiMP model with ResNet-18 backbone.\n - [dimp.prdimp50](train_settings/dimp/prdimp50.py): The default settings used for training the PrDiMP model with ResNet-50 backbone. \n - [dimp.super_dimp](train_settings/dimp/super_dimp.py): Combines the bounding-box regressor of PrDiMP with the standard DiMP classifier and better training and inference settings. 
\n \n### DiMP\n The following setting files can be used to train the DiMP networks, or to see the exact training details. \n - [dimp.dimp18](train_settings/dimp/dimp18.py): The default settings used for training the DiMP model with ResNet-18 backbone.\n - [dimp.dimp50](train_settings/dimp/dimp50.py): The default settings used for training the DiMP model with ResNet-50 backbone.\n \n### ATOM\n The following setting files can be used to train the ATOM network, or to see the exact training details. \n - [bbreg.atom_paper](train_settings/bbreg/atom_paper.py): The settings used in the paper for training the network in ATOM.\n - [bbreg.atom](train_settings/bbreg/atom.py): Newer settings used for training the network in ATOM, also utilizing the GOT10k dataset.\n - [bbreg.atom_prob_ml](train_settings/bbreg/atom_prob_ml.py): Settings for ATOM with the probabilistic bounding box regression proposed in [this paper](https://arxiv.org/abs/1909.12297). \n - [bbreg.atom_paper](train_settings/bbreg/atom_paper.py): The baseline ATOM* setting evaluated in [this paper](https://arxiv.org/abs/1909.12297).  \n \n## Training your own networks\nTo train a custom network using the toolkit, the following components need to be specified in the train settings. For reference, see [atom.py](train_settings/bbreg/atom.py).  \n- Datasets: The datasets to be used for training. A number of standard tracking datasets are already available in the ```dataset``` module.  \n- Processing: This function should perform the necessary post-processing of the data, e.g. cropping of target region, data augmentations etc.  \n- Sampler: Determines how the frames are sampled from a video sequence to form the batches.  \n- Network: The network module to be trained.  \n- Objective: The training objective.  \n- Actor: The trainer passes the training batch to the actor, which is responsible for passing the data through the network correctly and calculating the training loss.  \n- Optimizer: Optimizer to be used, e.g. Adam.  \n- Trainer: The main class which runs the epochs and saves checkpoints. \n \n\n "
  },
  {
    "path": "external/AR/ltr/__init__.py",
    "content": "from .admin.loading import load_network\nfrom .admin.model_constructor import model_constructor\nfrom .admin.multigpu import MultiGPU"
  },
  {
    "path": "external/AR/ltr/actors/__init__.py",
    "content": "from .base_actor import BaseActor\nfrom .bbreg import AtomActor\nfrom .tracking import DiMPActor"
  },
  {
    "path": "external/AR/ltr/actors/base_actor.py",
    "content": "from pytracking import TensorDict\n\n\nclass BaseActor:\n    \"\"\" Base class for actor. The actor class handles the passing of the data through the network\n    and calculation the loss\"\"\"\n    def __init__(self, net, objective):\n        \"\"\"\n        args:\n            net - The network to train\n            objective - The loss function\n        \"\"\"\n        self.net = net\n        self.objective = objective\n\n    def __call__(self, data: TensorDict):\n        \"\"\" Called in each training iteration. Should pass in input data through the network, calculate the loss, and\n        return the training stats for the input data\n        args:\n            data - A TensorDict containing all the necessary data blocks.\n\n        returns:\n            loss    - loss for the input data\n            stats   - a dict containing detailed losses\n        \"\"\"\n        raise NotImplementedError\n\n    def to(self, device):\n        \"\"\" Move the network to device\n        args:\n            device - device to use. 'cpu' or 'cuda'\n        \"\"\"\n        self.net.to(device)\n\n    def train(self, mode=True):\n        \"\"\" Set whether the network is in train mode.\n        args:\n            mode (True) - Bool specifying whether in training mode.\n        \"\"\"\n        self.net.train(mode)\n\n    def eval(self):\n        \"\"\" Set network to eval mode\"\"\"\n        self.train(False)"
  },
  {
    "path": "external/AR/ltr/actors/bbreg.py",
    "content": "from . import BaseActor\n\n\nclass AtomActor(BaseActor):\n    \"\"\" Actor for training the IoU-Net in ATOM\"\"\"\n    def __call__(self, data):\n        \"\"\"\n        args:\n            data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno',\n                    'test_proposals' and 'proposal_iou'.\n\n        returns:\n            loss    - the training loss\n            states  -  dict containing detailed losses\n        \"\"\"\n        # Run network to obtain IoU prediction for each proposal in 'test_proposals'\n        iou_pred = self.net(data['train_images'], data['test_images'], data['train_anno'], data['test_proposals'])\n\n        iou_pred = iou_pred.view(-1, iou_pred.shape[2])\n        iou_gt = data['proposal_iou'].view(-1, data['proposal_iou'].shape[2])\n\n        # Compute loss\n        loss = self.objective(iou_pred, iou_gt)\n\n        # Return training stats\n        stats = {'Loss/total': loss.item(),\n                 'Loss/iou': loss.item()}\n\n        return loss, stats\n\n\nclass AtomBBKLActor(BaseActor):\n    \"\"\" Actor for training the IoU-Net in ATOM with BBKL\"\"\"\n    def __call__(self, data):\n        \"\"\"\n        args:\n            data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno',\n                    'test_proposals', 'proposal_density', and 'gt_density'.\n\n        returns:\n            loss    - the training loss\n            states  -  dict containing detailed losses\n        \"\"\"\n        # Run network to obtain IoU prediction for each proposal in 'test_proposals'\n        bb_scores = self.net(data['train_images'], data['test_images'], data['train_anno'], data['test_proposals'])\n\n        bb_scores = bb_scores.view(-1, bb_scores.shape[2])\n        proposal_density = data['proposal_density'].view(-1, data['proposal_density'].shape[2])\n        gt_density = data['gt_density'].view(-1, data['gt_density'].shape[2])\n\n        # Compute loss\n        loss = self.objective(bb_scores, sample_density=proposal_density, gt_density=gt_density, mc_dim=1)\n\n        # Return training stats\n        stats = {'Loss/total': loss.item(),\n                 'Loss/bb_ce': loss.item()}\n\n        return loss, stats\n"
  },
  {
    "path": "external/AR/ltr/actors/tracking.py",
    "content": "from . import BaseActor\nimport torch\n\n\nclass DiMPActor(BaseActor):\n    \"\"\"Actor for training the DiMP network.\"\"\"\n    def __init__(self, net, objective, loss_weight=None):\n        super().__init__(net, objective)\n        if loss_weight is None:\n            loss_weight = {'iou': 1.0, 'test_clf': 1.0}\n        self.loss_weight = loss_weight\n\n    def __call__(self, data):\n        \"\"\"\n        args:\n            data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno',\n                    'test_proposals', 'proposal_iou' and 'test_label'.\n\n        returns:\n            loss    - the training loss\n            stats  -  dict containing detailed losses\n        \"\"\"\n        # Run network\n        target_scores, iou_pred = self.net(train_imgs=data['train_images'],\n                                           test_imgs=data['test_images'],\n                                           train_bb=data['train_anno'],\n                                           test_proposals=data['test_proposals'])\n\n        # Classification losses for the different optimization iterations\n        clf_losses_test = [self.objective['test_clf'](s, data['test_label'], data['test_anno']) for s in target_scores]\n\n        # Loss of the final filter\n        clf_loss_test = clf_losses_test[-1]\n        loss_target_classifier = self.loss_weight['test_clf'] * clf_loss_test\n\n        # Compute loss for ATOM IoUNet\n        loss_iou = self.loss_weight['iou'] * self.objective['iou'](iou_pred, data['proposal_iou'])\n\n        # Loss for the initial filter iteration\n        loss_test_init_clf = 0\n        if 'test_init_clf' in self.loss_weight.keys():\n            loss_test_init_clf = self.loss_weight['test_init_clf'] * clf_losses_test[0]\n\n        # Loss for the intermediate filter iterations\n        loss_test_iter_clf = 0\n        if 'test_iter_clf' in self.loss_weight.keys():\n            test_iter_weights = self.loss_weight['test_iter_clf']\n            if isinstance(test_iter_weights, list):\n                loss_test_iter_clf = sum([a*b for a, b in zip(test_iter_weights, clf_losses_test[1:-1])])\n            else:\n                loss_test_iter_clf = (test_iter_weights / (len(clf_losses_test) - 2)) * sum(clf_losses_test[1:-1])\n\n        # Total loss\n        loss = loss_iou + loss_target_classifier + loss_test_init_clf + loss_test_iter_clf\n\n        # Log stats\n        stats = {'Loss/total': loss.item(),\n                 'Loss/iou': loss_iou.item(),\n                 'Loss/target_clf': loss_target_classifier.item()}\n        if 'test_init_clf' in self.loss_weight.keys():\n            stats['Loss/test_init_clf'] = loss_test_init_clf.item()\n        if 'test_iter_clf' in self.loss_weight.keys():\n            stats['Loss/test_iter_clf'] = loss_test_iter_clf.item()\n        stats['ClfTrain/test_loss'] = clf_loss_test.item()\n        if len(clf_losses_test) > 0:\n            stats['ClfTrain/test_init_loss'] = clf_losses_test[0].item()\n            if len(clf_losses_test) > 2:\n                stats['ClfTrain/test_iter_loss'] = sum(clf_losses_test[1:-1]).item() / (len(clf_losses_test) - 2)\n\n        return loss, stats\n\n\nclass KLDiMPActor(BaseActor):\n    \"\"\"Actor for training the DiMP network.\"\"\"\n    def __init__(self, net, objective, loss_weight=None):\n        super().__init__(net, objective)\n        if loss_weight is None:\n            loss_weight = {'bb_ce': 1.0}\n        self.loss_weight = loss_weight\n\n    def __call__(self, 
data):\n        \"\"\"\n        args:\n            data - The input data, should contain the fields 'train_images', 'test_images', 'train_anno',\n                    'test_proposals', 'proposal_iou' and 'test_label'.\n\n        returns:\n            loss    - the training loss\n            stats  -  dict containing detailed losses\n        \"\"\"\n        # Run network\n        target_scores, bb_scores = self.net(train_imgs=data['train_images'],\n                                            test_imgs=data['test_images'],\n                                            train_bb=data['train_anno'],\n                                            test_proposals=data['test_proposals'])\n\n        # Reshape bb reg variables\n        is_valid = data['test_anno'][:, :, 0] < 99999.0\n        bb_scores = bb_scores[is_valid, :]\n        proposal_density = data['proposal_density'][is_valid, :]\n        gt_density = data['gt_density'][is_valid, :]\n\n        # Compute loss\n        bb_ce = self.objective['bb_ce'](bb_scores, sample_density=proposal_density, gt_density=gt_density, mc_dim=1)\n        loss_bb_ce = self.loss_weight['bb_ce'] * bb_ce\n\n        # If standard DiMP classifier is used\n        loss_target_classifier = 0\n        loss_test_init_clf = 0\n        loss_test_iter_clf = 0\n        if 'test_clf' in self.loss_weight.keys():\n            # Classification losses for the different optimization iterations\n            clf_losses_test = [self.objective['test_clf'](s, data['test_label'], data['test_anno']) for s in target_scores]\n\n            # Loss of the final filter\n            clf_loss_test = clf_losses_test[-1]\n            loss_target_classifier = self.loss_weight['test_clf'] * clf_loss_test\n\n            # Loss for the initial filter iteration\n            if 'test_init_clf' in self.loss_weight.keys():\n                loss_test_init_clf = self.loss_weight['test_init_clf'] * clf_losses_test[0]\n\n            # Loss for the intermediate filter iterations\n            if 'test_iter_clf' in self.loss_weight.keys():\n                test_iter_weights = self.loss_weight['test_iter_clf']\n                if isinstance(test_iter_weights, list):\n                    loss_test_iter_clf = sum([a * b for a, b in zip(test_iter_weights, clf_losses_test[1:-1])])\n                else:\n                    loss_test_iter_clf = (test_iter_weights / (len(clf_losses_test) - 2)) * sum(clf_losses_test[1:-1])\n\n        # If PrDiMP classifier is used\n        loss_clf_ce = 0\n        loss_clf_ce_init = 0\n        loss_clf_ce_iter = 0\n        if 'clf_ce' in self.loss_weight.keys():\n            # Classification losses for the different optimization iterations\n            clf_ce_losses = [self.objective['clf_ce'](s, data['test_label_density'], grid_dim=(-2,-1)) for s in target_scores]\n\n            # Loss of the final filter\n            clf_ce = clf_ce_losses[-1]\n            loss_clf_ce = self.loss_weight['clf_ce'] * clf_ce\n\n            # Loss for the initial filter iteration\n            if 'clf_ce_init' in self.loss_weight.keys():\n                loss_clf_ce_init = self.loss_weight['clf_ce_init'] * clf_ce_losses[0]\n\n            # Loss for the intermediate filter iterations\n            if 'clf_ce_iter' in self.loss_weight.keys() and len(clf_ce_losses) > 2:\n                test_iter_weights = self.loss_weight['clf_ce_iter']\n                if isinstance(test_iter_weights, list):\n                    loss_clf_ce_iter = sum([a * b for a, b in zip(test_iter_weights, clf_ce_losses[1:-1])])\n           
     else:\n                    loss_clf_ce_iter = (test_iter_weights / (len(clf_ce_losses) - 2)) * sum(clf_ce_losses[1:-1])\n\n        # Total loss\n        loss = loss_bb_ce + loss_clf_ce + loss_clf_ce_init + loss_clf_ce_iter + \\\n                            loss_target_classifier + loss_test_init_clf + loss_test_iter_clf\n\n        if torch.isinf(loss) or torch.isnan(loss):\n            raise Exception('ERROR: Loss was nan or inf!!!')\n\n        # Log stats\n        stats = {'Loss/total': loss.item(),\n                 'Loss/bb_ce': bb_ce.item(),\n                 'Loss/loss_bb_ce': loss_bb_ce.item()}\n        if 'test_clf' in self.loss_weight.keys():\n            stats['Loss/target_clf'] = loss_target_classifier.item()\n        if 'test_init_clf' in self.loss_weight.keys():\n            stats['Loss/test_init_clf'] = loss_test_init_clf.item()\n        if 'test_iter_clf' in self.loss_weight.keys():\n            stats['Loss/test_iter_clf'] = loss_test_iter_clf.item()\n        if 'clf_ce' in self.loss_weight.keys():\n            stats['Loss/clf_ce'] = loss_clf_ce.item()\n        if 'clf_ce_init' in self.loss_weight.keys():\n            stats['Loss/clf_ce_init'] = loss_clf_ce_init.item()\n        if 'clf_ce_iter' in self.loss_weight.keys() and len(clf_ce_losses) > 2:\n            stats['Loss/clf_ce_iter'] = loss_clf_ce_iter.item()\n\n        if 'test_clf' in self.loss_weight.keys():\n            stats['ClfTrain/test_loss'] = clf_loss_test.item()\n            if len(clf_losses_test) > 0:\n                stats['ClfTrain/test_init_loss'] = clf_losses_test[0].item()\n                if len(clf_losses_test) > 2:\n                    stats['ClfTrain/test_iter_loss'] = sum(clf_losses_test[1:-1]).item() / (len(clf_losses_test) - 2)\n\n        if 'clf_ce' in self.loss_weight.keys():\n            stats['ClfTrain/clf_ce'] = clf_ce.item()\n            if len(clf_ce_losses) > 0:\n                stats['ClfTrain/clf_ce_init'] = clf_ce_losses[0].item()\n                if len(clf_ce_losses) > 2:\n                    stats['ClfTrain/clf_ce_iter'] = sum(clf_ce_losses[1:-1]).item() / (len(clf_ce_losses) - 2)\n\n        return loss, stats\n"
  },
  {
    "path": "external/AR/ltr/admin/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/ltr/admin/environment.py",
    "content": "import importlib\nimport os\nfrom collections import OrderedDict\n\n\ndef create_default_local_file():\n    path = os.path.join(os.path.dirname(__file__), 'local.py')\n\n    empty_str = '\\'\\''\n    default_settings = OrderedDict({\n        'workspace_dir': empty_str,\n        'tensorboard_dir': 'self.workspace_dir + \\'/tensorboard/\\'',\n        'lasot_dir': empty_str,\n        'got10k_dir': empty_str,\n        'trackingnet_dir': empty_str,\n        'coco_dir': empty_str,\n        'lvis_dir': empty_str,\n        'sbd_dir': empty_str,\n        'imagenet_dir': empty_str,\n        'imagenetdet_dir': empty_str,\n        'ecssd_dir': empty_str,\n        'hkuis_dir': empty_str,\n        'msra10k_dir': empty_str,\n        'davis_dir': empty_str,\n        'youtubevos_dir': empty_str})\n\n    comment = {'workspace_dir': 'Base directory for saving network checkpoints.',\n               'tensorboard_dir': 'Directory for tensorboard files.'}\n\n    with open(path, 'w') as f:\n        f.write('class EnvironmentSettings:\\n')\n        f.write('    def __init__(self):\\n')\n\n        for attr, attr_val in default_settings.items():\n            comment_str = None\n            if attr in comment:\n                comment_str = comment[attr]\n            if comment_str is None:\n                f.write('        self.{} = {}\\n'.format(attr, attr_val))\n            else:\n                f.write('        self.{} = {}    # {}\\n'.format(attr, attr_val, comment_str))\n\n\ndef env_settings():\n    env_module_name = 'ltr.admin.local'\n    try:\n        env_module = importlib.import_module(env_module_name)\n        return env_module.EnvironmentSettings()\n    except:\n        env_file = os.path.join(os.path.dirname(__file__), 'local.py')\n\n        create_default_local_file()\n        raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\\n Go to \"{}\" and set all the paths you need. Then try to run again.'.format(env_file))\n"
  },
  {
    "path": "external/AR/ltr/admin/loading.py",
    "content": "import torch\nimport os\nimport sys\nfrom pathlib import Path\nimport importlib\nimport inspect\nfrom ltr.admin import settings as ws_settings\n\n\ndef load_trained_network(workspace_dir, network_path, checkpoint=None):\n    \"\"\"OUTDATED. Use load_pretrained instead!\"\"\"\n    checkpoint_dir = os.path.join(workspace_dir, 'checkpoints')\n    directory = '{}/{}'.format(checkpoint_dir, network_path)\n\n    net, _ = load_network(directory, checkpoint)\n    return net\n\n\ndef load_pretrained(module, name, checkpoint=None, **kwargs):\n    \"\"\"Load a network trained using the LTR framework. This is useful when you want to initialize your new network with\n    a previously trained model.\n    args:\n        module  -  Name of the train script module. I.e. the name of the folder in ltr/train_scripts.\n        name  -  The name of the train_script.\n        checkpoint  -  You can supply the checkpoint number or the full path to the checkpoint file (see load_network).\n        **kwargs  -  These are passed to load_network (see that function).\n    \"\"\"\n\n    settings = ws_settings.Settings()\n    network_dir = os.path.join(settings.env.workspace_dir, 'checkpoints', 'ltr', module, name)\n    return load_network(network_dir=network_dir, checkpoint=checkpoint, **kwargs)\n\n\ndef load_network(network_dir=None, checkpoint=None, constructor_fun_name=None, constructor_module=None, **kwargs):\n    \"\"\"Loads a network checkpoint file.\n\n    Can be called in two different ways:\n        load_checkpoint(network_dir):\n            Loads the checkpoint file given by the path. If checkpoint_dir is a directory,\n            it tries to find the latest checkpoint in that directory.\n\n        load_checkpoint(network_dir, checkpoint=epoch_num):\n            Loads the network at the given epoch number (int).\n\n    The extra keyword arguments are supplied to the network constructor to replace saved ones.\n    \"\"\"\n\n    if network_dir is not None:\n        net_path = Path(network_dir)\n    else:\n        net_path = None\n\n    if net_path.is_file():\n        checkpoint = str(net_path)\n\n    if checkpoint is None:\n        # Load most recent checkpoint\n        checkpoint_list = sorted(net_path.glob('*.pth.tar'))\n        if checkpoint_list:\n            checkpoint_path = checkpoint_list[-1]\n        else:\n            raise Exception('No matching checkpoint file found')\n    elif isinstance(checkpoint, int):\n        # Checkpoint is the epoch number\n        checkpoint_list = sorted(net_path.glob('*_ep{:04d}.pth.tar'.format(checkpoint)))\n        if not checkpoint_list or len(checkpoint_list) == 0:\n            raise Exception('No matching checkpoint file found')\n        if len(checkpoint_list) > 1:\n            raise Exception('Multiple matching checkpoint files found')\n        else:\n            checkpoint_path = checkpoint_list[0]\n    elif isinstance(checkpoint, str):\n        # Checkpoint is the path\n        checkpoint_path = os.path.expanduser(checkpoint)\n    else:\n        raise TypeError\n\n    # Load network\n    checkpoint_dict = torch_load_legacy(checkpoint_path)\n\n    # Construct network model\n    if 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None:\n        net_constr = checkpoint_dict['constructor']\n        if constructor_fun_name is not None:\n            net_constr.fun_name = constructor_fun_name\n        if constructor_module is not None:\n            net_constr.fun_module = constructor_module\n        # Legacy networks before 
refactoring\n        if net_constr.fun_module.startswith('dlframework.'):\n            net_constr.fun_module = net_constr.fun_module[len('dlframework.'):]\n        net_fun = getattr(importlib.import_module(net_constr.fun_module), net_constr.fun_name)\n        net_fun_args = list(inspect.signature(net_fun).parameters.keys())\n        for arg, val in kwargs.items():\n            if arg in net_fun_args:\n                net_constr.kwds[arg] = val\n            else:\n                print('WARNING: Keyword argument \"{}\" not found when loading network. It was ignored.'.format(arg))\n        net = net_constr.get()\n    else:\n        raise RuntimeError('No constructor for the given network.')\n\n    net.load_state_dict(checkpoint_dict['net'])\n\n    net.constructor = checkpoint_dict['constructor']\n    if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:\n        net.info = checkpoint_dict['net_info']\n\n    return net, checkpoint_dict\n\n\ndef load_weights(net, path, strict=True):\n    checkpoint_dict = torch.load(path)\n    weight_dict = checkpoint_dict['net']\n    net.load_state_dict(weight_dict, strict=strict)\n    return net\n\n\ndef torch_load_legacy(path):\n    \"\"\"Load network with legacy environment.\"\"\"\n\n    # Setup legacy env (for older networks)\n    _setup_legacy_env()\n\n    # Load network\n    checkpoint_dict = torch.load(path, map_location='cpu')\n\n    # Cleanup legacy\n    _cleanup_legacy_env()\n\n    return checkpoint_dict\n\n\ndef _setup_legacy_env():\n    importlib.import_module('ltr')\n    sys.modules['dlframework'] = sys.modules['ltr']\n    sys.modules['dlframework.common'] = sys.modules['ltr']\n    importlib.import_module('ltr.admin')\n    sys.modules['dlframework.common.utils'] = sys.modules['ltr.admin']\n    for m in ('model_constructor', 'stats', 'settings', 'local'):\n        importlib.import_module('ltr.admin.' + m)\n        sys.modules['dlframework.common.utils.' + m] = sys.modules['ltr.admin.' + m]\n\n\ndef _cleanup_legacy_env():\n    del_modules = []\n    for m in sys.modules.keys():\n        if m.startswith('dlframework'):\n            del_modules.append(m)\n    for m in del_modules:\n        del sys.modules[m]\n"
  },
  {
    "path": "external/AR/ltr/admin/model_constructor.py",
    "content": "from functools import wraps\nimport importlib\n\n\ndef model_constructor(f):\n    \"\"\" Wraps the function 'f' which returns the network. An extra field 'constructor' is added to the network returned\n    by 'f'. This field contains an instance of the  'NetConstructor' class, which contains the information needed to\n    re-construct the network, such as the name of the function 'f', the function arguments etc. Thus, the network can\n    be easily constructed from a saved checkpoint by calling NetConstructor.get() function.\n    \"\"\"\n    @wraps(f)\n    def f_wrapper(*args, **kwds):\n        net_constr = NetConstructor(f.__name__, f.__module__, args, kwds)\n        output = f(*args, **kwds)\n        if isinstance(output, (tuple, list)):\n            # Assume first argument is the network\n            output[0].constructor = net_constr\n        else:\n            output.constructor = net_constr\n        return output\n    return f_wrapper\n\n\nclass NetConstructor:\n    \"\"\" Class to construct networks. Takes as input the function name (e.g. atom_resnet18), the name of the module\n    which contains the network function (e.g. ltr.models.bbreg.atom) and the arguments for the network\n    function. The class object can then be stored along with the network weights to re-construct the network.\"\"\"\n    def __init__(self, fun_name, fun_module, args, kwds):\n        \"\"\"\n        args:\n            fun_name - The function which returns the network\n            fun_module - the module which contains the network function\n            args - arguments which are passed to the network function\n            kwds - arguments which are passed to the network function\n        \"\"\"\n        self.fun_name = fun_name\n        self.fun_module = fun_module\n        self.args = args\n        self.kwds = kwds\n\n    def get(self):\n        \"\"\" Rebuild the network by calling the network function with the correct arguments. \"\"\"\n        net_module = importlib.import_module(self.fun_module)\n        net_fun = getattr(net_module, self.fun_name)\n        return net_fun(*self.args, **self.kwds)\n"
  },
  {
    "path": "external/AR/ltr/admin/multigpu.py",
    "content": "import torch.nn as nn\n\n\ndef is_multi_gpu(net):\n    return isinstance(net, (MultiGPU, nn.DataParallel))\n\n\nclass MultiGPU(nn.DataParallel):\n    \"\"\"Wraps a network to allow simple multi-GPU training.\"\"\"\n    def __getattr__(self, item):\n        try:\n            return super().__getattr__(item)\n        except:\n            pass\n        return getattr(self.module, item)"
  },
  {
    "path": "external/AR/ltr/admin/settings.py",
    "content": "from ltr.admin.environment import env_settings\n\n\nclass Settings:\n    \"\"\" Training settings, e.g. the paths to datasets and networks.\"\"\"\n    def __init__(self):\n        self.set_default()\n\n    def set_default(self):\n        self.env = env_settings()\n        self.use_gpu = True\n\n\n"
  },
  {
    "path": "external/AR/ltr/admin/stats.py",
    "content": "\n\nclass StatValue:\n    def __init__(self):\n        self.clear()\n\n    def reset(self):\n        self.val = 0\n\n    def clear(self):\n        self.reset()\n        self.history = []\n\n    def update(self, val):\n        self.val = val\n        self.history.append(self.val)\n\n\nclass AverageMeter(object):\n    \"\"\"Computes and stores the average and current value\"\"\"\n    def __init__(self):\n        self.clear()\n        self.has_new_data = False\n\n    def reset(self):\n        self.avg = 0\n        self.val = 0\n        self.sum = 0\n        self.count = 0\n\n    def clear(self):\n        self.reset()\n        self.history = []\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n\n    def new_epoch(self):\n        if self.count > 0:\n            self.history.append(self.avg)\n            self.reset()\n            self.has_new_data = True\n        else:\n            self.has_new_data = False\n\n\ndef topk_accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    single_input = not isinstance(topk, (tuple, list))\n    if single_input:\n        topk = (topk,)\n\n    maxk = max(topk)\n    batch_size = target.size(0)\n\n    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)[0]\n        res.append(correct_k * 100.0 / batch_size)\n\n    if single_input:\n        return res[0]\n\n    return res\n"
  },
  {
    "path": "external/AR/ltr/admin/tensorboard.py",
    "content": "import os\nfrom collections import OrderedDict\ntry:\n    from torch.utils.tensorboard import SummaryWriter\nexcept:\n    print('WARNING: You are using tensorboardX instead sis you have a too old pytorch version.')\n    from tensorboardX import SummaryWriter\n\n\nclass TensorboardWriter:\n    def __init__(self, directory, loader_names):\n        self.directory = directory\n        self.writer = OrderedDict({name: SummaryWriter(os.path.join(self.directory, name)) for name in loader_names})\n\n    def write_info(self, module_name, script_name, description):\n        tb_info_writer = SummaryWriter(os.path.join(self.directory, 'info'))\n        tb_info_writer.add_text('Modulet_name', module_name)\n        tb_info_writer.add_text('Script_name', script_name)\n        tb_info_writer.add_text('Description', description)\n        tb_info_writer.close()\n\n    def write_epoch(self, stats: OrderedDict, epoch: int, ind=-1):\n        for loader_name, loader_stats in stats.items():\n            if loader_stats is None:\n                continue\n            for var_name, val in loader_stats.items():\n                if hasattr(val, 'history') and getattr(val, 'has_new_data', True):\n                    self.writer[loader_name].add_scalar(var_name, val.history[ind], epoch)"
  },
  {
    "path": "external/AR/ltr/data/__init__.py",
    "content": "from .loader import LTRLoader"
  },
  {
    "path": "external/AR/ltr/data/bounding_box_utils.py",
    "content": "import torch\n\n\ndef rect_to_rel(bb, sz_norm=None):\n    \"\"\"Convert standard rectangular parametrization of the bounding box [x, y, w, h]\n    to relative parametrization [cx/sw, cy/sh, log(w), log(h)], where [cx, cy] is the center coordinate.\n    args:\n        bb  -  N x 4 tensor of boxes.\n        sz_norm  -  [N] x 2 tensor of value of [sw, sh] (optional). sw=w and sh=h if not given.\n    \"\"\"\n\n    c = bb[...,:2] + 0.5 * bb[...,2:]\n    if sz_norm is None:\n        c_rel = c / bb[...,2:]\n    else:\n        c_rel = c / sz_norm\n    sz_rel = torch.log(bb[...,2:])\n    return torch.cat((c_rel, sz_rel), dim=-1)\n\n\ndef rel_to_rect(bb, sz_norm=None):\n    \"\"\"Inverts the effect of rect_to_rel. See above.\"\"\"\n\n    sz = torch.exp(bb[...,2:])\n    if sz_norm is None:\n        c = bb[...,:2] * sz\n    else:\n        c = bb[...,:2] * sz_norm\n    tl = c - 0.5 * sz\n    return torch.cat((tl, sz), dim=-1)\n\n\ndef masks_to_bboxes(mask, fmt='c'):\n\n    \"\"\" Convert a mask tensor to one or more bounding boxes.\n    Note: This function is a bit new, make sure it does what it says.  /Andreas\n    :param mask: Tensor of masks, shape = (..., H, W)\n    :param fmt: bbox layout. 'c' => \"center + size\" or (x_center, y_center, width, height)\n                             't' => \"top left + size\" or (x_left, y_top, width, height)\n                             'v' => \"vertices\" or (x_left, y_top, x_right, y_bottom)\n    :return: tensor containing a batch of bounding boxes, shape = (..., 4)\n    \"\"\"\n    batch_shape = mask.shape[:-2]\n    mask = mask.reshape((-1, *mask.shape[-2:]))\n    bboxes = []\n\n    for m in mask:\n        mx = m.sum(dim=-2).nonzero()\n        my = m.sum(dim=-1).nonzero()\n        bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0]\n        bboxes.append(bb)\n\n    bboxes = torch.tensor(bboxes, dtype=torch.float32, device=mask.device)\n    bboxes = bboxes.reshape(batch_shape + (4,))\n\n    if fmt == 'v':\n        return bboxes\n\n    x1 = bboxes[..., :2]\n    s = bboxes[..., 2:] - x1 + 1\n\n    if fmt == 'c':\n        return torch.cat((x1 + 0.5 * s, s), dim=-1)\n    elif fmt == 't':\n        return torch.cat((x1, s), dim=-1)\n\n    raise ValueError(\"Undefined bounding box layout '%s'\" % fmt)\n\n\ndef masks_to_bboxes_multi(mask, ids, fmt='c'):\n    assert mask.dim() == 2\n    bboxes = []\n\n    for id in ids:\n        mx = (mask == id).sum(dim=-2).nonzero()\n        my = (mask == id).float().sum(dim=-1).nonzero()\n        bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0]\n\n        bb = torch.tensor(bb, dtype=torch.float32, device=mask.device)\n\n        x1 = bb[:2]\n        s = bb[2:] - x1 + 1\n\n        if fmt == 'v':\n            pass\n        elif fmt == 'c':\n            bb = torch.cat((x1 + 0.5 * s, s), dim=-1)\n        elif fmt == 't':\n            bb = torch.cat((x1, s), dim=-1)\n        else:\n            raise ValueError(\"Undefined bounding box layout '%s'\" % fmt)\n        bboxes.append(bb)\n\n    return bboxes\n"
  },
  {
    "path": "external/AR/ltr/data/image_loader.py",
    "content": "import jpeg4py\nimport cv2 as cv\nfrom PIL import Image\nimport numpy as np\n\ndavis_palette = np.repeat(np.expand_dims(np.arange(0,256), 1), 3, 1).astype(np.uint8)\ndavis_palette[:22, :] = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],\n                         [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],\n                         [64, 0, 0], [191, 0, 0], [64, 128, 0], [191, 128, 0],\n                         [64, 0, 128], [191, 0, 128], [64, 128, 128], [191, 128, 128],\n                         [0, 64, 0], [128, 64, 0], [0, 191, 0], [128, 191, 0],\n                         [0, 64, 128], [128, 64, 128]]\n\n\ndef default_image_loader(path):\n    \"\"\"The default image loader, reads the image from the given path. It first tries to use the jpeg4py_loader,\n    but reverts to the opencv_loader if the former is not available.\"\"\"\n    if default_image_loader.use_jpeg4py is None:\n        # Try using jpeg4py\n        im = jpeg4py_loader(path)\n        if im is None:\n            default_image_loader.use_jpeg4py = False\n            print('Using opencv_loader instead.')\n        else:\n            default_image_loader.use_jpeg4py = True\n            return im\n    if default_image_loader.use_jpeg4py:\n        return jpeg4py_loader(path)\n    return opencv_loader(path)\n\ndefault_image_loader.use_jpeg4py = None\n\n\ndef jpeg4py_loader(path):\n    \"\"\" Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py\"\"\"\n    try:\n        return jpeg4py.JPEG(path).decode()\n    except Exception as e:\n        print('ERROR: Could not read image \"{}\"'.format(path))\n        print(e)\n        return None\n\n\ndef opencv_loader(path):\n    \"\"\" Read image using opencv's imread function and returns it in rgb format\"\"\"\n    try:\n        im = cv.imread(path, cv.IMREAD_COLOR)\n\n        # convert to rgb and return\n        return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n    except Exception as e:\n        print('ERROR: Could not read image \"{}\"'.format(path))\n        print(e)\n        return None\n\n\ndef jpeg4py_loader_w_failsafe(path):\n    \"\"\" Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py\"\"\"\n    try:\n        return jpeg4py.JPEG(path).decode()\n    except:\n        try:\n            im = cv.imread(path, cv.IMREAD_COLOR)\n\n            # convert to rgb and return\n            return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n        except Exception as e:\n            print('ERROR: Could not read image \"{}\"'.format(path))\n            print(e)\n            return None\n\n\ndef opencv_seg_loader(path):\n    \"\"\" Read segmentation annotation using opencv's imread function\"\"\"\n    try:\n        return cv.imread(path)\n    except Exception as e:\n        print('ERROR: Could not read image \"{}\"'.format(path))\n        print(e)\n        return None\n\n\ndef imread_indexed(filename):\n    \"\"\" Load indexed image with given filename. Used to read segmentation annotations.\"\"\"\n\n    im = Image.open(filename)\n\n    annotation = np.atleast_3d(im)[...,0]\n    return annotation\n\n\ndef imwrite_indexed(filename, array, color_palette=None):\n    \"\"\" Save indexed image as png. Used to save segmentation annotation.\"\"\"\n\n    if color_palette is None:\n        color_palette = davis_palette\n\n    if np.atleast_3d(array).shape[2] != 1:\n        raise Exception(\"Saving indexed PNGs requires 2D array.\")\n\n    im = Image.fromarray(array)\n    im.putpalette(color_palette.ravel())\n    im.save(filename, format='PNG')"
  },
  {
    "path": "external/AR/ltr/data/loader.py",
    "content": "import torch\nimport torch.utils.data.dataloader\nimport importlib\nimport collections\nfrom torch._six import string_classes, int_classes\nfrom pytracking import TensorDict, TensorList\n\n\ndef _check_use_shared_memory():\n    if hasattr(torch.utils.data.dataloader, '_use_shared_memory'):\n        return getattr(torch.utils.data.dataloader, '_use_shared_memory')\n    collate_lib = importlib.import_module('torch.utils.data._utils.collate')\n    if hasattr(collate_lib, '_use_shared_memory'):\n        return getattr(collate_lib, '_use_shared_memory')\n    return torch.utils.data.get_worker_info() is not None\n\n\ndef ltr_collate(batch):\n    \"\"\"Puts each data field into a tensor with outer dimension batch size\"\"\"\n\n    error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n    elem_type = type(batch[0])\n    if isinstance(batch[0], torch.Tensor):\n        out = None\n        if _check_use_shared_memory():\n            # If we're in a background process, concatenate directly into a\n            # shared memory tensor to avoid an extra copy\n            numel = sum([x.numel() for x in batch])\n            storage = batch[0].storage()._new_shared(numel)\n            out = batch[0].new(storage)\n        return torch.stack(batch, 0, out=out)\n        # if batch[0].dim() < 4:\n        #     return torch.stack(batch, 0, out=out)\n        # return torch.cat(batch, 0, out=out)\n    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n            and elem_type.__name__ != 'string_':\n        elem = batch[0]\n        if elem_type.__name__ == 'ndarray':\n            # array of string classes and object\n            if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None:\n                raise TypeError(error_msg.format(elem.dtype))\n\n            return torch.stack([torch.from_numpy(b) for b in batch], 0)\n        if elem.shape == ():  # scalars\n            py_type = float if elem.dtype.name.startswith('float') else int\n            return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n    elif isinstance(batch[0], int_classes):\n        return torch.LongTensor(batch)\n    elif isinstance(batch[0], float):\n        return torch.DoubleTensor(batch)\n    elif isinstance(batch[0], string_classes):\n        return batch\n    elif isinstance(batch[0], TensorDict):\n        return TensorDict({key: ltr_collate([d[key] for d in batch]) for key in batch[0]})\n    elif isinstance(batch[0], collections.Mapping):\n        return {key: ltr_collate([d[key] for d in batch]) for key in batch[0]}\n    elif isinstance(batch[0], TensorList):\n        transposed = zip(*batch)\n        return TensorList([ltr_collate(samples) for samples in transposed])\n    elif isinstance(batch[0], collections.Sequence):\n        transposed = zip(*batch)\n        return [ltr_collate(samples) for samples in transposed]\n    elif batch[0] is None:\n        return batch\n\n    raise TypeError((error_msg.format(type(batch[0]))))\n\n\ndef ltr_collate_stack1(batch):\n    \"\"\"Puts each data field into a tensor. 
def ltr_collate_stack1(batch):\n    \"\"\"Puts each data field into a tensor. The tensors are stacked at dim=1 to form the batch\"\"\"\n\n    error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n    elem_type = type(batch[0])\n    if isinstance(batch[0], torch.Tensor):\n        out = None\n        if _check_use_shared_memory():\n            # If we're in a background process, concatenate directly into a\n            # shared memory tensor to avoid an extra copy\n            numel = sum([x.numel() for x in batch])\n            storage = batch[0].storage()._new_shared(numel)\n            out = batch[0].new(storage)\n        return torch.stack(batch, 1, out=out)\n    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n            and elem_type.__name__ != 'string_':\n        elem = batch[0]\n        if elem_type.__name__ == 'ndarray':\n            # array of string classes and object\n            if re.search('[SaUO]', elem.dtype.str) is not None:\n                raise TypeError(error_msg.format(elem.dtype))\n\n            return torch.stack([torch.from_numpy(b) for b in batch], 1)\n        if elem.shape == ():  # scalars\n            # numpy_type_map was removed from torch; as_tensor infers the matching dtype\n            return torch.as_tensor(batch)\n    elif isinstance(batch[0], int_classes):\n        return torch.LongTensor(batch)\n    elif isinstance(batch[0], float):\n        return torch.DoubleTensor(batch)\n    elif isinstance(batch[0], string_classes):\n        return batch\n    elif isinstance(batch[0], TensorDict):\n        return TensorDict({key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]})\n    elif isinstance(batch[0], collections.abc.Mapping):\n        return {key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]}\n    elif isinstance(batch[0], TensorList):\n        transposed = zip(*batch)\n        return TensorList([ltr_collate_stack1(samples) for samples in transposed])\n    elif isinstance(batch[0], collections.abc.Sequence):\n        transposed = zip(*batch)\n        return [ltr_collate_stack1(samples) for samples in transposed]\n    elif batch[0] is None:\n        return batch\n\n    raise TypeError((error_msg.format(type(batch[0]))))\n\n\nclass LTRLoader(torch.utils.data.dataloader.DataLoader):\n    \"\"\"\n    Data loader. Combines a dataset and a sampler, and provides\n    single- or multi-process iterators over the dataset.\n\n    Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\n            select along which dimension the data should be stacked to form a batch.\n\n    Arguments:\n        dataset (Dataset): dataset from which to load the data.\n        batch_size (int, optional): how many samples per batch to load\n            (default: 1).\n        shuffle (bool, optional): set to ``True`` to have the data reshuffled\n            at every epoch (default: False).\n        sampler (Sampler, optional): defines the strategy to draw samples from\n            the dataset. If specified, ``shuffle`` must be False.\n        batch_sampler (Sampler, optional): like sampler, but returns a batch of\n            indices at a time. Mutually exclusive with batch_size, shuffle,\n            sampler, and drop_last.\n        num_workers (int, optional): how many subprocesses to use for data\n            loading. 0 means that the data will be loaded in the main process.\n            (default: 0)\n        collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n        stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\n        pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n            into CUDA pinned memory before returning them.\n        drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n            if the dataset size is not divisible by the batch size. If ``False`` and\n            the size of dataset is not divisible by the batch size, then the last batch\n            will be smaller. (default: False)\n        timeout (numeric, optional): if positive, the timeout value for collecting a batch\n            from workers. Should always be non-negative. (default: 0)\n        worker_init_fn (callable, optional): If not None, this will be called on each\n            worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n            input, after seeding and before data loading. (default: None)\n\n    .. note:: By default, each worker will have its PyTorch seed set to\n              ``base_seed + worker_id``, where ``base_seed`` is a long generated\n              by main process using its RNG. However, seeds for other libraries\n              may be duplicated upon initializing workers (e.g., NumPy), causing\n              each worker to return identical random numbers. (See\n              :ref:`dataloader-workers-random-seed` section in FAQ.) You may\n              use ``torch.initial_seed()`` to access the PyTorch seed for each\n              worker in :attr:`worker_init_fn`, and use it to set other seeds\n              before data loading.\n\n    .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\n                 unpicklable object, e.g., a lambda function.\n    \"\"\"\n\n    __initialized = False\n\n    def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n                 num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\n                 timeout=0, worker_init_fn=None):\n        if collate_fn is None:\n            if stack_dim == 0:\n                collate_fn = ltr_collate\n            elif stack_dim == 1:\n                collate_fn = ltr_collate_stack1\n            else:\n                raise ValueError('Stack dim not supported. Must be 0 or 1.')\n\n        super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n                 num_workers, collate_fn, pin_memory, drop_last,\n                 timeout, worker_init_fn)\n\n        self.name = name\n        self.training = training\n        self.epoch_interval = epoch_interval\n        self.stack_dim = stack_dim"
  },
  {
    "path": "external/AR/ltr/data/processing.py",
    "content": "import torch\nimport torchvision.transforms as transforms\nfrom pytracking import TensorDict\nimport ltr.data.processing_utils as prutils\n\n\ndef stack_tensors(x):\n    if isinstance(x, (list, tuple)) and isinstance(x[0], torch.Tensor):\n        return torch.stack(x)\n    return x\n\n\nclass BaseProcessing:\n    \"\"\" Base class for Processing. Processing class is used to process the data returned by a dataset, before passing it\n     through the network. For example, it can be used to crop a search region around the object, apply various data\n     augmentations, etc.\"\"\"\n    def __init__(self, transform=transforms.ToTensor(), train_transform=None, test_transform=None, joint_transform=None):\n        \"\"\"\n        args:\n            transform       - The set of transformations to be applied on the images. Used only if train_transform or\n                                test_transform is None.\n            train_transform - The set of transformations to be applied on the train images. If None, the 'transform'\n                                argument is used instead.\n            test_transform  - The set of transformations to be applied on the test images. If None, the 'transform'\n                                argument is used instead.\n            joint_transform - The set of transformations to be applied 'jointly' on the train and test images.  For\n                                example, it can be used to convert both test and train images to grayscale.\n        \"\"\"\n        self.transform = {'train': transform if train_transform is None else train_transform,\n                          'test':  transform if test_transform is None else test_transform,\n                          'joint': joint_transform}\n\n    def __call__(self, data: TensorDict):\n        raise NotImplementedError\n\n\nclass ATOMProcessing(BaseProcessing):\n    \"\"\" The processing class used for training ATOM. The images are processed in the following way.\n    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )\n    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is\n    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is\n    always at the center of the search region. The search region is then resized to a fixed size given by the\n    argument output_sz. A set of proposals are then generated for the test images by jittering the ground truth box.\n\n    \"\"\"\n\n    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,\n                 mode='pair', *args, **kwargs):\n        \"\"\"\n        args:\n            search_area_factor - The size of the search region  relative to the target size.\n            output_sz - An integer, denoting the size to which the search region is resized. The search region is always\n                        square.\n            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before\n                                    extracting the search region. 
 See _get_jittered_box for how the jittering is done.\n            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.\n            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self.search_area_factor = search_area_factor\n        self.output_sz = output_sz\n        self.center_jitter_factor = center_jitter_factor\n        self.scale_jitter_factor = scale_jitter_factor\n        self.proposal_params = proposal_params\n        self.mode = mode\n\n    def _get_jittered_box(self, box, mode):\n        \"\"\" Jitter the input box\n        args:\n            box - input bounding box\n            mode - string 'train' or 'test' indicating train or test data\n\n        returns:\n            torch.Tensor - jittered box\n        \"\"\"\n\n        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])\n        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())\n        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n\n        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n    def _generate_proposals(self, box):\n        \"\"\" Generates proposals by adding noise to the input box\n        args:\n            box - input box\n\n        returns:\n            torch.Tensor - Array of shape (num_proposals, 4) containing proposals\n            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The\n                        IoU is mapped to [-1, 1]\n        \"\"\"\n        # Generate proposals\n        num_proposals = self.proposal_params['boxes_per_frame']\n        proposal_method = self.proposal_params.get('proposal_method', 'default')\n\n        if proposal_method == 'default':\n            proposals = torch.zeros((num_proposals, 4))\n            gt_iou = torch.zeros(num_proposals)\n            for i in range(num_proposals):\n                proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],\n                                                                 sigma_factor=self.proposal_params['sigma_factor'])\n        elif proposal_method == 'gmm':\n            proposals, _, _ = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],\n                                                                             num_samples=num_proposals)\n            gt_iou = prutils.iou(box.view(1,4), proposals.view(-1,4))\n        else:\n            raise ValueError('Unknown proposal method.')\n\n        # Map to [-1, 1]\n        gt_iou = gt_iou * 2 - 1\n        return proposals, gt_iou\n\n    def __call__(self, data: TensorDict):\n        \"\"\"\n        args:\n            data - The input data, should contain the following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno'\n        returns:\n            TensorDict - output data block with following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou'\n        \"\"\"\n        # Apply joint transforms\n        if self.transform['joint'] is not None:\n            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])\n            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)\n\n
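        # In 'pair' mode each split holds a single frame; in 'sequence' mode the lists\n        # below are processed per frame and stacked into tensors at the end.\n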
        for s in ['train', 'test']:\n            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \\\n                \"In pair mode, num train/test frames must be 1\"\n\n            # Add a uniform noise to the center pos\n            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]\n\n            # Crop image region centered at jittered_anno box\n            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],\n                                                        self.search_area_factor, self.output_sz)\n\n            # Apply transforms\n            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)\n\n        # Generate proposals\n        frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])\n\n        data['test_proposals'] = list(frame2_proposals)\n        data['proposal_iou'] = list(gt_iou)\n\n        # Prepare output\n        if self.mode == 'sequence':\n            data = data.apply(stack_tensors)\n        else:\n            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)\n\n        return data\n\n\nclass KLBBregProcessing(BaseProcessing):\n    \"\"\" Based on ATOMProcessing. It supports training ATOM using the Maximum Likelihood or KL-divergence based learning\n    introduced in [https://arxiv.org/abs/1909.12297] and in PrDiMP [https://arxiv.org/abs/2003.12565].\n    \"\"\"\n\n    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,\n                 mode='pair', *args, **kwargs):\n        \"\"\"\n        args:\n            search_area_factor - The size of the search region relative to the target size.\n            output_sz - An integer, denoting the size to which the search region is resized. The search region is always\n                        square.\n            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.\n            mode - Either 'pair' or 'sequence'.
 If mode='sequence', then output has an extra dimension for frames\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self.search_area_factor = search_area_factor\n        self.output_sz = output_sz\n        self.center_jitter_factor = center_jitter_factor\n        self.scale_jitter_factor = scale_jitter_factor\n        self.proposal_params = proposal_params\n        self.mode = mode\n\n    def _get_jittered_box(self, box, mode):\n        \"\"\" Jitter the input box\n        args:\n            box - input bounding box\n            mode - string 'train' or 'test' indicating train or test data\n\n        returns:\n            torch.Tensor - jittered box\n        \"\"\"\n\n        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])\n        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())\n        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n\n        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n    def _generate_proposals(self, box):\n        \"\"\" Generate proposal boxes from a GMM centered at the input box, together with the\n        proposal density and the ground-truth density at the sampled boxes.\n        \"\"\"\n        # Generate proposals\n        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],\n                                                                         gt_sigma=self.proposal_params['gt_sigma'],\n                                                                         num_samples=self.proposal_params[\n                                                                             'boxes_per_frame'],\n                                                                         add_mean_box=self.proposal_params.get(\n                                                                             'add_mean_box', False))\n\n        return proposals, proposal_density, gt_density\n\n    def __call__(self, data: TensorDict):\n        \"\"\"\n        args:\n            data - The input data, should contain the following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno'\n        returns:\n            TensorDict - output data block with following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density'\n        \"\"\"\n        # Apply joint transforms\n        if self.transform['joint'] is not None:\n            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])\n            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)\n\n        for s in ['train', 'test']:\n            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \\\n                \"In pair mode, num train/test frames must be 1\"\n\n            # Add a uniform noise to the center pos\n            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]\n\n            # Crop image region centered at jittered_anno box\n            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],\n                                                        self.search_area_factor, self.output_sz)\n\n            # Apply transforms\n            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)\n\n        # Generate proposals\n        proposals, proposal_density, gt_density =
 zip(*[self._generate_proposals(a) for a in data['test_anno']])\n\n        data['test_proposals'] = proposals\n        data['proposal_density'] = proposal_density\n        data['gt_density'] = gt_density\n\n        # Prepare output\n        if self.mode == 'sequence':\n            data = data.apply(stack_tensors)\n        else:\n            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)\n\n        return data\n\n\nclass ATOMwKLProcessing(BaseProcessing):\n    \"\"\"Same as ATOMProcessing but using the GMM-based sampling of proposal boxes used in KLBBregProcessing.\"\"\"\n    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, proposal_params,\n                 mode='pair', *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.search_area_factor = search_area_factor\n        self.output_sz = output_sz\n        self.center_jitter_factor = center_jitter_factor\n        self.scale_jitter_factor = scale_jitter_factor\n        self.proposal_params = proposal_params\n        self.mode = mode\n\n    def _get_jittered_box(self, box, mode):\n        \"\"\" Jitter the input box\n        args:\n            box - input bounding box\n            mode - string 'train' or 'test' indicating train or test data\n\n        returns:\n            torch.Tensor - jittered box\n        \"\"\"\n\n        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])\n        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())\n        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n\n        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n    def _generate_proposals(self, box):\n        \"\"\" Generate GMM proposals as in KLBBregProcessing, additionally returning the IoU\n        of each proposal with the input box.\n        \"\"\"\n        # Generate proposals\n        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],\n                                                                         self.proposal_params['gt_sigma'],\n                                                                         self.proposal_params['boxes_per_frame'])\n\n        iou = prutils.iou_gen(proposals, box.view(1, 4))\n        return proposals, proposal_density, gt_density, iou\n\n    def __call__(self, data: TensorDict):\n        # Apply joint transforms\n        if self.transform['joint'] is not None:\n            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])\n            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)\n\n        for s in ['train', 'test']:\n            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \\\n                \"In pair mode, num train/test frames must be 1\"\n\n            # Add a uniform noise to the center pos\n            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]\n\n            # Crop image region centered at jittered_anno box\n            crops, boxes, _ = prutils.jittered_center_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],\n                                                        self.search_area_factor, self.output_sz)\n\n            # Apply transforms\n            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)\n\n        # Generate proposals\n        proposals, proposal_density,
 gt_density, proposal_iou = zip(\n            *[self._generate_proposals(a) for a in data['test_anno']])\n\n        data['test_proposals'] = proposals\n        data['proposal_density'] = proposal_density\n        data['gt_density'] = gt_density\n        data['proposal_iou'] = proposal_iou\n\n        # Prepare output\n        if self.mode == 'sequence':\n            data = data.apply(stack_tensors)\n        else:\n            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)\n\n        return data\n\n\nclass DiMPProcessing(BaseProcessing):\n    \"\"\" The processing class used for training DiMP. The images are processed in the following way.\n    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region)\n    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is\n    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is\n    always at the center of the search region. The search region is then resized to a fixed size given by the\n    argument output_sz. A Gaussian label centered at the target is generated for each image. These label functions are\n    used for computing the loss of the predicted classification model on the test images. A set of proposals is\n    also generated for the test images by jittering the ground truth box. These proposals are used to train the\n    bounding box estimating branch.\n\n    \"\"\"\n\n    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',\n                 max_scale_change=None, mode='pair', proposal_params=None, label_function_params=None, *args, **kwargs):\n        \"\"\"\n        args:\n            search_area_factor - The size of the search region relative to the target size.\n            output_sz - An integer, denoting the size to which the search region is resized. The search region is always\n                        square.\n            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.\n                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.\n                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.\n            max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')\n            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames\n            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.\n            label_function_params - Arguments for the label generation process. 
See _generate_label_function for details.\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self.search_area_factor = search_area_factor\n        self.output_sz = output_sz\n        self.center_jitter_factor = center_jitter_factor\n        self.scale_jitter_factor = scale_jitter_factor\n        self.crop_type = crop_type\n        self.mode = mode\n        self.max_scale_change = max_scale_change\n\n        self.proposal_params = proposal_params\n        self.label_function_params = label_function_params\n\n    def _get_jittered_box(self, box, mode):\n        \"\"\" Jitter the input box\n        args:\n            box - input bounding box\n            mode - string 'train' or 'test' indicating train or test data\n\n        returns:\n            torch.Tensor - jittered box\n        \"\"\"\n\n        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])\n        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())\n        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n\n        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n    def _generate_proposals(self, box):\n        \"\"\" Generates proposals by adding noise to the input box\n        args:\n            box - input box\n\n        returns:\n            torch.Tensor - Array of shape (num_proposals, 4) containing proposals\n            torch.Tensor - Array of shape (num_proposals,) containing IoU overlap of each proposal with the input box. The\n                        IoU is mapped to [-1, 1]\n        \"\"\"\n        # Generate proposals\n        num_proposals = self.proposal_params['boxes_per_frame']\n        proposal_method = self.proposal_params.get('proposal_method', 'default')\n\n        if proposal_method == 'default':\n            proposals = torch.zeros((num_proposals, 4))\n            gt_iou = torch.zeros(num_proposals)\n\n            for i in range(num_proposals):\n                proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],\n                                                                 sigma_factor=self.proposal_params['sigma_factor'])\n        elif proposal_method == 'gmm':\n            proposals, _, _ = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],\n                                                     num_samples=num_proposals)\n            gt_iou = prutils.iou(box.view(1, 4), proposals.view(-1, 4))\n        else:\n            raise ValueError('Unknown proposal method.')\n\n        # Map to [-1, 1]\n        gt_iou = gt_iou * 2 - 1\n        return proposals, gt_iou\n\n    def _generate_label_function(self, target_bb):\n        \"\"\" Generates the gaussian label function centered at target_bb\n        args:\n            target_bb - target bounding box (num_images, 4)\n\n        returns:\n            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample\n        \"\"\"\n\n        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],\n                                                      self.label_function_params['kernel_sz'],\n                                                      self.label_function_params['feature_sz'], self.output_sz,\n                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))\n\n        return 
gauss_label\n\n    def __call__(self, data: TensorDict):\n        \"\"\"\n        args:\n            data - The input data, should contain the following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno'\n        returns:\n            TensorDict - output data block with following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_iou',\n                'test_label' (optional), 'train_label' (optional), 'test_label_density' (optional), 'train_label_density' (optional)\n        \"\"\"\n\n        if self.transform['joint'] is not None:\n            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])\n            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)\n\n        for s in ['train', 'test']:\n            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \\\n                \"In pair mode, num train/test frames must be 1\"\n\n            # Add a uniform noise to the center pos\n            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]\n\n            crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],\n                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,\n                                                     max_scale_change=self.max_scale_change)\n\n            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)\n\n        # Generate proposals\n        if self.proposal_params:\n            frame2_proposals, gt_iou = zip(*[self._generate_proposals(a) for a in data['test_anno']])\n\n            data['test_proposals'] = list(frame2_proposals)\n            data['proposal_iou'] = list(gt_iou)\n\n        # Prepare output\n        if self.mode == 'sequence':\n            data = data.apply(stack_tensors)\n        else:\n            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)\n\n        # Generate label functions\n        if self.label_function_params is not None:\n            data['train_label'] = self._generate_label_function(data['train_anno'])\n            data['test_label'] = self._generate_label_function(data['test_anno'])\n\n        return data\n\n\nclass KLDiMPProcessing(BaseProcessing):\n    \"\"\" The processing class used for training PrDiMP that additionally supports the probabilistic classifier and\n    bounding box regressor. See DiMPProcessing for details.\n    \"\"\"\n\n    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor, crop_type='replicate',\n                 max_scale_change=None, mode='pair', proposal_params=None,\n                 label_function_params=None, label_density_params=None, *args, **kwargs):\n        \"\"\"\n        args:\n            search_area_factor - The size of the search region relative to the target size.\n            output_sz - An integer, denoting the size to which the search region is resized. The search region is always\n                        square.\n            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before\n                                    extracting the search region. 
See _get_jittered_box for how the jittering is done.\n            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            crop_type - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.\n                        If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.\n                        If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.\n            max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')\n            mode - Either 'pair' or 'sequence'. If mode='sequence', then output has an extra dimension for frames\n            proposal_params - Arguments for the proposal generation process. See _generate_proposals for details.\n            label_function_params - Arguments for the label generation process. See _generate_label_function for details.\n            label_density_params - Arguments for the label density generation process. See _generate_label_function for details.\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self.search_area_factor = search_area_factor\n        self.output_sz = output_sz\n        self.center_jitter_factor = center_jitter_factor\n        self.scale_jitter_factor = scale_jitter_factor\n        self.crop_type = crop_type\n        self.mode = mode\n        self.max_scale_change = max_scale_change\n\n        self.proposal_params = proposal_params\n        self.label_function_params = label_function_params\n        self.label_density_params = label_density_params\n\n    def _get_jittered_box(self, box, mode):\n        \"\"\" Jitter the input box\n        args:\n            box - input bounding box\n            mode - string 'train' or 'test' indicating train or test data\n\n        returns:\n            torch.Tensor - jittered box\n        \"\"\"\n\n        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])\n        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())\n        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n\n        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n    def _generate_proposals(self, box):\n        \"\"\" Generate proposal sample boxes from a GMM proposal distribution and compute their ground-truth density.\n        This is used for ML and KL based regression learning of the bounding box regressor.\n        args:\n            box - input bounding box\n        \"\"\"\n        # Generate proposals\n        proposals, proposal_density, gt_density = prutils.sample_box_gmm(box, self.proposal_params['proposal_sigma'],\n                                                                         gt_sigma=self.proposal_params['gt_sigma'],\n                                                                         num_samples=self.proposal_params['boxes_per_frame'],\n                                                                         add_mean_box=self.proposal_params.get('add_mean_box', False))\n\n        return proposals, proposal_density, gt_density\n\n    def _generate_label_function(self, target_bb):\n        \"\"\" Generates the gaussian label function centered at target_bb\n        
args:\n            target_bb - target bounding box (num_images, 4)\n\n        returns:\n            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample\n        \"\"\"\n\n        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_function_params['sigma_factor'],\n                                                      self.label_function_params['kernel_sz'],\n                                                      self.label_function_params['feature_sz'], self.output_sz,\n                                                      end_pad_if_even=self.label_function_params.get('end_pad_if_even', True))\n\n        return gauss_label\n\n    def _generate_label_density(self, target_bb):\n        \"\"\" Generates the gaussian label density centered at target_bb\n        args:\n            target_bb - target bounding box (num_images, 4)\n\n        returns:\n            torch.Tensor - Tensor of shape (num_images, label_sz, label_sz) containing the label for each sample\n        \"\"\"\n\n        feat_sz = self.label_density_params['feature_sz'] * self.label_density_params.get('interp_factor', 1)\n        gauss_label = prutils.gaussian_label_function(target_bb.view(-1, 4), self.label_density_params['sigma_factor'],\n                                                      self.label_density_params['kernel_sz'],\n                                                      feat_sz, self.output_sz,\n                                                      end_pad_if_even=self.label_density_params.get('end_pad_if_even', True),\n                                                      density=True,\n                                                      uni_bias=self.label_density_params.get('uni_weight', 0.0))\n\n        gauss_label *= (gauss_label > self.label_density_params.get('threshold', 0.0)).float()\n\n        if self.label_density_params.get('normalize', False):\n            g_sum = gauss_label.sum(dim=(-2,-1))\n            valid = g_sum > 0.01\n            gauss_label[valid, :, :] /= g_sum[valid].view(-1, 1, 1)\n            gauss_label[~valid, :, :] = 1.0 / (gauss_label.shape[-2] * gauss_label.shape[-1])\n\n        gauss_label *= 1.0 - self.label_density_params.get('shrink', 0.0)\n\n        return gauss_label\n\n    def __call__(self, data: TensorDict):\n        \"\"\"\n        args:\n            data - The input data, should contain the following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno'\n        returns:\n            TensorDict - output data block with following fields:\n                'train_images', 'test_images', 'train_anno', 'test_anno', 'test_proposals', 'proposal_density', 'gt_density',\n                'test_label' (optional), 'train_label' (optional), 'test_label_density' (optional), 'train_label_density' (optional)\n        \"\"\"\n\n        if self.transform['joint'] is not None:\n            data['train_images'], data['train_anno'] = self.transform['joint'](image=data['train_images'], bbox=data['train_anno'])\n            data['test_images'], data['test_anno'] = self.transform['joint'](image=data['test_images'], bbox=data['test_anno'], new_roll=False)\n\n        for s in ['train', 'test']:\n            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \\\n                \"In pair mode, num train/test frames must be 1\"\n\n            # Add a uniform noise to the center pos\n            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]\n\n
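            # Crop a search region around the jittered box. With crop_type 'replicate' the\n            # borders are padded by replication; 'inside'/'inside_major' shift or shrink the\n            # crop (bounded by max_scale_change) to keep it inside the image.\n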
            crops, boxes = prutils.target_image_crop(data[s + '_images'], jittered_anno, data[s + '_anno'],\n                                                     self.search_area_factor, self.output_sz, mode=self.crop_type,\n                                                     max_scale_change=self.max_scale_change)\n\n            data[s + '_images'], data[s + '_anno'] = self.transform[s](image=crops, bbox=boxes, joint=False)\n\n        # Generate proposals\n        proposals, proposal_density, gt_density = zip(*[self._generate_proposals(a) for a in data['test_anno']])\n\n        data['test_proposals'] = proposals\n        data['proposal_density'] = proposal_density\n        data['gt_density'] = gt_density\n\n        # Push distractor-frame boxes far outside the image so the generated labels contain no target peak\n        for s in ['train', 'test']:\n            is_distractor = data.get('is_distractor_{}_frame'.format(s), None)\n            if is_distractor is not None:\n                for is_dist, box in zip(is_distractor, data[s+'_anno']):\n                    if is_dist:\n                        box[0] = 99999999.9\n                        box[1] = 99999999.9\n\n        # Prepare output\n        if self.mode == 'sequence':\n            data = data.apply(stack_tensors)\n        else:\n            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)\n\n        # Generate label functions\n        if self.label_function_params is not None:\n            data['train_label'] = self._generate_label_function(data['train_anno'])\n            data['test_label'] = self._generate_label_function(data['test_anno'])\n        if self.label_density_params is not None:\n            data['train_label_density'] = self._generate_label_density(data['train_anno'])\n            data['test_label_density'] = self._generate_label_density(data['test_anno'])\n\n        return data\n"
  },
  {
    "path": "external/AR/ltr/data/processing_utils.py",
    "content": "import torch\nimport math\nimport cv2 as cv\nimport random\nimport torch.nn.functional as F\nfrom .bounding_box_utils import rect_to_rel, rel_to_rect\n\n\ndef sample_target(im, target_bb, search_area_factor, output_sz=None, mask=None):\n    \"\"\" Extracts a square crop centered at target_bb box, of area search_area_factor^2 times target_bb area\n\n    args:\n        im - cv image\n        target_bb - target box [x, y, w, h]\n        search_area_factor - Ratio of crop size to target size\n        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.\n\n    returns:\n        cv image - extracted crop\n        float - the factor by which the crop has been resized to make the crop size equal output_size\n    \"\"\"\n    x, y, w, h = target_bb.tolist()\n\n    # Crop image\n    crop_sz = math.ceil(math.sqrt(w * h) * search_area_factor)\n\n    if crop_sz < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = round(x + 0.5 * w - crop_sz * 0.5)\n    x2 = x1 + crop_sz\n\n    y1 = round(y + 0.5 * h - crop_sz * 0.5)\n    y2 = y1 + crop_sz\n\n    x1_pad = max(0, -x1)\n    x2_pad = max(x2 - im.shape[1] + 1, 0)\n\n    y1_pad = max(0, -y1)\n    y2_pad = max(y2 - im.shape[0] + 1, 0)\n\n    # Crop target\n    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]\n    if mask is not None:\n        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]\n\n    # Pad\n    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_REPLICATE)\n    if mask is not None:\n        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)\n\n    if output_sz is not None:\n        resize_factor = output_sz / crop_sz\n        im_crop_padded = cv.resize(im_crop_padded, (output_sz, output_sz))\n\n        if mask is None:\n            return im_crop_padded, resize_factor\n        mask_crop_padded = \\\n        F.interpolate(mask_crop_padded[None, None], (output_sz, output_sz), mode='bilinear', align_corners=False)[0, 0]\n        return im_crop_padded, resize_factor, mask_crop_padded\n\n    else:\n        if mask is None:\n            return im_crop_padded, 1.0\n        return im_crop_padded, 1.0, mask_crop_padded\n\n\ndef transform_image_to_crop(box_in: torch.Tensor, box_extract: torch.Tensor, resize_factor: float,\n                            crop_sz: torch.Tensor) -> torch.Tensor:\n    \"\"\" Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image\n    args:\n        box_in - the box for which the co-ordinates are to be transformed\n        box_extract - the box about which the image crop has been extracted.\n        resize_factor - the ratio between the original image scale and the scale of the image crop\n        crop_sz - size of the cropped image\n\n    returns:\n        torch.Tensor - transformed co-ordinates of box_in\n    \"\"\"\n    box_extract_center = box_extract[0:2] + 0.5 * box_extract[2:4]\n\n    box_in_center = box_in[0:2] + 0.5 * box_in[2:4]\n\n    box_out_center = (crop_sz - 1) / 2 + (box_in_center - box_extract_center) * resize_factor\n    box_out_wh = box_in[2:4] * resize_factor\n\n    box_out = torch.cat((box_out_center - 0.5 * box_out_wh, box_out_wh))\n    return box_out\n\n\ndef jittered_center_crop(frames, box_extract, box_gt, search_area_factor, output_sz, masks=None):\n    \"\"\" For each frame in frames, extracts a square crop centered at box_extract, of area 
 search_area_factor^2\n    times box_extract area. The extracted crops are then resized to output_sz. Further, the co-ordinates of the box\n    box_gt are transformed to the image crop co-ordinates\n\n    args:\n        frames - list of frames\n        box_extract - list of boxes of same length as frames. The crops are extracted using box_extract\n        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from\n                    image co-ordinates to the crop co-ordinates\n        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area\n        output_sz - The size to which the extracted crops are resized\n        masks - Optional masks, cropped in the same way as the frames\n\n    returns:\n        list - list of image crops\n        list - box_gt location in the crop co-ordinates\n        list - cropped masks (None if masks is None)\n        \"\"\"\n\n    if masks is None:\n        crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz)\n                                for f, a in zip(frames, box_extract)]\n        frames_crop, resize_factors = zip(*crops_resize_factors)\n        masks_crop = None\n    else:\n        crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz, m)\n                                for f, a, m in zip(frames, box_extract, masks)]\n        frames_crop, resize_factors, masks_crop = zip(*crops_resize_factors)\n\n    crop_sz = torch.Tensor([output_sz, output_sz])\n\n    # find the bb location in the crop\n    box_crop = [transform_image_to_crop(a_gt, a_ex, rf, crop_sz)\n                for a_gt, a_ex, rf in zip(box_gt, box_extract, resize_factors)]\n\n    return frames_crop, box_crop, masks_crop\n\n\n
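# Illustrative usage (search_area_factor=5.0 and output_sz=288 are placeholder values):\n#   crops, boxes_in_crop, _ = jittered_center_crop(frames, jittered_boxes, gt_boxes, 5.0, 288)\n\n\n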
def sample_target_adaptive(im, target_bb, search_area_factor, output_sz, mode: str = 'replicate',\n                           max_scale_change=None, mask=None):\n    \"\"\" Extracts a crop centered at target_bb box, of area search_area_factor^2 times target_bb area. If the crop area\n    contains regions outside the image, it is shifted so that it is inside the image. Further, if the crop area exceeds\n    the image size, a smaller crop which fits the image is returned instead.\n\n    args:\n        im - Input numpy image to crop.\n        target_bb - target box [x, y, w, h]\n        search_area_factor - Ratio of crop size to target size\n        output_sz - (float) Size to which the extracted crop is resized (always square).\n        mode - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.\n               If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.\n               If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.\n        max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')\n        mask - Optional mask, cropped in the same way as the image.\n\n    returns:\n        numpy image - Extracted crop.\n        torch.Tensor - A bounding box denoting the cropped region in the image.\n        numpy mask - Cropped mask returned only if mask is not None.\n    \"\"\"\n\n    if max_scale_change is None:\n        max_scale_change = float('inf')\n    if isinstance(output_sz, (float, int)):\n        output_sz = (output_sz, output_sz)\n    output_sz = torch.Tensor(output_sz)\n\n    im_h = im.shape[0]\n    im_w = im.shape[1]\n\n    bbx, bby, bbw, bbh = target_bb.tolist()\n\n    # Crop image\n    crop_sz_x, crop_sz_y = (output_sz * (\n                target_bb[2:].prod() / output_sz.prod()).sqrt() * search_area_factor).ceil().long().tolist()\n\n    # Get new sample size if forced inside the image\n    if mode == 'inside' or mode == 'inside_major':\n        # Calculate rescaling factor if outside the image\n        rescale_factor = [crop_sz_x / im_w, crop_sz_y / im_h]\n        if mode == 'inside':\n            rescale_factor = max(rescale_factor)\n        elif mode == 'inside_major':\n            rescale_factor = min(rescale_factor)\n        rescale_factor = min(max(1, rescale_factor), max_scale_change)\n\n        crop_sz_x = math.floor(crop_sz_x / rescale_factor)\n        crop_sz_y = math.floor(crop_sz_y / rescale_factor)\n\n    if crop_sz_x < 1 or crop_sz_y < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = round(bbx + 0.5 * bbw - crop_sz_x * 0.5)\n    x2 = x1 + crop_sz_x\n\n    y1 = round(bby + 0.5 * bbh - crop_sz_y * 0.5)\n    y2 = y1 + crop_sz_y\n\n    # Move box inside image\n    shift_x = max(0, -x1) + min(0, im_w - x2)\n    x1 += shift_x\n    x2 += shift_x\n\n    shift_y = max(0, -y1) + min(0, im_h - y2)\n    y1 += shift_y\n    y2 += shift_y\n\n    out_x = (max(0, -x1) + max(0, x2 - im_w)) // 2\n    out_y = (max(0, -y1) + max(0, y2 - im_h)) // 2\n    shift_x = (-x1 - out_x) * (out_x > 0)\n    shift_y = (-y1 - out_y) * (out_y > 0)\n\n    x1 += shift_x\n    x2 += shift_x\n    y1 += shift_y\n    y2 += shift_y\n\n    x1_pad = max(0, -x1)\n    x2_pad = max(x2 - im.shape[1] + 1, 0)\n\n    y1_pad = max(0, -y1)\n    y2_pad = max(y2 - im.shape[0] + 1, 0)\n\n    # Crop target\n    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]\n\n    if mask is not None:\n        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]\n\n    # Pad\n    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_REPLICATE)\n\n    if mask is not None:\n        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)\n\n    # Resize image\n    im_out = cv.resize(im_crop_padded, tuple(output_sz.long().tolist()))\n\n    if mask is not None:\n        mask_out = \\\n        F.interpolate(mask_crop_padded[None, None], tuple(output_sz.flip(0).long().tolist()), mode='nearest')[0, 0]\n\n    crop_box = torch.Tensor([x1, y1, x2 - x1, y2 - y1])\n\n    if mask is None:\n        return im_out, crop_box\n
    else:\n        return im_out, crop_box, mask_out\n\n\ndef crop_and_resize(im, box, crop_bb, output_sz, mask=None):\n    if isinstance(output_sz, (float, int)):\n        output_sz = (output_sz, output_sz)\n\n    im_h = im.shape[0]\n    im_w = im.shape[1]\n\n    if crop_bb[2] < 1 or crop_bb[3] < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = crop_bb[0]\n    x2 = crop_bb[0] + crop_bb[2]\n\n    y1 = crop_bb[1]\n    y2 = crop_bb[1] + crop_bb[3]\n\n    x1_pad = max(0, -x1)\n    x2_pad = max(x2 - im.shape[1] + 1, 0)\n\n    y1_pad = max(0, -y1)\n    y2_pad = max(y2 - im.shape[0] + 1, 0)\n\n    # Crop target\n    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]\n\n    if mask is not None:\n        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]\n\n    # Pad\n    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_REPLICATE)\n\n    if mask is not None:\n        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)\n\n    # Resize image\n    im_out = cv.resize(im_crop_padded, output_sz)\n\n    if mask is not None:\n        mask_out = F.interpolate(mask_crop_padded[None, None], (output_sz[1], output_sz[0]), mode='nearest')[0, 0]\n\n    rescale_factor = output_sz[0] / crop_bb[2]\n\n    # Shift and rescale the box into the crop co-ordinate frame\n    if box is not None:\n        box_crop = box.clone()\n        box_crop[0] -= crop_bb[0]\n        box_crop[1] -= crop_bb[1]\n\n        box_crop *= rescale_factor\n    else:\n        box_crop = None\n\n    if mask is None:\n        return im_out, box_crop\n    else:\n        return im_out, box_crop, mask_out\n\n\ndef transform_box_to_crop(box: torch.Tensor, crop_box: torch.Tensor, crop_sz: torch.Tensor) -> torch.Tensor:\n    \"\"\" Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image\n    args:\n        box - the box for which the co-ordinates are to be transformed\n        crop_box - bounding box defining the crop in the original image\n        crop_sz - size of the cropped image\n\n    returns:\n        torch.Tensor - transformed co-ordinates of box\n    \"\"\"\n\n    box_out = box.clone()\n    box_out[:2] -= crop_box[:2]\n\n    scale_factor = crop_sz / crop_box[2:]\n\n    box_out[:2] *= scale_factor\n    box_out[2:] *= scale_factor\n    return box_out\n\n\ndef target_image_crop(frames, box_extract, box_gt, search_area_factor, output_sz, mode: str = 'replicate',\n                      max_scale_change=None, masks=None):\n    \"\"\" For each frame in frames, extracts a square crop centered at box_extract, of area search_area_factor^2\n    times box_extract area. If the crop area contains regions outside the image, it is shifted / shrunk so that it\n    completely fits inside the image. The extracted crops are then resized to output_sz. Further, the co-ordinates of\n    the box box_gt are transformed to the image crop co-ordinates\n\n    args:\n        frames - list of frames\n        box_extract - list of boxes of same length as frames. The crops are extracted using box_extract\n        box_gt - list of boxes of same length as frames. 
The co-ordinates of these boxes are transformed from\n                    image co-ordinates to the crop co-ordinates\n        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area\n        output_sz - The size to which the extracted crops are resized\n        mode - If 'replicate', the boundary pixels are replicated in case the search region crop goes out of image.\n               If 'inside', the search region crop is shifted/shrunk to fit completely inside the image.\n               If 'inside_major', the search region crop is shifted/shrunk to fit completely inside one axis of the image.\n        max_scale_change - Maximum allowed scale change when performing the crop (only applicable for 'inside' and 'inside_major')\n        masks - Optional masks to apply the same crop.\n\n    returns:\n        list - list of image crops\n        list - box_gt location in the crop co-ordinates\n        \"\"\"\n\n    if isinstance(output_sz, (float, int)):\n        output_sz = (output_sz, output_sz)\n\n    if masks is None:\n        frame_crops_boxes = [sample_target_adaptive(f, a, search_area_factor, output_sz, mode, max_scale_change)\n                             for f, a in zip(frames, box_extract)]\n\n        frames_crop, crop_boxes = zip(*frame_crops_boxes)\n    else:\n        frame_crops_boxes_masks = [\n            sample_target_adaptive(f, a, search_area_factor, output_sz, mode, max_scale_change, mask=m)\n            for f, a, m in zip(frames, box_extract, masks)]\n\n        frames_crop, crop_boxes, masks_crop = zip(*frame_crops_boxes_masks)\n\n    crop_sz = torch.Tensor(output_sz)\n\n    # find the bb location in the crop\n    box_crop = [transform_box_to_crop(bb_gt, crop_bb, crop_sz)\n                for bb_gt, crop_bb in zip(box_gt, crop_boxes)]\n\n    if masks is None:\n        return frames_crop, box_crop\n    else:\n        return frames_crop, box_crop, masks_crop\n\n\ndef iou(reference, proposals):\n    \"\"\"Compute the IoU between a reference box with multiple proposal boxes.\n\n    args:\n        reference - Tensor of shape (1, 4).\n        proposals - Tensor of shape (num_proposals, 4)\n\n    returns:\n        torch.Tensor - Tensor of shape (num_proposals,) containing IoU of reference box with each proposal box.\n    \"\"\"\n\n    # Intersection box\n    tl = torch.max(reference[:, :2], proposals[:, :2])\n    br = torch.min(reference[:, :2] + reference[:, 2:], proposals[:, :2] + proposals[:, 2:])\n    sz = (br - tl).clamp(0)\n\n    # Area\n    intersection = sz.prod(dim=1)\n    union = reference[:, 2:].prod(dim=1) + proposals[:, 2:].prod(dim=1) - intersection\n\n    return intersection / union\n\n\ndef rand_uniform(a, b, shape=1):\n    \"\"\" sample numbers uniformly between a and b.\n    args:\n        a - lower bound\n        b - upper bound\n        shape - shape of the output tensor\n\n    returns:\n        torch.Tensor - tensor of shape=shape\n    \"\"\"\n    return (b - a) * torch.rand(shape) + a\n\n\ndef perturb_box(box, min_iou=0.5, sigma_factor=0.1):\n    \"\"\" Perturb the input box by adding gaussian noise to the co-ordinates\n\n     args:\n        box - input box\n        min_iou - minimum IoU overlap between input box and the perturbed box\n        sigma_factor - amount of perturbation, relative to the box size. Can be either a single element, or a list of\n                        sigma_factors, in which case one of them will be uniformly sampled. 
Further, each of the\n                        sigma_factor element can be either a float, or a tensor\n                        of shape (4,) specifying the sigma_factor per co-ordinate\n\n    returns:\n        torch.Tensor - the perturbed box\n    \"\"\"\n\n    if isinstance(sigma_factor, list):\n        # If list, sample one sigma_factor as current sigma factor\n        c_sigma_factor = random.choice(sigma_factor)\n    else:\n        c_sigma_factor = sigma_factor\n\n    if not isinstance(c_sigma_factor, torch.Tensor):\n        c_sigma_factor = c_sigma_factor * torch.ones(4)\n\n    perturb_factor = torch.sqrt(box[2] * box[3]) * c_sigma_factor\n\n    # multiple tries to ensure that the perturbed box has iou > min_iou with the input box\n    for i_ in range(100):\n        c_x = box[0] + 0.5 * box[2]\n        c_y = box[1] + 0.5 * box[3]\n        c_x_per = random.gauss(c_x, perturb_factor[0])\n        c_y_per = random.gauss(c_y, perturb_factor[1])\n\n        w_per = random.gauss(box[2], perturb_factor[2])\n        h_per = random.gauss(box[3], perturb_factor[3])\n\n        if w_per <= 1:\n            w_per = box[2] * rand_uniform(0.15, 0.5)\n\n        if h_per <= 1:\n            h_per = box[3] * rand_uniform(0.15, 0.5)\n\n        box_per = torch.Tensor([c_x_per - 0.5 * w_per, c_y_per - 0.5 * h_per, w_per, h_per]).round()\n\n        if box_per[2] <= 1:\n            box_per[2] = box[2] * rand_uniform(0.15, 0.5)\n\n        if box_per[3] <= 1:\n            box_per[3] = box[3] * rand_uniform(0.15, 0.5)\n\n        box_iou = iou(box.view(1, 4), box_per.view(1, 4))\n\n        # if there is sufficient overlap, return\n        if box_iou > min_iou:\n            return box_per, box_iou\n\n        # else reduce the perturb factor\n        perturb_factor *= 0.9\n\n    return box_per, box_iou\n\n\ndef gauss_1d(sz, sigma, center, end_pad=0, density=False):\n    k = torch.arange(-(sz - 1) / 2, (sz + 1) / 2 + end_pad).reshape(1, -1)\n    gauss = torch.exp(-1.0 / (2 * sigma ** 2) * (k - center.reshape(-1, 1)) ** 2)\n    if density:\n        gauss /= math.sqrt(2 * math.pi) * sigma\n    return gauss\n\n\ndef gauss_2d(sz, sigma, center, end_pad=(0, 0), density=False):\n    if isinstance(sigma, (float, int)):\n        sigma = (sigma, sigma)\n    return gauss_1d(sz[0].item(), sigma[0], center[:, 0], end_pad[0], density).reshape(center.shape[0], 1, -1) * \\\n           gauss_1d(sz[1].item(), sigma[1], center[:, 1], end_pad[1], density).reshape(center.shape[0], -1, 1)\n\n\ndef gaussian_label_function(target_bb, sigma_factor, kernel_sz, feat_sz, image_sz, end_pad_if_even=True, density=False,\n                            uni_bias=0):\n    \"\"\"Construct Gaussian label function.\"\"\"\n\n    if isinstance(kernel_sz, (float, int)):\n        kernel_sz = (kernel_sz, kernel_sz)\n    if isinstance(feat_sz, (float, int)):\n        feat_sz = (feat_sz, feat_sz)\n    if isinstance(image_sz, (float, int)):\n        image_sz = (image_sz, image_sz)\n\n    image_sz = torch.Tensor(image_sz)\n    feat_sz = torch.Tensor(feat_sz)\n\n    target_center = target_bb[:, 0:2] + 0.5 * target_bb[:, 2:4]\n    target_center_norm = (target_center - image_sz / 2) / image_sz\n\n    center = feat_sz * target_center_norm + 0.5 * \\\n             torch.Tensor([(kernel_sz[0] + 1) % 2, (kernel_sz[1] + 1) % 2])\n\n    sigma = sigma_factor * feat_sz.prod().sqrt().item()\n\n    if end_pad_if_even:\n        end_pad = (int(kernel_sz[0] % 2 == 0), int(kernel_sz[1] % 2 == 0))\n    else:\n        end_pad = (0, 0)\n\n    gauss_label = gauss_2d(feat_sz, sigma, 
center, end_pad, density=density)\n    if density:\n        sz = (feat_sz + torch.Tensor(end_pad)).prod()\n        label = (1.0 - uni_bias) * gauss_label + uni_bias / sz\n    else:\n        label = gauss_label + uni_bias\n    return label\n\n\ndef gauss_density_centered(x, std):\n    \"\"\"Evaluate the probability density of a Gaussian centered at zero.\n    args:\n        x - Samples.\n        std - List of standard deviations\n    \"\"\"\n    return torch.exp(-0.5 * (x / std) ** 2) / (math.sqrt(2 * math.pi) * std)\n\n\ndef gmm_density_centered(x, std):\n    \"\"\"Evaluate the probability density of a GMM centered at zero.\n    args:\n        x - Samples. Assumes dim=-1 is the component dimension and dim=-2 is feature dimension. Rest are sample dimension.\n        std - Tensor of standard deviations\n    \"\"\"\n    if x.dim() == std.dim() - 1:\n        x = x.unsqueeze(-1)\n    elif not (x.dim() == std.dim() and x.shape[-1] == 1):\n        raise ValueError('Last dimension must be the gmm stds.')\n    return gauss_density_centered(x, std).prod(-2).mean(-1)\n\n\ndef sample_gmm_centered(std, num_samples=1):\n    \"\"\"Sample from a GMM distribution centered at zero:\n    args:\n        std - Tensor of standard deviations\n        num_samples - number of samples\n    \"\"\"\n    num_components = std.shape[-1]\n    num_dims = std.numel() // num_components\n\n    std = std.view(1, num_dims, num_components)\n\n    # Sample component ids\n    k = torch.randint(num_components, (num_samples,), dtype=torch.int64)\n    std_samp = std[0, :, k].t()\n\n    # Sample\n    x_centered = std_samp * torch.randn(num_samples, num_dims)\n    prob_dens = gmm_density_centered(x_centered, std)\n\n    return x_centered, prob_dens\n\n\ndef sample_gmm(mean, std, num_samples=1):\n    \"\"\"Sample from a GMM distribution:\n    args:\n        mean - a single mean vector\n        std - Tensor of standard deviations\n        num_samples - number of samples\n    \"\"\"\n    num_dims = mean.numel()\n    num_components = std.shape[-1]\n\n    mean = mean.view(1, num_dims)\n    std = std.view(1, -1, num_components)\n\n    # Sample component ids\n    k = torch.randint(num_components, (num_samples,), dtype=torch.int64)\n    std_samp = std[0, :, k].t()\n\n    # Sample\n    x_centered = std_samp * torch.randn(num_samples, num_dims)\n    x = x_centered + mean\n    prob_dens = gmm_density_centered(x_centered, std)\n\n    return x, prob_dens\n\n\ndef sample_box_gmm(mean_box, proposal_sigma, gt_sigma=None, num_samples=1, add_mean_box=False):\n    \"\"\"Sample boxes from a Gaussian mixture model.\n    args:\n        mean_box - Center (or mean) bounding box\n        proposal_sigma - List of standard deviations for each Gaussian\n        gt_sigma - Standard deviation of the ground truth distribution\n        num_samples - Number of sampled boxes\n        add_mean_box - Also add mean box as first element\n\n    returns:\n        proposals, proposal density and ground truth density for all samples\n    \"\"\"\n    center_std = torch.Tensor([s[0] for s in proposal_sigma])\n    sz_std = torch.Tensor([s[1] for s in proposal_sigma])\n    std = torch.stack([center_std, center_std, sz_std, sz_std])\n\n    mean_box = mean_box.view(1, 4)\n    sz_norm = mean_box[:, 2:].clone()\n\n    # Sample boxes\n    proposals_rel_centered, proposal_density = sample_gmm_centered(std, num_samples)\n\n    # Add mean and map back\n    mean_box_rel = rect_to_rel(mean_box, sz_norm)\n    proposals_rel = proposals_rel_centered + mean_box_rel\n    proposals = 
rel_to_rect(proposals_rel, sz_norm)\n\n    if gt_sigma is None or (gt_sigma[0] == 0 and gt_sigma[1] == 0):\n        gt_density = torch.zeros_like(proposal_density)\n    else:\n        std_gt = torch.Tensor([gt_sigma[0], gt_sigma[0], gt_sigma[1], gt_sigma[1]]).view(1, 4)\n        gt_density = gauss_density_centered(proposals_rel_centered, std_gt).prod(-1)\n\n    if add_mean_box:\n        proposals = torch.cat((mean_box, proposals))\n        proposal_density = torch.cat((torch.Tensor([-1]), proposal_density))\n        gt_density = torch.cat((torch.Tensor([1]), gt_density))\n\n    return proposals, proposal_density, gt_density
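\n\n\n'''A minimal, hypothetical usage sketch of the proposal sampling above; the sigma values and sample count are illustrative only.'''\nif __name__ == '__main__':\n    _gt_box = torch.Tensor([50., 40., 120., 80.])  # [x, y, w, h]\n    # Draw jittered proposals around the box from a two-component GMM over the relative box representation\n    _proposals, _prop_density, _gt_density = sample_box_gmm(_gt_box, proposal_sigma=[(0.05, 0.05), (0.5, 0.5)],\n                                                            gt_sigma=(0.05, 0.05), num_samples=8)\n    print(_proposals.shape, _prop_density.shape, _gt_density.shape)  # (8, 4), (8,), (8,)\n"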
  },
  {
    "path": "external/AR/ltr/data/processing_utils_SE.py",
    "content": "import torch\nimport math\nimport cv2 as cv\nimport random\n\nimport numpy as np\n\ndef stack_tensors(x):\n    if isinstance(x, list) and isinstance(x[0], torch.Tensor):\n        return torch.stack(x)\n    return x\n\n\n'''Added on 2019.12.23'''\ndef sample_target_SE(im, target_bb, search_area_factor, output_sz=None, mode=cv.BORDER_REPLICATE):\n    \"\"\" Extracts a crop centered at target_bb box, of size search_area_factor times target_bb(Both height and width)\n\n    args:\n        im - cv image\n        target_bb - target box [x, y, w, h]\n        search_area_factor - Ratio of crop size to target size\n        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.\n\n    returns:\n        cv image - extracted crop\n        float - the factor by which the crop has been resized to make the crop size equal output_size\n    \"\"\"\n\n    x, y, w, h = target_bb.tolist()\n\n    # Crop image\n    ws = math.ceil(search_area_factor * w)\n    hs = math.ceil(search_area_factor * h)\n\n    if ws < 1 or hs < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = round(x + 0.5*w - ws*0.5)\n    x2 = x1 + ws\n\n    y1 = round(y + 0.5 * h - hs * 0.5)\n    y2 = y1 + hs\n\n    x1_pad = max(0, -x1)\n    x2_pad = max(x2-im.shape[1]+1, 0)\n\n    y1_pad = max(0, -y1)\n    y2_pad = max(y2-im.shape[0]+1, 0)\n\n    # Crop target\n    im_crop = im[y1+y1_pad:y2-y2_pad, x1+x1_pad:x2-x2_pad, :]\n\n    # Pad\n    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, mode)\n\n    if output_sz is not None:\n        w_rsz_f = output_sz / ws\n        h_rsz_f = output_sz / hs\n        im_crop_padded_rsz = cv.resize(im_crop_padded, (output_sz, output_sz))\n        if len(im_crop_padded_rsz.shape)==2:\n            im_crop_padded_rsz = im_crop_padded_rsz[...,np.newaxis]\n        return im_crop_padded_rsz, h_rsz_f, w_rsz_f\n    else:\n        return im_crop_padded, 1.0, 1.0\n'''把mask映射到原图上'''\ndef map_mask_back(im, target_bb, search_area_factor, mask, mode=cv.BORDER_REPLICATE):\n    \"\"\" Extracts a crop centered at target_bb box, of size search_area_factor times target_bb(Both height and width)\n\n    args:\n        im - cv image\n        target_bb - target box [x, y, w, h]\n        search_area_factor - Ratio of crop size to target size\n        output_sz - (float) Size to which the extracted crop is resized (always square). 
If None, no resizing is done.\n\n    returns:\n        cv image - extracted crop\n        float - the factor by which the crop has been resized to make the crop size equal output_size\n    \"\"\"\n    H,W = (im.shape[0],im.shape[1])\n    base = np.zeros((H,W))\n    x, y, w, h = target_bb.tolist()\n\n    # Crop image\n    ws = math.ceil(search_area_factor * w)\n    hs = math.ceil(search_area_factor * h)\n\n    if ws < 1 or hs < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = round(x + 0.5*w - ws*0.5)\n    x2 = x1 + ws\n\n    y1 = round(y + 0.5 * h - hs * 0.5)\n    y2 = y1 + hs\n\n    x1_pad = max(0, -x1)\n    x2_pad = max(x2-im.shape[1]+1, 0)\n\n    y1_pad = max(0, -y1)\n    y2_pad = max(y2-im.shape[0]+1, 0)\n\n    '''pad base'''\n    base_padded = cv.copyMakeBorder(base, y1_pad, y2_pad, x1_pad, x2_pad, mode)\n    '''Resize mask'''\n    mask_rsz = cv.resize(mask,(ws,hs))\n    '''fill region with mask'''\n    base_padded[y1+y1_pad:y2+y1_pad, x1+x1_pad:x2+x1_pad] = mask_rsz.copy()\n    '''crop base_padded to get final mask'''\n    final_mask = base_padded[y1_pad:y1_pad+H,x1_pad:x1_pad+W]\n    assert (final_mask.shape == (H,W))\n    return final_mask\n\n'''Added on 2019.12.23'''\ndef transform_image_to_crop_SE(box_in: torch.Tensor, box_extract: torch.Tensor, resize_factor_h: float, resize_factor_w: float,\n                            crop_sz: torch.Tensor) -> torch.Tensor:\n    \"\"\" Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image\n    args:\n        box_in - the box for which the co-ordinates are to be transformed\n        box_extract - the box about which the image crop has been extracted.\n        resize_factor - the ratio between the original image scale and the scale of the image crop\n        crop_sz - size of the cropped image\n\n    returns:\n        torch.Tensor - transformed co-ordinates of box_in\n    \"\"\"\n    box_extract_center = box_extract[0:2] + 0.5*box_extract[2:4]\n\n    box_in_center = box_in[0:2] + 0.5*box_in[2:4]\n\n    box_out_xc = (crop_sz[0] -1)/2 + (box_in_center[0] - box_extract_center[0])*resize_factor_w\n    box_out_yc = (crop_sz[0] -1)/2 + (box_in_center[1] - box_extract_center[1])*resize_factor_h\n    box_out_w = box_in[2] * resize_factor_w\n    box_out_h = box_in[3] * resize_factor_h\n\n    '''2019.12.28 为了避免出现(x1,y1)小于0,或者(x2,y2)大于256的情况,这里我对它们加上了一些限制'''\n    max_sz = crop_sz[0].item()\n    box_out_x1 = torch.clamp(box_out_xc - 0.5 * box_out_w,0,max_sz)\n    box_out_y1 = torch.clamp(box_out_yc - 0.5 * box_out_h,0,max_sz)\n    box_out_x2 = torch.clamp(box_out_xc + 0.5 * box_out_w,0,max_sz)\n    box_out_y2 = torch.clamp(box_out_yc + 0.5 * box_out_h,0,max_sz)\n    box_out_w_new = box_out_x2 - box_out_x1\n    box_out_h_new = box_out_y2 - box_out_y1\n    box_out = torch.stack((box_out_x1, box_out_y1, box_out_w_new, box_out_h_new))\n    return box_out\n\ndef centered_crop(frames, anno, area_factor, output_sz):\n    crops_resize_factors = [sample_target(f, a, area_factor, output_sz)\n                            for f, a in zip(frames, anno)]\n\n    frames_crop, resize_factors = zip(*crops_resize_factors)\n\n    crop_sz = torch.Tensor([output_sz, output_sz])\n\n    # find the bb location in the crop\n    anno_crop = [transform_image_to_crop(a, a, rf, crop_sz)\n                 for a, rf in zip(anno, resize_factors)]\n\n    return frames_crop, anno_crop\n\n'''Added by Bin Yan 2019.12.23, \nchanged on 2020.1.4(add a new args: \"get_bbox_coord\")'''\ndef jittered_center_crop_SE(frames, 
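'''A minimal, hypothetical usage sketch of the crop/box round trip above; the 2.0 search-area factor, 256 output size and the random image are illustrative only.'''\nif __name__ == '__main__':\n    _im = (np.random.rand(360, 480, 3) * 255).astype(np.uint8)  # stand-in H x W x 3 image\n    _bb = torch.Tensor([100., 80., 64., 48.])  # target box [x, y, w, h]\n    # Crop a search region around the box and keep the per-axis resize factors\n    _crop, _h_f, _w_f = sample_target_SE(_im, _bb, search_area_factor=2.0, output_sz=256)\n    # Map the same box into the co-ordinate system of the 256x256 crop\n    _bb_crop = transform_image_to_crop_SE(_bb, _bb, _h_f, _w_f, torch.Tensor([256, 256]))\n    print(_crop.shape, _bb_crop)\n\n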
'''Added by Bin Yan 2019.12.23,\nchanged on 2020.1.4 (added a new arg: \"get_bbox_coord\")'''\ndef jittered_center_crop_SE(frames, box_extract, box_gt, search_area_factor, output_sz, get_bbox_coord=True, mode=cv.BORDER_REPLICATE):\n    \"\"\"\n    Crop a patch centered at box_extract. The height and width of the cropped region are search_area_factor times those of box_extract.\n    The extracted crops are then resized to output_sz. Further, the co-ordinates of the box box_gt are transformed to the image crop co-ordinates\n    args:\n        frames - list of frames\n        box_extract - list of boxes of same length as frames. The crops are extracted using box_extract\n        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from\n                    image co-ordinates to the crop co-ordinates\n        search_area_factor - The height and width of the extracted crop are search_area_factor times those of box_extract\n        output_sz - The size to which the extracted crops are resized\n\n    returns:\n        list - list of image crops\n        list - box_gt location in the crop co-ordinates\n    \"\"\"\n    '''call function \"sample_target_SE\" and function \"transform_image_to_crop_SE\"'''\n    '''get cropped patch (fixed size)'''\n    crops_resize_factors = [sample_target_SE(f, a, search_area_factor, output_sz, mode=mode)\n                            for f, a in zip(frames, box_extract)]\n\n    frames_crop, resize_factors_h, resize_factors_w = zip(*crops_resize_factors)\n    if get_bbox_coord:\n        crop_sz = torch.Tensor([output_sz, output_sz])\n\n        # find the bb location in the crop\n        '''get GT's co-ordinates on the cropped patch'''\n        box_crop = [transform_image_to_crop_SE(a_gt, a_ex, h_rsf, w_rsf, crop_sz)\n                    for a_gt, a_ex, h_rsf, w_rsf in zip(box_gt, box_extract, resize_factors_h, resize_factors_w)]\n\n        return frames_crop, box_crop\n    else:\n        return frames_crop\n\ndef sample_target_nopad(im, target_bb, search_area_factor, output_sz):\n    \"\"\" Extracts a crop centered at target_bb box, of area search_area_factor^2. If the crop area contains regions\n    outside the image, it is shifted so that it is inside the image. Further, if the crop area exceeds the image\n    size, a smaller crop which fits the image is returned instead.\n\n    args:\n        im - cv image\n        target_bb - target box [x, y, w, h]\n        search_area_factor - Ratio of crop size to target size\n        output_sz - (float) Size to which the extracted crop is resized (always square). 
If None, no resizing is done.\n\n    returns:\n        cv image - extracted crop\n        torch.Tensor - a bounding box denoting the cropped region in the image.\n    \"\"\"\n\n    if isinstance(output_sz, (float, int)):\n        output_sz = (output_sz, output_sz)\n    output_sz = torch.Tensor(output_sz)\n\n    im_h = im.shape[0]\n    im_w = im.shape[1]\n\n    bbx, bby, bbw, bbh = target_bb.tolist()\n\n    # Crop image\n    crop_sz_x, crop_sz_y = (output_sz * (target_bb[2:].prod()/output_sz.prod()).sqrt() * search_area_factor).ceil()\n\n    # Calculate rescaling factor if outside the image\n    rescale_factor = max(1, crop_sz_x/im_w, crop_sz_y/im_h)\n    crop_sz_x = math.floor(crop_sz_x / rescale_factor)\n    crop_sz_y = math.floor(crop_sz_y / rescale_factor)\n\n    if crop_sz_x < 1 or crop_sz_y < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = round(bbx + 0.5*bbw - crop_sz_x*0.5)\n    x2 = x1 + crop_sz_x\n\n    y1 = round(bby + 0.5*bbh - crop_sz_y*0.5)\n    y2 = y1 + crop_sz_y\n\n    # Move box inside image\n    shift_x = max(0, -x1) + min(0, im_w - x2)\n    x1 += shift_x\n    x2 += shift_x\n\n    shift_y = max(0, -y1) + min(0, im_h - y2)\n    y1 += shift_y\n    y2 += shift_y\n\n    # Crop and resize image\n    im_crop = im[y1:y2, x1:x2, :]\n    im_out = cv.resize(im_crop, tuple(output_sz.long().tolist()))\n\n    crop_box = torch.Tensor([x1, y1, x2-x1, y2-y1])\n    return im_out, crop_box\n\n\ndef transform_box_to_crop(box: torch.Tensor, crop_box: torch.Tensor, crop_sz: torch.Tensor) -> torch.Tensor:\n    \"\"\" Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image\n    args:\n        box - the box for which the co-ordinates are to be transformed\n        crop_box - bounding box defining the crop in the original image\n        crop_sz - size of the cropped image\n\n    returns:\n        torch.Tensor - transformed co-ordinates of box_in\n    \"\"\"\n\n    box_out = box.clone()\n    box_out[:2] -= crop_box[:2]\n\n    scale_factor = crop_sz / crop_box[2:]\n\n    box_out[:2] *= scale_factor\n    box_out[2:] *= scale_factor\n    return box_out\n\n\ndef jittered_center_crop_nopad(frames, box_extract, box_gt, search_area_factor, output_sz):\n    \"\"\" For each frame in frames, extracts a square crop centered at box_extract, of area search_area_factor^2\n    times box_extract area. If the crop area contains regions outside the image, it is shifted / shrunk so that it\n    completely fits inside the image. The extracted crops are then resized to output_sz. Further, the co-ordinates of\n    the box box_gt are transformed to the image crop co-ordinates\n\n    args:\n        frames - list of frames\n        box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract\n        box_gt - list of boxes of same length as frames. 
The co-ordinates of these boxes are transformed from\n                    image co-ordinates to the crop co-ordinates\n        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area\n        output_sz - The size to which the extracted crops are resized\n\n    returns:\n        list - list of image crops\n        list - box_gt location in the crop co-ordinates\n        \"\"\"\n\n    if isinstance(output_sz, (float, int)):\n        output_sz = (output_sz, output_sz)\n\n    frame_crops_boxes = [sample_target_nopad(f, a, search_area_factor, output_sz)\n                            for f, a in zip(frames, box_extract)]\n\n    frames_crop, crop_boxes = zip(*frame_crops_boxes)\n\n    crop_sz = torch.Tensor(output_sz)\n\n    # find the bb location in the crop\n    box_crop = [transform_box_to_crop(bb_gt, crop_bb, crop_sz)\n                for bb_gt, crop_bb in zip(box_gt, crop_boxes)]\n\n    return frames_crop, box_crop\n\n\ndef iou(reference, proposals):\n    \"\"\"Compute the IoU between a reference box with multiple proposal boxes.\n\n    args:\n        reference - Tensor of shape (1, 4).\n        proposals - Tensor of shape (num_proposals, 4)\n\n    returns:\n        torch.Tensor - Tensor of shape (num_proposals,) containing IoU of reference box with each proposal box.\n    \"\"\"\n\n    # Intersection box\n    tl = torch.max(reference[:,:2], proposals[:,:2])\n    br = torch.min(reference[:,:2] + reference[:,2:], proposals[:,:2] + proposals[:,2:])\n    sz = (br - tl).clamp(0)\n\n    # Area\n    intersection = sz.prod(dim=1)\n    union = reference[:,2:].prod(dim=1) + proposals[:,2:].prod(dim=1) - intersection\n\n    return intersection / union\n\n\ndef rand_uniform(a, b, shape=1):\n    \"\"\" sample numbers uniformly between a and b.\n    args:\n        a - lower bound\n        b - upper bound\n        shape - shape of the output tensor\n\n    returns:\n        torch.Tensor - tensor of shape=shape\n    \"\"\"\n    return (b - a) * torch.rand(shape) + a\n\n\ndef perturb_box(box, min_iou=0.5, sigma_factor=0.1):\n    \"\"\" Perturb the input box by adding gaussian noise to the co-ordinates\n\n     args:\n        box - input box\n        min_iou - minimum IoU overlap between input box and the perturbed box\n        sigma_factor - amount of perturbation, relative to the box size. Can be either a single element, or a list of\n                        sigma_factors, in which case one of them will be uniformly sampled. 
Further, each of the\n                        sigma_factor element can be either a float, or a tensor\n                        of shape (4,) specifying the sigma_factor per co-ordinate\n\n    returns:\n        torch.Tensor - the perturbed box\n    \"\"\"\n\n    if isinstance(sigma_factor, list):\n        # If list, sample one sigma_factor as current sigma factor\n        c_sigma_factor = random.choice(sigma_factor)\n    else:\n        c_sigma_factor = sigma_factor\n\n    if not isinstance(c_sigma_factor, torch.Tensor):\n        c_sigma_factor = c_sigma_factor * torch.ones(4)\n\n    perturb_factor = torch.sqrt(box[2]*box[3])*c_sigma_factor\n\n    # multiple tries to ensure that the perturbed box has iou > min_iou with the input box\n    for i_ in range(100):\n        c_x = box[0] + 0.5*box[2]\n        c_y = box[1] + 0.5 * box[3]\n        c_x_per = random.gauss(c_x, perturb_factor[0])\n        c_y_per = random.gauss(c_y, perturb_factor[1])\n\n        w_per = random.gauss(box[2], perturb_factor[2])\n        h_per = random.gauss(box[3], perturb_factor[3])\n\n        if w_per <= 1:\n            w_per = box[2]*rand_uniform(0.15, 0.5)\n\n        if h_per <= 1:\n            h_per = box[3]*rand_uniform(0.15, 0.5)\n\n        box_per = torch.Tensor([c_x_per - 0.5*w_per, c_y_per - 0.5*h_per, w_per, h_per]).round()\n\n        if box_per[2] <= 1:\n            box_per[2] = box[2]*rand_uniform(0.15, 0.5)\n\n        if box_per[3] <= 1:\n            box_per[3] = box[3]*rand_uniform(0.15, 0.5)\n\n        box_iou = iou(box.view(1, 4), box_per.view(1, 4))\n\n        # if there is sufficient overlap, return\n        if box_iou > min_iou:\n            return box_per, box_iou\n\n        # else reduce the perturb factor\n        perturb_factor *= 0.9\n\n    return box_per, box_iou\n\n\ndef gauss_1d(sz, sigma, center, end_pad=0):\n    k = torch.arange(-(sz-1)/2, (sz+1)/2 + end_pad).reshape(1, -1)\n    return torch.exp(-1.0/(2*sigma**2) * (k - center.reshape(-1, 1))**2)\n\n\ndef gauss_2d(sz, sigma, center, end_pad=(0, 0)):\n    if isinstance(sigma, (float, int)):\n        sigma = (sigma, sigma)\n    return gauss_1d(sz[0].item(), sigma[0], center[:, 0], end_pad[0]).reshape(center.shape[0], 1, -1) * \\\n           gauss_1d(sz[1].item(), sigma[1], center[:, 1], end_pad[1]).reshape(center.shape[0], -1, 1)\n\n\ndef gaussian_label_function(target_bb, sigma_factor, kernel_sz, feat_sz, image_sz, end_pad_if_even=True):\n    \"\"\"Construct Gaussian label function.\"\"\"\n\n    if isinstance(kernel_sz, (float, int)):\n        kernel_sz = (kernel_sz, kernel_sz)\n    if isinstance(feat_sz, (float, int)):\n        feat_sz = (feat_sz, feat_sz)\n    if isinstance(image_sz, (float, int)):\n        image_sz = (image_sz, image_sz)\n\n    image_sz = torch.Tensor(image_sz)\n    feat_sz = torch.Tensor(feat_sz)\n\n    target_center = target_bb[:, 0:2] + 0.5 * target_bb[:, 2:4]\n    target_center_norm = (target_center - image_sz / 2) / image_sz\n\n    center = feat_sz * target_center_norm + 0.5 * \\\n             torch.Tensor([(kernel_sz[0] + 1) % 2, (kernel_sz[1] + 1) % 2])\n\n    sigma = sigma_factor * feat_sz.prod().sqrt().item()\n\n    if end_pad_if_even:\n        end_pad = (int(kernel_sz[0]%2 == 0), int(kernel_sz[1]%2 == 0))\n    else:\n        end_pad = (0, 0)\n\n    gauss_label = gauss_2d(feat_sz, sigma, center, end_pad)\n    return gauss_label\n\n"
  },
  {
    "path": "external/AR/ltr/data/sampler.py",
    "content": "import random\nimport torch.utils.data\nfrom pytracking import TensorDict\n\n\ndef no_processing(data):\n    return data\n\n\nclass TrackingSampler(torch.utils.data.Dataset):\n    \"\"\" Class responsible for sampling frames from training sequences to form batches. Each training sample is a\n    tuple consisting of i) a set of train frames, used to learn the DiMP classification model and obtain the\n    modulation vector for IoU-Net, and ii) a set of test frames on which target classification loss for the predicted\n    DiMP model, and the IoU prediction loss for the IoU-Net is calculated.\n\n    The sampling is done in the following ways. First a dataset is selected at random. Next, a sequence is selected\n    from that dataset. A base frame is then sampled randomly from the sequence. Next, a set of 'train frames' and\n    'test frames' are sampled from the sequence from the range [base_frame_id - max_gap, base_frame_id]  and\n    (base_frame_id, base_frame_id + max_gap] respectively. Only the frames in which the target is visible are sampled.\n    If enough visible frames are not found, the 'max_gap' is increased gradually till enough frames are found.\n\n    The sampled frames are then passed through the input 'processing' function for the necessary processing-\n    \"\"\"\n\n    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n                 num_test_frames, num_train_frames=1, processing=no_processing, frame_sample_mode='causal'):\n        \"\"\"\n        args:\n            datasets - List of datasets to be used for training\n            p_datasets - List containing the probabilities by which each dataset will be sampled\n            samples_per_epoch - Number of training samples per epoch\n            max_gap - Maximum gap, in frame numbers, between the train frames and the test frames.\n            num_test_frames - Number of test frames to sample.\n            num_train_frames - Number of train frames to sample.\n            processing - An instance of Processing class which performs the necessary processing of the data.\n            frame_sample_mode - Either 'causal' or 'interval'. If 'causal', then the test frames are sampled in a causally,\n                                otherwise randomly within the interval.\n        \"\"\"\n        self.datasets = datasets\n\n        # If p not provided, sample uniformly from all videos\n        if p_datasets is None:\n            p_datasets = [len(d) for d in self.datasets]\n\n        # Normalize\n        p_total = sum(p_datasets)\n        self.p_datasets = [x / p_total for x in p_datasets]\n\n        self.samples_per_epoch = samples_per_epoch\n        self.max_gap = max_gap\n        self.num_test_frames = num_test_frames\n        self.num_train_frames = num_train_frames\n        self.processing = processing\n        self.frame_sample_mode = frame_sample_mode\n\n    def __len__(self):\n        return self.samples_per_epoch\n\n    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None):\n        \"\"\" Samples num_ids frames between min_id and max_id for which target is visible\n\n        args:\n            visible - 1d Tensor indicating whether target is visible for each frame\n            num_ids - number of frames to be samples\n            min_id - Minimum allowed frame number\n            max_id - Maximum allowed frame number\n\n        returns:\n            list - List of sampled frame numbers. 
None if not sufficient visible frames could be found.\n        \"\"\"\n        if num_ids == 0:\n            return []\n        if min_id is None or min_id < 0:\n            min_id = 0\n        if max_id is None or max_id > len(visible):\n            max_id = len(visible)\n\n        valid_ids = [i for i in range(min_id, max_id) if visible[i]]\n\n        # No visible ids\n        if len(valid_ids) == 0:\n            return None\n\n        return random.choices(valid_ids, k=num_ids)\n\n    def __getitem__(self, index):\n        \"\"\"\n        args:\n            index (int): Index (Ignored since we sample randomly)\n\n        returns:\n            TensorDict - dict containing all the data blocks\n        \"\"\"\n\n        # Select a dataset\n        dataset = random.choices(self.datasets, self.p_datasets)[0]\n        is_video_dataset = dataset.is_video_sequence()\n\n        # Sample a sequence with enough visible frames\n        enough_visible_frames = False\n        while not enough_visible_frames:\n            # Sample a sequence\n            seq_id = random.randint(0, dataset.get_num_sequences() - 1)\n\n            # Sample frames\n            seq_info_dict = dataset.get_sequence_info(seq_id)\n            visible = seq_info_dict['visible']\n\n            enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * (\n                    self.num_test_frames + self.num_train_frames) and len(visible) >= 20\n\n            enough_visible_frames = enough_visible_frames or not is_video_dataset\n\n        if is_video_dataset:\n            train_frame_ids = None\n            test_frame_ids = None\n            gap_increase = 0\n\n            if self.frame_sample_mode == 'interval':\n                # Sample frame numbers within interval defined by the first frame\n                while test_frame_ids is None:\n                    base_frame_id = self._sample_visible_ids(visible, num_ids=1)\n                    extra_train_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_train_frames - 1,\n                                                                     min_id=base_frame_id[\n                                                                                0] - self.max_gap - gap_increase,\n                                                                     max_id=base_frame_id[\n                                                                                0] + self.max_gap + gap_increase)\n                    if extra_train_frame_ids is None:\n                        gap_increase += 5\n                        continue\n                    train_frame_ids = base_frame_id + extra_train_frame_ids\n                    test_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_test_frames,\n                                                              min_id=train_frame_ids[0] - self.max_gap - gap_increase,\n                                                              max_id=train_frame_ids[0] + self.max_gap + gap_increase)\n                    gap_increase += 5  # Increase gap until a frame is found\n\n            elif self.frame_sample_mode == 'causal':\n                # Sample test and train frames in a causal manner, i.e. 
test_frame_ids > train_frame_ids\n                while test_frame_ids is None:\n                    base_frame_id = self._sample_visible_ids(visible, num_ids=1, min_id=self.num_train_frames - 1,\n                                                             max_id=len(visible) - self.num_test_frames)\n                    prev_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_train_frames - 1,\n                                                              min_id=base_frame_id[0] - self.max_gap - gap_increase,\n                                                              max_id=base_frame_id[0])\n                    if prev_frame_ids is None:\n                        gap_increase += 5\n                        continue\n                    train_frame_ids = base_frame_id + prev_frame_ids\n                    test_frame_ids = self._sample_visible_ids(visible, min_id=train_frame_ids[0] + 1,\n                                                              max_id=train_frame_ids[0] + self.max_gap + gap_increase,\n                                                              num_ids=self.num_test_frames)\n                    # Increase gap until a frame is found\n                    gap_increase += 5\n        else:\n            # In case of image dataset, just repeat the image to generate synthetic video\n            train_frame_ids = [1] * self.num_train_frames\n            test_frame_ids = [1] * self.num_test_frames\n\n        train_frames, train_anno, meta_obj_train = dataset.get_frames(seq_id, train_frame_ids, seq_info_dict)\n        test_frames, test_anno, meta_obj_test = dataset.get_frames(seq_id, test_frame_ids, seq_info_dict)\n\n        data = TensorDict({'train_images': train_frames,\n                           'train_anno': train_anno['bbox'],\n                           'test_images': test_frames,\n                           'test_anno': test_anno['bbox'],\n                           'dataset': dataset.get_name(),\n                           'test_class': meta_obj_test.get('object_class_name')})\n\n        return self.processing(data)\n\n\nclass DiMPSampler(TrackingSampler):\n    \"\"\" See TrackingSampler.\"\"\"\n\n    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n                 num_test_frames, num_train_frames=1, processing=no_processing, frame_sample_mode='causal'):\n        super().__init__(datasets=datasets, p_datasets=p_datasets, samples_per_epoch=samples_per_epoch, max_gap=max_gap,\n                         num_test_frames=num_test_frames, num_train_frames=num_train_frames, processing=processing,\n                         frame_sample_mode=frame_sample_mode)\n\n\nclass ATOMSampler(TrackingSampler):\n    \"\"\" See TrackingSampler.\"\"\"\n\n    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n                 num_test_frames=1, num_train_frames=1, processing=no_processing, frame_sample_mode='interval'):\n        super().__init__(datasets=datasets, p_datasets=p_datasets, samples_per_epoch=samples_per_epoch, max_gap=max_gap,\n                         num_test_frames=num_test_frames, num_train_frames=num_train_frames, processing=processing,\n                         frame_sample_mode=frame_sample_mode)
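\n\n\n'''A minimal, hypothetical usage sketch: drawing one training sample from a TrackingSampler. Got10k stands in for any ltr.dataset dataset; the numbers are illustrative only, and a real training pipeline would pass a proper processing function instead of no_processing.'''\nif __name__ == '__main__':\n    from ltr.dataset import Got10k\n    dataset = Got10k(split='train')\n    sampler = TrackingSampler([dataset], p_datasets=None, samples_per_epoch=100, max_gap=30,\n                              num_test_frames=1, num_train_frames=1)\n    sample = sampler[0]  # the index is ignored; sequences and frames are drawn at random\n    print(sample['dataset'], len(sample['train_images']), sample['train_anno'][0].shape)\n"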
  },
  {
    "path": "external/AR/ltr/data/transforms.py",
    "content": "import random\nimport numpy as np\nimport math\nimport cv2 as cv\nimport torch\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as tvisf\n\n\nclass Transform:\n    \"\"\"A set of transformations, used for e.g. data augmentation.\n    Args of constructor:\n        transforms: An arbitrary number of transformations, derived from the TransformBase class.\n                    They are applied in the order they are given.\n\n    The Transform object can jointly transform images, bounding boxes and segmentation masks.\n    This is done by calling the object with the following key-word arguments (all are optional).\n\n    The following arguments are inputs to be transformed. They are either supplied as a single instance, or a list of instances.\n        image  -  Image\n        coords  -  2xN dimensional Tensor of 2D image coordinates [y, x]\n        bbox  -  Bounding box on the form [x, y, w, h]\n        mask  -  Segmentation mask with discrete classes\n\n    The following parameters can be supplied with calling the transform object:\n        joint [Bool]  -  If True then transform all images/coords/bbox/mask in the list jointly using the same transformation.\n                         Otherwise each tuple (images, coords, bbox, mask) will be transformed independently using\n                         different random rolls. Default: True.\n        new_roll [Bool]  -  If False, then no new random roll is performed, and the saved result from the previous roll\n                            is used instead. Default: True.\n\n    Check the DiMPProcessing class for examples.\n    \"\"\"\n\n    def __init__(self, *transforms):\n        if len(transforms) == 1 and isinstance(transforms[0], (list, tuple)):\n            transforms = transforms[0]\n        self.transforms = transforms\n        self._valid_inputs = ['image', 'coords', 'bbox', 'mask']\n        self._valid_args = ['joint', 'new_roll']\n        self._valid_all = self._valid_inputs + self._valid_args\n\n    def __call__(self, **inputs):\n        var_names = [k for k in inputs.keys() if k in self._valid_inputs]\n        for v in inputs.keys():\n            if v not in self._valid_all:\n                raise ValueError('Incorrect input \\\"{}\\\" to transform. 
Only supports inputs {} and arguments {}.'.format(v, self._valid_inputs, self._valid_args))\n\n        joint_mode = inputs.get('joint', True)\n        new_roll = inputs.get('new_roll', True)\n\n        if not joint_mode:\n            out = zip(*[self(**inp) for inp in self._split_inputs(inputs)])\n            return tuple(list(o) for o in out)\n\n        out = {k: v for k, v in inputs.items() if k in self._valid_inputs}\n\n        for t in self.transforms:\n            out = t(**out, joint=joint_mode, new_roll=new_roll)\n        if len(var_names) == 1:\n            return out[var_names[0]]\n        # Make sure order is correct\n        return tuple(out[v] for v in var_names)\n\n    def _split_inputs(self, inputs):\n        var_names = [k for k in inputs.keys() if k in self._valid_inputs]\n        split_inputs = [{k: v for k, v in zip(var_names, vals)} for vals in zip(*[inputs[vn] for vn in var_names])]\n        for arg_name, arg_val in filter(lambda it: it[0]!='joint' and it[0] in self._valid_args, inputs.items()):\n            if isinstance(arg_val, list):\n                for inp, av in zip(split_inputs, arg_val):\n                    inp[arg_name] = av\n            else:\n                for inp in split_inputs:\n                    inp[arg_name] = arg_val\n        return split_inputs\n\n    def __repr__(self):\n        format_string = self.__class__.__name__ + '('\n        for t in self.transforms:\n            format_string += '\\n'\n            format_string += '    {0}'.format(t)\n        format_string += '\\n)'\n        return format_string\n\n\nclass TransformBase:\n    \"\"\"Base class for transformation objects. See the Transform class for details.\"\"\"\n    def __init__(self):\n        self._valid_inputs = ['image', 'coords', 'bbox', 'mask']\n        self._valid_args = ['new_roll']\n        self._valid_all = self._valid_inputs + self._valid_args\n        self._rand_params = None\n\n    def __call__(self, **inputs):\n        # Split input\n        input_vars = {k: v for k, v in inputs.items() if k in self._valid_inputs}\n        input_args = {k: v for k, v in inputs.items() if k in self._valid_args}\n\n        # Roll random parameters for the transform\n        if input_args.get('new_roll', True):\n            rand_params = self.roll()\n            if rand_params is None:\n                rand_params = ()\n            elif not isinstance(rand_params, tuple):\n                rand_params = (rand_params,)\n            self._rand_params = rand_params\n\n        outputs = dict()\n        for var_name, var in input_vars.items():\n            if var is not None:\n                transform_func = getattr(self, 'transform_' + var_name)\n                if var_name in ['coords', 'bbox']:\n                    params = (self._get_image_size(input_vars),) + self._rand_params\n                else:\n                    params = self._rand_params\n                if isinstance(var, (list, tuple)):\n                    outputs[var_name] = [transform_func(x, *params) for x in var]\n                else:\n                    outputs[var_name] = transform_func(var, *params)\n        return outputs\n\n    def _get_image_size(self, inputs):\n        im = None\n        for var_name in ['image', 'mask']:\n            if inputs.get(var_name) is not None:\n                im = inputs[var_name]\n                break\n        if im is None:\n            return None\n        if isinstance(im, (list, tuple)):\n            im = im[0]\n        if isinstance(im, np.ndarray):\n            return 
im.shape[:2]\n        if torch.is_tensor(im):\n            return (im.shape[-2], im.shape[-1])\n        raise Exception('Unknown image type')\n\n    def roll(self):\n        return None\n\n    def transform_image(self, image, *rand_params):\n        \"\"\"Must be deterministic\"\"\"\n        return image\n\n    def transform_coords(self, coords, image_shape, *rand_params):\n        \"\"\"Must be deterministic\"\"\"\n        return coords\n\n    def transform_bbox(self, bbox, image_shape, *rand_params):\n        \"\"\"Assumes [x, y, w, h]\"\"\"\n        # Check if not overloaded\n        if self.transform_coords.__code__ == TransformBase.transform_coords.__code__:\n            return bbox\n\n        coord = bbox.clone().view(-1,2).t().flip(0)\n\n        x1 = coord[1, 0]\n        x2 = coord[1, 0] + coord[1, 1]\n\n        y1 = coord[0, 0]\n        y2 = coord[0, 0] + coord[0, 1]\n\n        coord_all = torch.tensor([[y1, y1, y2, y2], [x1, x2, x2, x1]])\n\n        coord_transf = self.transform_coords(coord_all, image_shape, *rand_params).flip(0)\n        tl = torch.min(coord_transf, dim=1)[0]\n        sz = torch.max(coord_transf, dim=1)[0] - tl\n        bbox_out = torch.cat((tl, sz), dim=-1).reshape(bbox.shape)\n        return bbox_out\n\n    def transform_mask(self, mask, *rand_params):\n        \"\"\"Must be deterministic\"\"\"\n        return mask\n\n\nclass ToTensor(TransformBase):\n    \"\"\"Convert to a Tensor\"\"\"\n\n    def transform_image(self, image):\n        # handle numpy array\n        if image.ndim == 2:\n            image = image[:, :, None]\n\n        image = torch.from_numpy(image.transpose((2, 0, 1)))\n        # backward compatibility\n        if isinstance(image, torch.ByteTensor):\n            return image.float().div(255)\n        else:\n            return image\n\n    def transform_mask(self, mask):\n        # The base class dispatches to 'transform_' + input name, so masks are converted here as well\n        if isinstance(mask, np.ndarray):\n            return torch.from_numpy(mask)\n        return mask\n\n\nclass ToTensorAndJitter(TransformBase):\n    \"\"\"Convert to a Tensor and jitter brightness\"\"\"\n    def __init__(self, brightness_jitter=0.0, normalize=True):\n        super().__init__()\n        self.brightness_jitter = brightness_jitter\n        self.normalize = normalize\n\n    def roll(self):\n        return np.random.uniform(max(0, 1 - self.brightness_jitter), 1 + self.brightness_jitter)\n\n    def transform_image(self, image, brightness_factor):\n        # handle numpy array\n        image = torch.from_numpy(image.transpose((2, 0, 1)))\n\n        # backward compatibility\n        if self.normalize:\n            return image.float().mul(brightness_factor/255.0).clamp(0.0, 1.0)\n        else:\n            return image.float().mul(brightness_factor).clamp(0.0, 255.0)\n\n    def transform_mask(self, mask, brightness_factor):\n        if isinstance(mask, np.ndarray):\n            return torch.from_numpy(mask)\n        else:\n            return mask\n\n\nclass Normalize(TransformBase):\n    \"\"\"Normalize image\"\"\"\n    def __init__(self, mean, std, inplace=False):\n        super().__init__()\n        self.mean = mean\n        self.std = std\n        self.inplace = inplace\n\n    def transform_image(self, image):\n        return tvisf.normalize(image, self.mean, self.std, self.inplace)\n\n\nclass ToGrayscale(TransformBase):\n    \"\"\"Converts image to grayscale with probability\"\"\"\n    def __init__(self, probability = 0.5):\n        super().__init__()\n        self.probability = probability\n        self.color_weights = np.array([0.2989, 0.5870, 0.1140], dtype=np.float32)\n\n 
   def roll(self):\n        return random.random() < self.probability\n\n    def transform_image(self, image, do_grayscale):\n        if do_grayscale:\n            if torch.is_tensor(image):\n                raise NotImplementedError('Implement torch variant.')\n            img_gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY)\n            return np.stack([img_gray, img_gray, img_gray], axis=2)\n            # return np.repeat(np.sum(img * self.color_weights, axis=2, keepdims=True).astype(np.uint8), 3, axis=2)\n        return image\n\n\nclass ToBGR(TransformBase):\n    \"\"\"Converts image to BGR\"\"\"\n    def transform_image(self, image):\n        if torch.is_tensor(image):\n            raise NotImplementedError('Implement torch variant.')\n        img_bgr = cv.cvtColor(image, cv.COLOR_RGB2BGR)\n        return img_bgr\n\n\nclass RandomHorizontalFlip(TransformBase):\n    \"\"\"Horizontally flip image randomly with a probability p.\"\"\"\n    def __init__(self, probability = 0.5):\n        super().__init__()\n        self.probability = probability\n\n    def roll(self):\n        return random.random() < self.probability\n\n    def transform_image(self, image, do_flip):\n        if do_flip:\n            if torch.is_tensor(image):\n                return image.flip((2,))\n            return np.fliplr(image).copy()\n        return image\n\n    def transform_coords(self, coords, image_shape, do_flip):\n        if do_flip:\n            coords = coords.clone()\n            coords[1,:] = (image_shape[1] - 1) - coords[1,:]\n        return coords\n\n    def transform_mask(self, mask, do_flip):\n        if do_flip:\n            if torch.is_tensor(mask):\n                return mask.flip((-1,))\n            return np.fliplr(mask).copy()\n        return mask\n\n\nclass Blur(TransformBase):\n    \"\"\" Blur the image by applying a gaussian kernel with given sigma\"\"\"\n    def __init__(self, sigma):\n        super().__init__()\n        if isinstance(sigma, (float, int)):\n            sigma = (sigma, sigma)\n        self.sigma = sigma\n        self.filter_size = [math.ceil(2*s) for s in self.sigma]\n        x_coord = [torch.arange(-sz, sz+1, dtype=torch.float32) for sz in self.filter_size]\n        self.filter = [torch.exp(-(x**2)/(2*s**2)) for x, s in zip(x_coord, self.sigma)]\n        self.filter[0] = self.filter[0].view(1,1,-1,1) / self.filter[0].sum()\n        self.filter[1] = self.filter[1].view(1,1,1,-1) / self.filter[1].sum()\n\n    def transform_image(self, image):\n        if torch.is_tensor(image):\n            sz = image.shape[2:]\n            im1 = F.conv2d(image.view(-1, 1, sz[0], sz[1]), self.filter[0], padding=(self.filter_size[0], 0))\n            return F.conv2d(im1, self.filter[1], padding=(0,self.filter_size[1])).view(-1,sz[0],sz[1])\n        else:\n            raise NotImplementedError\n\n\nclass RandomBlur(TransformBase):\n    \"\"\" Blur the image, with a given probability, by applying a gaussian kernel with given sigma\"\"\"\n    def __init__(self, sigma, probability=0.1):\n        super().__init__()\n        self.probability = probability\n\n        if isinstance(sigma, (float, int)):\n            sigma = (sigma, sigma)\n        self.sigma = sigma\n        self.filter_size = [math.ceil(2*s) for s in self.sigma]\n        x_coord = [torch.arange(-sz, sz+1, dtype=torch.float32) for sz in self.filter_size]\n        self.filter = [torch.exp(-(x**2)/(2*s**2)) for x, s in zip(x_coord, self.sigma)]\n        self.filter[0] = self.filter[0].view(1,1,-1,1) / self.filter[0].sum()\n        
self.filter[1] = self.filter[1].view(1,1,1,-1) / self.filter[1].sum()\n\n    def roll(self):\n        return random.random() < self.probability\n\n    def transform_image(self, image, do_blur=None):\n        # TransformBase dispatches to 'transform_' + input name, so the blur is implemented as transform_image\n        if do_blur is None:\n            do_blur = False\n\n        if do_blur:\n            if torch.is_tensor(image):\n                sz = image.shape[1:]\n                im1 = F.conv2d(image.view(-1, 1, sz[0], sz[1]), self.filter[0], padding=(self.filter_size[0], 0))\n                return F.conv2d(im1, self.filter[1], padding=(0,self.filter_size[1])).view(-1,sz[0],sz[1])\n            else:\n                raise NotImplementedError\n        else:\n            return image\n\n\nclass RandomAffine(TransformBase):\n    \"\"\"Apply random affine transformation.\"\"\"\n    def __init__(self, p_flip=0.0, max_rotation=0.0, max_shear=0.0, max_scale=0.0, max_ar_factor=0.0,\n                 border_mode='constant', pad_amount=0):\n        super().__init__()\n        self.p_flip = p_flip\n        self.max_rotation = max_rotation\n        self.max_shear = max_shear\n        self.max_scale = max_scale\n        self.max_ar_factor = max_ar_factor\n\n        if border_mode == 'constant':\n            self.border_flag = cv.BORDER_CONSTANT\n        elif border_mode == 'replicate':\n            self.border_flag = cv.BORDER_REPLICATE\n        else:\n            raise Exception\n\n        self.pad_amount = pad_amount\n\n    def roll(self):\n        do_flip = random.random() < self.p_flip\n        theta = random.uniform(-self.max_rotation, self.max_rotation)\n\n        shear_x = random.uniform(-self.max_shear, self.max_shear)\n        shear_y = random.uniform(-self.max_shear, self.max_shear)\n\n        ar_factor = np.exp(random.uniform(-self.max_ar_factor, self.max_ar_factor))\n        scale_factor = np.exp(random.uniform(-self.max_scale, self.max_scale))\n\n        return do_flip, theta, (shear_x, shear_y), (scale_factor, scale_factor * ar_factor)\n\n    def _construct_t_mat(self, image_shape, do_flip, theta, shear_values, scale_factors):\n        im_h, im_w = image_shape\n        t_mat = np.identity(3)\n\n        if do_flip:\n            t_mat[0, 0] = -1.0\n            t_mat[0, 2] = im_w\n\n        t_rot = cv.getRotationMatrix2D((im_w * 0.5, im_h * 0.5), theta, 1.0)\n        t_rot = np.concatenate((t_rot, np.array([0.0, 0.0, 1.0]).reshape(1, 3)))\n\n        t_shear = np.array([[1.0, shear_values[0], -shear_values[0] * 0.5 * im_w],\n                            [shear_values[1], 1.0, -shear_values[1] * 0.5 * im_h],\n                            [0.0, 0.0, 1.0]])\n\n        t_scale = np.array([[scale_factors[0], 0.0, (1.0 - scale_factors[0]) * 0.5 * im_w],\n                            [0.0, scale_factors[1], (1.0 - scale_factors[1]) * 0.5 * im_h],\n                            [0.0, 0.0, 1.0]])\n\n        t_mat = t_scale @ t_rot @ t_shear @ t_mat\n\n        t_mat[0, 2] += self.pad_amount\n        t_mat[1, 2] += self.pad_amount\n\n        t_mat = t_mat[:2, :]\n\n        return t_mat\n\n    def transform_image(self, image, do_flip, theta, shear_values, scale_factors):\n        if torch.is_tensor(image):\n            raise Exception('Only supported for numpy input')\n\n        t_mat = self._construct_t_mat(image.shape[:2], do_flip, theta, shear_values, scale_factors)\n        output_sz = (image.shape[1] + 2*self.pad_amount, image.shape[0] + 2*self.pad_amount)\n        image_t = cv.warpAffine(image, t_mat, output_sz, flags=cv.INTER_LINEAR, 
borderMode=self.border_flag)\n\n        return image_t\n\n    def transform_coords(self, coords, image_shape, do_flip, theta, shear_values, scale_factors):\n        t_mat = self._construct_t_mat(image_shape, do_flip, theta, shear_values, scale_factors)\n\n        t_mat_tensor = torch.from_numpy(t_mat).float()\n\n        coords_xy1 = torch.stack((coords[1, :], coords[0, :], torch.ones_like(coords[1, :])))\n\n        coords_xy_t = torch.mm(t_mat_tensor, coords_xy1)\n\n        return coords_xy_t[[1, 0], :]\n\n    def transform_mask(self, mask, do_flip, theta, shear_values, scale_factors):\n        t_mat = self._construct_t_mat(mask.shape[:2], do_flip, theta, shear_values, scale_factors)\n        output_sz = (mask.shape[1] + 2*self.pad_amount, mask.shape[0] + 2*self.pad_amount)\n\n        mask_t = cv.warpAffine(mask.numpy(), t_mat, output_sz, flags=cv.INTER_NEAREST,\n                               borderMode=self.border_flag)\n\n        return torch.from_numpy(mask_t)\n
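\n\n'''A minimal, hypothetical usage sketch: composing a joint augmentation pipeline from the classes above. The mean/std values are the usual ImageNet statistics, shown for illustration.'''\nif __name__ == '__main__':\n    transform = Transform(ToTensorAndJitter(brightness_jitter=0.2),\n                          Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))\n    img = np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)  # stand-in H x W x 3 image\n    bbox = torch.Tensor([20., 30., 40., 50.])  # [x, y, w, h]\n    img_t, bbox_t = transform(image=img, bbox=bbox)\n    print(img_t.shape, bbox_t)  # torch.Size([3, 128, 128]) and the box (unchanged by these transforms)\n"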
  },
  {
    "path": "external/AR/ltr/dataset/__init__.py",
    "content": "from .lasot import Lasot\nfrom .got10k import Got10k\nfrom .tracking_net import TrackingNet\nfrom .imagenetvid import ImagenetVID\nfrom .coco import MSCOCO\nfrom .coco_seq import MSCOCOSeq\nfrom .youtubevos import YouTubeVOS\nfrom .davis import Davis\nfrom .lvis import LVIS\nfrom .ecssd import ECSSD\nfrom .msra10k import MSRA10k\nfrom .hku_is import HKUIS\nfrom .sbd import SBD\nfrom .synthetic_video import SyntheticVideo\nfrom .synthetic_video_blend import SyntheticVideoBlend\n"
  },
  {
    "path": "external/AR/ltr/dataset/base_image_dataset.py",
    "content": "import torch.utils.data\nfrom ltr.data.image_loader import jpeg4py_loader\n\n\nclass BaseImageDataset(torch.utils.data.Dataset):\n    \"\"\" Base class for image datasets \"\"\"\n\n    def __init__(self, name, root, image_loader=jpeg4py_loader):\n        \"\"\"\n        args:\n            root - The root path to the dataset\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n        \"\"\"\n        self.name = name\n        self.root = root\n        self.image_loader = image_loader\n\n        self.image_list = []     # Contains the list of sequences.\n        self.class_list = []\n\n    def __len__(self):\n        \"\"\" Returns size of the dataset\n        returns:\n            int - number of samples in the dataset\n        \"\"\"\n        return self.get_num_images()\n\n    def __getitem__(self, index):\n        \"\"\" Not to be used! Check get_frames() instead.\n        \"\"\"\n        return None\n\n    def get_name(self):\n        \"\"\" Name of the dataset\n\n        returns:\n            string - Name of the dataset\n        \"\"\"\n        raise NotImplementedError\n\n    def get_num_images(self):\n        \"\"\" Number of sequences in a dataset\n\n        returns:\n            int - number of sequences in the dataset.\"\"\"\n        return len(self.image_list)\n\n    def has_class_info(self):\n        return False\n\n    def get_class_name(self, image_id):\n        return None\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_class_list(self):\n        return self.class_list\n\n    def get_images_in_class(self, class_name):\n        raise NotImplementedError\n\n    def has_segmentation_info(self):\n        return False\n\n    def get_image_info(self, seq_id):\n        \"\"\" Returns information about a particular image,\n\n        args:\n            seq_id - index of the image\n\n        returns:\n            Dict\n            \"\"\"\n        raise NotImplementedError\n\n    def get_image(self, image_id, anno=None):\n        \"\"\" Get a image\n\n        args:\n            image_id      - index of image\n            anno(None)  - The annotation for the sequence (see get_sequence_info). If None, they will be loaded.\n\n        returns:\n            image -\n            anno -\n            dict - A dict containing meta information about the sequence, e.g. class of the target object.\n\n        \"\"\"\n        raise NotImplementedError\n\n"
  },
  {
    "path": "external/AR/ltr/dataset/base_video_dataset.py",
    "content": "import torch.utils.data\nfrom ltr.data.image_loader import jpeg4py_loader\n\n\nclass BaseVideoDataset(torch.utils.data.Dataset):\n    \"\"\" Base class for video datasets \"\"\"\n\n    def __init__(self, name, root, image_loader=jpeg4py_loader):\n        \"\"\"\n        args:\n            root - The root path to the dataset\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n        \"\"\"\n        self.name = name\n        self.root = root\n        self.image_loader = image_loader\n\n        self.sequence_list = []     # Contains the list of sequences.\n        self.class_list = []\n\n    def __len__(self):\n        \"\"\" Returns size of the dataset\n        returns:\n            int - number of samples in the dataset\n        \"\"\"\n        return self.get_num_sequences()\n\n    def __getitem__(self, index):\n        \"\"\" Not to be used! Check get_frames() instead.\n        \"\"\"\n        return None\n\n    def is_video_sequence(self):\n        \"\"\" Returns whether the dataset is a video dataset or an image dataset\n\n        returns:\n            bool - True if a video dataset\n        \"\"\"\n        return True\n\n    def is_synthetic_video_dataset(self):\n        \"\"\" Returns whether the dataset contains real videos or synthetic\n\n        returns:\n            bool - True if a video dataset\n        \"\"\"\n        return False\n\n    def get_name(self):\n        \"\"\" Name of the dataset\n\n        returns:\n            string - Name of the dataset\n        \"\"\"\n        raise NotImplementedError\n\n    def get_num_sequences(self):\n        \"\"\" Number of sequences in a dataset\n\n        returns:\n            int - number of sequences in the dataset.\"\"\"\n        return len(self.sequence_list)\n\n    def has_class_info(self):\n        return False\n\n    def has_occlusion_info(self):\n        return False\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_class_list(self):\n        return self.class_list\n\n    def get_sequences_in_class(self, class_name):\n        raise NotImplementedError\n\n    def has_segmentation_info(self):\n        return False\n\n    def get_sequence_info(self, seq_id):\n        \"\"\" Returns information about a particular sequences,\n\n        args:\n            seq_id - index of the sequence\n\n        returns:\n            Dict\n            \"\"\"\n        raise NotImplementedError\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        \"\"\" Get a set of frames from a particular sequence\n\n        args:\n            seq_id      - index of sequence\n            frame_ids   - a list of frame numbers\n            anno(None)  - The annotation for the sequence (see get_sequence_info). If None, they will be loaded.\n\n        returns:\n            list - List of frames corresponding to frame_ids\n            list - List of dicts for each frame\n            dict - A dict containing meta information about the sequence, e.g. class of the target object.\n\n        \"\"\"\n        raise NotImplementedError\n\n"
  },
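  {
    "path": "external/AR/ltr/dataset/_usage_examples/base_video_dataset_sketch.py",
    "content": "# Editor's illustrative sketch, not part of the upstream repo. It shows the\n# minimal contract a new dataset must satisfy when subclassing BaseVideoDataset:\n# implement get_name(), get_sequence_info() and get_frames(). Everything here\n# (ToyVideoDataset, the hard-coded two-frame sequence) is hypothetical;\n# BaseImageDataset is analogous with get_image_info()/get_image().\nimport torch\n\nfrom ltr.dataset.base_video_dataset import BaseVideoDataset\n\n\nclass ToyVideoDataset(BaseVideoDataset):\n    def __init__(self, root=''):\n        super().__init__('toy', root, image_loader=None)\n        self.sequence_list = ['seq_0']\n\n    def get_name(self):\n        return 'toy'\n\n    def get_sequence_info(self, seq_id):\n        # One (x, y, w, h) box per frame; a box is 'valid' if it is non-empty.\n        bbox = torch.tensor([[10., 20., 30., 40.], [12., 22., 30., 40.]])\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        return {'bbox': bbox, 'valid': valid, 'visible': valid.byte()}\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n        # Dummy black frames stand in for decoded images.\n        frame_list = [torch.zeros(64, 64, 3).numpy() for _ in frame_ids]\n        anno_frames = {k: [v[f_id, ...].clone() for f_id in frame_ids] for k, v in anno.items()}\n        return frame_list, anno_frames, None\n"
  },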
  {
    "path": "external/AR/ltr/dataset/coco.py",
    "content": "import os\nfrom .base_image_dataset import BaseImageDataset\nfrom ltr.data.image_loader import jpeg4py_loader\nimport torch\nfrom pycocotools.coco import COCO\nimport random\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\n\n\nclass MSCOCO(BaseImageDataset):\n    \"\"\" The COCO object detection dataset.\n\n    Publication:\n        Microsoft COCO: Common Objects in Context.\n        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n        ECCV, 2014\n        https://arxiv.org/pdf/1405.0312.pdf\n\n    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n    organized as follows.\n        - coco_root\n            - annotations\n                - instances_train2014.json\n                - instances_train2017.json\n            - images\n                - train2014\n                - train2017\n\n    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None,\n                 split=\"train\", version=\"2014\"):\n        \"\"\"\n        args:\n            root - path to coco root folder\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            min_area - Objects with area less than min_area are filtered out. Default is 0.0\n            split - 'train' or 'val'.\n            version - version of coco dataset (2014 or 2017)\n        \"\"\"\n\n        root = env_settings().coco_dir if root is None else root\n        super().__init__('COCO', root, image_loader)\n\n        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n        self.coco_set = COCO(self.anno_path)\n\n        self.cats = self.coco_set.cats\n\n        self.class_list = self.get_class_list()  # the parent class thing would happen in the sampler\n\n        self.image_list = self._get_image_list(min_area=min_area)\n\n        if data_fraction is not None:\n            self.image_list = random.sample(self.image_list, int(len(self.image_list) * data_fraction))\n        self.im_per_class = self._build_im_per_class()\n\n    def _get_image_list(self, min_area=None):\n        ann_list = list(self.coco_set.anns.keys())\n        image_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n        if min_area is not None:\n            image_list = [a for a in image_list if self.coco_set.anns[a]['area'] > min_area]\n\n        return image_list\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_name(self):\n        return 'coco'\n\n    def has_class_info(self):\n        return True\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_class_list(self):\n        class_list = []\n        for cat_id in self.cats.keys():\n            class_list.append(self.cats[cat_id]['name'])\n        return class_list\n\n    def _build_im_per_class(self):\n        im_per_class = {}\n        for i, im in enumerate(self.image_list):\n            
class_name = self.cats[self.coco_set.anns[im]['category_id']]['name']\n            if class_name not in im_per_class:\n                im_per_class[class_name] = [i]\n            else:\n                im_per_class[class_name].append(i)\n\n        return im_per_class\n\n    def get_images_in_class(self, class_name):\n        return self.im_per_class[class_name]\n\n    def get_image_info(self, im_id):\n        anno = self._get_anno(im_id)\n\n        bbox = torch.Tensor(anno['bbox']).view(4,)\n\n        mask = torch.Tensor(self.coco_set.annToMask(anno))\n\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_anno(self, im_id):\n        anno = self.coco_set.anns[self.image_list[im_id]]\n\n        return anno\n\n    def _get_image(self, im_id):\n        path = self.coco_set.loadImgs([self.coco_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name']\n        img = self.image_loader(os.path.join(self.img_pth, path))\n        return img\n\n    def get_meta_info(self, im_id):\n        try:\n            cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']]\n            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n                                       'motion_class': None,\n                                       'major_class': cat_dict_current['supercategory'],\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n    def get_class_name(self, im_id):\n        cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_image(self, image_id, anno=None):\n        frame = self._get_image(image_id)\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return frame, anno, object_meta\n"
  },
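  {
    "path": "external/AR/ltr/dataset/_usage_examples/coco_sketch.py",
    "content": "# Editor's illustrative sketch, not part of the upstream repo: typical MSCOCO\n# usage. The root below is a placeholder; when root is omitted the class falls\n# back to env_settings().coco_dir from ltr/admin/local.py.\nfrom ltr.dataset import MSCOCO\n\nif __name__ == '__main__':\n    dataset = MSCOCO(root='/data/coco', split='train', version='2017', min_area=250)\n    image, anno, meta = dataset.get_image(0)\n    # anno['bbox'] is a (4,) tensor in COCO [x, y, w, h] format and anno['mask']\n    # an (H, W) tensor; meta carries the class and supercategory names.\n    print(dataset.get_num_images(), meta['object_class_name'], anno['bbox'])\n"
  },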
  {
    "path": "external/AR/ltr/dataset/coco_seq.py",
    "content": "import os\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.image_loader import jpeg4py_loader\nimport torch\nimport random\nfrom pycocotools.coco import COCO\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\n\n\nclass MSCOCOSeq(BaseVideoDataset):\n    \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n    Publication:\n        Microsoft COCO: Common Objects in Context.\n        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n        ECCV, 2014\n        https://arxiv.org/pdf/1405.0312.pdf\n\n    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n    organized as follows.\n        - coco_root\n            - annotations\n                - instances_train2014.json\n                - instances_train2017.json\n            - images\n                - train2014\n                - train2017\n\n    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n        \"\"\"\n        args:\n            root - path to the coco dataset.\n            image_loader (default_image_loader) -  The function to read the images. If installed,\n                                                   jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n                                                   opencv's imread is used.\n            data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n                                  images  will be used\n            split - 'train' or 'val'.\n            version - version of coco dataset (2014 or 2017)\n        \"\"\"\n        root = env_settings().coco_dir if root is None else root\n        super().__init__('COCO', root, image_loader)\n\n        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n        # Load the COCO set.\n        self.coco_set = COCO(self.anno_path)\n\n        self.cats = self.coco_set.cats\n\n        self.class_list = self.get_class_list()\n\n        self.sequence_list = self._get_sequence_list()\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n        self.seq_per_class = self._build_seq_per_class()\n\n    def _get_sequence_list(self):\n        ann_list = list(self.coco_set.anns.keys())\n        seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n        return seq_list\n\n    def is_video_sequence(self):\n        return False\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_name(self):\n        return 'coco'\n\n    def has_class_info(self):\n        return True\n\n    def get_class_list(self):\n        class_list = []\n        for cat_id in self.cats.keys():\n            class_list.append(self.cats[cat_id]['name'])\n        return class_list\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def _build_seq_per_class(self):\n        seq_per_class = {}\n        for i, seq in enumerate(self.sequence_list):\n            class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n            if class_name not in seq_per_class:\n                seq_per_class[class_name] = [i]\n            else:\n                seq_per_class[class_name].append(i)\n\n        return seq_per_class\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def get_sequence_info(self, seq_id):\n        anno = self._get_anno(seq_id)\n\n        bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n        mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_anno(self, seq_id):\n        anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n        return anno\n\n    def _get_frames(self, seq_id):\n        path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n        img = self.image_loader(os.path.join(self.img_pth, path))\n        return img\n\n    def get_meta_info(self, seq_id):\n        try:\n            cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n                                       'motion_class': None,\n                                       'major_class': cat_dict_current['supercategory'],\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        except:\n            object_meta = 
OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n\n    def get_class_name(self, seq_id):\n        cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n        # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n        # list containing these replicated images.\n        frame = self._get_frames(seq_id)\n\n        frame_list = [frame.copy() for _ in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n        object_meta = self.get_meta_info(seq_id)\n\n        return frame_list, anno_frames, object_meta\n"
  },
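  {
    "path": "external/AR/ltr/dataset/_usage_examples/coco_seq_sketch.py",
    "content": "# Editor's illustrative sketch, not part of the upstream repo. Because\n# MSCOCOSeq treats each COCO image as a length-1 sequence, get_frames() simply\n# replicates the image once per requested frame id. The root is a placeholder.\nfrom ltr.dataset import MSCOCOSeq\n\nif __name__ == '__main__':\n    dataset = MSCOCOSeq(root='/data/coco', split='train', version='2017')\n    frames, anno_frames, meta = dataset.get_frames(seq_id=0, frame_ids=[0, 0, 0])\n    # Three copies of the same image, each paired with the same (4,) bbox.\n    assert len(frames) == 3 and len(anno_frames['bbox']) == 3\n"
  },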
  {
    "path": "external/AR/ltr/dataset/davis.py",
    "content": "from pathlib import Path\nfrom ltr.dataset.vos_base import VOSDatasetBase, VOSMeta\nfrom pytracking.evaluation import Sequence\nfrom ltr.admin.environment import env_settings\nfrom ltr.data.image_loader import jpeg4py_loader\n\n\nclass Davis(VOSDatasetBase):\n    \"\"\" The Davis VOS dataset\n\n        Publication:\n            A Benchmark Dataset and Evaluation Methodology for Video Object Segmentation\n            F. Perazzi, J. Pont-Tuset, B. McWilliams, L. Van Gool, M. Gross, and A. Sorkine-Hornung\n            CVPR, 2016\n            http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Perazzi_A_Benchmark_Dataset_CVPR_2016_paper.pdf\n\n        Download the dataset from https://davischallenge.org/davis2017/code.html\n        \"\"\"\n    def __init__(self, root=None, sequences=None, version='2017', split='train', multiobj=True,\n                 vis_threshold=10, image_loader=jpeg4py_loader):\n        \"\"\"\n        args:\n             root - Dataset root path. If unset, it uses the path in your local.py config.\n             sequences - List of sequence names. Limit to a subset of sequences if not None.\n             version - '2016' or '2017\n             split - Any name in DAVIS/ImageSets/<year>\n             multiobj - Whether the dataset will return all objects in a sequence or multiple sequences with one object\n                        in each.\n             vis_threshold - Minimum number of pixels required to consider a target object \"visible\".\n             image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n        \"\"\"\n        if version == '2017':\n            if split in ['train', 'val']:\n                root = env_settings().davis_dir if root is None else root\n            elif split in ['test-dev']:\n                root = env_settings().davis_testdev_dir if root is None else root\n            else:\n                raise Exception('Unknown split {}'.format(split))\n        else:\n            root = env_settings().davis16_dir if root is None else root\n            \n        super().__init__(name='DAVIS', root=Path(root), version=version, split=split, multiobj=multiobj,\n                         vis_threshold=vis_threshold, image_loader=image_loader)\n\n        dset_path = self.root\n        self._jpeg_path = dset_path / 'JPEGImages' / '480p'\n        self._anno_path = dset_path / 'Annotations' / '480p'\n\n        meta_path = dset_path / \"generated_meta.json\"\n        if meta_path.exists():\n            self.gmeta = VOSMeta(filename=meta_path)\n        else:\n            self.gmeta = VOSMeta.generate('DAVIS', self._jpeg_path, self._anno_path)\n            self.gmeta.save(meta_path)\n\n        if sequences is None:\n            if self.split != 'all':\n                fname = dset_path / 'ImageSets' / self.version / (self.split + '.txt')\n                sequences = open(fname).read().splitlines()\n            else:\n                sequences = [p for p in sorted(self._jpeg_path.glob(\"*\")) if p.is_dir()]\n\n        self.sequence_names = sequences\n        self._samples = []\n\n        for seq in sequences:\n            obj_ids = self.gmeta.get_obj_ids(seq)\n            if self.multiobj:  # Multiple objects per sample\n                self._samples.append((seq, obj_ids))\n            else:  # One object per sample\n                self._samples.extend([(seq, [obj_id]) for obj_id in obj_ids])\n\n        print(\"%s 
loaded.\" % self.get_name())\n\n    def _construct_sequence(self, sequence_info):\n\n        seq_name = sequence_info['sequence']\n        images, gt_labels, gt_bboxes = self.get_paths_and_bboxes(sequence_info)\n\n        return Sequence(name=seq_name, frames=images, dataset='DAVIS', ground_truth_rect=gt_bboxes,\n                        ground_truth_seg=gt_labels, object_ids=sequence_info['object_ids'],\n                        multiobj_mode=self.multiobj)\n"
  },
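  {
    "path": "external/AR/ltr/dataset/_usage_examples/davis_sketch.py",
    "content": "# Editor's illustrative sketch, not part of the upstream repo. With\n# multiobj=False, Davis expands every sequence into one sample per object id,\n# which is how single-object trackers consume a VOS dataset. The root is a\n# placeholder for env_settings().davis_dir.\nfrom ltr.dataset import Davis\n\nif __name__ == '__main__':\n    dataset = Davis(root='/data/DAVIS', version='2017', split='train', multiobj=False)\n    seq_name, obj_ids = dataset._samples[0]   # one (sequence, [object id]) pair\n    print(seq_name, obj_ids)\n"
  },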
  {
    "path": "external/AR/ltr/dataset/ecssd.py",
    "content": "import os\nfrom .base_image_dataset import BaseImageDataset\nfrom ltr.data.image_loader import jpeg4py_loader, opencv_loader, imread_indexed\nimport torch\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\n\n\nclass ECSSD(BaseImageDataset):\n    \"\"\"\n    Extended Complex Scene Saliency Dataset (ECSSD)\n\n    Publication:\n            Hierarchical Image Saliency Detection on Extended CSSD\n            Jianping Shi, Qiong Yan, Li Xu, Jiaya Jia\n            TPAMI, 2016\n            https://arxiv.org/pdf/1408.5418.pdf\n\n        Download the dataset from http://www.cse.cuhk.edu.hk/leojia/projects/hsaliency/dataset.html\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None):\n        \"\"\"\n        args:\n            root - path to ECSSD root folder\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            min_area - Objects with area less than min_area are filtered out. Default is 0.0\n        \"\"\"\n        root = env_settings().ecssd_dir if root is None else root\n        super().__init__('ECSSD', root, image_loader)\n\n        self.image_list = self._load_dataset(min_area=min_area)\n\n        if data_fraction is not None:\n            raise NotImplementedError\n\n    def _load_dataset(self, min_area=None):\n        images = []\n\n        for i in range(1, 1001):\n            a = imread_indexed(os.path.join(self.root, 'ground_truth_mask', '{:04d}.png'.format(i)))\n\n            if min_area is None or (a > 0).sum() > min_area:\n                images.append(i)\n\n        return images\n\n    def get_name(self):\n        return 'ecssd'\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_image_info(self, im_id):\n        mask = imread_indexed(os.path.join(self.root, 'ground_truth_mask', '{:04d}.png'.format(self.image_list[im_id])))\n\n        mask = torch.Tensor(mask == 255)\n        bbox = masks_to_bboxes(mask, fmt='t').view(4,)\n\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def get_meta_info(self, im_id):\n        object_meta = OrderedDict({'object_class_name': None,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return object_meta\n\n    def get_image(self, image_id, anno=None):\n        frame = self.image_loader(os.path.join(self.root, 'images', '{:04d}.jpg'.format(self.image_list[image_id])))\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return frame, anno, object_meta\n"
  },
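  {
    "path": "external/AR/ltr/dataset/_usage_examples/ecssd_sketch.py",
    "content": "# Editor's illustrative sketch, not part of the upstream repo. The saliency\n# datasets (ECSSD here; HKUIS and MSRA10k below follow the same pattern) expose\n# one binary mask per image and derive the box from it via masks_to_bboxes.\n# The root is a placeholder for env_settings().ecssd_dir.\nfrom ltr.dataset import ECSSD\n\nif __name__ == '__main__':\n    dataset = ECSSD(root='/data/ECSSD', min_area=100)  # drop tiny objects\n    image, anno, meta = dataset.get_image(0)\n    print(anno['bbox'], anno['mask'].shape)  # (4,) box and (H, W) mask\n"
  },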
  {
    "path": "external/AR/ltr/dataset/got10k.py",
    "content": "import os\nimport os.path\nimport numpy as np\nimport torch\nimport csv\nimport pandas\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.image_loader import jpeg4py_loader\nfrom ltr.admin.environment import env_settings\n\n\nclass Got10k(BaseVideoDataset):\n    \"\"\" GOT-10k dataset.\n\n    Publication:\n        GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n        Lianghua Huang, Xin Zhao, and Kaiqi Huang\n        arXiv:1810.11981, 2018\n        https://arxiv.org/pdf/1810.11981.pdf\n\n    Download dataset from http://got-10k.aitestunion.com/downloads\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n                    not NOT the official got-10k validation split. To use the official validation split, provide that as\n                    the root folder instead.\n            seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n                        options can be used at the same time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().got10k_dir if root is None else root\n        super().__init__('GOT10k', root, image_loader)\n\n        # all folders inside the root\n        self.sequence_list = self._get_sequence_list()\n\n        # seq_id is the index of the folder inside the got10k root path\n        if split is not None:\n            if seq_ids is not None:\n                raise ValueError('Cannot set both split_name and seq_ids.')\n            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\n            elif split == 'val':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\n            elif split == 'vottrain':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\n            elif split == 'votval':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n        elif seq_ids is None:\n            seq_ids = list(range(0, len(self.sequence_list)))\n\n        self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        self.sequence_meta_info = self._load_meta_info()\n        self.seq_per_class = self._build_seq_per_class()\n\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def get_name(self):\n        return 'got10k'\n\n    
def has_class_info(self):\n        return True\n\n    def has_occlusion_info(self):\n        return True\n\n    def _load_meta_info(self):\n        sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\n        return sequence_meta_info\n\n    def _read_meta(self, seq_path):\n        try:\n            with open(os.path.join(seq_path, 'meta_info.ini')) as f:\n                meta_info = f.readlines()\n            object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\n                                       'motion_class': meta_info[6].split(': ')[-1][:-1],\n                                       'major_class': meta_info[7].split(': ')[-1][:-1],\n                                       'root_class': meta_info[8].split(': ')[-1][:-1],\n                                       'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n    def _build_seq_per_class(self):\n        seq_per_class = {}\n\n        for i, s in enumerate(self.sequence_list):\n            object_class = self.sequence_meta_info[s]['object_class_name']\n            if object_class in seq_per_class:\n                seq_per_class[object_class].append(i)\n            else:\n                seq_per_class[object_class] = [i]\n\n        return seq_per_class\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _get_sequence_list(self):\n        with open(os.path.join(self.root, 'list.txt')) as f:\n            dir_list = list(csv.reader(f))\n        dir_list = [dir_name[0] for dir_name in dir_list]\n        return dir_list\n\n    def _read_bb_anno(self, seq_path):\n        bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n        return torch.tensor(gt)\n\n    def _read_target_visible(self, seq_path):\n        # Read full occlusion and out_of_view\n        occlusion_file = os.path.join(seq_path, \"absence.label\")\n        cover_file = os.path.join(seq_path, \"cover.label\")\n\n        with open(occlusion_file, 'r', newline='') as f:\n            occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n        with open(cover_file, 'r', newline='') as f:\n            cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n\n        target_visible = ~occlusion & (cover>0).byte()\n\n        visible_ratio = cover.float() / 8\n        return target_visible, visible_ratio\n\n    def _get_sequence_path(self, seq_id):\n        return os.path.join(self.root, self.sequence_list[seq_id])\n\n    def get_sequence_info(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        bbox = self._read_bb_anno(seq_path)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible, visible_ratio = self._read_target_visible(seq_path)\n        visible = visible & valid.byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n    def _get_frame_path(self, seq_path, frame_id):\n        return os.path.join(seq_path, 
'{:08}.jpg'.format(frame_id+1))    # frames start from 1\n\n    def _get_frame(self, seq_path, frame_id):\n        return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n    def get_class_name(self, seq_id):\n        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n        return obj_meta['object_class_name']\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        seq_path = self._get_sequence_path(seq_id)\n        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        return frame_list, anno_frames, obj_meta\n"
  },
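  {
    "path": "external/AR/ltr/dataset/_usage_examples/got10k_sketch.py",
    "content": "# Editor's illustrative sketch, not part of the upstream repo. 'split' and\n# 'seq_ids' are mutually exclusive, and root must point at the 'train' folder\n# of GOT-10k (placeholder path below).\nfrom ltr.dataset import Got10k\n\nif __name__ == '__main__':\n    train = Got10k(root='/data/got10k/train', split='vottrain')\n    info = train.get_sequence_info(0)\n    # 'visible_ratio' is the 8-level cover.label score scaled to [0, 1];\n    # 'visible' additionally requires the absence (full occlusion) flag to be 0.\n    print(info['bbox'].shape, info['visible_ratio'][:5])\n"
  },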
  {
    "path": "external/AR/ltr/dataset/hku_is.py",
    "content": "import os\nfrom .base_image_dataset import BaseImageDataset\nfrom ltr.data.image_loader import jpeg4py_loader, opencv_loader, imread_indexed\nimport torch\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\n\n\nclass HKUIS(BaseImageDataset):\n    \"\"\"\n    HKU-IS salient object detection dataset\n\n    Publication:\n        Visual saliency based on multiscale deep features\n        Guanbin Li and Yizhou Yu\n        CVPR, 2015\n        https://arxiv.org/pdf/1503.08663.pdf\n\n    Dowload dataset from https://sites.google.com/site/ligb86/hkuis\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None):\n        \"\"\"\n        args:\n            root - path to HKU-IS root folder\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            min_area - Objects with area less than min_area are filtered out. Default is 0.0\n        \"\"\"\n        root = env_settings().hkuis_dir if root is None else root\n        super().__init__('HKUIS', root, image_loader)\n\n        self.image_list, self.anno_list = self._load_dataset(min_area=min_area)\n\n        if data_fraction is not None:\n            raise NotImplementedError\n\n    def _load_dataset(self, min_area=None):\n        files_list = os.listdir(os.path.join(self.root, 'imgs'))\n        image_list = [f[:-4] for f in files_list]\n\n        images = []\n        annos = []\n\n        for f in image_list:\n            a = imread_indexed(os.path.join(self.root, 'gt', '{}.png'.format(f)))\n\n            if min_area is None or (a > 0).sum() > min_area:\n                im = opencv_loader(os.path.join(self.root, 'imgs', '{}.png'.format(f)))\n                images.append(im)\n                annos.append(a)\n\n        return images, annos\n\n    def get_name(self):\n        return 'hku-is'\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_image_info(self, im_id):\n        mask = self.anno_list[im_id]\n        mask = torch.Tensor(mask == 255)\n        bbox = masks_to_bboxes(mask, fmt='t').view(4,)\n\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def get_meta_info(self, im_id):\n        object_meta = OrderedDict({'object_class_name': None,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return object_meta\n\n    def get_image(self, image_id, anno=None):\n        frame = self.image_list[image_id]\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return frame, anno, object_meta\n"
  },
  {
    "path": "external/AR/ltr/dataset/imagenetvid.py",
    "content": "import os\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.image_loader import default_image_loader\nimport xml.etree.ElementTree as ET\nimport json\nimport torch\nimport random\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\n\n\ndef get_target_to_image_ratio(seq):\n    anno = torch.Tensor(seq['anno'])\n    img_sz = torch.Tensor(seq['image_size'])\n    return (anno[0, 2:4].prod() / (img_sz.prod())).sqrt()\n\n\nclass ImagenetVID(BaseVideoDataset):\n    \"\"\" Imagenet VID dataset.\n\n    Publication:\n        ImageNet Large Scale Visual Recognition Challenge\n        Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n        Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\n        IJCV, 2015\n        https://arxiv.org/pdf/1409.0575.pdf\n\n    Download the dataset from http://image-net.org/\n    \"\"\"\n    def __init__(self, root=None, image_loader=default_image_loader, min_length=0, max_target_area=1):\n        \"\"\"\n        args:\n            root - path to the imagenet vid dataset.\n            image_loader (default_image_loader) -  The function to read the images. If installed,\n                                                   jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n                                                   opencv's imread is used.\n            min_length - Minimum allowed sequence length.\n            max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\n                                which cover complete image.\n        \"\"\"\n        root = env_settings().imagenet_dir if root is None else root\n        super().__init__(root, image_loader)\n\n        cache_file = os.path.join(root, 'cache.json')\n        if os.path.isfile(cache_file):\n            # If available, load the pre-processed cache file containing meta-info for each sequence\n            with open(cache_file, 'r') as f:\n                sequence_list_dict = json.load(f)\n\n            self.sequence_list = sequence_list_dict\n        else:\n            # Else process the imagenet annotations and generate the cache file\n            self.sequence_list = self._process_anno(root)\n\n            with open(cache_file, 'w') as f:\n                json.dump(self.sequence_list, f)\n\n        # Filter the sequences based on min_length and max_target_area in the first frame\n        self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n                              get_target_to_image_ratio(x) < max_target_area]\n\n    def get_name(self):\n        return 'imagenetvid'\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def get_sequence_info(self, seq_id):\n        bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n        valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n        visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n        return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n    def _get_frame(self, sequence, frame_id):\n        set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n        vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n        frame_number = frame_id + sequence['start_frame']\n\n        frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\n                                  
'{:06d}.JPEG'.format(frame_number))\n        return self.image_loader(frame_path)\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        sequence = self.sequence_list[seq_id]\n\n        frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        # Create anno dict\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        # added the class info to the meta info\n        object_meta = OrderedDict({'object_class': sequence['class_name'],\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n\n    def _process_anno(self, root):\n        # Builds individual tracklets\n        base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train')\n\n        all_sequences = []\n        for set in sorted(os.listdir(base_vid_anno_path)):\n            set_id = int(set.split('_')[-1])\n            for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\n\n                vid_id = int(vid.split('_')[-1])\n                anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\n\n                frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\n                image_size = [int(frame1_anno.find('size/width').text), int(frame1_anno.find('size/height').text)]\n\n                objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\n                           for f in anno_files]\n\n                tracklets = {}\n\n                # Find all tracklets along with start frame\n                for f_id, all_targets in enumerate(objects):\n                    for target in all_targets:\n                        tracklet_id = target.find('trackid').text\n                        if tracklet_id not in tracklets:\n                            tracklets[tracklet_id] = f_id\n\n                for tracklet_id, tracklet_start in tracklets.items():\n                    tracklet_anno = []\n                    target_visible = []\n                    class_name_id = None\n\n                    for f_id in range(tracklet_start, len(objects)):\n                        found = False\n                        for target in objects[f_id]:\n                            if target.find('trackid').text == tracklet_id:\n                                if not class_name_id:\n                                    class_name_id = target.find('name').text\n                                x1 = int(target.find('bndbox/xmin').text)\n                                y1 = int(target.find('bndbox/ymin').text)\n                                x2 = int(target.find('bndbox/xmax').text)\n                                y2 = int(target.find('bndbox/ymax').text)\n\n                                tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\n                                target_visible.append(target.find('occluded').text == '0')\n\n                                found = True\n                                break\n                        if not found:\n                            break\n\n                    new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\n                  
                  'start_frame': tracklet_start, 'anno': tracklet_anno,\n                                    'target_visible': target_visible, 'image_size': image_size}\n                    all_sequences.append(new_sequence)\n\n        return all_sequences\n"
  },
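  {
    "path": "external/AR/ltr/dataset/_usage_examples/imagenetvid_sketch.py",
    "content": "# Editor's illustrative sketch, not part of the upstream repo. The first\n# instantiation walks Annotations/VID/train, builds one entry per tracklet and\n# caches them in <root>/cache.json; later runs only load the cache. The root\n# is a placeholder for env_settings().imagenet_dir.\nfrom ltr.dataset import ImagenetVID\n\nif __name__ == '__main__':\n    dataset = ImagenetVID(root='/data/ILSVRC2015', min_length=10, max_target_area=0.25)\n    frames, anno_frames, meta = dataset.get_frames(0, [0, 5, 10])\n    print(dataset.get_num_sequences(), meta['object_class'])\n"
  },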
  {
    "path": "external/AR/ltr/dataset/lasot.py",
    "content": "import os\nimport os.path\nimport torch\nimport numpy as np\nimport pandas\nimport csv\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.image_loader import jpeg4py_loader\nfrom ltr.admin.environment import env_settings\n\n\nclass Lasot(BaseVideoDataset):\n    \"\"\" LaSOT dataset.\n\n    Publication:\n        LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n        Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n        CVPR, 2019\n        https://arxiv.org/pdf/1809.07845.pdf\n\n    Download the dataset from https://cis.temple.edu/lasot/download.html\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the lasot dataset.\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n                    videos with subscripts -1, -3, and -5 from each class will be used for training.\n            split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n                    vid_ids or split option can be used at a time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().lasot_dir if root is None else root\n        super().__init__('LaSOT', root, image_loader)\n\n        # Keep a list of all classes\n        self.class_list = [f for f in os.listdir(self.root)]\n        self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n        self.sequence_list = self._build_sequence_list(vid_ids, split)\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        self.seq_per_class = self._build_class_list()\n\n    def _build_sequence_list(self, vid_ids=None, split=None):\n        if split is not None:\n            if vid_ids is not None:\n                raise ValueError('Cannot set both split_name and vid_ids.')\n            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n        elif vid_ids is not None:\n            sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n        else:\n            raise ValueError('Set either split_name or vid_ids.')\n\n        return sequence_list\n\n    def _build_class_list(self):\n        seq_per_class = {}\n        for seq_id, seq_name in enumerate(self.sequence_list):\n            class_name = seq_name.split('-')[0]\n            if class_name in seq_per_class:\n                seq_per_class[class_name].append(seq_id)\n            else:\n                seq_per_class[class_name] = [seq_id]\n\n        return seq_per_class\n\n    def get_name(self):\n        return 'lasot'\n\n    def 
has_class_info(self):\n        return True\n\n    def has_occlusion_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _read_bb_anno(self, seq_path):\n        bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n        return torch.tensor(gt)\n\n    def _read_target_visible(self, seq_path):\n        # Read full occlusion and out_of_view\n        occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\n        out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n        with open(occlusion_file, 'r', newline='') as f:\n            occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n        with open(out_of_view_file, 'r') as f:\n            out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n\n        target_visible = ~occlusion & ~out_of_view\n\n        return target_visible\n\n    def _get_sequence_path(self, seq_id):\n        seq_name = self.sequence_list[seq_id]\n        class_name = seq_name.split('-')[0]\n        vid_id = seq_name.split('-')[1]\n\n        return os.path.join(self.root, class_name, class_name + '-' + vid_id)\n\n    def get_sequence_info(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        bbox = self._read_bb_anno(seq_path)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = self._read_target_visible(seq_path) & valid.byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def _get_frame_path(self, seq_path, frame_id):\n        return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1))    # frames start from 1\n\n    def _get_frame(self, seq_path, frame_id):\n        return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n    def _get_class(self, seq_path):\n        raw_class = seq_path.split('/')[-2]\n        return raw_class\n\n    def get_class_name(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        obj_class = self._get_class(seq_path)\n\n        return obj_class\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        seq_path = self._get_sequence_path(seq_id)\n\n        obj_class = self._get_class(seq_path)\n        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        object_meta = OrderedDict({'object_class_name': obj_class,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n"
  },
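  {
    "path": "external/AR/ltr/dataset/_usage_examples/lasot_sketch.py",
    "content": "# Editor's illustrative sketch, not part of the upstream repo. LaSOT can be\n# indexed either by the official protocol-II train split or by explicit\n# per-class video ids, but not both. The root is a placeholder for\n# env_settings().lasot_dir.\nfrom ltr.dataset import Lasot\n\nif __name__ == '__main__':\n    train = Lasot(root='/data/lasot', split='train')\n    # Alternative: Lasot(root='/data/lasot', vid_ids=[1, 3, 5]) picks videos\n    # <class>-1, <class>-3 and <class>-5 from every class.\n    frames, anno_frames, meta = train.get_frames(0, [0, 50, 100])\n    print(train.get_num_sequences(), meta['object_class_name'])\n"
  },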
  {
    "path": "external/AR/ltr/dataset/lvis.py",
    "content": "import os\nfrom .base_image_dataset import BaseImageDataset\nfrom ltr.data.image_loader import jpeg4py_loader_w_failsafe\nimport torch\nimport random\nimport lvis.lvis as lvis_pk\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\n\n\nclass LVIS(BaseImageDataset):\n    \"\"\" The LVIS object detection dataset\n\n    Publication:\n        LVIS: A Dataset for Large Vocabulary Instance Segmentation\n        Agrim Gupta, Piotr Dollár, and Ross Girshick\n        CVPR, 2019\n        https://arxiv.org/pdf/1908.03195.pdf\n\n    Download the images along with annotations from https://www.lvisdataset.org/dataset. The root folder should be\n    organized as follows.\n        - lvis_root\n            - annotations\n                - lvis_v0.5_train.json\n                - lvis_v0.5_val.json\n            - images\n                - val2017\n                - train2017\n\n    Note: You also have to install the lvis Python API from https://github.com/lvis-dataset/lvis-api\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader_w_failsafe, data_fraction=None, min_area=None, split=\"train\"):\n        \"\"\"\n        args:\n            root - path to lvis root folder\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            min_area - Objects with area less than min_area are filtered out. Default is 0.0\n            split - 'train' or 'val'.\n        \"\"\"\n        root = env_settings().lvis_dir if root is None else root\n        super().__init__('LVIS', root, image_loader)\n\n        self.img_pth = os.path.join(root, 'images', f'{split}2017/')\n        self.anno_path = os.path.join(root, 'annotations', f'lvis_v0.5_{split}.json')\n\n        # Load the LVIS set.\n        self.lvis_set = lvis_pk.LVIS(self.anno_path)\n\n        self.cats = self.lvis_set.cats\n\n        self.class_list = self.get_class_list()     # the parent class thing would happen in the sampler\n\n        self.image_list = self._get_image_list(min_area=min_area)\n\n        if data_fraction is not None:\n            self.image_list = random.sample(self.image_list, int(len(self.image_list) * data_fraction))\n        self.im_per_class = self._build_im_per_class()\n\n    def _get_image_list(self, min_area=None):\n        im_list = list(self.lvis_set.anns.keys())  # No 'iscrowd' information in LVIS\n\n        if min_area is not None:\n            im_list = [s for s in im_list if self.lvis_set.anns[s]['area'] > min_area]\n\n        return im_list\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_name(self):\n        return 'lvis'\n\n    def has_class_info(self):\n        return True\n\n    def get_class_list(self):\n        class_list = []\n        for cat_id in self.cats.keys():\n            class_list.append(self.cats[cat_id]['name'])\n        return class_list\n\n    def has_segmentation_info(self):\n        return True\n\n    def _build_im_per_class(self):\n        im_per_class = {}\n        for i, im in enumerate(self.image_list):\n            class_name = self.cats[self.lvis_set.anns[im]['category_id']]['name']\n            if class_name not in im_per_class:\n                im_per_class[class_name] = [i]\n            else:\n                im_per_class[class_name].append(i)\n\n        return 
im_per_class\n\n    def get_images_in_class(self, class_name):\n        return self.im_per_class[class_name]\n\n    def get_image_info(self, im_id):\n        anno = self._get_anno(im_id)\n\n        bbox = torch.Tensor(anno['bbox']).view(4,)\n\n        mask = torch.Tensor(self.lvis_set.ann_to_mask(anno))\n\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_anno(self, im_id):\n        anno = self.lvis_set.anns[self.image_list[im_id]]\n\n        return anno\n\n    def _get_image(self, im_id):\n        path = self.lvis_set.load_imgs([self.lvis_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name']\n        img = self.image_loader(os.path.join(self.img_pth, path))\n        return img\n\n    def get_meta_info(self, im_id):\n        try:\n            cat_dict_current = self.cats[self.lvis_set.anns[self.image_list[im_id]]['category_id']]\n            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n                                       'motion_class': None,\n                                       'major_class': None,  # No 'supercategory' information available in LVIS\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n    def get_class_name(self, im_id):\n        cat_dict_current = self.cats[self.lvis_set.anns[self.image_list[im_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_image(self, image_id, anno=None):\n        frame = self._get_image(image_id)\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return frame, anno, object_meta\n"
  },
  {
    "path": "external/AR/ltr/dataset/msra10k.py",
    "content": "import os\nfrom .base_image_dataset import BaseImageDataset\nfrom ltr.data.image_loader import jpeg4py_loader, imread_indexed\nimport torch\nfrom collections import OrderedDict\nfrom ltr.admin.environment import env_settings\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\n\n\nclass MSRA10k(BaseImageDataset):\n    \"\"\"\n    MSRA10k salient object detection dataset\n\n    Publication:\n        Global contrast based salient region detection\n        Ming-Ming Cheng, Niloy J. Mitra, Xiaolei Huang, Philip H. S. Torr, and Shi-Min Hu\n        TPAMI, 2015\n        https://mmcheng.net/mftp/Papers/SaliencyTPAMI.pdf\n\n    Download dataset from https://mmcheng.net/msra10k/\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None):\n        \"\"\"\n        args:\n            root - path to MSRA10k root folder\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            min_area - Objects with area less than min_area are filtered out. Default is 0.0\n        \"\"\"\n        root = env_settings().msra10k_dir if root is None else root\n        super().__init__('MSRA10k', root, image_loader)\n\n        self.image_list = self._load_dataset(min_area=min_area)\n\n        if data_fraction is not None:\n            raise NotImplementedError\n\n    def _load_dataset(self, min_area=None):\n        files_list = os.listdir(os.path.join(self.root, 'Imgs'))\n        image_list = [f[:-4] for f in files_list if f[-3:] == 'jpg']\n\n        images = []\n\n        for f in image_list:\n            a = imread_indexed(os.path.join(self.root, 'Imgs', '{}.png'.format(f)))\n\n            if min_area is None or (a > 0).sum() > min_area:\n                images.append(f)\n\n        return images\n\n    def get_name(self):\n        return 'msra10k'\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_image_info(self, im_id):\n        mask = imread_indexed(os.path.join(self.root, 'Imgs', '{}.png'.format(self.image_list[im_id])))\n        mask = torch.Tensor(mask == 255)\n        bbox = masks_to_bboxes(mask, fmt='t').view(4,)\n\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def get_meta_info(self, im_id):\n        object_meta = OrderedDict({'object_class_name': None,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return object_meta\n\n    def get_image(self, image_id, anno=None):\n        frame = self.image_loader(os.path.join(self.root, 'Imgs', '{}.jpg'.format(self.image_list[image_id])))\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return frame, anno, object_meta\n"
  },
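  {
    "path": "external/AR/ltr/dataset/_examples/mask_to_bbox_sketch.py",
    "content": "\"\"\"Illustrative sketch, NOT part of the original codebase.\n\nShows the mask -> bounding-box step that MSRA10k.get_image_info (and the\nother image datasets here) delegate to ltr.data.bounding_box_utils.masks_to_bboxes.\nThe helper below is a minimal stand-in written for this example; the real\nimplementation may differ in details.\n\"\"\"\nimport torch\n\n\ndef mask_to_bbox_xywh(mask: torch.Tensor) -> torch.Tensor:\n    \"\"\"Return [x, y, w, h] for the nonzero region of a (H, W) binary mask.\"\"\"\n    ys, xs = torch.nonzero(mask, as_tuple=True)\n    if len(xs) == 0:\n        return torch.zeros(4)\n    x1, x2 = xs.min().item(), xs.max().item()\n    y1, y2 = ys.min().item(), ys.max().item()\n    return torch.tensor([x1, y1, x2 - x1 + 1, y2 - y1 + 1], dtype=torch.float)\n\n\nif __name__ == '__main__':\n    mask = torch.zeros(8, 8)\n    mask[2:5, 3:7] = 1  # a 3x4 blob\n    bbox = mask_to_bbox_xywh(mask)\n    print(bbox)  # tensor([3., 2., 4., 3.])\n\n    # The datasets then mark a sample valid when w > 0 and h > 0:\n    valid = (bbox[2] > 0) & (bbox[3] > 0)\n    print(bool(valid))  # True\n"
  },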
  {
    "path": "external/AR/ltr/dataset/sbd.py",
    "content": "from .base_image_dataset import BaseImageDataset\nfrom ltr.data.image_loader import jpeg4py_loader_w_failsafe\nimport torch\nfrom collections import OrderedDict\nimport os\nfrom scipy.io import loadmat\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\n\nfrom ltr.admin.environment import env_settings\n\n\nclass SBD(BaseImageDataset):\n    \"\"\"\n    Semantic Boundaries Dataset and Benchmark (SBD)\n\n    Publication:\n        Semantic contours from inverse detectors\n        Bharath Hariharan, Pablo Arbelaez, Lubomir Bourdev, Subhransu Maji and Jitendra Malik\n        ICCV, 2011\n        http://home.bharathh.info/pubs/pdfs/BharathICCV2011.pdf\n\n    Download dataset from: http://home.bharathh.info/pubs/codes/SBD/download.html\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader_w_failsafe, data_fraction=None, split=\"train\"):\n        \"\"\"\n        args:\n            root - path to SBD root folder\n            image_loader - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                           is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            split - dataset split (\"train\", \"train_noval\", \"val\")\n        \"\"\"\n        root = env_settings().sbd_dir if root is None else root\n        super().__init__('SBD', root, image_loader)\n\n        assert split in [\"train\", \"train_noval\", \"val\"]\n\n        self.root = root\n\n        self.image_path_list, self.anno_file_list = self._load_dataset(split)\n\n        # Load the annotation mat files\n        anno_list = [loadmat(a) for a in self.anno_file_list]\n\n        self.image_list = self._construct_image_list(anno_list)\n        if data_fraction is not None:\n            raise NotImplementedError\n\n    def _load_dataset(self, split):\n        split_f = os.path.join(self.root, split.rstrip('\\n') + '.txt')\n\n        with open(os.path.join(split_f), \"r\") as f:\n            file_names = [x.strip() for x in f.readlines()]\n\n        image_list = [os.path.join(self.root, 'img', x + \".jpg\") for x in file_names]\n        anno_list = [os.path.join(self.root, 'inst', x + \".mat\") for x in file_names]\n\n        assert (len(image_list) == len(anno_list))\n\n        return image_list, anno_list\n\n    def _get_mask_from_mat(self, mat):\n        return torch.tensor(mat['GTinst'][0]['Segmentation'][0])\n\n    def _construct_image_list(self, anno_list):\n        image_list = []\n\n        for im_id, a in enumerate(anno_list):\n            mask = self._get_mask_from_mat(a)\n            for instance_id in range(1, mask.max().item() + 1):\n                image_list.append((im_id, instance_id))\n\n        return image_list\n\n    def get_name(self):\n        return 'sbd'\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_image_info(self, im_id):\n        image_id, instance_id = self.image_list[im_id]\n        anno_mat = loadmat(self.anno_file_list[image_id])\n        mask = self._get_mask_from_mat(anno_mat)\n\n        mask = (mask == instance_id).float()\n        bbox = masks_to_bboxes(mask, fmt='t')\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_image(self, im_id):\n        image_id, _ = self.image_list[im_id]\n\n        img = self.image_loader(self.image_path_list[image_id])\n        return img\n\n    def get_meta_info(self, im_id):\n        object_meta = OrderedDict({'object_class_name': None,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n        return object_meta\n\n    def get_image(self, image_id, anno=None):\n        image = self._get_image(image_id)\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return image, anno, object_meta\n"
  },
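  {
    "path": "external/AR/ltr/dataset/_examples/sbd_instance_list_sketch.py",
    "content": "\"\"\"Illustrative sketch, NOT part of the original codebase.\n\nSBD stores one instance-segmentation mask per image, with instance ids\n1..N encoded as pixel values. SBD._construct_image_list flattens this into\n(image_id, instance_id) pairs so that every instance becomes one sample.\nThis toy version runs on in-memory masks instead of the .mat files.\n\"\"\"\nimport torch\n\n\ndef construct_image_list(masks):\n    \"\"\"masks: list of (H, W) integer tensors with instance ids 1..N.\"\"\"\n    image_list = []\n    for im_id, mask in enumerate(masks):\n        for instance_id in range(1, mask.max().item() + 1):\n            image_list.append((im_id, instance_id))\n    return image_list\n\n\nif __name__ == '__main__':\n    m0 = torch.zeros(4, 4, dtype=torch.long)\n    m0[0, 0] = 1\n    m0[3, 3] = 2  # image 0 has two instances\n    m1 = torch.zeros(4, 4, dtype=torch.long)\n    m1[1, 1] = 1  # image 1 has one instance\n    print(construct_image_list([m0, m1]))  # [(0, 1), (0, 2), (1, 1)]\n"
  },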
  {
    "path": "external/AR/ltr/dataset/synthetic_video.py",
    "content": "from collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\n\n\nclass SyntheticVideo(BaseVideoDataset):\n    \"\"\"\n    Create a synthetic video dataset from an image dataset by applying a random transformation to images.\n    \"\"\"\n    def __init__(self, base_image_dataset, transform=None):\n        \"\"\"\n        args:\n            base_image_dataset - Image dataset used for generating synthetic videos\n            transform - Set of transforms to be applied to the images to generate synthetic video.\n        \"\"\"\n        super().__init__(base_image_dataset.get_name() + '_syn_vid', base_image_dataset.root,\n                         base_image_dataset.image_loader)\n        self.base_image_dataset = base_image_dataset\n        self.transform = transform\n\n    def get_name(self):\n        return self.name\n\n    def is_video_sequence(self):\n        return False\n\n    def has_class_info(self):\n        return self.base_image_dataset.has_class_info()\n\n    def has_occlusion_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return self.base_image_dataset.get_num_images()\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_sequences_in_class(self, class_name):\n        return self.base_image_dataset.get_images_in_class(class_name)\n\n    def get_sequence_info(self, seq_id):\n        image_info = self.base_image_dataset.get_image_info(seq_id)\n\n        image_info = {k: v.unsqueeze(0) for k, v in image_info.items()}\n        return image_info\n\n    def get_class_name(self, seq_id):\n        return self.base_image_dataset.get_class_name(seq_id)\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        frame, anno, object_meta = self.base_image_dataset.get_image(seq_id, anno=anno)\n\n        frame_list = [frame.copy() for _ in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[0].clone() for f_id in frame_ids]\n\n        if self.transform is not None:\n            if 'mask' in anno_frames.keys():\n                frame_list, anno_frames['bbox'], anno_frames['mask'] = self.transform(image=frame_list,\n                                                                                      bbox=anno_frames['bbox'],\n                                                                                      mask=anno_frames['mask'],\n                                                                                      joint=False)\n\n                anno_frames['bbox'] = [masks_to_bboxes(m, fmt='t') for m in anno_frames['mask']]\n            else:\n                frame_list, anno_frames['bbox'] = self.transform(image=frame_list,\n                                                                 bbox=anno_frames['bbox'],\n                                                                 joint=False)\n\n        object_meta = OrderedDict({'object_class_name': self.get_class_name(seq_id),\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n"
  },
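  {
    "path": "external/AR/ltr/dataset/_examples/synthetic_video_usage_sketch.py",
    "content": "\"\"\"Illustrative usage sketch, NOT part of the original codebase.\n\nShows how SyntheticVideo is meant to be driven: the samplers first call\nget_sequence_info (which adds the frame dimension) and then pass the result\nto get_frames, which repeats the image once per requested frame id. Running\nthis requires the MSRA10k data on disk and a configured local environment,\nso treat it as a sketch rather than a test.\n\"\"\"\nfrom ltr.dataset.msra10k import MSRA10k\nfrom ltr.dataset.synthetic_video import SyntheticVideo\n\nif __name__ == '__main__':\n    image_dataset = MSRA10k(root='/path/to/MSRA10k')  # placeholder path\n    video_dataset = SyntheticVideo(image_dataset)\n\n    # One synthetic \"sequence\" per source image; three frames from image 0.\n    anno = video_dataset.get_sequence_info(0)\n    frames, anno_frames, meta = video_dataset.get_frames(0, [0, 0, 0], anno=anno)\n    print(len(frames))             # 3 copies of the same image\n    print(anno_frames['bbox'][0])  # per-frame box annotation\n"
  },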
  {
    "path": "external/AR/ltr/dataset/synthetic_video_blend.py",
    "content": "from collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\nimport random\nimport torch\n\n\nclass SyntheticVideoBlend(BaseVideoDataset):\n    \"\"\"\n    Create a synthetic video by applying random transformations to an object (foreground) and pasting it in a\n    background image.  Currently, the foreground object is pasted at random locations in different frames.\n    \"\"\"\n    def __init__(self, foreground_image_dataset, background_image_dataset, foreground_transform=None,\n                 background_transform=None):\n        \"\"\"\n        args:\n            foreground_image_dataset - A segmentation dataset from which foreground objects are cropped using the\n                                       segmentation mask\n            background_image_dataset - Dataset used to sample background image for the synthetic video\n            foreground_transform - Random transformations to be applied to the foreground object in every frame\n            background_transform - Random transformations to be applied to the background image in every frame\n        \"\"\"\n        assert foreground_image_dataset.has_segmentation_info()\n\n        super().__init__(foreground_image_dataset.get_name() + '_syn_vid_blend', foreground_image_dataset.root,\n                         foreground_image_dataset.image_loader)\n        self.foreground_image_dataset = foreground_image_dataset\n        self.background_image_dataset = background_image_dataset\n\n        self.foreground_transform = foreground_transform\n        self.background_transform = background_transform\n\n    def get_name(self):\n        return self.name\n\n    def is_video_sequence(self):\n        return False\n\n    def has_class_info(self):\n        return self.foreground_image_dataset.has_class_info()\n\n    def has_occlusion_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return self.foreground_image_dataset.get_num_images()\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_sequences_in_class(self, class_name):\n        return self.foreground_image_dataset.get_images_in_class(class_name)\n\n    def get_sequence_info(self, seq_id):\n        image_info = self.foreground_image_dataset.get_image_info(seq_id)\n\n        image_info = {k: v.unsqueeze(0) for k, v in image_info.items()}\n        return image_info\n\n    def get_class_name(self, seq_id):\n        return self.foreground_image_dataset.get_class_name(seq_id)\n\n    def _paste_target(self, fg_image, fg_box, fg_mask, bg_image, paste_loc):\n        fg_mask = fg_mask.view(fg_mask.shape[0], fg_mask.shape[1], 1)\n        fg_box = fg_box.long().tolist()\n\n        x1 = int(paste_loc[0] - 0.5 * fg_box[2])\n        x2 = x1 + fg_box[2]\n\n        y1 = int(paste_loc[1] - 0.5 * fg_box[3])\n        y2 = y1 + fg_box[3]\n\n        x1_pad = max(-x1, 0)\n        y1_pad = max(-y1, 0)\n\n        x2_pad = max(x2 - bg_image.shape[1], 0)\n        y2_pad = max(y2 - bg_image.shape[0], 0)\n\n        bg_mask = torch.zeros((bg_image.shape[0], bg_image.shape[1], 1), dtype=fg_mask.dtype,\n                              device=fg_mask.device)\n\n        if x1_pad >= fg_mask.shape[1] or x2_pad >= fg_mask.shape[1] or y1_pad >= fg_mask.shape[0] or y2_pad >= \\\n                fg_mask.shape[0]:\n            return bg_image, bg_mask.squeeze(-1)\n\n        fg_mask_patch = fg_mask[fg_box[1] + y1_pad:fg_box[1] + fg_box[3] - y2_pad,\n                                fg_box[0] + 
x1_pad:fg_box[0] + fg_box[2] - x2_pad, :]\n\n        fg_image_patch = fg_image[fg_box[1] + y1_pad:fg_box[1] + fg_box[3] - y2_pad,\n                         fg_box[0] + x1_pad:fg_box[0] + fg_box[2] - x2_pad, :]\n\n        bg_image[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] = \\\n            bg_image[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] * (1 - fg_mask_patch.numpy()) \\\n            + fg_mask_patch.numpy() * fg_image_patch\n\n        bg_mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :] = fg_mask_patch\n\n        return bg_image, bg_mask.squeeze(-1)\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        # Handle foreground\n        fg_frame, fg_anno, fg_object_meta = self.foreground_image_dataset.get_image(seq_id, anno=anno)\n\n        fg_frame_list = [fg_frame.copy() for _ in frame_ids]\n\n        fg_anno_frames = {}\n        for key, value in fg_anno.items():\n            fg_anno_frames[key] = [value[0].clone() for f_id in frame_ids]\n\n        if self.foreground_transform is not None:\n            fg_frame_list, fg_anno_frames['bbox'], fg_anno_frames['mask'] = self.foreground_transform(\n                image=fg_frame_list,\n                bbox=fg_anno_frames['bbox'],\n                mask=fg_anno_frames['mask'],\n                joint=False)\n\n        # Sample a random background\n        bg_seq_id = random.randint(0, self.background_image_dataset.get_num_images() - 1)\n\n        bg_frame, bg_anno, _ = self.background_image_dataset.get_image(bg_seq_id)\n\n        bg_frame_list = [bg_frame.copy() for _ in frame_ids]\n\n        bg_anno_frames = {}\n        for key, value in bg_anno.items():\n            # Note: Since we get bg anno from an image dataset, it does not have a frame dimension\n            bg_anno_frames[key] = [value.clone() for f_id in frame_ids]\n\n        if self.background_transform is not None:\n            if 'mask' in bg_anno_frames.keys():\n                bg_frame_list, bg_anno_frames['bbox'], bg_anno_frames['mask'] = self.background_transform(\n                    image=bg_frame_list,\n                    bbox=bg_anno_frames['bbox'],\n                    mask=bg_anno_frames['mask'],\n                    joint=False)\n            else:\n                bg_frame_list, bg_anno_frames['bbox'] = self.background_transform(\n                    image=bg_frame_list,\n                    bbox=bg_anno_frames['bbox'],\n                    joint=False)\n\n        for i in range(len(frame_ids)):\n            # To be safe, get target bb for the mask\n            bbox = masks_to_bboxes(fg_anno_frames['mask'][i], fmt='t')\n\n            loc_y = random.randint(0, bg_frame_list[i].shape[0] - 1)\n            loc_x = random.randint(0, bg_frame_list[i].shape[1] - 1)\n\n            paste_loc = (loc_x, loc_y)\n            fg_frame_list[i], fg_anno_frames['mask'][i] = self._paste_target(fg_frame_list[i], bbox,\n                                                                             fg_anno_frames['mask'][i],\n                                                                             bg_frame_list[i], paste_loc)\n\n            fg_anno_frames['bbox'][i] = masks_to_bboxes(fg_anno_frames['mask'][i], fmt='t')\n\n        object_meta = OrderedDict({'object_class_name': self.get_class_name(seq_id),\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return fg_frame_list, fg_anno_frames, object_meta\n"
  },
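  {
    "path": "external/AR/ltr/dataset/_examples/paste_blend_sketch.py",
    "content": "\"\"\"Illustrative sketch, NOT part of the original codebase.\n\nThe core of SyntheticVideoBlend._paste_target is an alpha blend: inside the\npasted window the output is bg * (1 - mask) + fg * mask, with the mask taken\nfrom the foreground segmentation. This toy version performs the blend at a\ngiven location, without the boundary/padding bookkeeping of the real method.\n\"\"\"\nimport numpy as np\n\n\ndef paste_patch(bg, fg_patch, mask_patch, x, y):\n    \"\"\"Blend fg_patch (h, w, 3) into bg at top-left (x, y) using mask_patch (h, w, 1).\"\"\"\n    h, w = fg_patch.shape[:2]\n    region = bg[y:y + h, x:x + w, :]\n    bg[y:y + h, x:x + w, :] = region * (1 - mask_patch) + fg_patch * mask_patch\n    return bg\n\n\nif __name__ == '__main__':\n    bg = np.zeros((6, 6, 3), dtype=np.float32)\n    fg = np.full((2, 2, 3), 255.0, dtype=np.float32)\n    mask = np.ones((2, 2, 1), dtype=np.float32)\n    out = paste_patch(bg, fg, mask, x=1, y=2)\n    print(out[2:4, 1:3, 0])  # the pasted 2x2 block, all 255\n"
  },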
  {
    "path": "external/AR/ltr/dataset/tracking_net.py",
    "content": "import torch\nimport os\nimport os.path\nimport numpy as np\nimport pandas\nimport random\nfrom collections import OrderedDict\n\nfrom ltr.data.image_loader import jpeg4py_loader\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.admin.environment import env_settings\n\n\ndef list_sequences(root, set_ids):\n    \"\"\" Lists all the videos in the input set_ids. Returns a list of tuples (set_id, video_name)\n\n    args:\n        root: Root directory to TrackingNet\n        set_ids: Sets (0-11) which are to be used\n\n    returns:\n        list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence\n    \"\"\"\n    sequence_list = []\n\n    for s in set_ids:\n        anno_dir = os.path.join(root, \"TRAIN_\" + str(s), \"anno\")\n\n        sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')]\n        sequence_list += sequences_cur_set\n\n    return sequence_list\n\n\nclass TrackingNet(BaseVideoDataset):\n    \"\"\" TrackingNet dataset.\n\n    Publication:\n        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n        Matthias Mueller, Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n        ECCV, 2018\n        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root        - The path to the TrackingNet folder, containing the training sets.\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n                            sets (0 - 11) will be used.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().trackingnet_dir if root is None else root\n        super().__init__('TrackingNet', root, image_loader)\n\n        if set_ids is None:\n            set_ids = [i for i in range(12)]\n\n        self.set_ids = set_ids\n\n        # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n        # video_name for each sequence\n        self.sequence_list = list_sequences(self.root, self.set_ids)\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n        self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n        # we do not have the class_lists for the tracking net\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def _load_class_info(self):\n        ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n        class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n        with open(class_map_path, 'r') as f:\n            seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n        seq_per_class = {}\n        for i, seq in enumerate(self.sequence_list):\n            class_name = seq_to_class_map[seq[1]]\n            if class_name not in seq_per_class:\n                seq_per_class[class_name] = [i]\n            else:\n                seq_per_class[class_name].append(i)\n\n        return seq_to_class_map, seq_per_class\n\n    def get_name(self):\n        return 'trackingnet'\n\n    def has_class_info(self):\n        return True\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _read_bb_anno(self, seq_id):\n        set_id = self.sequence_list[seq_id][0]\n        vid_name = self.sequence_list[seq_id][1]\n        bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\n        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\n                             low_memory=False).values\n        return torch.tensor(gt)\n\n    def get_sequence_info(self, seq_id):\n        bbox = self._read_bb_anno(seq_id)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = valid.clone().byte()\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def _get_frame(self, seq_id, frame_id):\n        set_id = self.sequence_list[seq_id][0]\n        vid_name = self.sequence_list[seq_id][1]\n        frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\n        return self.image_loader(frame_path)\n\n    def _get_class(self, seq_id):\n        seq_name = self.sequence_list[seq_id][1]\n        return self.seq_to_class_map[seq_name]\n\n    def get_class_name(self, seq_id):\n        obj_class = self._get_class(seq_id)\n\n        return obj_class\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        obj_class = self._get_class(seq_id)\n\n        object_meta = OrderedDict({'object_class_name': obj_class,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n"
  },
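  {
    "path": "external/AR/ltr/dataset/_examples/trackingnet_anno_sketch.py",
    "content": "\"\"\"Illustrative sketch, NOT part of the original codebase.\n\nTrackingNet._read_bb_anno parses one comma-separated txt file per sequence\ninto an (N, 4) [x, y, w, h] tensor, and get_sequence_info marks a frame\nvalid when its box has positive width and height. This example reproduces\nthat logic on an in-memory annotation string.\n\"\"\"\nimport io\n\nimport numpy as np\nimport pandas\nimport torch\n\nif __name__ == '__main__':\n    # Two valid frames followed by an empty (all-zero) annotation.\n    anno_txt = \"10,20,30,40\\n12,22,30,40\\n0,0,0,0\\n\"\n    gt = pandas.read_csv(io.StringIO(anno_txt), delimiter=',', header=None,\n                         dtype=np.float32, na_filter=False).values\n    bbox = torch.tensor(gt)\n\n    valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n    visible = valid.clone().byte()\n    print(bbox.shape)  # torch.Size([3, 4])\n    print(valid)       # tensor([ True,  True, False])\n"
  },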
  {
    "path": "external/AR/ltr/dataset/vos_base.py",
    "content": "import torch\nfrom pathlib import Path\nfrom collections import OrderedDict, defaultdict\nimport json\nimport numpy as np\nimport os\n\nfrom .base_video_dataset import BaseVideoDataset\nfrom ltr.data.image_loader import jpeg4py_loader, imread_indexed\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\n\n\nclass VOSMeta:\n    def __init__(self, data=None, filename=None):\n        if filename is not None:\n            self.load(filename)\n        elif data is not None:\n            self._data = data\n        else:\n            raise ValueError(\"Must set either data or filename parameter\")\n\n    def save(self, gen_meta: Path):\n        gen_meta.parent.mkdir(exist_ok=True, parents=True)\n        json.dump(self._data, open(gen_meta, \"w\"))\n\n    def load(self, gen_meta: Path):\n        if not gen_meta.exists():\n            print(\"Generated metadata file %s is not found.\" % gen_meta)\n            print(\"Find and run VOSMeta.generate() to create it.\")\n            raise FileNotFoundError(gen_meta)\n        self._data = json.load(open(gen_meta), object_pairs_hook=OrderedDict)\n\n    @classmethod\n    def generate(cls, dset_name: str, dset_images_path: Path, dset_annos_path: Path):\n        \"\"\"\n        Count the annotation mask pixels per object, per frame, in all sequences in a dataset\n        :param dset_name:        Dataset name, for printing the progress bar.\n        :param dset_annos_path:  Path to annotations directory, containing sequence directories,\n                                 with annotation frames in them.\n\n        :return: Dataset meta dict:\n\n        {'sequence0':\n            {\n             'shape': (height, width)\n\n             'obj_sizes':  # Object pixels per frame\n                {'frame0': {'object0': px_count, 'object1': px_count, ...},\n                 'frame1': {'object0': px_count, 'object1': px_count, ...},\n                ... },\n\n             'bboxes':  # Bounding boxes per frame\n                {'frame0': {'object0': bbox, 'object1': bbox, ...},\n                 'frame1': {'object0': bbox, 'object1': bbox, ...},\n                ... 
},\n            ...\n        }\n        \"\"\"\n        assert(dset_annos_path.exists())\n\n        dset_meta = OrderedDict()\n        sequences = [p.stem for p in sorted(dset_annos_path.glob(\"*\")) if p.is_dir()]\n\n        try:\n            from tqdm import tqdm\n        except:\n            def tqdm(x, *args, **kwargs):\n                return x\n\n        for seq in tqdm(sequences, desc=dset_name, unit=\"seq\"):\n\n            obj_sizes2 = defaultdict(OrderedDict)\n            bboxes = defaultdict(OrderedDict)\n            shape = None\n            frame_names = [file.stem for file in sorted((dset_images_path / seq).glob(\"*.jpg\"))]\n            anno_paths = list(sorted((dset_annos_path / seq).glob(\"*.png\")))\n\n            # Extract information from the given label frames\n            for path in anno_paths:\n                f_id = path.stem\n\n                # Count label-pixels per frame\n                labels = imread_indexed(path)\n                # labels = np.array(Image.open(path))\n                obj_ids, obj_sizes = np.unique(labels, return_counts=True)\n                obj_ids = [str(oid) for oid in obj_ids]\n                obj_sizes = obj_sizes.tolist()\n\n                if '0' in obj_ids:  # Remove background id\n                    obj_ids = obj_ids[1:]\n                    obj_sizes = obj_sizes[1:]\n                obj_sizes2[f_id] = OrderedDict(zip(obj_ids, obj_sizes))\n\n                # Generate per-label bounding boxes\n                for obj_id in obj_ids:\n                    bboxes[f_id][obj_id] = cls._mask_to_bbox(labels == int(obj_id))\n\n                if shape is None:\n                    shape = labels.shape[:2]\n\n            # Format result\n\n            dset_meta[seq] = dict(shape=shape, obj_sizes=obj_sizes2, bboxes=bboxes, frame_names=frame_names)\n\n        return VOSMeta(dset_meta)\n\n    @staticmethod\n    def _mask_to_bbox(mask: np.ndarray):\n\n        mask = mask.astype(int)\n        xs = mask.sum(axis=-2).nonzero()[0].tolist()\n        ys = mask.sum(axis=-1).nonzero()[0].tolist()\n\n        if len(ys) > 0 and len(xs) > 0:\n            x, y, w, h = xs[0], ys[0], xs[-1] - xs[0], ys[-1] - ys[0]\n        else:\n            x, y, w, h = 0, 0, 0, 0\n\n        return [x, y, w, h]\n\n    @staticmethod\n    def _transpose_nested_dict(d):\n        \"\"\" Permute a 2-level nested dict such that the inner and outer keys swap places. 
\"\"\"\n        d2 = defaultdict(OrderedDict)\n        for key1, inner in d.items():\n            for key2, value in inner.items():\n                d2[key2][key1] = value\n        return d2\n\n    def select_split(self, dataset_name, split):\n        ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n        sequences = set([s.strip() for s in open(os.path.join(ltr_path, 'data_specs', dataset_name + '_' + split + '.txt')).readlines()])\n        all_sequences = set(self._data.keys())\n        to_remove = all_sequences.difference(sequences)\n        for seq_name in to_remove:\n            self._data.pop(seq_name)\n\n    def get_sequence_names(self):\n        return list(self._data.keys())\n\n    def get_shape(self, seq_name):\n        \"\"\" Sequence image shape (h,w) \"\"\"\n        h, w = self._data[seq_name]['shape']\n        return h, w\n\n    def get_obj_ids(self, seq_name):\n        \"\"\" All objects in the sequence \"\"\"\n        return list(self.get_obj_sizes_per_object(seq_name).keys())\n\n    def get_frame_names(self, seq_name):\n        \"\"\" All filename stems of the frames in the sequence \"\"\"\n        return self._data[seq_name]['frame_names']\n\n    def enable_all_frames(self, dset_images_path):\n        \"\"\" For YouTubeVOS: Update the frame names with (jpeg) files from the <split>_all_frames set\n        :param dset_images_path:  /path/to/train_all_frames/JPEGImages (or valid or test)\n        :param seq: Sequence name\n        :return:\n        \"\"\"\n\n        # Try load the cached index\n        idx_file = dset_images_path.parent / \"frame_names.json\"\n        if idx_file.exists():\n            print('Loading cached frame names from %s' % idx_file)\n            all_frame_names = json.load(open(idx_file))\n        else:\n            # Cache the data to the user's home directory (guaranteed to be writable)\n            all_frame_names = dict()\n            user_idx_file = Path.home() / (dset_images_path.parent.stem + \"_frame_names.json\")\n            print('Indexing YouTubeVOS \"all_frames\" frame names to %s' % user_idx_file)\n            for seq in self._data:\n                all_frame_names[seq] = [file.stem for file in sorted((dset_images_path / seq).glob(\"*.jpg\"))]\n            json.dump(all_frame_names, open(user_idx_file, \"w\"))\n            print('Done. Move %s to %s to load faster next time.' 
% (user_idx_file, idx_file))\n\n        for seq, frame_names in all_frame_names.items():\n            self._data[seq]['frame_names'] = frame_names\n\n    def get_aspect_ratio(self, seq_name):\n        \"\"\" Sequence aspect ratio \"\"\"\n        h, w = self._data[seq_name]['shape']\n        return w / h\n\n    def get_obj_sizes_per_frame(self, seq_name):\n        \"\"\" Get object pixel counts, grouped by frame names \"\"\"\n        return self._data[seq_name]['obj_sizes']\n\n    def get_bboxes_per_frame(self, seq_name):\n        \"\"\" Object bounding boxes, grouped by frame names \"\"\"\n        return self._data[seq_name]['bboxes']\n\n    def get_obj_sizes_per_object(self, seq_name):\n        \"\"\" Object pixel counts, grouped by object \"\"\"\n        return self._transpose_nested_dict(self.get_obj_sizes_per_frame(seq_name))\n\n    def get_bboxes_per_object(self, seq_name):\n        \"\"\" Object bounding boxes, grouped by object \"\"\"\n        return self._transpose_nested_dict(self.get_bboxes_per_frame(seq_name))\n\n    @staticmethod\n    def generate_datasets_meta(src, dst=Path(\"~/vosdataset_meta\").expanduser()):\n        VOSMeta.generate(\"SyntheticCoco\", src / \"JPEGImages\", src / \"Annotations\").save(src / \"generated_meta.json\")\n\n\nclass VOSDatasetBase(BaseVideoDataset):\n\n    \"\"\" Generic VOS dataset reader base class, for both DAVIS and YouTubeVOS \"\"\"\n\n    def __init__(self, name: str, root: Path, version=None, split='train',\n                 multiobj=True, vis_threshold=10, image_loader=jpeg4py_loader):\n        \"\"\"\n        :param root:            Dataset root path, e.g. /path/to/DAVIS or /path/to/YouTubeVOS/\n                                Note: YouTubeVOS 2018 and 2019 are expected to be in\n                                /path/to/YouTubeVOS/2018 and /path/to/YouTubeVOS/2019, respectively\n        :param name:            'DAVIS' or 'YouTubeVOS' (case sensitive)\n        :param version:         DAVIS: '2016' or '2017', YouTubeVOS: '2018' or '2019'\n        :param split:           DAVIS: Any name in DAVIS/ImageSets/<year>,\n                                YouTubeVOS: 'test', 'train', 'valid' or 'jjtrain', 'jjvalid'\n        :param multiobj:        Whether the dataset will return all objects in a sequence or\n                                multiple sequences with one object in each.\n        :param vis_threshold:   Minimum number of pixels required to consider a target object \"visible\".\n        :param image_loader:    Image loader.\n        \"\"\"\n\n        assert root.exists() and root.is_dir()\n\n        super().__init__(name, root, image_loader)\n\n        self.version = version\n        self.split = split\n        self.vis_threshold = vis_threshold\n        self.multiobj = multiobj\n\n    def _load_image(self, path):\n        im = self.image_loader(str(path))\n        assert im is not None\n        im = np.atleast_3d(im)\n        return im\n\n    @staticmethod\n    def _load_anno(path):\n        if not path.exists():\n            return None\n        # im = np.atleast_3d(np.array(Image.open(path)))\n        im = imread_indexed(path)\n        return im\n\n    def get_num_sequences(self):\n        return len(self._samples)\n\n    def get_sequence_info(self, sample_id):\n        \"\"\" Get sample meta data.\n        :param sample_id:  Sample to query.\n        :return: dict of metadata:\n                sequence:     Sequence name\n                frame_shape:  (height, width) of the images\n                frame_names:  List of frame filename stems in the sequence\n                object_ids:   Id numbers of all objects occurring in the sequence\n                object_sizes: Matrix shape=(frames, objects) of the number of pixels for each object in each frame\n                              Coordinates in this matrix relate to the frame_names and object_ids\n                visible:      Boolean matrix of the same shape as object_sizes. Entries with more pixels\n                              than self.vis_threshold are True.\n        \"\"\"\n        m = self.gmeta\n        seq_name, obj_ids = self._samples[sample_id]\n        f_names = m.get_frame_names(seq_name)  # All frames\n\n        f2i = {f: i for i, f in enumerate(f_names)}  # Frame name to matrix index\n        o2i = {o: i for i, o in enumerate(obj_ids)}  # Object id to matrix index\n\n        # Get a matrix of object sizes: shape=(frames, objects)\n        obj_sizes = torch.zeros((len(f_names), len(obj_ids)), dtype=torch.int)\n        sizes_per_object = m.get_obj_sizes_per_object(seq_name)\n\n        for obj_id in obj_ids:\n            frames = sizes_per_object[obj_id]\n            oid = o2i[obj_id]\n            for f, sz in frames.items():\n                obj_sizes[f2i[f], oid] = sz\n\n        visible = (obj_sizes > self.vis_threshold).byte()\n\n        return dict(sequence=seq_name, frame_shape=m.get_shape(seq_name), frame_names=f_names, object_ids=obj_ids,\n                    object_sizes=obj_sizes, visible=visible, valid=visible)\n\n    def get_paths_and_bboxes(self, sequence_info):\n\n        seq_name = sequence_info['sequence']\n        annos_root = self._anno_path / seq_name\n        images_root = self._jpeg_path / seq_name\n\n        frame_names = sequence_info['frame_names']\n        f2i = {f: i for i, f in enumerate(frame_names)}\n\n        images = [str(images_root / (f + \".jpg\")) for f in frame_names]\n\n        # Find the frames where ground truth is available and\n        # get the bounding boxes and segmentation labels of those frames\n        all_bboxes = self.gmeta.get_bboxes_per_frame(seq_name)\n        gt_labels = [str(annos_root / (f + \".png\")) if f in all_bboxes.keys() else None for f in frame_names]\n\n        gt_bboxes = OrderedDict()\n        for obj_id in sequence_info['object_ids']:\n            gt_bboxes[obj_id] = np.array([all_bboxes.get(frame, {}).get(obj_id, [-1, -1, -1, -1]) for frame in frame_names])\n\n        return images, gt_labels, gt_bboxes\n\n    def _construct_sequence(self, sequence_info):\n        raise NotImplementedError\n\n    def get_sequence_list(self):\n        if len(self.sequence_list) > 0:\n            return self.sequence_list\n        self.sequence_list = [self._construct_sequence(self.get_sequence_info(i)) for i in range(len(self._samples))]\n        return self.sequence_list\n\n    def __len__(self):\n        return len(self._samples)\n\n    def _get_image_path(self, meta, frame_id):\n        return self._jpeg_path / meta['sequence'] / (meta['frame_names'][frame_id] + \".jpg\")\n\n    def _get_anno_path(self, meta, frame_id):\n        return self._anno_path / meta['sequence'] / (meta['frame_names'][frame_id] + \".png\")\n\n    def get_frames(self, sample_id, frame_ids, anno=None):\n        \"\"\"  Fetch frames with the given ids.\n        :param sample_id:  Sample to get.\n        :param frame_ids:  List of frame indices in the sequence belonging to the sample_id\n        :return: dict of metadata and data:\n                sequence:  Sequence name\n                images:    List of images. 
No entries may be None\n                labels:    List of label/mask images. Entries may be None if the data is missing\n                bboxes:    List of bounding boxes. Entries may be None if the data is missing\n        \"\"\"\n        seq_name, obj_ids = self._samples[sample_id]\n\n        meta = self.get_sequence_info(sample_id) if anno is None else anno\n        frame_names = meta['frame_names']\n        images = [self._load_image(self._jpeg_path / seq_name / (frame_names[f] + \".jpg\")) for f in frame_ids]\n        labels = [self._load_anno(self._anno_path / seq_name / (frame_names[f] + \".png\")) for f in frame_ids]\n\n        # Generate bounding boxes for the requested objects\n        bboxes = []\n        for lb in labels:\n            lb = torch.from_numpy(lb.squeeze())\n            frame_bbs = {}\n            for obj_id in obj_ids:\n                bbox = masks_to_bboxes(lb == int(obj_id), fmt='t')\n                if bbox[3] == 0 or bbox[2] == 0:\n                    print(\"!\")\n                frame_bbs[obj_id] = bbox\n            bboxes.append(frame_bbs)\n\n        # Insert empty bboxes for missing object ids\n        for bbox in bboxes:\n            for obj_id in obj_ids:\n                if obj_id not in bbox:\n                    bbox[obj_id] = torch.zeros(4, dtype=torch.float32)\n\n        # Remap to object id 1, if requested - for training\n        if not self.multiobj:\n            assert len(obj_ids) == 1\n            obj_id = obj_ids[0]\n            labels = [torch.Tensor(lb == int(obj_id)) for lb in labels]\n            bboxes = [bbox[obj_id] for bbox in bboxes]\n        else:\n            labels = [torch.Tensor(lb) for lb in labels]\n\n        object_meta = {key: meta[key] for key in ['sequence', 'frame_shape', 'frame_names', 'object_ids']}\n\n        anno_frames = dict(bbox=bboxes, mask=labels)\n        for key in ['object_sizes', 'visible', 'valid']:\n            value = meta[key]\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        return images, anno_frames, object_meta\n\n    def get_name(self):\n        return \"%s/%s/%s\" % (self.name, self.version, self.split)\n\n    def has_class_info(self):\n        return False\n\n    def has_occlusion_info(self):\n        return True\n\n    def get_num_classes(self):\n        return 0\n\n    def get_class_list(self):\n        return []\n\n    def get_sequences_in_class(self, class_name):\n        return []\n\n    def has_segmentation_info(self):\n        return True\n"
  },
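  {
    "path": "external/AR/ltr/dataset/_examples/vos_meta_transpose_sketch.py",
    "content": "\"\"\"Illustrative sketch, NOT part of the original codebase.\n\nVOSMeta stores per-frame tables (frame -> object id -> value) and derives\nthe per-object view (object id -> frame -> value) by swapping the nesting,\nwhich is exactly what VOSMeta._transpose_nested_dict does. Shown here on a\ntiny object-size table.\n\"\"\"\nfrom collections import OrderedDict, defaultdict\n\n\ndef transpose_nested_dict(d):\n    \"\"\"Swap the inner and outer keys of a two-level nested dict.\"\"\"\n    d2 = defaultdict(OrderedDict)\n    for key1, inner in d.items():\n        for key2, value in inner.items():\n            d2[key2][key1] = value\n    return d2\n\n\nif __name__ == '__main__':\n    # obj_sizes as stored: frame name -> object id -> pixel count\n    per_frame = {'00000': {'1': 500, '2': 120}, '00005': {'1': 480}}\n    per_object = transpose_nested_dict(per_frame)\n    print({k: dict(v) for k, v in per_object.items()})\n    # {'1': {'00000': 500, '00005': 480}, '2': {'00000': 120}}\n"
  },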
  {
    "path": "external/AR/ltr/dataset/youtubevos.py",
    "content": "from pathlib import Path\nimport os\nfrom ltr.dataset.vos_base import VOSDatasetBase, VOSMeta\nfrom pytracking.evaluation import Sequence\nimport json\nfrom ltr.admin.environment import env_settings\nfrom ltr.data.image_loader import jpeg4py_loader\n\n\nclass YouTubeVOSMeta:\n    \"\"\" Thin wrapper for YouTubeVOS meta data\n    meta.json\n    {\n        \"videos\": {\n            \"<video_id>\": {\n                \"objects\": {\n                    \"<object_id>\": {\n                        \"category\": \"<category>\",\n                        \"frames\": [\n                            \"<frame_id>\",\n                            \"<frame_id>\",\n                        ]\n                    }\n                }\n            }\n        }\n    }\n    # <object_id> is the same as the pixel values of object in annotated segmentation PNG files.\n    # <frame_id> is the 5-digit index of frame in video, and not necessary to start from 0.\n    \"\"\"\n\n    def __init__(self, dset_split_path):\n        self._data = json.load(open(dset_split_path / 'meta.json'))['videos']\n\n    def sequences(self):\n        return list(self._data.keys())\n\n    def seq_frames(self, seq_name):\n        \"\"\" All filename stems of the frames in the sequence \"\"\"\n        frames = set()\n        for obj_id in self.object_ids(seq_name):\n            for f in self.object_frames(seq_name, obj_id):\n                frames.add(f)\n        return list(sorted(frames))\n\n    def object_ids(self, seq_name):\n        \"\"\" All objects in the sequence \"\"\"\n        return list(self._data[seq_name]['objects'].keys())\n\n    def object_category(self, seq_name, obj_id):\n        return self._data[seq_name]['objects'][str(obj_id)]['category']\n\n    def object_frames(self, seq_name, obj_id):\n        return self._data[seq_name]['objects'][str(obj_id)]['frames']\n\n    def object_first_frame(self, seq_name, obj_id):\n        return self.object_frames(seq_name, obj_id)[0]\n\n\nclass YouTubeVOS(VOSDatasetBase):\n    \"\"\"\n    YouTubeVOS video object segmentation dataset.\n\n    Publication:\n        YouTube-VOS: A Large-Scale Video Object Segmentation Benchmark\n        Ning Xu, Linjie Yang, Yuchen Fan, Dingcheng Yue, Yuchen Liang, Jianchao Yang, and Thomas Huang\n        ECCV, 2018\n        https://arxiv.org/pdf/1809.03327.pdf\n\n    Download dataset from: https://youtube-vos.org/dataset/\n    \"\"\"\n    def __init__(self, root=None, version='2019', split='train', cleanup=None, all_frames=False, sequences=None,\n                 multiobj=True, vis_threshold=10, image_loader=jpeg4py_loader):\n        \"\"\"\n        args:\n            root - Dataset root path. If unset, it uses the path in your local.py config.\n            version - '2018' or '2019'\n            split - 'test', 'train', 'valid', or 'jjtrain', 'jjvalid'. 'jjvalid' corresponds to a custom validation\n                    dataset consisting of 300 videos randomly sampled from the train set. 'jjtrain' contains the\n                    remaining videos used for training.\n            cleanup - List of actions to take to clean up known problems in the dataset.\n                      'aspect': remove sequences with unusual aspect ratios,\n                      'starts': fix up start frames from original meta data\n            all_frames - Whether to use an \"all_frames\" split.\n            sequences - List of sequence names. 
Limit to a subset of sequences if not None.\n            multiobj - Whether the dataset will return all objects in a sequence or multiple sequences with one\n                       object in each.\n            vis_threshold - Minimum number of pixels required to consider a target object \"visible\".\n            image_loader - Image loader.\n        \"\"\"\n        root = env_settings().youtubevos_dir if root is None else root\n        super().__init__(name=\"YouTubeVOS\", root=Path(root), version=version, split=split, multiobj=multiobj,\n                         vis_threshold=vis_threshold, image_loader=image_loader)\n\n        split_folder = self.split\n        if self.split.startswith(\"jj\"):\n            split_folder = \"train\"\n\n        dset_path = self.root / self.version / split_folder\n\n        self._anno_path = dset_path / 'Annotations'\n\n        if all_frames:\n            self._jpeg_path = self.root / self.version / (split_folder + \"_all_frames\") / 'JPEGImages'\n        else:\n            self._jpeg_path = dset_path / 'JPEGImages'\n\n        self.meta = YouTubeVOSMeta(dset_path)\n        meta_path = dset_path / \"generated_meta.json\"\n        if meta_path.exists():\n            self.gmeta = VOSMeta(filename=meta_path)\n        else:\n            self.gmeta = VOSMeta.generate('YouTubeVOS', self._jpeg_path, self._anno_path)\n            self.gmeta.save(meta_path)\n\n        if all_frames:\n            self.gmeta.enable_all_frames(self._jpeg_path)\n\n        if self.split not in ['train', 'valid', 'test']:\n            self.gmeta.select_split('youtubevos', self.split)\n\n        if sequences is None:\n            sequences = self.gmeta.get_sequence_names()\n\n        to_remove = set()\n        cleanup = {} if cleanup is None else set(cleanup)\n\n        if 'aspect' in cleanup:\n            # Remove sequences with unusual aspect ratios\n            for seq_name in sequences:\n                a = self.gmeta.get_aspect_ratio(seq_name)\n                if a < 1.45 or a > 1.9:\n                    to_remove.add(seq_name)\n\n        if 'starts' in cleanup:\n            # Fix incorrect start frames for some objects found with ytvos_start_frames_test()\n            bad_start_frames = [(\"0e27472bea\", '2', ['00055', '00060'], '00065'),\n                                (\"5937b08d69\", '4', ['00000'], '00005'),\n                                (\"5e1ce354fd\", '5', ['00010', '00015'], '00020'),\n                                (\"7053e4f41e\", '2', ['00000', '00005', '00010', '00015'], '00020'),\n                                (\"720e3fa04c\", '2', ['00050'], '00055'),\n                                (\"c73c8e747f\", '2', ['00035'], '00040')]\n            for seq_name, obj_id, bad_frames, good_frame in bad_start_frames:\n                # bad_frames is from meta.json included with the dataset\n                # good_frame is from the generated meta - and the first actual frame where the object was seen.\n                if seq_name in self.meta._data:\n                    frames = self.meta.object_frames(seq_name, obj_id)\n                    for f in bad_frames:\n                        frames.remove(f)\n                    assert frames[0] == good_frame\n\n        sequences = [seq for seq in sequences if seq not in to_remove]\n\n        self.sequence_names = sequences\n        self._samples = []\n\n        for seq in sequences:\n            obj_ids = self.meta.object_ids(seq)\n            if self.multiobj:  # Multiple objects per sample\n                
self._samples.append((seq, obj_ids))\n            else:  # One object per sample\n                self._samples.extend([(seq, [obj_id]) for obj_id in obj_ids])\n\n        print(\"%s loaded.\" % self.get_name())\n        if len(to_remove) > 0:\n            print(\"   %d sequences were removed, (%d remaining).\" % (len(to_remove), len(sequences)))\n\n    def _construct_sequence(self, sequence_info):\n\n        seq_name = sequence_info['sequence']\n        frame_names = sequence_info['frame_names']\n        fname_to_fid = {f: i for i, f in enumerate(frame_names)}\n        images, gt_segs, gt_bboxes = self.get_paths_and_bboxes(sequence_info)\n\n        init_data = dict()\n        for obj_id in sequence_info['object_ids']:\n            if obj_id == '0':\n                print(\"!\")\n            f_name = self.meta.object_first_frame(seq_name, obj_id)\n            f_id = fname_to_fid[f_name]\n            if f_id not in init_data:\n                init_data[f_id] = {'object_ids': [obj_id],\n                                   'bbox': {obj_id: gt_bboxes[obj_id][f_id,:]},\n                                   'mask': os.path.join(os.path.dirname(gt_segs[f_id]), (f_name + \".png\"))}\n                assert init_data[f_id]['mask'] in gt_segs  # If this fails, some file is missing\n            else:\n                init_data[f_id]['object_ids'].append(obj_id)\n                init_data[f_id]['bbox'][obj_id] = gt_bboxes[obj_id][f_id,:]\n\n        return Sequence(name=seq_name, frames=images, dataset='YouTubeVOS', ground_truth_rect=gt_bboxes,\n                        init_data=init_data, ground_truth_seg=gt_segs, object_ids=sequence_info['object_ids'],\n                        multiobj_mode=self.multiobj)\n"
  },
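  {
    "path": "external/AR/ltr/dataset/_examples/youtubevos_meta_sketch.py",
    "content": "\"\"\"Illustrative sketch, NOT part of the original codebase.\n\nWalks the meta.json layout documented in YouTubeVOSMeta: videos -> objects ->\nper-object category and frame list, where the object id equals the object's\npixel value in the annotation PNGs. The structure (including the video id)\nis made up inline here instead of being read from disk.\n\"\"\"\nmeta = {\n    'videos': {\n        '0a2f2bd294': {\n            'objects': {\n                '1': {'category': 'person', 'frames': ['00000', '00005']},\n                '2': {'category': 'dog', 'frames': ['00005', '00010']},\n            }\n        }\n    }\n}\n\nif __name__ == '__main__':\n    for seq_name, seq in meta['videos'].items():\n        for obj_id, obj in seq['objects'].items():\n            # The first listed frame is the first annotated frame of the\n            # object, mirroring YouTubeVOSMeta.object_first_frame.\n            print(seq_name, obj_id, obj['category'], obj['frames'][0])\n"
  },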
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/.gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n.vim-template*\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n"
  },
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2018 Jiayuan Mao\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/README.md",
    "content": "# PreciseRoIPooling\nThis repo implements the **Precise RoI Pooling** (PrRoI Pooling), proposed in the paper **Acquisition of Localization Confidence for Accurate Object Detection** published at ECCV 2018 (Oral Presentation).\n\n**Acquisition of Localization Confidence for Accurate Object Detection**\n\n_Borui Jiang*, Ruixuan Luo*, Jiayuan Mao*, Tete Xiao, Yuning Jiang_ (* indicates equal contribution.)\n\nhttps://arxiv.org/abs/1807.11590\n\n## Brief\n\nIn short, Precise RoI Pooling is an integration-based (bilinear interpolation) average pooling method for RoI Pooling. It avoids any quantization and has a continuous gradient on bounding box coordinates. It is:\n\n- different from the original RoI Pooling proposed in [Fast R-CNN](https://arxiv.org/abs/1504.08083). PrRoI Pooling uses average pooling instead of max pooling for each bin and has a continuous gradient on bounding box coordinates. That is, one can take the derivatives of some loss function w.r.t. the coordinates of each RoI and optimize the RoI coordinates.\n- different from the RoI Align proposed in [Mask R-CNN](https://arxiv.org/abs/1703.06870). PrRoI Pooling uses a full integration-based average pooling instead of sampling a constant number of points. This makes the gradient w.r.t. the coordinates continuous.\n\nFor a better illustration, we illustrate RoI Pooling, RoI Align and PrRoI Pooling in the following figure. More details including the gradient computation can be found in our paper.\n\n<center><img src=\"./_assets/prroi_visualization.png\" width=\"80%\"></center>\n\n## Implementation\n\nPrRoI Pooling was originally implemented by [Tete Xiao](http://tetexiao.com/) based on MegBrain, an (internal) deep learning framework built by Megvii Inc. It was later adapted into open-source deep learning frameworks. Currently, we only support PyTorch. Unfortunately, we don't have any specific plan for the adaptation into other frameworks such as TensorFlow, but any contributions (pull requests) will be more than welcome.\n\n## Usage (PyTorch 1.0)\n\nIn the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 1.0+ and only supports CUDA (CPU mode is not implemented).\nSince we use PyTorch JIT for cxx/cuda code compilation, to use the module in your code, simply do:\n\n```\nfrom prroi_pool import PrRoIPool2D\n\navg_pool = PrRoIPool2D(window_height, window_width, spatial_scale)\nroi_features = avg_pool(features, rois)\n\n# for those who want to use the \"functional\"\n\nfrom prroi_pool.functional import prroi_pool2d\nroi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale)\n```\n\n\n## Usage (PyTorch 0.4)\n\n**!!! Please first check out the branch pytorch0.4.**\n\nIn the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 0.4 and only supports CUDA (CPU mode is not implemented).\nTo use the PrRoI Pooling module, first go to `pytorch/prroi_pool` and execute `./travis.sh` to compile the essential components (you may need `nvcc` for this step). To use the module in your code, simply do:\n\n```\nfrom prroi_pool import PrRoIPool2D\n\navg_pool = PrRoIPool2D(window_height, window_width, spatial_scale)\nroi_features = avg_pool(features, rois)\n\n# for those who want to use the \"functional\"\n\nfrom prroi_pool.functional import prroi_pool2d\nroi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale)\n```\n\nHere,\n\n- RoI is an `m * 5` float tensor of format `(batch_index, x0, y0, x1, y1)`, following the convention in the original Caffe implementation of RoI Pooling, although in some frameworks the batch indices are provided by an integer tensor.\n- `spatial_scale` is multiplied with the RoIs. For example, if your feature maps are down-sampled by a factor of 16 (w.r.t. the input image), you should use a spatial scale of `1/16`.\n- The coordinates for RoI follow the [L, R) convention. That is, `(0, 0, 4, 4)` denotes a box of size `4x4`.\n"
  },
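  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/example_usage_sketch.py",
    "content": "\"\"\"Illustrative sketch, NOT part of the original PreciseRoIPooling repo.\n\nBuilds the (batch_index, x0, y0, x1, y1) RoI tensor described in the README\nand runs PrRoIPool2D on it. The CUDA extension is JIT-compiled on first use,\nso this only runs on a machine with a GPU and the CUDA toolchain available.\n\"\"\"\nimport torch\nfrom prroi_pool import PrRoIPool2D\n\nif __name__ == '__main__':\n    assert torch.cuda.is_available(), 'PrRoI Pooling is CUDA-only'\n\n    features = torch.randn(1, 16, 32, 32, device='cuda')\n    # One RoI in batch element 0 covering [0, 16) x [0, 16) in feature-map\n    # coordinates ([L, R) convention, see the README).\n    rois = torch.tensor([[0., 0., 0., 16., 16.]], device='cuda')\n\n    # If the RoIs were given in image coordinates and the feature map were\n    # downsampled 16x, spatial_scale would be 1/16; here the RoI is already\n    # in feature coordinates, so the scale is 1.\n    avg_pool = PrRoIPool2D(7, 7, spatial_scale=1.0)\n    out = avg_pool(features, rois)\n    print(out.shape)  # torch.Size([1, 16, 7, 7])\n"
  },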
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/.gitignore",
    "content": "*.o\n/_prroi_pooling\n"
  },
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/__init__.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : __init__.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n# Date   : 07/13/2018\n# \n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nfrom .prroi_pool import *\n\n"
  },
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/functional.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : functional.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n# Date   : 07/13/2018\n#\n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nimport torch\nimport torch.autograd as ag\n\n__all__ = ['prroi_pool2d']\n\n\n_prroi_pooling = None\n\n\ndef _import_prroi_pooling():\n    global _prroi_pooling\n\n    if _prroi_pooling is None:\n        try:\n            from os.path import join as pjoin, dirname\n            from torch.utils.cpp_extension import load as load_extension\n            root_dir = pjoin(dirname(__file__), 'src')\n\n            _prroi_pooling = load_extension(\n                '_prroi_pooling',\n                [pjoin(root_dir, 'prroi_pooling_gpu.c'), pjoin(root_dir, 'prroi_pooling_gpu_impl.cu')],\n                verbose=True\n            )\n        except ImportError:\n            raise ImportError('Cannot compile Precise RoI Pooling library.')\n\n    return _prroi_pooling\n\n\nclass PrRoIPool2DFunction(ag.Function):\n    @staticmethod\n    def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale):\n        _prroi_pooling = _import_prroi_pooling()\n\n        assert 'FloatTensor' in features.type() and 'FloatTensor' in rois.type(), \\\n                'Precise RoI Pooling only takes float input, got {} for features and {} for rois.'.format(features.type(), rois.type())\n\n        pooled_height = int(pooled_height)\n        pooled_width = int(pooled_width)\n        spatial_scale = float(spatial_scale)\n\n        features = features.contiguous()\n        rois = rois.contiguous()\n        params = (pooled_height, pooled_width, spatial_scale)\n\n        if features.is_cuda:\n            output = _prroi_pooling.prroi_pooling_forward_cuda(features, rois, *params)\n            ctx.params = params\n            # everything here is contiguous.\n            ctx.save_for_backward(features, rois, output)\n        else:\n            raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implementations.')\n\n        return output\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        _prroi_pooling = _import_prroi_pooling()\n\n        features, rois, output = ctx.saved_tensors\n        grad_input = grad_coor = None\n\n        if features.requires_grad:\n            grad_output = grad_output.contiguous()\n            grad_input = _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, *ctx.params)\n        if rois.requires_grad:\n            grad_output = grad_output.contiguous()\n            grad_coor = _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, *ctx.params)\n\n        return grad_input, grad_coor, None, None, None\n\n\nprroi_pool2d = PrRoIPool2DFunction.apply\n\n"
  },
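  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/example_coor_grad_sketch.py",
    "content": "\"\"\"Illustrative sketch, NOT part of the original PreciseRoIPooling repo.\n\nThe point of PrRoIPool2DFunction.backward returning grad_coor is that the\npooled output is differentiable w.r.t. the RoI coordinates themselves, so\nbox coordinates can be refined by gradient descent. Like the forward pass,\nthis is CUDA-only and JIT-compiles the extension on first use.\n\"\"\"\nimport torch\nfrom prroi_pool.functional import prroi_pool2d\n\nif __name__ == '__main__':\n    assert torch.cuda.is_available(), 'PrRoI Pooling is CUDA-only'\n\n    features = torch.randn(1, 8, 24, 24, device='cuda', requires_grad=True)\n    rois = torch.tensor([[0., 2., 2., 18., 18.]], device='cuda',\n                        requires_grad=True)\n\n    out = prroi_pool2d(features, rois, 7, 7, 1.0)\n    out.sum().backward()\n\n    # Gradients w.r.t. (x0, y0, x1, y1); the batch-index entry carries no\n    # meaningful gradient.\n    print(rois.grad)\n"
  },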
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/prroi_pool.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : prroi_pool.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n# Date   : 07/13/2018\n#\n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nimport torch.nn as nn\n\nfrom .functional import prroi_pool2d\n\n__all__ = ['PrRoIPool2D']\n\n\nclass PrRoIPool2D(nn.Module):\n    def __init__(self, pooled_height, pooled_width, spatial_scale):\n        super().__init__()\n\n        self.pooled_height = int(pooled_height)\n        self.pooled_width = int(pooled_width)\n        self.spatial_scale = float(spatial_scale)\n\n    def forward(self, features, rois):\n        return prroi_pool2d(features, rois, self.pooled_height, self.pooled_width, self.spatial_scale)\n\n    def extra_repr(self):\n        return 'kernel_size=({pooled_height}, {pooled_width}), spatial_scale={spatial_scale}'.format(**self.__dict__)\n\n"
  },
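  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/examples/example_module_usage.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : example_module_usage.py\n#\n# NOTE: illustrative sketch added for this write-up; NOT part of the upstream\n# PreciseRoIPooling repository. It exercises the nn.Module wrapper above and\n# the property that distinguishes Precise RoI Pooling from plain RoI Align in\n# this codebase: the pooled output is differentiable w.r.t. the roi\n# coordinates, so the rois tensor itself can receive gradients.\n\nimport torch\n\nfrom prroi_pool import PrRoIPool2D\n\n\nif __name__ == '__main__':\n    pool = PrRoIPool2D(pooled_height=7, pooled_width=7, spatial_scale=0.5)\n\n    features = torch.rand(2, 16, 24, 32, device='cuda', requires_grad=True)\n    rois = torch.tensor([[0., 0., 0., 14., 14.]], device='cuda', requires_grad=True)\n\n    out = pool(features, rois)  # (1, 16, 7, 7)\n    out.sum().backward()\n\n    print(features.grad.shape)  # (2, 16, 24, 32), same as features\n    print(rois.grad.shape)      # (1, 5): grad w.r.t. (b, x1, y1, x2, y2); the b entry is zeroed\n"
  },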
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.c",
    "content": "/*\n * File   : prroi_pooling_gpu.c\n * Author : Jiayuan Mao, Tete Xiao\n * Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n * Date   : 07/13/2018\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#include <math.h>\n#include <torch/extension.h>\n\n#include <ATen/ATen.h>\n#include <ATen/cuda/CUDAContext.h>\n\n#include <THC/THC.h>\n\n#include \"prroi_pooling_gpu_impl.cuh\"\n\n\nat::Tensor prroi_pooling_forward_cuda(const at::Tensor &features, const at::Tensor &rois, int pooled_height, int pooled_width, float spatial_scale) {\n    int nr_rois = rois.size(0);\n    int nr_channels = features.size(1);\n    int height = features.size(2);\n    int width = features.size(3);\n    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;\n    auto output = at::zeros({nr_rois, nr_channels, pooled_height, pooled_width}, features.options());\n\n    if (output.numel() == 0) {\n        THCudaCheck(cudaGetLastError());\n        return output;\n    }\n\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n    PrRoIPoolingForwardGpu(\n        stream, features.data<float>(), rois.data<float>(), output.data<float>(),\n        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,\n        top_count\n    );\n\n    THCudaCheck(cudaGetLastError());\n    return output;\n}\n\nat::Tensor prroi_pooling_backward_cuda(\n    const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff,\n    int pooled_height, int pooled_width, float spatial_scale) {\n\n    auto features_diff = at::zeros_like(features);\n\n    int nr_rois = rois.size(0);\n    int batch_size = features.size(0);\n    int nr_channels = features.size(1);\n    int height = features.size(2);\n    int width = features.size(3);\n    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;\n    int bottom_count = batch_size * nr_channels * height * width;\n\n    if (output.numel() == 0) {\n        THCudaCheck(cudaGetLastError());\n        return features_diff;\n    }\n\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n    PrRoIPoolingBackwardGpu(\n        stream,\n        features.data<float>(), rois.data<float>(), output.data<float>(), output_diff.data<float>(),\n        features_diff.data<float>(),\n        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,\n        top_count, bottom_count\n    );\n\n    THCudaCheck(cudaGetLastError());\n    return features_diff;\n}\n\nat::Tensor prroi_pooling_coor_backward_cuda(\n    const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff,\n    int pooled_height, int pooled_width, float spatial_scale) {\n\n    auto coor_diff = at::zeros_like(rois);\n\n    int nr_rois = rois.size(0);\n    int nr_channels = features.size(1);\n    int height = features.size(2);\n    int width = features.size(3);\n    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;\n    int bottom_count = nr_rois * 5;\n\n    if (output.numel() == 0) {\n        THCudaCheck(cudaGetLastError());\n        return coor_diff;\n    }\n\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n    PrRoIPoolingCoorBackwardGpu(\n        stream,\n        features.data<float>(), rois.data<float>(), output.data<float>(), output_diff.data<float>(),\n        coor_diff.data<float>(),\n        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,\n        top_count, 
bottom_count\n    );\n\n    THCudaCheck(cudaGetLastError());\n    return coor_diff;\n}\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n    m.def(\"prroi_pooling_forward_cuda\", &prroi_pooling_forward_cuda, \"PRRoIPooling_forward\");\n    m.def(\"prroi_pooling_backward_cuda\", &prroi_pooling_backward_cuda, \"PRRoIPooling_backward\");\n    m.def(\"prroi_pooling_coor_backward_cuda\", &prroi_pooling_coor_backward_cuda, \"PRRoIPooling_backward_coor\");\n}\n"
  },
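  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/examples/example_setup.py",
    "content": "# -*- coding: utf-8 -*-\n# File   : example_setup.py\n#\n# NOTE: illustrative sketch added for this write-up; NOT part of the upstream\n# PreciseRoIPooling repository. functional.py builds this extension\n# just-in-time with torch.utils.cpp_extension.load; an equivalent\n# ahead-of-time build can be expressed with a setup.py along these lines\n# (the file name and invocation are assumptions, the sources are the real\n# ones). The .c file contains C++, which the ninja backend of BuildExtension\n# compiles with the C++ compiler.\n#\n# Usage: python example_setup.py build_ext --inplace\n\nfrom setuptools import setup\n\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension\n\nsetup(\n    name='_prroi_pooling',\n    ext_modules=[\n        CUDAExtension(\n            name='_prroi_pooling',\n            # the same sources functional.py hands to cpp_extension.load()\n            sources=[\n                'src/prroi_pooling_gpu.c',\n                'src/prroi_pooling_gpu_impl.cu',\n            ],\n        ),\n    ],\n    cmdclass={'build_ext': BuildExtension},\n)\n"
  },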
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.h",
    "content": "/*\n * File   : prroi_pooling_gpu.h\n * Author : Jiayuan Mao, Tete Xiao\n * Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com \n * Date   : 07/13/2018\n * \n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\nint prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale);\n\nint prroi_pooling_backward_cuda(\n    THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,\n    int pooled_height, int pooled_width, float spatial_scale\n);\n\nint prroi_pooling_coor_backward_cuda(\n    THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,\n    int pooled_height, int pooled_width, float spatial_scal\n);\n\n"
  },
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cu",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cu\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#include \"prroi_pooling_gpu_impl.cuh\"\n\n#include <cstdio>\n#include <cfloat>\n\n#define CUDA_KERNEL_LOOP(i, n) \\\n    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \\\n        i < (n); \\\n        i += blockDim.x * gridDim.x)\n\n#define CUDA_POST_KERNEL_CHECK \\\n    do { \\\n        cudaError_t err = cudaGetLastError(); \\\n        if (cudaSuccess != err) { \\\n            fprintf(stderr, \"cudaCheckError() failed : %s\\n\", cudaGetErrorString(err)); \\\n            exit(-1); \\\n        } \\\n    } while(0)\n\n#define CUDA_NUM_THREADS 512\n\nnamespace {\n\nstatic int CUDA_NUM_BLOCKS(const int N) {\n  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;\n}\n\n__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    float retVal = overflow ? 0.0f : data[h * width + w];\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingGetCoeff(float dh, float dw){\n    dw = dw > 0 ? dw : -dw;\n    dh = dh > 0 ? dh : -dh;\n    return (1.0f - dh) * (1.0f - dw);\n}\n\n__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {\n    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;\n}\n\n__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){\n    float retVal = 0.0f;\n    int h1 = floorf(h);\n    int w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h);\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n    float sum_out = 0;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * 
lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;\n\n    return sum_out;\n}\n\n__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    if (!overflow)\n        atomicAdd(diff + h * width + w, top_diff * coeff);\n}\n\n__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);\n}\n\n__global__ void PrRoIPoolingForward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_OUT top_data,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n\n    bottom_rois += n * 5;\n    int roi_batch_ind = bottom_rois[0];\n\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));\n    float 
roi_height = max(roi_end_h - roi_start_h, ((float)0.0));\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    float *this_out = top_data + index;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n    if (win_size == 0) {\n        *this_out = 0;\n        return;\n    }\n\n    float sum_out = 0;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n    *this_out = sum_out / win_size;\n  }\n}\n\n__global__ void PrRoIPoolingBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n\n  }\n}\n\n__global__ void PrRoIPoolingCoorBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_data,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    const float *this_top_data = top_data + index;\n    float *this_data_grad = bottom_diff + n * 5;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    // WARNING: to be discussed\n    if (sum_out == 0)\n        return;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;\n    for (int h_iter = s_h; h_iter < e_h; ++h_iter) {\n        g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));\n\n        g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));\n    }\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter) {\n        g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));\n\n        g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));\n    }\n\n    float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);\n    float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);\n\n    partial_x1 = partial_x1 / win_size * spatial_scale;\n    partial_x2 = partial_x2 / win_size * spatial_scale;\n    partial_y1 = partial_y1 / win_size * spatial_scale;\n    partial_y2 = partial_y2 / win_size * spatial_scale;\n\n    // (b, x1, y1, x2, y2)\n\n    this_data_grad[0] = 0;\n    atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width)\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height)\n            * (*this_out_grad));\n  }\n}\n\n} /* !anonymous namespace */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count) {\n\n    
PrRoIPoolingForward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_rois, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingCoorBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\n} /* !extern \"C\" */\n\n"
  },
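  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/examples/example_bin_reference.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : example_bin_reference.py\n#\n# NOTE: illustrative sketch added for this write-up; NOT part of the upstream\n# PreciseRoIPooling repository. The CUDA kernel above evaluates the *exact*\n# closed-form integral of the bilinearly interpolated feature map over each\n# bin (PrRoIPoolingMatCalculation). A dense-grid average of bilinear samples\n# converges to the same value, which gives a slow but simple cross-check for\n# a single bin.\n\nimport math\n\nimport torch\n\n\ndef bilinear(feat, h, w):\n    # Bilinear interpolation with zero padding outside the map, matching\n    # PrRoIPoolingGetData / PrRoIPoolingGetCoeff in the kernel.\n    H, W = feat.shape\n    h0, w0 = math.floor(h), math.floor(w)\n    val = 0.0\n    for hh in (h0, h0 + 1):\n        for ww in (w0, w0 + 1):\n            if 0 <= hh < H and 0 <= ww < W:\n                val += feat[hh, ww].item() * (1.0 - abs(h - hh)) * (1.0 - abs(w - ww))\n    return val\n\n\ndef prroi_bin_avg(feat, y0, x0, y1, x1, n=200):\n    # Midpoint-rule approximation of 1/area * integral of bilinear(feat, y, x)\n    # over the continuous window [y0, y1] x [x0, x1].\n    total = 0.0\n    for i in range(n):\n        for j in range(n):\n            y = y0 + (i + 0.5) * (y1 - y0) / n\n            x = x0 + (j + 0.5) * (x1 - x0) / n\n            total += bilinear(feat, y, x)\n    return total / (n * n)\n\n\nif __name__ == '__main__':\n    feat = torch.rand(8, 8)\n    # One 1x1 pooled bin over the continuous window [1.3, 3.7] x [2.1, 5.9]:\n    print(prroi_bin_avg(feat, 1.3, 2.1, 3.7, 5.9))\n"
  },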
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cuh",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cuh\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#ifndef PRROI_POOLING_GPU_IMPL_CUH\n#define PRROI_POOLING_GPU_IMPL_CUH\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#define F_DEVPTR_IN const float *\n#define F_DEVPTR_OUT float *\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count);\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\n#ifdef __cplusplus\n} /* !extern \"C\" */\n#endif\n\n#endif /* !PRROI_POOLING_GPU_IMPL_CUH */\n\n"
  },
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/tests/test_prroi_pooling2d.py",
    "content": "# -*- coding: utf-8 -*-\n# File   : test_prroi_pooling2d.py\n# Author : Jiayuan Mao\n# Email  : maojiayuan@gmail.com\n# Date   : 18/02/2018\n#\n# This file is part of Jacinle.\n\nimport unittest\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom jactorch.utils.unittest import TorchTestCase\n\nfrom prroi_pool import PrRoIPool2D\n\n\nclass TestPrRoIPool2D(TorchTestCase):\n    def test_forward(self):\n        pool = PrRoIPool2D(7, 7, spatial_scale=0.5)\n        features = torch.rand((4, 16, 24, 32)).cuda()\n        rois = torch.tensor([\n            [0, 0, 0, 14, 14],\n            [1, 14, 14, 28, 28],\n        ]).float().cuda()\n\n        out = pool(features, rois)\n        out_gold = F.avg_pool2d(features, kernel_size=2, stride=1)\n\n        self.assertTensorClose(out, torch.stack((\n            out_gold[0, :, :7, :7],\n            out_gold[1, :, 7:14, 7:14],\n        ), dim=0))\n\n    def test_backward_shapeonly(self):\n        pool = PrRoIPool2D(2, 2, spatial_scale=0.5)\n\n        features = torch.rand((4, 2, 24, 32)).cuda()\n        rois = torch.tensor([\n            [0, 0, 0, 4, 4],\n            [1, 14, 14, 18, 18],\n        ]).float().cuda()\n        features.requires_grad = rois.requires_grad = True\n        out = pool(features, rois)\n\n        loss = out.sum()\n        loss.backward()\n\n        self.assertTupleEqual(features.size(), features.grad.size())\n        self.assertTupleEqual(rois.size(), rois.grad.size())\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
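  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/pytorch/tests/example_coor_gradcheck.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : example_coor_gradcheck.py\n#\n# NOTE: illustrative sketch added for this write-up; NOT part of the upstream\n# PreciseRoIPooling repository. Unlike the shape-only test above, this checks\n# the coordinate gradient numerically: the analytic d(loss)/d(x2) produced by\n# prroi_pooling_coor_backward_cuda should agree with a central finite\n# difference to a few decimal places (float32, so expect some error).\n\nimport torch\n\nfrom prroi_pool import PrRoIPool2D\n\n\nif __name__ == '__main__':\n    pool = PrRoIPool2D(2, 2, spatial_scale=1.0)\n    feat = torch.rand(1, 1, 16, 16, device='cuda')\n    rois = torch.tensor([[0., 2., 2., 10., 10.]], device='cuda', requires_grad=True)\n\n    loss = pool(feat, rois).sum()\n    loss.backward()\n    analytic = rois.grad[0, 3].item()  # gradient w.r.t. x2\n\n    eps = 1e-2\n    r_plus = rois.detach().clone()\n    r_plus[0, 3] += eps\n    r_minus = rois.detach().clone()\n    r_minus[0, 3] -= eps\n    numeric = (pool(feat, r_plus).sum() - pool(feat, r_minus).sum()).item() / (2 * eps)\n\n    print(analytic, numeric)  # the two values should be close\n"
  },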
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cu",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cu\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#include \"prroi_pooling_gpu_impl.cuh\"\n\n#include <cstdio>\n#include <cfloat>\n\n#define CUDA_KERNEL_LOOP(i, n) \\\n    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \\\n        i < (n); \\\n        i += blockDim.x * gridDim.x)\n\n#define CUDA_POST_KERNEL_CHECK \\\n    do { \\\n        cudaError_t err = cudaGetLastError(); \\\n        if (cudaSuccess != err) { \\\n            fprintf(stderr, \"cudaCheckError() failed : %s\\n\", cudaGetErrorString(err)); \\\n            exit(-1); \\\n        } \\\n    } while(0)\n\n#define CUDA_NUM_THREADS 512\n\nnamespace {\n\nstatic int CUDA_NUM_BLOCKS(const int N) {\n  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;\n}\n\n__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    float retVal = overflow ? 0.0f : data[h * width + w];\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingGetCoeff(float dh, float dw){\n    dw = dw > 0 ? dw : -dw;\n    dh = dh > 0 ? dh : -dh;\n    return (1.0f - dh) * (1.0f - dw);\n}\n\n__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {\n    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;\n}\n\n__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){\n    float retVal = 0.0f;\n    int h1 = floorf(h);\n    int w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h);\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n    float sum_out = 0;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * 
lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;\n\n    return sum_out;\n}\n\n__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    if (!overflow)\n        atomicAdd(diff + h * width + w, top_diff * coeff);\n}\n\n__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);\n}\n\n__global__ void PrRoIPoolingForward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_OUT top_data,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n\n    bottom_rois += n * 5;\n    int roi_batch_ind = bottom_rois[0];\n\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));\n    float 
roi_height = max(roi_end_h - roi_start_h, ((float)0.0));\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    float *this_out = top_data + index;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n    if (win_size == 0) {\n        *this_out = 0;\n        return;\n    }\n\n    float sum_out = 0;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n    *this_out = sum_out / win_size;\n  }\n}\n\n__global__ void PrRoIPoolingBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n\n  }\n}\n\n__global__ void PrRoIPoolingCoorBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_data,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    const float *this_top_data = top_data + index;\n    float *this_data_grad = bottom_diff + n * 5;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    // WARNING: to be discussed\n    if (sum_out == 0)\n        return;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;\n    for (int h_iter = s_h; h_iter < e_h; ++h_iter) {\n        g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));\n\n        g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));\n    }\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter) {\n        g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));\n\n        g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));\n    }\n\n    float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);\n    float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);\n\n    partial_x1 = partial_x1 / win_size * spatial_scale;\n    partial_x2 = partial_x2 / win_size * spatial_scale;\n    partial_y1 = partial_y1 / win_size * spatial_scale;\n    partial_y2 = partial_y2 / win_size * spatial_scale;\n\n    // (b, x1, y1, x2, y2)\n\n    this_data_grad[0] = 0;\n    atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width)\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height)\n            * (*this_out_grad));\n  }\n}\n\n} /* !anonymous namespace */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count) {\n\n    
PrRoIPoolingForward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_rois, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingCoorBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\n} /* !extern \"C\" */\n\n"
  },
  {
    "path": "external/AR/ltr/external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cuh",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cuh\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#ifndef PRROI_POOLING_GPU_IMPL_CUH\n#define PRROI_POOLING_GPU_IMPL_CUH\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#define F_DEVPTR_IN const float *\n#define F_DEVPTR_OUT float *\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count);\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\n#ifdef __cplusplus\n} /* !extern \"C\" */\n#endif\n\n#endif /* !PRROI_POOLING_GPU_IMPL_CUH */\n\n"
  },
  {
    "path": "external/AR/ltr/models/AR_seg_mask/AR_seg_mask.py",
    "content": "import torch.nn as nn\nfrom ltr.models.neck import CorrNL\nfrom ltr import model_constructor\nimport torch\nimport ltr.models.backbone.resnet_seg as resnet_seg\n\nfrom ltr.models.head import seg_network\nfrom easydict import EasyDict as edict\n\n'''2020.4.14 replace mask head with frtm for higher-quality mask'''\n'''2020.4.22 Only use the mask branch'''\n\n\nclass ARnet_seg_mask(nn.Module):\n    \"\"\" Scale Estimation network module with three branches: bbox, coner and mask. \"\"\"\n    def __init__(self, feature_extractor, neck_module, head_module, used_layers,\n                 extractor_grad=True,output_size=(256,256)):\n        \"\"\"\n        args:\n            feature_extractor - backbone feature extractor\n            bb_regressor - IoU prediction module\n            bb_regressor_layer - List containing the name of the layers from feature_extractor, which are input to\n                                    bb_regressor\n            extractor_grad - Bool indicating whether backbone feature extractor requires gradients\n        \"\"\"\n        super(ARnet_seg_mask, self).__init__()\n\n        self.feature_extractor = feature_extractor\n        self.neck = neck_module\n        self.refiner = head_module\n        self.used_layers = used_layers\n        self.output_size = output_size\n\n        if not extractor_grad:\n            for p in self.feature_extractor.parameters():\n                p.requires_grad_(False)\n\n    def forward(self, train_imgs, test_imgs, train_bb, mode='train'):\n        \"\"\" Forward pass\n        Note: If the training is done in sequence mode, that is, test_imgs.dim() == 5, then the batch dimension\n        corresponds to the first dimensions. test_imgs is thus of the form [sequence, batch, feature, row, col]\n        \"\"\"\n        self.forward_ref(train_imgs, train_bb)\n        pred_dict = self.forward_test(test_imgs, mode)\n        return pred_dict\n\n    def forward_ref(self, train_imgs, train_bb):\n        \"\"\" Forward pass of reference branch.\n        size of train_imgs is (1,batch,3,H,W), train_bb is (1,batch,4)\"\"\"\n        num_sequences = train_imgs.shape[-4] # batch\n        num_train_images = train_imgs.shape[0] if train_imgs.dim() == 5 else 1 # 1\n\n        # Extract backbone features\n        '''train_feat OrderedDict, key:'layer4' '''\n        train_feat_dict = self.extract_backbone_features(train_imgs.view(-1, *train_imgs.shape[-3:])) # 输入size是(batch,3,256,256)\n\n        train_feat_list = [feat for feat in train_feat_dict.values()] #list,其中每个元素对应一层输出的特征(tensor)\n\n        # get reference feature\n        self.neck.get_ref_kernel(train_feat_list, train_bb.view(num_train_images, num_sequences, 4))\n\n\n    def forward_test(self, test_imgs, mode='train'):\n        \"\"\" Forward pass of test branch. 
size of test_imgs is (1,batch,3,256,256)\"\"\"\n        output = {}\n        # Extract backbone features\n        test_feat_dict = self.extract_backbone_features(test_imgs.view(-1, *test_imgs.shape[-3:]),\n                                                        layers=['layer1','layer2','layer3','layer4','layer5'])# 输入size是(batch,3,256,256)\n        '''list,tensor'''\n        # Save low-level feature list\n        # Lfeat_list = [feat for name, feat in test_feat_dict.items() if name != 'layer3']\n\n        # fuse feature from two branches\n        fusion_feat = self.neck.fuse_feat([test_feat_dict['layer4']])\n        # Obtain bbox prediction\n        if mode=='train':\n            output['mask'] = torch.sigmoid(self.refiner(fusion_feat, test_feat_dict, self.output_size))\n        elif mode == 'mask':\n            output = torch.sigmoid(self.refiner(fusion_feat, test_feat_dict, self.output_size))\n        else:\n            raise ValueError(\"mode should be train or test\")\n        return output\n\n    def extract_backbone_features(self, im, layers=None):\n        if layers is None:\n            layers = self.used_layers\n        return self.feature_extractor(im, layers)\n\n    def extract_features(self, im, layers):\n        return self.feature_extractor(im, layers)\n\n\n\n@model_constructor\ndef ARnet_seg_mask_resnet50(backbone_pretrained=True,used_layers=('layer4',),pool_size=None):\n    # backbone\n    backbone_net = resnet_seg.resnet50(pretrained=backbone_pretrained)\n    # neck\n    neck_net = CorrNL.CorrNL(pool_size=pool_size)\n    # multiple heads\n    '''create segnet'''\n    in_channels = 1024\n    # disc_params = edict(layer=\"layer4\", in_channels=in_channels, c_channels=96, out_channels=64) # non-local feat (64 channels rather than 1)\n    '''2020.4.22 change \"out_channels\" to pool_size * pool_size'''\n    disc_params = edict(layer=\"layer4\", in_channels=in_channels, c_channels=96, out_channels=pool_size*pool_size) # non-local feat (64 channels rather than 1)\n    refnet_params = edict(\n        layers=(\"layer5\", \"layer4\", \"layer3\", \"layer2\"),\n        nchannels=64, use_batch_norm=True)\n    disc_params.in_channels = backbone_net.get_out_channels()[disc_params.layer]\n\n    p = refnet_params\n    refinement_layers_channels = {L: nch for L, nch in backbone_net.get_out_channels().items() if L in p.layers}\n    refiner = seg_network.SegNetwork(disc_params.out_channels, p.nchannels, refinement_layers_channels, p.use_batch_norm)\n    '''create Alpha-Refine'''\n    net = ARnet_seg_mask(feature_extractor=backbone_net, neck_module=neck_net,\n                         head_module=refiner,\n                         used_layers=used_layers, extractor_grad=True,\n                         output_size=(int(pool_size*2*16),int(pool_size*2*16)))\n    return net\n"
  },
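  {
    "path": "external/AR/ltr/models/AR_seg_mask/example_build_arnet.py",
    "content": "# -*- coding: utf-8 -*-\n# File   : example_build_arnet.py\n#\n# NOTE: illustrative sketch added for this write-up; NOT part of the original\n# repository. It wires up ARnet_seg_mask_resnet50 with dummy tensors whose\n# shapes follow the docstrings above ((1, batch, 3, H, W) images and\n# (1, batch, 4) boxes). It assumes the ltr package and its resnet_seg /\n# CorrNL / seg_network dependencies are importable and a CUDA device is\n# available; treat the exact output shape as an assumption.\n\nimport torch\n\nfrom ltr.models.AR_seg_mask.AR_seg_mask import ARnet_seg_mask_resnet50\n\n\nif __name__ == '__main__':\n    # pool_size=8 gives output_size = (8*2*16, 8*2*16) = (256, 256)\n    net = ARnet_seg_mask_resnet50(backbone_pretrained=False, pool_size=8).cuda()\n\n    train_imgs = torch.rand(1, 2, 3, 256, 256).cuda()               # reference frames\n    train_bb = torch.tensor([[[64., 64., 128., 128.]] * 2]).cuda()  # (1, batch, 4) boxes\n    test_imgs = torch.rand(1, 2, 3, 256, 256).cuda()                # search frames\n\n    out = net(train_imgs, test_imgs, train_bb, mode='train')\n    print(out['mask'].shape)  # sigmoid mask, spatially (256, 256)\n"
  },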
  {
    "path": "external/AR/ltr/models/AR_seg_mask/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/ltr/models/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/ltr/models/backbone/__init__.py",
    "content": "from .resnet import resnet18, resnet50, resnet_baby\nfrom .resnet18_vggm import resnet18_vggmconv1\n"
  },
  {
    "path": "external/AR/ltr/models/backbone/base.py",
    "content": "import torch\nimport torch.nn as nn\n\n\nclass Backbone(nn.Module):\n    \"\"\"Base class for backbone networks. Handles freezing layers etc.\n    args:\n        frozen_layers  -  Name of layers to freeze. Either list of strings, 'none' or 'all'. Default: 'none'.\n    \"\"\"\n    def __init__(self, frozen_layers=()):\n        super().__init__()\n\n        if isinstance(frozen_layers, str):\n            if frozen_layers.lower() == 'none':\n                frozen_layers = ()\n            elif frozen_layers.lower() != 'all':\n                raise ValueError('Unknown option for frozen layers: \\\"{}\\\". Should be \\\"all\\\", \\\"none\\\" or list of layer names.'.format(frozen_layers))\n\n        self.frozen_layers = frozen_layers\n        self._is_frozen_nograd = False\n\n\n    def train(self, mode=True):\n        super().train(mode)\n        if mode == True:\n            self._set_frozen_to_eval()\n        if not self._is_frozen_nograd:\n            self._set_frozen_to_nograd()\n            self._is_frozen_nograd = True\n\n\n    def _set_frozen_to_eval(self):\n        if isinstance(self.frozen_layers, str) and self.frozen_layers.lower() == 'all':\n            self.eval()\n        else:\n            for layer in self.frozen_layers:\n                getattr(self, layer).eval()\n\n\n    def _set_frozen_to_nograd(self):\n        if isinstance(self.frozen_layers, str) and self.frozen_layers.lower() == 'all':\n            for p in self.parameters():\n                p.requires_grad_(False)\n        else:\n            for layer in self.frozen_layers:\n                for p in getattr(self, layer).parameters():\n                    p.requires_grad_(False)"
  },
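  {
    "path": "external/AR/ltr/models/backbone/example_frozen_backbone.py",
    "content": "# -*- coding: utf-8 -*-\n# File   : example_frozen_backbone.py\n#\n# NOTE: illustrative sketch added for this write-up; NOT part of the original\n# repository. It demonstrates the freezing contract of the Backbone base\n# class above: layers listed in frozen_layers are forced back to eval() mode\n# on every train() call, and their parameters get requires_grad=False the\n# first time train() runs. TinyBackbone is a hypothetical subclass.\n\nimport torch.nn as nn\n\nfrom ltr.models.backbone.base import Backbone\n\n\nclass TinyBackbone(Backbone):\n    def __init__(self):\n        super().__init__(frozen_layers=['stem'])\n        self.stem = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))\n        self.head = nn.Conv2d(8, 8, 3)\n\n    def forward(self, x):\n        return self.head(self.stem(x))\n\n\nif __name__ == '__main__':\n    net = TinyBackbone()\n    net.train()\n    print(net.stem.training)  # False: frozen layers stay in eval mode (BatchNorm stats fixed)\n    print(net.head.training)  # True: the rest of the network trains normally\n    print(all(not p.requires_grad for p in net.stem.parameters()))  # True\n"
  },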
  {
    "path": "external/AR/ltr/models/backbone/resnet.py",
    "content": "import math\nimport torch.nn as nn\nfrom collections import OrderedDict\nimport torch.utils.model_zoo as model_zoo\nfrom torchvision.models.resnet import model_urls\nfrom .base import Backbone\n\n\ndef conv3x3(in_planes, out_planes, stride=1, dilation=1):\n    \"\"\"3x3 convolution with padding\"\"\"\n    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n                     padding=dilation, bias=False, dilation=dilation)\n\n\nclass BasicBlock(nn.Module):\n    expansion = 1\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1, use_bn=True):\n        super(BasicBlock, self).__init__()\n        self.use_bn = use_bn\n        self.conv1 = conv3x3(inplanes, planes, stride, dilation=dilation)\n\n        if use_bn:\n            self.bn1 = nn.BatchNorm2d(planes)\n        self.relu = nn.ReLU(inplace=True)\n        self.conv2 = conv3x3(planes, planes, dilation=dilation)\n\n        if use_bn:\n            self.bn2 = nn.BatchNorm2d(planes)\n        self.downsample = downsample\n        self.stride = stride\n\n    def forward(self, x):\n        residual = x\n\n        out = self.conv1(x)\n\n        if self.use_bn:\n            out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n\n        if self.use_bn:\n            out = self.bn2(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.relu(out)\n\n        return out\n\n\nclass Bottleneck(nn.Module):\n    expansion = 4\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):\n        super(Bottleneck, self).__init__()\n        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(planes)\n        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n                               padding=dilation, bias=False, dilation=dilation)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n        self.bn3 = nn.BatchNorm2d(planes * 4)\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n        self.stride = stride\n\n    def forward(self, x):\n        residual = x\n\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n        out = self.relu(out)\n\n        out = self.conv3(out)\n        out = self.bn3(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.relu(out)\n\n        return out\n\n\nclass ResNet(Backbone):\n    \"\"\" ResNet network module. 
Allows extracting specific feature blocks.\"\"\"\n    def __init__(self, block, layers, output_layers, num_classes=1000, inplanes=64, dilation_factor=1, frozen_layers=()):\n        self.inplanes = inplanes\n        super(ResNet, self).__init__(frozen_layers=frozen_layers)\n        self.output_layers = output_layers\n        self.conv1 = nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3,\n                               bias=False)\n        self.bn1 = nn.BatchNorm2d(inplanes)\n        self.relu = nn.ReLU(inplace=True)\n        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n        stride = [1 + (dilation_factor < l) for l in (8, 4, 2)]\n        self.layer1 = self._make_layer(block, inplanes, layers[0], dilation=max(dilation_factor//8, 1))\n        self.layer2 = self._make_layer(block, inplanes*2, layers[1], stride=stride[0], dilation=max(dilation_factor//4, 1))\n        self.layer3 = self._make_layer(block, inplanes*4, layers[2], stride=stride[1], dilation=max(dilation_factor//2, 1))\n        self.layer4 = self._make_layer(block, inplanes*8, layers[3], stride=stride[2], dilation=dilation_factor)\n\n        out_feature_strides = {'conv1': 4, 'layer1': 4, 'layer2': 4*stride[0], 'layer3': 4*stride[0]*stride[1],\n                               'layer4': 4*stride[0]*stride[1]*stride[2]}\n\n        # TODO better way?\n        if isinstance(self.layer1[0], BasicBlock):\n            out_feature_channels = {'conv1': inplanes, 'layer1': inplanes, 'layer2': inplanes*2, 'layer3': inplanes*4,\n                               'layer4': inplanes*8}\n        elif isinstance(self.layer1[0], Bottleneck):\n            base_num_channels = 4 * inplanes\n            out_feature_channels = {'conv1': inplanes, 'layer1': base_num_channels, 'layer2': base_num_channels * 2,\n                                    'layer3': base_num_channels * 4, 'layer4': base_num_channels * 8}\n        else:\n            raise Exception('block not supported')\n\n        self._out_feature_strides = out_feature_strides\n        self._out_feature_channels = out_feature_channels\n\n        # self.avgpool = nn.AvgPool2d(7, stride=1)\n        self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n        self.fc = nn.Linear(inplanes*8 * block.expansion, num_classes)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n    def out_feature_strides(self, layer=None):\n        if layer is None:\n            return self._out_feature_strides\n        else:\n            return self._out_feature_strides[layer]\n\n    def out_feature_channels(self, layer=None):\n        if layer is None:\n            return self._out_feature_channels\n        else:\n            return self._out_feature_channels[layer]\n\n    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(self.inplanes, planes * block.expansion,\n                          kernel_size=1, stride=stride, bias=False),\n                nn.BatchNorm2d(planes * block.expansion),\n            )\n\n        layers = []\n        layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))\n        self.inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(block(self.inplanes, planes))\n\n        return nn.Sequential(*layers)\n\n    def _add_output_and_check(self, name, x, outputs, output_layers):\n        if name in output_layers:\n            outputs[name] = x\n        return len(output_layers) == len(outputs)\n\n    def forward(self, x, output_layers=None):\n        \"\"\" Forward pass with input x. The output_layers specify the feature blocks which must be returned \"\"\"\n        outputs = OrderedDict()\n\n        if output_layers is None:\n            output_layers = self.output_layers\n\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.relu(x)\n\n        if self._add_output_and_check('conv1', x, outputs, output_layers):\n            return outputs\n\n        x = self.maxpool(x)\n\n        x = self.layer1(x)\n\n        if self._add_output_and_check('layer1', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer2(x)\n\n        if self._add_output_and_check('layer2', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer3(x)\n\n        if self._add_output_and_check('layer3', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer4(x)\n\n        if self._add_output_and_check('layer4', x, outputs, output_layers):\n            return outputs\n\n        x = self.avgpool(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc(x)\n\n        if self._add_output_and_check('fc', x, outputs, output_layers):\n            return outputs\n\n        if len(output_layers) == 1 and output_layers[0] == 'default':\n            return x\n\n        raise ValueError('output_layer is wrong.')\n\n\ndef resnet_baby(output_layers=None, pretrained=False, inplanes=16, **kwargs):\n    \"\"\"Constructs a ResNet-18 model.\n    \"\"\"\n\n    if output_layers is None:\n        output_layers = ['default']\n    else:\n        for l in output_layers:\n            if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, inplanes=inplanes, **kwargs)\n\n    if pretrained:\n        raise NotImplementedError\n    return model\n\n\ndef resnet18(output_layers=None, pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-18 model.\n    \"\"\"\n\n    if output_layers is None:\n        
output_layers = ['default']\n    else:\n        for l in output_layers:\n            if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, **kwargs)\n\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n    return model\n\n\ndef resnet50(output_layers=None, pretrained=False, **kwargs):\n    \"\"\"Constructs a ResNet-50 model.\n    \"\"\"\n\n    if output_layers is None:\n        output_layers = ['default']\n    else:\n        for l in output_layers:\n            if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNet(Bottleneck, [3, 4, 6, 3], output_layers, **kwargs)\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n    return model"
  },
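  {
    "path": "external/AR/ltr/models/backbone/example_resnet_features.py",
    "content": "\"\"\"Usage sketch for resnet.py: extracting intermediate feature maps from the\nResNet-50 backbone. This file is an illustrative addition; the file name and the\nshape checks are assumptions, not part of the original training code.\"\"\"\nimport torch\nfrom ltr.models.backbone.resnet import resnet50\n\nif __name__ == '__main__':\n    net = resnet50(output_layers=['layer2', 'layer3'], pretrained=False)\n    net.eval()\n    with torch.no_grad():\n        feats = net(torch.randn(1, 3, 288, 288))\n    # Bottleneck channels: layer2 gives 512 at stride 8, layer3 gives 1024 at stride 16\n    assert feats['layer2'].shape == (1, 512, 36, 36)\n    assert feats['layer3'].shape == (1, 1024, 18, 18)\n"
  },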
  {
    "path": "external/AR/ltr/models/backbone/resnet18_vggm.py",
    "content": "import math\nimport torch\nimport torch.nn as nn\nfrom collections import OrderedDict\nfrom torchvision.models.resnet import BasicBlock\nfrom .base import Backbone\n\n\nclass SpatialCrossMapLRN(nn.Module):\n    def __init__(self, local_size=1, alpha=1.0, beta=0.75, k=1, ACROSS_CHANNELS=True):\n        super(SpatialCrossMapLRN, self).__init__()\n        self.ACROSS_CHANNELS = ACROSS_CHANNELS\n        if ACROSS_CHANNELS:\n            self.average=nn.AvgPool3d(kernel_size=(local_size, 1, 1),\n                    stride=1,\n                    padding=(int((local_size-1.0)/2), 0, 0))\n        else:\n            self.average=nn.AvgPool2d(kernel_size=local_size,\n                    stride=1,\n                    padding=int((local_size-1.0)/2))\n        self.alpha = alpha\n        self.beta = beta\n        self.k = k\n\n    def forward(self, x):\n        if self.ACROSS_CHANNELS:\n            div = x.pow(2).unsqueeze(1)\n            div = self.average(div).squeeze(1)\n            div = div.mul(self.alpha).add(self.k).pow(self.beta)\n        else:\n            div = x.pow(2)\n            div = self.average(div)\n            div = div.mul(self.alpha).add(self.k).pow(self.beta)\n        x = x.div(div)\n        return x\n\n\nclass ResNetVGGm1(Backbone):\n\n    def __init__(self, block, layers, output_layers, num_classes=1000, frozen_layers=()):\n        self.inplanes = 64\n        super(ResNetVGGm1, self).__init__(frozen_layers=frozen_layers)\n        self.output_layers = output_layers\n        self.vggmconv1 = nn.Conv2d(3,96,(7, 7),(2, 2), padding=3)\n        self.vgglrn = SpatialCrossMapLRN(5, 0.0005, 0.75, 2)\n        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n                               bias=False)\n        self.bn1 = nn.BatchNorm2d(64)\n        self.relu = nn.ReLU(inplace=True)\n        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n        self.layer1 = self._make_layer(block, 64, layers[0])\n        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n        # self.avgpool = nn.AvgPool2d(7, stride=1)\n        self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n        self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n    def _make_layer(self, block, planes, blocks, stride=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(self.inplanes, planes * block.expansion,\n                          kernel_size=1, stride=stride, bias=False),\n                nn.BatchNorm2d(planes * block.expansion),\n            )\n\n        layers = []\n        layers.append(block(self.inplanes, planes, stride, downsample))\n        self.inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(block(self.inplanes, planes))\n\n        return nn.Sequential(*layers)\n\n\n    def _add_output_and_check(self, name, x, outputs, output_layers):\n        if name in output_layers:\n            outputs[name] = x\n        return len(output_layers) == len(outputs)\n\n\n    def forward(self, x, output_layers=None):\n        outputs = OrderedDict()\n\n        if output_layers is None:\n            output_layers = self.output_layers\n\n        if 'vggconv1' in output_layers:\n            c1 = self.vgglrn(self.relu(self.vggmconv1(x)))\n            if self._add_output_and_check('vggconv1', c1, outputs, output_layers):\n                return outputs\n\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.relu(x)\n\n        if self._add_output_and_check('conv1', x, outputs, output_layers):\n            return outputs\n\n        x = self.maxpool(x)\n\n        x = self.layer1(x)\n\n        if self._add_output_and_check('layer1', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer2(x)\n\n        if self._add_output_and_check('layer2', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer3(x)\n\n        if self._add_output_and_check('layer3', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer4(x)\n\n        if self._add_output_and_check('layer4', x, outputs, output_layers):\n            return outputs\n\n        x = self.avgpool(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc(x)\n\n        if self._add_output_and_check('fc', x, outputs, output_layers):\n            return outputs\n\n        if len(output_layers) == 1 and output_layers[0] == 'default':\n            return x\n\n        raise ValueError('output_layer is wrong.')\n\n\ndef resnet18_vggmconv1(output_layers=None, path=None, **kwargs):\n    \"\"\"Constructs a ResNet-18 model with first-layer VGGm features.\n    \"\"\"\n\n    if output_layers is None:\n        output_layers = ['default']\n    else:\n        for l in output_layers:\n            if l not in ['vggconv1', 'conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNetVGGm1(BasicBlock, [2, 2, 2, 2], output_layers, **kwargs)\n\n    if path is not None:\n        model.load_state_dict(torch.load(path), strict=False)\n    return model"
  },
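  {
    "path": "external/AR/ltr/models/backbone/example_resnet18_vggm.py",
    "content": "\"\"\"Usage sketch for resnet18_vggm.py: the 'vggconv1' output comes from the separate\nVGG-M stem, independently of the ResNet trunk. Illustrative addition; the file name\nand shape checks are assumptions.\"\"\"\nimport torch\nfrom ltr.models.backbone.resnet18_vggm import resnet18_vggmconv1\n\nif __name__ == '__main__':\n    net = resnet18_vggmconv1(output_layers=['vggconv1', 'layer3'])\n    net.eval()\n    with torch.no_grad():\n        feats = net(torch.randn(1, 3, 224, 224))\n    assert feats['vggconv1'].shape == (1, 96, 112, 112)  # VGG-M stem, stride 2\n    assert feats['layer3'].shape == (1, 256, 14, 14)     # ResNet trunk, stride 16\n"
  },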
  {
    "path": "external/AR/ltr/models/backbone/resnet_seg.py",
    "content": "import math\nimport torch.nn as nn\nfrom collections import OrderedDict\nimport torch.utils.model_zoo as model_zoo\nfrom torchvision.models.resnet import model_urls\n'''2020.4.14 newly added'''\nfrom collections import OrderedDict as odict\nfrom ltr.models.head.utils import get_out_channels\n\ndef conv3x3(in_planes, out_planes, stride=1, dilation=1):\n    \"\"\"3x3 convolution with padding\"\"\"\n    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n                     padding=dilation, bias=False, dilation=dilation)\n\n\nclass BasicBlock(nn.Module):\n    expansion = 1\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):\n        super(BasicBlock, self).__init__()\n        self.conv1 = conv3x3(inplanes, planes, stride, dilation=dilation)\n        self.bn1 = nn.BatchNorm2d(planes)\n        self.relu = nn.ReLU(inplace=True)\n        self.conv2 = conv3x3(planes, planes, dilation=dilation)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.downsample = downsample\n        self.stride = stride\n\n    def forward(self, x):\n        residual = x\n\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.relu(out)\n\n        return out\n\n\nclass Bottleneck(nn.Module):\n    expansion = 4\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):\n        super(Bottleneck, self).__init__()\n        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(planes)\n        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n                               padding=dilation, bias=False, dilation=dilation)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n        self.bn3 = nn.BatchNorm2d(planes * 4)\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n        self.stride = stride\n\n    def forward(self, x):\n        residual = x\n\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n        out = self.relu(out)\n\n        out = self.conv3(out)\n        out = self.bn3(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.relu(out)\n\n        return out\n\n\nclass ResNet(nn.Module):\n    \"\"\" ResNet network module. 
Allows extracting specific feature blocks.\"\"\"\n    def __init__(self, block, layers, output_layers, num_classes=1000, inplanes=64, dilation_factor=1):\n        self.inplanes = inplanes\n        super(ResNet, self).__init__()\n        self.output_layers = output_layers\n        self.conv1 = nn.Conv2d(3, inplanes, kernel_size=7, stride=2, padding=3,\n                               bias=False)\n        self.bn1 = nn.BatchNorm2d(inplanes)\n        self.relu = nn.ReLU(inplace=True)\n        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n        stride = [1 + (dilation_factor < l) for l in (8, 4, 2)]\n        self.layer1 = self._make_layer(block, inplanes, layers[0], dilation=max(dilation_factor//8, 1))\n        self.layer2 = self._make_layer(block, inplanes*2, layers[1], stride=stride[0], dilation=max(dilation_factor//4, 1))\n        self.layer3 = self._make_layer(block, inplanes*4, layers[2], stride=stride[1], dilation=max(dilation_factor//2, 1))\n        self.layer4 = self._make_layer(block, inplanes*8, layers[3], stride=stride[2], dilation=dilation_factor)\n        # self.avgpool = nn.AvgPool2d(7, stride=1)\n        self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n        self.fc = nn.Linear(inplanes*8 * block.expansion, num_classes)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n        '''2020.4.14 newly added'''\n        self._out_channels = odict(  # Deep-to-shallow order is required by SegNetwork\n            layer5=get_out_channels(self.layer4),\n            layer4=get_out_channels(self.layer3),\n            layer3=get_out_channels(self.layer2),\n            layer2=get_out_channels(self.layer1),\n            layer1=get_out_channels(self.conv1))\n    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(self.inplanes, planes * block.expansion,\n                          kernel_size=1, stride=stride, bias=False),\n                nn.BatchNorm2d(planes * block.expansion),\n            )\n\n        layers = []\n        layers.append(block(self.inplanes, planes, stride, downsample, dilation=dilation))\n        self.inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(block(self.inplanes, planes))\n\n        return nn.Sequential(*layers)\n\n    def _add_output_and_check(self, name, x, outputs, output_layers):\n        if name in output_layers:\n            outputs[name] = x\n        return len(output_layers) == len(outputs)\n\n    def forward(self, x, output_layers=None):\n        \"\"\" Forward pass with input x. 
The output_layers specify the feature blocks which must be returned \"\"\"\n        outputs = OrderedDict()\n\n        if output_layers is None:\n            output_layers = self.output_layers\n\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.relu(x)\n        x = self.maxpool(x)# conv1: (batch,64,128,128)\n        '''2020.4.14 change names for every layers'''\n        if self._add_output_and_check('layer1', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer1(x) # (batch,256,64,64)\n        if self._add_output_and_check('layer2', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer2(x) # (batch,512,32,32)\n\n        if self._add_output_and_check('layer3', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer3(x) # (batch,1024,16,16)\n\n        if self._add_output_and_check('layer4', x, outputs, output_layers):\n            return outputs\n\n        x = self.layer4(x)\n\n        if self._add_output_and_check('layer5', x, outputs, output_layers):\n            return outputs\n\n        x = self.avgpool(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc(x)\n\n        if self._add_output_and_check('fc', x, outputs, output_layers):\n            return outputs\n\n        if len(output_layers) == 1 and output_layers[0] == 'default':\n            return x\n\n        raise ValueError('output_layer is wrong.')\n\n    '''2020.4.14 newly added'''\n    def get_out_channels(self):\n        return self._out_channels\n\n\ndef resnet18(output_layers=None, pretrained=False, dilation_factor=1):\n    \"\"\"Constructs a ResNet-18 model.\n    \"\"\"\n\n    if output_layers is None:\n        output_layers = ['default']\n    else:\n        for l in output_layers:\n            if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNet(BasicBlock, [2, 2, 2, 2], output_layers, dilation_factor=dilation_factor)\n\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n    return model\n\n\ndef resnet50(output_layers=None, pretrained=False, dilation_factor=1):\n    \"\"\"Constructs a ResNet-50 model.\n    \"\"\"\n\n    if output_layers is None:\n        output_layers = ['default']\n    else:\n        for l in output_layers:\n            if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNet(Bottleneck, [3, 4, 6, 3], output_layers, dilation_factor=dilation_factor)\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))\n    return model\n\n'''newly added'''\ndef resnet101(output_layers=None, pretrained=False, dilation_factor=1):\n    \"\"\"Constructs a ResNet-101 model.\n    \"\"\"\n\n    if output_layers is None:\n        output_layers = ['default']\n    else:\n        for l in output_layers:\n            if l not in ['conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer: {}'.format(l))\n\n    model = ResNet(Bottleneck, [3, 4, 23, 3], output_layers, dilation_factor=dilation_factor)\n    if pretrained:\n        model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n    return model\n\n"
  },
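  {
    "path": "external/AR/ltr/models/backbone/example_resnet_seg.py",
    "content": "\"\"\"Usage sketch for resnet_seg.py. Note the shifted naming: 'layer1' is the stem\n(conv1 + maxpool) output and 'layer5' the last residual stage, and get_out_channels()\nlists the stages deep-to-shallow as required by SegNetwork. Illustrative addition;\nthe file name and shape checks are assumptions.\"\"\"\nimport torch\nfrom ltr.models.backbone.resnet_seg import resnet50\n\nif __name__ == '__main__':\n    net = resnet50(output_layers=['layer1', 'layer4'])\n    net.eval()\n    with torch.no_grad():\n        feats = net(torch.randn(1, 3, 256, 256))\n    assert feats['layer1'].shape == (1, 64, 64, 64)    # stem output\n    assert feats['layer4'].shape == (1, 1024, 16, 16)  # third residual stage\n    assert net.get_out_channels()['layer5'] == 2048\n"
  },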
  {
    "path": "external/AR/ltr/models/bbreg/__init__.py",
    "content": "from .atom_iou_net import AtomIoUNet\n"
  },
  {
    "path": "external/AR/ltr/models/bbreg/atom.py",
    "content": "import torch.nn as nn\nimport ltr.models.backbone as backbones\nimport ltr.models.bbreg as bbmodels\nfrom ltr import model_constructor\n\n\nclass ATOMnet(nn.Module):\n    \"\"\" ATOM network module\"\"\"\n    def __init__(self, feature_extractor, bb_regressor, bb_regressor_layer, extractor_grad=True):\n        \"\"\"\n        args:\n            feature_extractor - backbone feature extractor\n            bb_regressor - IoU prediction module\n            bb_regressor_layer - List containing the name of the layers from feature_extractor, which are input to\n                                    bb_regressor\n            extractor_grad - Bool indicating whether backbone feature extractor requires gradients\n        \"\"\"\n        super(ATOMnet, self).__init__()\n\n        self.feature_extractor = feature_extractor\n        self.bb_regressor = bb_regressor\n        self.bb_regressor_layer = bb_regressor_layer\n\n        if not extractor_grad:\n            for p in self.feature_extractor.parameters():\n                p.requires_grad_(False)\n\n    def forward(self, train_imgs, test_imgs, train_bb, test_proposals):\n        \"\"\" Forward pass\n        Note: If the training is done in sequence mode, that is, test_imgs.dim() == 5, then the batch dimension\n        corresponds to the first dimensions. test_imgs is thus of the form [sequence, batch, feature, row, col]\n        \"\"\"\n        num_sequences = train_imgs.shape[-4]\n        num_train_images = train_imgs.shape[0] if train_imgs.dim() == 5 else 1\n        num_test_images = test_imgs.shape[0] if test_imgs.dim() == 5 else 1\n\n        # Extract backbone features\n        train_feat = self.extract_backbone_features(train_imgs.reshape(-1, *train_imgs.shape[-3:]))\n        test_feat = self.extract_backbone_features(test_imgs.reshape(-1, *test_imgs.shape[-3:]))\n\n        train_feat_iou = [feat for feat in train_feat.values()]\n        test_feat_iou = [feat for feat in test_feat.values()]\n\n        # Obtain iou prediction\n        iou_pred = self.bb_regressor(train_feat_iou, test_feat_iou,\n                                     train_bb.reshape(num_train_images, num_sequences, 4),\n                                     test_proposals.reshape(num_train_images, num_sequences, -1, 4))\n        return iou_pred\n\n    def extract_backbone_features(self, im, layers=None):\n        if layers is None:\n            layers = self.bb_regressor_layer\n        return self.feature_extractor(im, layers)\n\n    def extract_features(self, im, layers):\n        return self.feature_extractor(im, layers)\n\n\n\n@model_constructor\ndef atom_resnet18(iou_input_dim=(256,256), iou_inter_dim=(256,256), backbone_pretrained=True):\n    # backbone\n    backbone_net = backbones.resnet18(pretrained=backbone_pretrained)\n\n    # Bounding box regressor\n    iou_predictor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    net = ATOMnet(feature_extractor=backbone_net, bb_regressor=iou_predictor, bb_regressor_layer=['layer2', 'layer3'],\n                  extractor_grad=False)\n\n    return net\n\n\n@model_constructor\ndef atom_resnet50(iou_input_dim=(256,256), iou_inter_dim=(256,256), backbone_pretrained=True):\n    # backbone\n    backbone_net = backbones.resnet50(pretrained=backbone_pretrained)\n\n    # Bounding box regressor\n    iou_predictor = bbmodels.AtomIoUNet(input_dim=(4*128,4*256), pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    net = ATOMnet(feature_extractor=backbone_net, 
bb_regressor=iou_predictor, bb_regressor_layer=['layer2', 'layer3'],\n                  extractor_grad=False)\n\n    return net\n"
  },
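  {
    "path": "external/AR/ltr/models/bbreg/example_atom_shapes.py",
    "content": "\"\"\"Shape sketch for atom.py: how ATOMnet.forward flattens sequence-mode 5-dim\ninputs before the backbone. Illustrative addition; the tensor sizes are assumptions.\"\"\"\nimport torch\n\nif __name__ == '__main__':\n    # (images_in_sequence, sequences, feature, row, col)\n    train_imgs = torch.randn(3, 2, 3, 288, 288)\n    num_sequences = train_imgs.shape[-4]                                    # 2\n    num_train_images = train_imgs.shape[0] if train_imgs.dim() == 5 else 1  # 3\n    flat = train_imgs.reshape(-1, *train_imgs.shape[-3:])\n    assert flat.shape == (6, 3, 288, 288)  # sequence and batch dims merged\n"
  },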
  {
    "path": "external/AR/ltr/models/bbreg/atom_iou_net.py",
    "content": "import torch.nn as nn\nimport torch\nfrom ltr.models.layers.blocks import LinearBlock\nfrom ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D\n\n\ndef conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):\n    return nn.Sequential(\n            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n                      padding=padding, dilation=dilation, bias=True),\n            nn.BatchNorm2d(out_planes),\n            nn.ReLU(inplace=True))\n\n\nclass AtomIoUNet(nn.Module):\n    \"\"\"Network module for IoU prediction. Refer to the ATOM paper for an illustration of the architecture.\n    It uses two backbone feature layers as input.\n    args:\n        input_dim:  Feature dimensionality of the two input backbone layers.\n        pred_input_dim:  Dimensionality input the the prediction network.\n        pred_inter_dim:  Intermediate dimensionality in the prediction network.\"\"\"\n\n    def __init__(self, input_dim=(128,256), pred_input_dim=(256,256), pred_inter_dim=(256,256)):\n        super().__init__()\n        # _r for reference, _t for test\n        self.conv3_1r = conv(input_dim[0], 128, kernel_size=3, stride=1)\n        self.conv3_1t = conv(input_dim[0], 256, kernel_size=3, stride=1)\n\n        self.conv3_2t = conv(256, pred_input_dim[0], kernel_size=3, stride=1)\n\n        self.prroi_pool3r = PrRoIPool2D(3, 3, 1/8)\n        self.prroi_pool3t = PrRoIPool2D(5, 5, 1/8)\n\n        self.fc3_1r = conv(128, 256, kernel_size=3, stride=1, padding=0)\n\n        self.conv4_1r = conv(input_dim[1], 256, kernel_size=3, stride=1)\n        self.conv4_1t = conv(input_dim[1], 256, kernel_size=3, stride=1)\n\n        self.conv4_2t = conv(256, pred_input_dim[1], kernel_size=3, stride=1)\n\n        self.prroi_pool4r = PrRoIPool2D(1, 1, 1/16)\n        self.prroi_pool4t = PrRoIPool2D(3, 3, 1 / 16)\n\n        self.fc34_3r = conv(256 + 256, pred_input_dim[0], kernel_size=1, stride=1, padding=0)\n        self.fc34_4r = conv(256 + 256, pred_input_dim[1], kernel_size=1, stride=1, padding=0)\n\n        self.fc3_rt = LinearBlock(pred_input_dim[0], pred_inter_dim[0], 5)\n        self.fc4_rt = LinearBlock(pred_input_dim[1], pred_inter_dim[1], 3)\n\n        self.iou_predictor = nn.Linear(pred_inter_dim[0]+pred_inter_dim[1], 1, bias=True)\n\n        # Init weights\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Linear):\n                nn.init.kaiming_normal_(m.weight.data, mode='fan_in')\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                # In earlier versions batch norm parameters was initialized with default initialization,\n                # which changed in pytorch 1.2. In 1.1 and earlier the weight was set to U(0,1).\n                # So we use the same initialization here.\n                # m.weight.data.fill_(1)\n                m.weight.data.uniform_()\n                m.bias.data.zero_()\n\n    def forward(self, feat1, feat2, bb1, proposals2):\n        \"\"\"Runs the ATOM IoUNet during training operation.\n        This forward pass is mainly used for training. Call the individual functions during tracking instead.\n        args:\n            feat1:  Features from the reference frames (4 or 5 dims).\n            feat2:  Features from the test frames (4 or 5 dims).\n            bb1:  Target boxes (x,y,w,h) in image coords in the reference samples. 
Dims (images, sequences, 4).\"\"\"\n\n        assert bb1.dim() == 3\n        assert proposals2.dim() == 4\n\n        num_images = proposals2.shape[0]\n        num_sequences = proposals2.shape[1]\n\n        # Extract first train sample\n        feat1 = [f[0,...] if f.dim()==5 else f.reshape(-1, num_sequences, *f.shape[-3:])[0,...] for f in feat1]\n        bb1 = bb1[0,...]\n\n        # Get modulation vector\n        modulation = self.get_modulation(feat1, bb1)\n\n        iou_feat = self.get_iou_feat(feat2)\n\n        modulation = [f.reshape(1, num_sequences, -1).repeat(num_images, 1, 1).reshape(num_sequences*num_images, -1) for f in modulation]\n\n        proposals2 = proposals2.reshape(num_sequences*num_images, -1, 4)\n        pred_iou = self.predict_iou(modulation, iou_feat, proposals2)\n        return pred_iou.reshape(num_images, num_sequences, -1)\n\n    def predict_iou(self, modulation, feat, proposals):\n        \"\"\"Predicts IoU for the given proposals.\n        args:\n            modulation:  Modulation vectors for the targets. Dims (batch, feature_dim).\n            feat:  IoU features (from get_iou_feat) for test images. Dims (batch, feature_dim, H, W).\n            proposals:  Proposal boxes for which the IoU will be predicted (batch, num_proposals, 4).\"\"\"\n\n        fc34_3_r, fc34_4_r = modulation\n        c3_t, c4_t = feat\n\n        batch_size = c3_t.size()[0]\n\n        # Modulation\n        c3_t_att = c3_t * fc34_3_r.reshape(batch_size, -1, 1, 1)\n        c4_t_att = c4_t * fc34_4_r.reshape(batch_size, -1, 1, 1)\n\n        # Add batch_index to rois\n        batch_index = torch.arange(batch_size, dtype=torch.float32).reshape(-1, 1).to(c3_t.device)\n\n        # Push the different rois for the same image along the batch dimension\n        num_proposals_per_batch = proposals.shape[1]\n\n        # input proposals2 is in format xywh, convert it to x0y0x1y1 format\n        proposals_xyxy = torch.cat((proposals[:, :, 0:2], proposals[:, :, 0:2] + proposals[:, :, 2:4]), dim=2)\n\n        # Add batch index\n        roi2 = torch.cat((batch_index.reshape(batch_size, -1, 1).expand(-1, num_proposals_per_batch, -1),\n                          proposals_xyxy), dim=2)\n        roi2 = roi2.reshape(-1, 5).to(proposals_xyxy.device)\n\n        roi3t = self.prroi_pool3t(c3_t_att, roi2)\n        roi4t = self.prroi_pool4t(c4_t_att, roi2)\n\n        fc3_rt = self.fc3_rt(roi3t)\n        fc4_rt = self.fc4_rt(roi4t)\n\n        fc34_rt_cat = torch.cat((fc3_rt, fc4_rt), dim=1)\n\n        iou_pred = self.iou_predictor(fc34_rt_cat).reshape(batch_size, num_proposals_per_batch)\n\n        return iou_pred\n\n    def get_modulation(self, feat, bb):\n        \"\"\"Get modulation vectors for the targets.\n        args:\n            feat: Backbone features from reference images. Dims (batch, feature_dim, H, W).\n            bb:  Target boxes (x,y,w,h) in image coords in the reference samples. 
Dims (batch, 4).\"\"\"\n\n        feat3_r, feat4_r = feat\n\n        c3_r = self.conv3_1r(feat3_r)\n\n        # Add batch_index to rois\n        batch_size = bb.shape[0]\n        batch_index = torch.arange(batch_size, dtype=torch.float32).reshape(-1, 1).to(bb.device)\n\n        # input bb is in format xywh, convert it to x0y0x1y1 format\n        bb = bb.clone()\n        bb[:, 2:4] = bb[:, 0:2] + bb[:, 2:4]\n        roi1 = torch.cat((batch_index, bb), dim=1)\n\n        roi3r = self.prroi_pool3r(c3_r, roi1)\n\n        c4_r = self.conv4_1r(feat4_r)\n        roi4r = self.prroi_pool4r(c4_r, roi1)\n\n        fc3_r = self.fc3_1r(roi3r)\n\n        # Concatenate from block 3 and 4\n        fc34_r = torch.cat((fc3_r, roi4r), dim=1)\n\n        fc34_3_r = self.fc34_3r(fc34_r)\n        fc34_4_r = self.fc34_4r(fc34_r)\n\n        return fc34_3_r, fc34_4_r\n\n    def get_iou_feat(self, feat2):\n        \"\"\"Get IoU prediction features from a 4 or 5 dimensional backbone input.\"\"\"\n        feat2 = [f.reshape(-1, *f.shape[-3:]) if f.dim()==5 else f for f in feat2]\n        feat3_t, feat4_t = feat2\n        c3_t = self.conv3_2t(self.conv3_1t(feat3_t))\n        c4_t = self.conv4_2t(self.conv4_1t(feat4_t))\n\n        return c3_t, c4_t\n"
  },
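  {
    "path": "external/AR/ltr/models/bbreg/example_atom_iou_rois.py",
    "content": "\"\"\"Shape sketch for atom_iou_net.py: the xywh to x0y0x1y1 conversion and the\nbatch-index prefix used to build the (N, 5) rois fed to PrRoIPool2D. Illustrative\naddition; the numbers are assumptions.\"\"\"\nimport torch\n\nif __name__ == '__main__':\n    proposals = torch.tensor([[[10.0, 20.0, 30.0, 40.0]]])  # (batch, num_proposals, 4), xywh\n    xyxy = torch.cat((proposals[:, :, 0:2], proposals[:, :, 0:2] + proposals[:, :, 2:4]), dim=2)\n    batch_index = torch.zeros(1, 1, 1)\n    roi = torch.cat((batch_index, xyxy), dim=2).reshape(-1, 5)\n    assert roi.tolist() == [[0.0, 10.0, 20.0, 40.0, 60.0]]\n"
  },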
  {
    "path": "external/AR/ltr/models/head/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/ltr/models/head/seg_network.py",
    "content": "import torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\nfrom ltr.models.head.utils import conv, relu, interpolate, adaptive_cat\n\n\nclass TSE(nn.Module):\n\n    def __init__(self, fc, ic, oc):\n        super().__init__()\n\n        nc = ic + oc\n        self.reduce = nn.Sequential(conv(fc, oc, 1), relu(), conv(oc, oc, 1))\n        self.transform = nn.Sequential(conv(nc, nc, 3), relu(), conv(nc, nc, 3), relu(), conv(nc, oc, 3), relu())\n\n    def forward(self, ft, score, x=None):\n        h = self.reduce(ft)\n        hpool = F.adaptive_avg_pool2d(h, (1, 1)) if x is None else x\n        h = adaptive_cat((h, score), dim=1, ref_tensor=0)\n        h = self.transform(h)\n        return h, hpool\n\n\nclass CAB(nn.Module):\n\n    def __init__(self, oc, deepest):\n        super().__init__()\n\n        self.convreluconv = nn.Sequential(conv(2 * oc, oc, 1), relu(), conv(oc, oc, 1))\n        self.deepest = deepest\n\n    def forward(self, deeper, shallower, att_vec=None):\n\n        shallow_pool = F.adaptive_avg_pool2d(shallower, (1, 1))\n        deeper_pool = deeper if self.deepest else F.adaptive_avg_pool2d(deeper, (1, 1))\n        if att_vec is not None:\n            global_pool = torch.cat([shallow_pool, deeper_pool, att_vec], dim=1)\n        else:\n            global_pool = torch.cat((shallow_pool, deeper_pool), dim=1)\n        conv_1x1 = self.convreluconv(global_pool)\n        inputs = shallower * torch.sigmoid(conv_1x1)\n        out = inputs + interpolate(deeper, inputs.shape[-2:])\n\n        return out\n\n\nclass RRB(nn.Module):\n\n    def __init__(self, oc, use_bn=False):\n        super().__init__()\n        self.conv1x1 = conv(oc, oc, 1)\n        if use_bn:\n            self.bblock = nn.Sequential(conv(oc, oc, 3), nn.BatchNorm2d(oc), relu(), conv(oc, oc, 3, bias=False))\n        else:\n            self.bblock = nn.Sequential(conv(oc, oc, 3), relu(), conv(oc, oc, 3, bias=False))  # Basic block\n\n    def forward(self, x):\n        h = self.conv1x1(x)\n        return F.relu(h + self.bblock(h))\n\n\nclass Upsampler(nn.Module):\n\n    def __init__(self, in_channels=64):\n        super().__init__()\n\n        self.conv1 = conv(in_channels, in_channels // 2, 3)\n        self.conv2 = conv(in_channels // 2, 1, 3)\n\n    def forward(self, x, image_size):\n        print(x.shape)\n        x = F.interpolate(x, (2 * x.shape[-2], 2 * x.shape[-1]), mode='bicubic', align_corners=False)\n        x = F.relu(self.conv1(x))\n        x = F.interpolate(x, image_size[-2:], mode='bicubic', align_corners=False)\n        x = self.conv2(x)\n        return x\n\n\nclass PyrUpBicubic2d(nn.Module):\n\n    def __init__(self, channels):\n        super().__init__()\n\n        self.channels = channels\n\n        def kernel(d):\n            x = d + torch.arange(-1, 3, dtype=torch.float32)\n            x = torch.abs(x)\n            a = -0.75\n            f = (x < 1).float() * ((a + 2) * x * x * x - (a + 3) * x * x + 1) + \\\n                ((x >= 1) * (x < 2)).float() * (a * x * x * x - 5 * a * x * x + 8 * a * x - 4 * a)\n            W = f.reshape(1, 1, 1, len(x)).float()\n            Wt = W.permute(0, 1, 3, 2)\n            return W, Wt\n\n        We, We_t = kernel(-0.25)\n        Wo, Wo_t = kernel(-0.25 - 0.5)\n\n        # Building non-separable filters for now. 
It would make sense to\n        # have separable filters if it proves to be faster.\n\n        # .contiguous() is needed until a bug is fixed in nn.Conv2d.\n        self.W00 = (We_t @ We).expand(channels, 1, 4, 4).contiguous()\n        self.W01 = (We_t @ Wo).expand(channels, 1, 4, 4).contiguous()\n        self.W10 = (Wo_t @ We).expand(channels, 1, 4, 4).contiguous()\n        self.W11 = (Wo_t @ Wo).expand(channels, 1, 4, 4).contiguous()\n\n    def forward(self, input):\n\n        if input.device != self.W00.device:\n            self.W00 = self.W00.to(input.device)\n            self.W01 = self.W01.to(input.device)\n            self.W10 = self.W10.to(input.device)\n            self.W11 = self.W11.to(input.device)\n\n        a = F.pad(input, (2, 2, 2, 2), 'replicate')\n\n        I00 = F.conv2d(a, self.W00, groups=self.channels)\n        I01 = F.conv2d(a, self.W01, groups=self.channels)\n        I10 = F.conv2d(a, self.W10, groups=self.channels)\n        I11 = F.conv2d(a, self.W11, groups=self.channels)\n\n        n, c, h, w = I11.shape\n\n        J0 = torch.stack((I00, I01), dim=-1).view(n, c, h, 2 * w)\n        J1 = torch.stack((I10, I11), dim=-1).view(n, c, h, 2 * w)\n        out = torch.stack((J0, J1), dim=-2).view(n, c, 2 * h, 2 * w)\n\n        out = F.pad(out, (-1, -1, -1, -1))\n        return out\n\n\nclass BackwardCompatibleUpsampler(nn.Module):\n    \"\"\" Upsampler with bicubic interpolation that works with Pytorch 1.0.1 \"\"\"\n\n    def __init__(self, in_channels=64):\n        super().__init__()\n\n        self.conv1 = conv(in_channels, in_channels // 2, 3)\n        self.up1 = PyrUpBicubic2d(in_channels)\n        self.conv2 = conv(in_channels // 2, 1, 3)\n        self.up2 = PyrUpBicubic2d(in_channels // 2)\n\n    def forward(self, x, image_size):\n        x = self.up1(x)\n        x = F.relu(self.conv1(x))\n        x = self.up2(x)\n        x = F.interpolate(x, image_size[-2:], mode='bilinear', align_corners=False)\n        x = self.conv2(x)\n        return x\n\n\nclass SegNetwork(nn.Module):\n\n    def __init__(self, in_channels=1, out_channels=32, ft_channels=None, use_bn=False):\n\n        super().__init__()\n\n        assert ft_channels is not None\n        self.ft_channels = ft_channels\n\n        self.TSE = nn.ModuleDict()\n        self.RRB1 = nn.ModuleDict()\n        self.CAB = nn.ModuleDict()\n        self.RRB2 = nn.ModuleDict()\n\n        ic = in_channels\n        oc = out_channels\n\n        for L, fc in self.ft_channels.items():\n            self.TSE[L] = TSE(fc, ic, oc)\n            self.RRB1[L] = RRB(oc, use_bn=use_bn)\n            self.CAB[L] = CAB(oc, L == 'layer5')\n            self.RRB2[L] = RRB(oc, use_bn=use_bn)\n\n        #if torch.__version__ == '1.0.1'\n        self.project = BackwardCompatibleUpsampler(out_channels)\n        #self.project = Upsampler(out_channels)\n\n    def forward(self, scores, features, image_size):\n\n        num_targets = scores.shape[0]\n        num_fmaps = features[next(iter(self.ft_channels))].shape[0]\n        if num_targets > num_fmaps:\n            multi_targets = True\n        else:\n            multi_targets = False\n\n        x = None\n        for i, L in enumerate(self.ft_channels):\n            ft = features[L]\n            s = interpolate(scores, ft.shape[-2:])  # Resample scores to match features size\n\n            if multi_targets:\n                h, hpool = self.TSE[L](ft.repeat(num_targets, 1, 1, 1), s, x)\n            else:\n                h, hpool = self.TSE[L](ft, s, x)\n\n            h = self.RRB1[L](h)\n            h 
= self.CAB[L](hpool, h)\n            x = self.RRB2[L](h)\n\n        x = self.project(x, image_size)\n        return x\n\n\n"
  },
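  {
    "path": "external/AR/ltr/models/head/example_pyrup_bicubic.py",
    "content": "\"\"\"Usage sketch for seg_network.py: PyrUpBicubic2d doubles the spatial resolution\nwith fixed bicubic kernels. Illustrative addition; the file name and sizes are\nassumptions.\"\"\"\nimport torch\nfrom ltr.models.head.seg_network import PyrUpBicubic2d\n\nif __name__ == '__main__':\n    up = PyrUpBicubic2d(channels=4)\n    x = torch.randn(1, 4, 16, 16)\n    assert up(x).shape == (1, 4, 32, 32)  # 2x upsampling, channels unchanged\n"
  },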
  {
    "path": "external/AR/ltr/models/head/utils.py",
    "content": "from collections import OrderedDict as odict\n\nimport numpy as np\nimport torch\nfrom torch import nn as nn\nfrom torch.nn import functional as F\n\n\ndef text_bargraph(values):\n    blocks = np.array(('u', ' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█', 'o'))\n    nsteps = len(blocks) - 2 - 1\n    hstep = 1 / (2 * nsteps)\n    values = np.array(values)\n    nans = np.isnan(values)\n    values[nans] = 0  # '░'\n    indices = ((values + hstep) * nsteps + 1).astype(np.int)\n    indices[values < 0] = 0\n    indices[values > 1] = len(blocks) - 1\n    graph = blocks[indices]\n    graph[nans] = '░'\n    graph = str.join('', graph)\n    return graph\n\n\nclass ModuleWrapper:\n    \"\"\" A wrapper for hiding modules from PyTorch, so that the same module can be used in multiple places.\n    and yet saved only once in a checkpoint, or not at all. \"\"\"\n\n    # https://stackoverflow.com/questions/1466676/create-a-wrapper-class-to-call-a-pre-and-post-function-around-existing-functions\n\n    def __init__(self, wrapped_module):\n        self.__wrapped_module__ = wrapped_module\n\n    def __getattr__(self, attr):\n        orig_attr = self.__wrapped_module__.__getattribute__(attr)\n        if callable(orig_attr):\n            def hooked(*args, **kwargs):\n                result = orig_attr(*args, **kwargs)\n                # prevent wrapped_class from becoming unwrapped\n                if result == self.__wrapped_module__:\n                    return self\n                return result\n\n            return hooked\n        else:\n            return orig_attr\n\n    def __call__(self, *args, **kwargs):\n        return self.__wrapped_module__(*args, **kwargs)\n\n\ndef conv(ic, oc, ksize, bias=True, dilation=1, stride=1):\n    return nn.Conv2d(ic, oc, ksize, padding=ksize // 2, bias=bias, dilation=dilation, stride=stride)\n\n\ndef relu(negative_slope=0.0, inplace=False):\n    return nn.LeakyReLU(negative_slope, inplace=inplace)\n\n\ndef interpolate(t, sz):\n    sz = sz.tolist() if torch.is_tensor(sz) else sz\n    return F.interpolate(t, sz, mode='bilinear', align_corners=False) if t.shape[-2:] != sz else t\n\n\ndef adaptive_cat(seq, dim=0, ref_tensor=0):\n    sz = seq[ref_tensor].shape[-2:]\n    t = torch.cat([interpolate(t, sz) for t in seq], dim=dim)\n    return t\n\n\ndef get_out_channels(layer):\n    if hasattr(layer, 'out_channels'):\n        oc = layer.out_channels\n    elif hasattr(layer, '_modules'):\n        oc = get_out_channels(layer._modules)\n    else:\n        ocs = []\n        for key in reversed(layer):\n            ocs.append(get_out_channels(layer[key]))\n\n        oc = 0\n        for elem in ocs:\n            if elem:\n                return elem\n\n    return oc\n\n\ndef is_finite(t):\n    return (torch.isnan(t) + torch.isinf(t)) == 0\n\n\nclass AverageMeter:\n    \"\"\"Computes and stores the average and current value\"\"\"\n\n    def __init__(self):\n        self.val = 0\n        self.avg = 0\n        self.sum = 0\n        self.count = 0\n        self.seq_avg = []\n\n    def reset(self):\n        self.__init__()\n\n    def update(self, val, n=1):\n        if not np.isnan(val):\n            self.val = val\n            self.sum += val * n\n            self.count += n\n            self.avg = self.sum / self.count\n\n    def update_multi(self, val):\n        val = np.array(val)\n        v = val[~np.isnan(val)]\n        n = len(v)\n        self.val = val\n        self.sum += np.nansum(v)\n        self.count += n\n        self.avg = self.sum / self.count\n\n"
  },
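  {
    "path": "external/AR/ltr/models/head/example_adaptive_cat.py",
    "content": "\"\"\"Usage sketch for head/utils.py: adaptive_cat resamples every tensor to the\nreference tensor's spatial size before concatenating. Illustrative addition; the\nsizes are assumptions.\"\"\"\nimport torch\nfrom ltr.models.head.utils import adaptive_cat\n\nif __name__ == '__main__':\n    a = torch.randn(1, 8, 32, 32)\n    b = torch.randn(1, 16, 16, 16)\n    c = adaptive_cat((a, b), dim=1, ref_tensor=0)  # b is upsampled to 32x32\n    assert c.shape == (1, 24, 32, 32)\n"
  },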
  {
    "path": "external/AR/ltr/models/layers/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/ltr/models/layers/activation.py",
    "content": "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef softmax_reg(x: torch.Tensor, dim, reg=None):\n    \"\"\"Softmax with optinal denominator regularization.\"\"\"\n    if reg is None:\n        return torch.softmax(x, dim=dim)\n    dim %= x.dim()\n    if isinstance(reg, (float, int)):\n        reg = x.new_tensor([reg])\n    reg = reg.expand([1 if d==dim else x.shape[d] for d in range(x.dim())])\n    x = torch.cat((x, reg), dim=dim)\n    return torch.softmax(x, dim=dim)[[slice(-1) if d==dim else slice(None) for d in range(x.dim())]]\n\n\n\nclass MLU(nn.Module):\n    r\"\"\"MLU activation\n    \"\"\"\n    def __init__(self, min_val, inplace=False):\n        super().__init__()\n        self.min_val = min_val\n        self.inplace = inplace\n\n    def forward(self, input):\n        return F.elu(F.leaky_relu(input, 1/self.min_val, inplace=self.inplace), self.min_val, inplace=self.inplace)\n\n\nclass LeakyReluPar(nn.Module):\n    r\"\"\"LeakyRelu parametric activation\n    \"\"\"\n\n    def forward(self, x, a):\n        return (1.0 - a)/2.0 * torch.abs(x) + (1.0 + a)/2.0 * x\n\nclass LeakyReluParDeriv(nn.Module):\n    r\"\"\"Derivative of the LeakyRelu parametric activation, wrt x.\n    \"\"\"\n\n    def forward(self, x, a):\n        return (1.0 - a)/2.0 * torch.sign(x.detach()) + (1.0 + a)/2.0\n\n\nclass BentIdentPar(nn.Module):\n    r\"\"\"BentIdent parametric activation\n    \"\"\"\n    def __init__(self, b=1.0):\n        super().__init__()\n        self.b = b\n\n    def forward(self, x, a):\n        return (1.0 - a)/2.0 * (torch.sqrt(x*x + 4.0*self.b*self.b) - 2.0*self.b) + (1.0 + a)/2.0 * x\n\n\nclass BentIdentParDeriv(nn.Module):\n    r\"\"\"BentIdent parametric activation deriv\n    \"\"\"\n    def __init__(self, b=1.0):\n        super().__init__()\n        self.b = b\n\n    def forward(self, x, a):\n        return (1.0 - a)/2.0 * (x / torch.sqrt(x*x + 4.0*self.b*self.b)) + (1.0 + a)/2.0\n\n"
  },
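  {
    "path": "external/AR/ltr/models/layers/example_softmax_reg.py",
    "content": "\"\"\"Usage sketch for layers/activation.py: softmax_reg appends a constant logit to\nthe softmax denominator, so the returned probabilities sum to less than one.\nIllustrative addition; the sizes are assumptions.\"\"\"\nimport torch\nfrom ltr.models.layers.activation import softmax_reg\n\nif __name__ == '__main__':\n    x = torch.randn(2, 5)\n    p = softmax_reg(x, dim=1, reg=0.0)  # adds exp(0) = 1 to the denominator\n    assert p.shape == (2, 5)\n    assert (p.sum(dim=1) < 1).all()\n    q = softmax_reg(x, dim=1)  # reg=None falls back to the standard softmax\n    assert torch.allclose(q.sum(dim=1), torch.ones(2))\n"
  },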
  {
    "path": "external/AR/ltr/models/layers/blocks.py",
    "content": "from torch import nn\n\n\ndef conv_block(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, bias=True,\n               batch_norm=True, relu=True, padding_mode='zeros'):\n    layers = []\n    assert padding_mode == 'zeros' or padding_mode == 'replicate'\n\n    if padding_mode == 'replicate' and padding > 0:\n        assert isinstance(padding, int)\n        layers.append(nn.ReflectionPad2d(padding))\n        padding = 0\n\n    layers.append(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n                  padding=padding, dilation=dilation, bias=bias))\n    if batch_norm:\n        layers.append(nn.BatchNorm2d(out_planes))\n    if relu:\n        layers.append(nn.ReLU(inplace=True))\n    return nn.Sequential(*layers)\n\n\nclass LinearBlock(nn.Module):\n    def __init__(self, in_planes, out_planes, input_sz, bias=True, batch_norm=True, relu=True):\n        super().__init__()\n        self.linear = nn.Linear(in_planes*input_sz*input_sz, out_planes, bias=bias)\n        self.bn = nn.BatchNorm2d(out_planes) if batch_norm else None\n        self.relu = nn.ReLU(inplace=True) if relu else None\n\n    def forward(self, x):\n        x = self.linear(x.reshape(x.shape[0], -1))\n        if self.bn is not None:\n            x = self.bn(x.reshape(x.shape[0], x.shape[1], 1, 1))\n        if self.relu is not None:\n            x = self.relu(x)\n        return x.reshape(x.shape[0], -1)"
  },
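  {
    "path": "external/AR/ltr/models/layers/example_linear_block.py",
    "content": "\"\"\"Usage sketch for layers/blocks.py: LinearBlock flattens a\n(batch, in_planes, input_sz, input_sz) feature map to a (batch, out_planes) vector.\nIllustrative addition; the sizes are assumptions.\"\"\"\nimport torch\nfrom ltr.models.layers.blocks import LinearBlock\n\nif __name__ == '__main__':\n    fc = LinearBlock(in_planes=16, out_planes=32, input_sz=5)\n    x = torch.randn(4, 16, 5, 5)\n    assert fc(x).shape == (4, 32)\n"
  },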
  {
    "path": "external/AR/ltr/models/layers/distance.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass DistanceMap(nn.Module):\n    \"\"\"Generate a distance map from a origin center location.\n    args:\n        num_bins:  Number of bins in the map.\n        bin_displacement:  Displacement of the bins.\n    \"\"\"\n    def __init__(self, num_bins, bin_displacement=1.0):\n        super().__init__()\n        self.num_bins = num_bins\n        self.bin_displacement = bin_displacement\n\n    def forward(self, center, output_sz):\n        \"\"\"Create the distance map.\n        args:\n            center: Torch tensor with (y,x) center position. Dims (batch, 2)\n            output_sz: Size of output distance map. 2-dimensional tuple.\"\"\"\n\n        center = center.view(-1,2)\n\n        bin_centers = torch.arange(self.num_bins, dtype=torch.float32, device=center.device).view(1, -1, 1, 1)\n\n        k0 = torch.arange(output_sz[0], dtype=torch.float32, device=center.device).view(1,1,-1,1)\n        k1 = torch.arange(output_sz[1], dtype=torch.float32, device=center.device).view(1,1,1,-1)\n\n        d0 = k0 - center[:,0].view(-1,1,1,1)\n        d1 = k1 - center[:,1].view(-1,1,1,1)\n\n        dist = torch.sqrt(d0*d0 + d1*d1)\n        bin_diff = dist / self.bin_displacement - bin_centers\n\n        bin_val = torch.cat((F.relu(1.0 - torch.abs(bin_diff[:,:-1,:,:]), inplace=True),\n                             (1.0 + bin_diff[:,-1:,:,:]).clamp(0, 1)), dim=1)\n\n        return bin_val\n\n\n"
  },
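  {
    "path": "external/AR/ltr/models/layers/example_distance_map.py",
    "content": "\"\"\"Usage sketch for layers/distance.py: DistanceMap encodes the distance to a\ncenter with soft triangular binning, so the bin activations sum to one at every\nlocation. Illustrative addition; the sizes are assumptions.\"\"\"\nimport torch\nfrom ltr.models.layers.distance import DistanceMap\n\nif __name__ == '__main__':\n    dmap = DistanceMap(num_bins=10, bin_displacement=1.0)\n    center = torch.tensor([[8.0, 8.0]])  # (y, x), dims (batch, 2)\n    bins = dmap(center, (16, 16))\n    assert bins.shape == (1, 10, 16, 16)\n    assert torch.allclose(bins.sum(dim=1), torch.ones(1, 16, 16))\n"
  },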
  {
    "path": "external/AR/ltr/models/layers/filter.py",
    "content": "import torch\nimport torch.nn.functional as F\n\n\ndef apply_filter(feat, filter, dilation_factors=None):\n    \"\"\"Applies the filter on the input features (feat). The number of groups is automatically calculated.\n    args:\n        feat: These are the input features. Must have dimensions (images_in_sequence, sequences, feat_dim, H, W)\n        filter: The filter to apply. Must have dimensions (sequences, feat_dim, fH, fW) or (sequences, filters, feat_dim/groups, fH, fW)\n    output:\n        scores: Output of filtering. Dimensions (images_in_sequence, sequences, yH, yW) or (images_in_sequence, sequences, filters, yH, yW)\n    \"\"\"\n\n    multiple_filters = (filter.dim() == 5)\n\n    padding = (filter.shape[-2] // 2, filter.shape[-1] // 2)\n\n    num_images = feat.shape[0]\n    num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n    num_filters = filter.shape[1] if multiple_filters else 1\n    num_channels = feat.shape[-3]\n    groups = num_channels // filter.shape[-3]\n\n    assert num_filters % groups == 0 and num_channels % groups == 0\n\n    if multiple_filters:\n        if dilation_factors is None:\n            scores = F.conv2d(feat.reshape(num_images, -1, feat.shape[-2], feat.shape[-1]), filter.view(-1, *filter.shape[-3:]),\n                              padding=padding, groups=num_sequences*groups)\n\n            return scores.view(num_images, num_sequences, -1, scores.shape[-2], scores.shape[-1])\n        else:\n            scores_all = []\n            start_id = 0\n\n            for d_factor, num_filters_with_d in dilation_factors.items():\n                f_d = filter[:, start_id:start_id+num_filters_with_d, ...].contiguous()\n\n                padding_d = [p+d_factor-1 for p in padding]\n                scores_d = F.conv2d(feat.reshape(num_images, -1, feat.shape[-2], feat.shape[-1]),\n                                    f_d.view(-1, *f_d.shape[-3:]),\n                                    padding=padding_d, groups=num_sequences * groups,\n                                    dilation=d_factor)\n                scores_d = scores_d.view(num_images, num_sequences, -1, scores_d.shape[-2], scores_d.shape[-1])\n                scores_all.append(scores_d)\n                start_id += num_filters_with_d\n\n            scores = torch.cat(scores_all, dim=2)\n            return scores\n\n    scores = F.conv2d(feat.reshape(num_images, -1, feat.shape[-2], feat.shape[-1]), filter,\n                      padding=padding, groups=num_sequences)\n\n    return scores.view(num_images, num_sequences, scores.shape[-2], scores.shape[-1])\n\n\ndef apply_feat_transpose(feat, input, filter_ksz, training=True, groups=1):\n    \"\"\"Applies the transposed operation off apply_filter w.r.t. filter itself. Can be used to compute the filter gradient.\n    args:\n        feat: These are the input features. Must have dimensions (images_in_sequence, sequences, feat_dim, H, W)\n        input: Input activation (e.g. residuals). Must have dimensions (images_in_sequence, sequences, yH, yW) or\n                (images_in_sequence, sequences, filters, yH, yW)\n        training: Choose the faster implementation whether training or not.\n    output:\n        Output of transposed operation. 
Dimensions (sequences, feat_dim, fH, fW)\n    \"\"\"\n\n    if groups != 1:\n        raise NotImplementedError('Not implemented other values of group.')\n\n    if training or input.dim() == 5:\n        return _apply_feat_transpose_v3(feat, input, filter_ksz)\n    return _apply_feat_transpose_v2(feat, input, filter_ksz)\n\n\ndef _apply_feat_transpose_v1(feat, input, filter_ksz):\n    \"\"\"This one is slow as hell!!!!\"\"\"\n\n    num_images = feat.shape[0]\n    num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n    feat_sz = (feat.shape[-2], feat.shape[-1])\n    if isinstance(filter_ksz, int):\n        filter_ksz = (filter_ksz, filter_ksz)\n\n    # trans_pad = sz + padding - filter_ksz\n    trans_pad = [sz + ksz//2 - ksz for sz, ksz in zip(feat_sz, filter_ksz)]\n\n    filter_grad = F.conv_transpose2d(input.flip((2, 3)).view(1, -1, input.shape[-2], input.shape[-1]),\n                                     feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]),\n                                     padding=trans_pad, groups=num_images * num_sequences)\n\n    return filter_grad.view(num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=0)\n\n\ndef _apply_feat_transpose_v2(feat, input, filter_ksz):\n    \"\"\"Fast forward and slow backward\"\"\"\n\n    multiple_filters = (input.dim() == 5)\n\n    num_images = feat.shape[0]\n    num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n    num_filters = input.shape[2] if multiple_filters else 1\n    if isinstance(filter_ksz, int):\n        filter_ksz = (filter_ksz, filter_ksz)\n\n    trans_pad = [(ksz-1)//2 for ksz in filter_ksz]\n\n    if multiple_filters:\n        filter_grad = F.conv2d(input.reshape(-1, num_filters, input.shape[-2], input.shape[-1]).permute(1,0,2,3),\n                               feat.reshape(-1, 1, feat.shape[-2], feat.shape[-1]),\n                               padding=trans_pad, groups=num_images * num_sequences)\n\n        if num_images == 1:\n            return filter_grad.view(num_filters, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).flip((3,4)).permute(1,0,2,3,4)\n        return filter_grad.view(num_filters, num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=1).flip((3,4)).permute(1,0,2,3,4)\n\n    filter_grad = F.conv2d(input.reshape(1, -1, input.shape[-2], input.shape[-1]),\n                                     feat.reshape(-1, 1, feat.shape[-2], feat.shape[-1]),\n                                     padding=trans_pad, groups=num_images * num_sequences)\n\n    return filter_grad.view(num_images, num_sequences, -1, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=0).flip((2,3))\n\n\ndef _apply_feat_transpose_v3(feat, input, filter_ksz):\n    \"\"\"Slow forward fast backward\"\"\"\n\n    multiple_filters = (input.dim() == 5)\n\n    num_images = feat.shape[0]\n    num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n    num_filters = input.shape[2] if multiple_filters else 1\n    if isinstance(filter_ksz, int):\n        filter_ksz = (filter_ksz, filter_ksz)\n\n    trans_pad = [ksz//2 for  ksz in filter_ksz]\n\n    filter_grad = F.conv2d(feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]).permute(1,0,2,3),\n                           input.reshape(-1, 1, input.shape[-2], input.shape[-1]),\n                           padding=trans_pad, groups=num_images * num_sequences)\n\n    if multiple_filters:\n        if num_images == 1:\n            return filter_grad.view(-1, num_sequences, num_filters, 
filter_grad.shape[-2], filter_grad.shape[-1]).permute(1,2,0,3,4)\n        return filter_grad.view(-1, num_images, num_sequences, num_filters, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=1).permute(1,2,0,3,4)\n\n    if num_images == 1:\n        return filter_grad.permute(1,0,2,3)\n    return filter_grad.view(-1, num_images, num_sequences, filter_grad.shape[-2], filter_grad.shape[-1]).sum(dim=1).permute(1,0,2,3)\n\n\ndef _apply_feat_transpose_v4(feat, input, filter_ksz):\n    \"\"\"Slow forward fast backward\"\"\"\n\n    num_images = feat.shape[0]\n    num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n    if isinstance(filter_ksz, int):\n        filter_ksz = (filter_ksz, filter_ksz)\n\n    trans_pad = [ksz//2 for  ksz in filter_ksz]\n\n    filter_grad = F.conv2d(feat.permute(2,1,0,3,4).reshape(feat.shape[-3], -1, feat.shape[-2], feat.shape[-1]),\n                           input.permute(1,0,2,3),\n                           padding=trans_pad, groups=num_sequences)\n\n    return filter_grad.permute(1,0,2,3)\n\n\n\ndef filter_gradient(feat, filter, label=None, training=True):\n    \"\"\"Computes gradient of the filter when applied on the input features and ground truth label.\n    args:\n        feat: These are the input features. Must have dimensions (images_in_sequence, sequences, feat_dim, H, W)\n        filter: The filter to apply. Must have dimensions (sequences, feat_dim, fH, fW)\n        label: Ground truth label in the L2 loss. Dimensions (images_in_sequence, sequences, yH, yW)\n    output:\n        filter_gradient: Dimensions same as input filter (sequences, feat_dim, fH, fW)\n    \"\"\"\n\n    residuals = apply_filter(feat, filter)\n    if label is not None:\n        residuals = residuals - label\n    filter_ksz = (filter.shape[-2], filter.shape[-1])\n    return apply_feat_transpose(feat, residuals, filter_ksz, training=training)\n"
  },
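# --- Illustrative sketch (not from the repository) ---------------------------
# filter_gradient() above returns the gradient of the L2 objective
# 0.5 * || apply_filter(feat, f) - label ||^2 with respect to the filter f.
# A minimal sanity check against autograd, assuming the module is importable
# as ltr.models.layers.filter; shapes are arbitrary, odd kernel size:
import torch
import ltr.models.layers.filter as filter_layer

num_images, num_sequences, C, H, W, ksz = 2, 3, 8, 16, 16, 3
feat = torch.randn(num_images, num_sequences, C, H, W)
f = torch.randn(num_sequences, C, ksz, ksz, requires_grad=True)
label = torch.randn(num_images, num_sequences, H, W)

loss = 0.5 * ((filter_layer.apply_filter(feat, f) - label) ** 2).sum()
loss.backward()
g = filter_layer.filter_gradient(feat, f.detach(), label=label)
print(torch.allclose(g, f.grad, atol=1e-4))  # expected: True, up to tolerance
# ------------------------------------------------------------------------------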
  {
    "path": "external/AR/ltr/models/layers/normalization.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass InstanceL2Norm(nn.Module):\n    \"\"\"Instance L2 normalization.\n    \"\"\"\n    def __init__(self, size_average=True, eps=1e-5, scale=1.0):\n        super().__init__()\n        self.size_average = size_average\n        self.eps = eps\n        self.scale = scale\n\n    def forward(self, input):\n        if self.size_average:\n            return input * (self.scale * ((input.shape[1] * input.shape[2] * input.shape[3]) / (\n                        torch.sum((input * input).view(input.shape[0], 1, 1, -1), dim=3, keepdim=True) + self.eps)).sqrt())\n        else:\n            return input * (self.scale / (torch.sum((input * input).view(input.shape[0], 1, 1, -1), dim=3, keepdim=True) + self.eps).sqrt())\n\n"
  },
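# --- Illustrative sketch (not from the repository) ---------------------------
# With size_average=True, InstanceL2Norm rescales each sample so its mean
# squared activation is roughly scale**2 (i.e. the squared L2 norm becomes
# about scale**2 * C*H*W). A quick check:
import torch
from ltr.models.layers.normalization import InstanceL2Norm

x = torch.randn(4, 16, 10, 10)
y = InstanceL2Norm(scale=1.0)(x)
print((y ** 2).sum(dim=(1, 2, 3)) / (16 * 10 * 10))  # ~1.0 for every sample
# ------------------------------------------------------------------------------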
  {
    "path": "external/AR/ltr/models/layers/transform.py",
    "content": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections import OrderedDict\n\n\ndef interpolate(x, sz):\n    \"\"\"Interpolate 4D tensor x to size sz.\"\"\"\n    sz = sz.tolist() if torch.is_tensor(sz) else sz\n    return F.interpolate(x, sz, mode='bilinear', align_corners=False) if x.shape[-2:] != sz else x\n\n\nclass InterpCat(nn.Module):\n    \"\"\"Interpolate and concatenate features of different resolutions.\"\"\"\n\n    def forward(self, input):\n        if isinstance(input, (dict, OrderedDict)):\n            input = list(input.values())\n\n        output_shape = None\n        for x in input:\n            if output_shape is None or output_shape[0] > x.shape[-2]:\n                output_shape = x.shape[-2:]\n\n        return torch.cat([interpolate(x, output_shape) for x in input], dim=-3)\n"
  },
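# --- Illustrative sketch (not from the repository) ---------------------------
# InterpCat resizes all inputs to the smallest spatial size among them, then
# concatenates along the channel dimension:
import torch
from ltr.models.layers.transform import InterpCat

out = InterpCat()([torch.randn(2, 8, 32, 32), torch.randn(2, 4, 16, 16)])
print(out.shape)  # torch.Size([2, 12, 16, 16])
# ------------------------------------------------------------------------------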
  {
    "path": "external/AR/ltr/models/loss/__init__.py",
    "content": "from .target_classification import LBHinge\n"
  },
  {
    "path": "external/AR/ltr/models/loss/kl_regression.py",
    "content": "import math\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\n\nclass KLRegression(nn.Module):\n    \"\"\"KL-divergence loss for probabilistic regression.\n    It is computed using Monte Carlo (MC) samples from an arbitrary distribution.\"\"\"\n\n    def __init__(self, eps=0.0):\n        super().__init__()\n        self.eps = eps\n\n    def forward(self, scores, sample_density, gt_density, mc_dim=-1):\n        \"\"\"Args:\n            scores: predicted score values\n            sample_density: probability density of the sample distribution\n            gt_density: probability density of the ground truth distribution\n            mc_dim: dimension of the MC samples\"\"\"\n\n        exp_val = scores - torch.log(sample_density + self.eps)\n\n        L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim]) - \\\n            torch.mean(scores * (gt_density / (sample_density + self.eps)), dim=mc_dim)\n\n        return L.mean()\n\n\nclass MLRegression(nn.Module):\n    \"\"\"Maximum likelihood loss for probabilistic regression.\n    It is computed using Monte Carlo (MC) samples from an arbitrary distribution.\"\"\"\n\n    def __init__(self, eps=0.0):\n        super().__init__()\n        self.eps = eps\n\n    def forward(self, scores, sample_density, gt_density=None, mc_dim=-1):\n        \"\"\"Args:\n            scores: predicted score values. First sample must be ground-truth\n            sample_density: probability density of the sample distribution\n            gt_density: not used\n            mc_dim: dimension of the MC samples. Only mc_dim=1 supported\"\"\"\n\n        assert mc_dim == 1\n        assert (sample_density[:,0,...] == -1).all()\n\n        exp_val = scores[:, 1:, ...] - torch.log(sample_density[:, 1:, ...] + self.eps)\n\n        L = torch.logsumexp(exp_val, dim=mc_dim) - math.log(scores.shape[mc_dim] - 1) - scores[:, 0, ...]\n        loss = L.mean()\n        return loss\n\n\nclass KLRegressionGrid(nn.Module):\n    \"\"\"KL-divergence loss for probabilistic regression.\n    It is computed using the grid integration strategy.\"\"\"\n\n    def forward(self, scores, gt_density, grid_dim=-1, grid_scale=1.0):\n        \"\"\"Args:\n            scores: predicted score values\n            gt_density: probability density of the ground truth distribution\n            grid_dim: dimension(s) of the grid\n            grid_scale: area of one grid cell\"\"\"\n\n        score_corr = grid_scale * torch.sum(scores * gt_density, dim=grid_dim)\n\n        L = torch.logsumexp(scores, dim=grid_dim) + math.log(grid_scale) - score_corr\n\n        return L.mean()\n"
  },
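# --- Illustrative sketch (not from the repository) ---------------------------
# KLRegression estimates the KL divergence to the (implicitly normalized)
# model distribution up to a constant, using Monte Carlo samples: `scores`
# are the model's unnormalized log-scores at sampled locations drawn from a
# proposal with known density. Shapes and values here are arbitrary:
import torch
from ltr.models.loss.kl_regression import KLRegression

batch, num_mc = 8, 128
scores = torch.randn(batch, num_mc)               # s(y_i), unnormalized
sample_density = torch.rand(batch, num_mc) + 0.5  # q(y_i) of the proposal
gt_density = torch.rand(batch, num_mc)            # p(y_i) of the ground truth
print(KLRegression()(scores, sample_density, gt_density, mc_dim=-1).item())
# ------------------------------------------------------------------------------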
  {
    "path": "external/AR/ltr/models/loss/target_classification.py",
    "content": "import torch.nn as nn\nimport torch\nfrom torch.nn import functional as F\n\n\nclass LBHinge(nn.Module):\n    \"\"\"Loss that uses a 'hinge' on the lower bound.\n    This means that for samples with a label value smaller than the threshold, the loss is zero if the prediction is\n    also smaller than that threshold.\n    args:\n        error_matric:  What base loss to use (MSE by default).\n        threshold:  Threshold to use for the hinge.\n        clip:  Clip the loss if it is above this value.\n    \"\"\"\n    def __init__(self, error_metric=nn.MSELoss(), threshold=None, clip=None):\n        super().__init__()\n        self.error_metric = error_metric\n        self.threshold = threshold if threshold is not None else -100\n        self.clip = clip\n\n    def forward(self, prediction, label, target_bb=None):\n        negative_mask = (label < self.threshold).float()\n        positive_mask = (1.0 - negative_mask)\n\n        prediction = negative_mask * F.relu(prediction) + positive_mask * prediction\n\n        loss = self.error_metric(prediction, positive_mask * label)\n\n        if self.clip is not None:\n            loss = torch.min(loss, torch.tensor([self.clip], device=loss.device))\n        return loss\n"
  },
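# --- Illustrative sketch (not from the repository) ---------------------------
# LBHinge in a nutshell: where the label is below the threshold (background),
# negative predictions are clamped to zero and incur no loss:
import torch
from ltr.models.loss.target_classification import LBHinge

loss_fn = LBHinge(threshold=0.05)
prediction = torch.tensor([[-0.3, 0.8]])
label = torch.tensor([[0.0, 1.0]])
print(loss_fn(prediction, label).item())  # ~0.02: only the second entry counts
# ------------------------------------------------------------------------------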
  {
    "path": "external/AR/ltr/models/meta/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/ltr/models/meta/steepestdescent.py",
    "content": "import math\nimport torch\nimport torch.nn as nn\nfrom pytracking import TensorList\nfrom ltr.models.layers import activation\n\n\nclass GNSteepestDescent(nn.Module):\n    \"\"\"General module for steepest descent based meta learning.\"\"\"\n    def __init__(self, residual_module, num_iter=1, compute_losses=False, detach_length=float('Inf'),\n                 parameter_batch_dim=0, residual_batch_dim=0, steplength_reg=0.0,\n                 filter_dilation_factors=None):\n        super().__init__()\n\n        self.residual_module = residual_module\n        self.num_iter = num_iter\n        self.compute_losses = compute_losses\n        self.detach_length = detach_length\n        self.steplength_reg = steplength_reg\n        self._parameter_batch_dim = parameter_batch_dim\n        self._residual_batch_dim = residual_batch_dim\n        self.filter_dilation_factors = filter_dilation_factors\n\n    def _sqr_norm(self, x: TensorList, batch_dim=0):\n        sum_keep_batch_dim = lambda e: e.sum(dim=[d for d in range(e.dim()) if d != batch_dim])\n        return sum((x * x).apply(sum_keep_batch_dim))\n\n\n    def _compute_loss(self, res):\n        return sum((res * res).sum()) / sum(res.numel())\n\n\n    def forward(self, meta_parameter: TensorList, num_iter=None, *args, **kwargs):\n        # Make sure grad is enabled\n        torch_grad_enabled = torch.is_grad_enabled()\n        torch.set_grad_enabled(True)\n\n        num_iter = self.num_iter if num_iter is None else num_iter\n\n        meta_parameter_iterates = [meta_parameter]\n        losses = []\n\n        for i in range(num_iter):\n            if i > 0 and i % self.detach_length == 0:\n                meta_parameter = meta_parameter.detach()\n\n            meta_parameter.requires_grad_(True)\n\n            # Compute residual vector\n            r = self.residual_module(meta_parameter, filter_dilation_factors=self.filter_dilation_factors, **kwargs)\n\n            if self.compute_losses:\n                losses.append(self._compute_loss(r))\n\n            # Compute gradient of loss\n            u = r.clone()\n            g = TensorList(torch.autograd.grad(r, meta_parameter, u, create_graph=True))\n\n            # Multiply gradient with Jacobian\n            h = TensorList(torch.autograd.grad(g, u, g, create_graph=True))\n\n            # Compute squared norms\n            ip_gg = self._sqr_norm(g, batch_dim=self._parameter_batch_dim)\n            ip_hh = self._sqr_norm(h, batch_dim=self._residual_batch_dim)\n\n            # Compute step length\n            alpha = ip_gg / (ip_hh + self.steplength_reg * ip_gg).clamp(1e-8)\n\n            # Compute optimization step\n            step = g.apply(lambda e: alpha.reshape([-1 if d==self._parameter_batch_dim else 1 for d in range(e.dim())]) * e)\n\n            # Add step to parameter\n            meta_parameter = meta_parameter - step\n\n            meta_parameter_iterates.append(meta_parameter)\n\n\n        if self.compute_losses:\n            losses.append(self._compute_loss(self.residual_module(meta_parameter,\n                                                                  filter_dilation_factors=self.filter_dilation_factors,\n                                                                  **kwargs)))\n\n        # Reset the grad enabled flag\n        torch.set_grad_enabled(torch_grad_enabled)\n        if not torch_grad_enabled:\n            meta_parameter.detach_()\n            for w in meta_parameter_iterates:\n                w.detach_()\n            for l in losses:\n            
    l.detach_()\n\n        return meta_parameter, meta_parameter_iterates, losses\n"
  },
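# --- Illustrative sketch (not from the repository) ---------------------------
# The double torch.autograd.grad trick in GNSteepestDescent: with residual
# r(w), the first call gives g = J^T r (vector-Jacobian product) and, because
# create_graph records the backward pass, a second call w.r.t. the dummy
# tensor u yields h = J g. The step length alpha = <g,g> / (<h,h> + reg*<g,g>)
# is the exact Gauss-Newton line search. A minimal single-tensor version:
import torch

def gn_sd_step(w, residual_fn, reg=0.0):
    w = w.detach().requires_grad_(True)
    r = residual_fn(w)
    u = r.clone()
    (g,) = torch.autograd.grad(r, w, u, create_graph=True)  # g = J^T r
    (h,) = torch.autograd.grad(g, u, g, create_graph=True)  # h = J g
    alpha = (g * g).sum() / ((h * h).sum() + reg * (g * g).sum()).clamp(1e-8)
    return (w - alpha * g).detach()

A, b = torch.randn(10, 3), torch.randn(10)
w = torch.zeros(3)
for _ in range(3):
    w = gn_sd_step(w, lambda w: A @ w - b)
    print(((A @ w - b) ** 2).sum().item())  # decreases monotonically
# ------------------------------------------------------------------------------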
  {
    "path": "external/AR/ltr/models/neck/CorrNL.py",
    "content": "import torch.nn as nn\nimport torch\nfrom ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D\nfrom torch.nn import functional as F\nfrom ltr.models.neck.neck_utils import *\n\nclass CorrNL(nn.Module):\n    \"\"\"Network module for IoU prediction. Refer to the ATOM paper for an illustration of the architecture.\n    It uses two backbone feature layers as input.\n    args:\n        input_dim:  Feature dimensionality of the two input backbone layers.\n        pred_input_dim:  Dimensionality input the the prediction network.\n        pred_inter_dim:  Intermediate dimensionality in the prediction network.\"\"\"\n\n    def __init__(self, pool_size=8, use_NL=True):\n        super().__init__()\n        self.prroi_pool = PrRoIPool2D(pool_size, pool_size, 1/16)\n        num_corr_channel = pool_size*pool_size\n        self.channel_attention = SEModule(num_corr_channel,reduction=4)\n        self.spatial_attention = NONLocalBlock2D(in_channels=num_corr_channel)\n        self.use_NL = use_NL\n    def forward(self, feat1, feat2, bb1):\n        \"\"\"Runs the ATOM IoUNet during training operation.\n        This forward pass is mainly used for training. Call the individual functions during tracking instead.\n        args:\n            feat1:  Features from the reference frames (4 or 5 dims).\n            feat2:  Features from the test frames (4 or 5 dims).\n            bb1:  Target boxes (x,y,w,h) in image coords in the reference samples. Dims (images, sequences, 4).\n            proposals2:  Proposal boxes for which the IoU will be predicted (images, sequences, num_proposals, 4).\"\"\"\n\n        assert bb1.dim() == 3\n        # num_images, num_sequences = bb1.size()[:2] # 1, 64\n\n        # Extract first train sample\n        if len(feat1)==1:\n            feat1 = feat1[0] # size为(64,C,H,W)\n            feat2 = feat2[0] # size为(64,C,H,W)\n            bb1 = bb1[0,...] # (64,4)\n        else:\n            raise ValueError(\"Only support single-layer feature map\")\n        '''get PrRoIPool feature '''\n        # Add batch_index to rois\n        batch_size = bb1.shape[0]\n        batch_index = torch.arange(batch_size, dtype=torch.float32).view(-1, 1).to(bb1.device) # (64,1)\n        # input bb is in format xywh, convert it to x0y0x1y1 format\n        bb1 = bb1.clone()\n        bb1[:, 2:4] = bb1[:, 0:2] + bb1[:, 2:4]\n        roi1 = torch.cat((batch_index, bb1), dim=1) #(64,1),(64,4) ---> (64,5)\n        feat_roi1 = self.prroi_pool(feat1, roi1) # (64,C,H,W)\n        feat_corr,_ = self.corr_fun(feat_roi1, feat2)\n        # print('相关后的特征维度是:',feat_corr.size())#(batch,StxSt,Sr,Sr)\n        '''channel attention: Squeeze and Excitation'''\n        feat_ca = self.channel_attention(feat_corr) # 计算通道注意力特征\n        '''spatial attention: Non-local 2D'''\n        feat_sa = self.spatial_attention(feat_ca)\n        return feat_sa\n\n    def get_ref_kernel(self, feat1, bb1):\n        assert bb1.dim() == 3\n        # num_images, num_sequences = bb1.size()[:2] # 1, 64\n\n        # Extract first train sample\n        if len(feat1) == 1:\n            feat1 = feat1[0]  # size为(64,C,H,W)\n            bb1 = bb1[0, ...]  
# (64,4)\n        else:\n            raise ValueError(\"Only support single-layer feature map\")\n        '''get PrRoIPool feature '''\n        # Add batch_index to rois\n        batch_size = bb1.shape[0]\n        batch_index = torch.arange(batch_size, dtype=torch.float32).view(-1, 1).to(bb1.device)  # (64,1)\n        # input bb is in format xywh, convert it to x0y0x1y1 format\n        bb1 = bb1.clone()\n        bb1[:, 2:4] = bb1[:, 0:2] + bb1[:, 2:4]\n        roi1 = torch.cat((batch_index, bb1), dim=1)  # (64,1),(64,4) ---> (64,5)\n        '''注意: feat1 and roi1 must be cuda tensor'''\n        self.ref_kernel = self.prroi_pool(feat1.float(), roi1)  # (64,C,H,W)\n        # self.ref_kernel.half()\n\n    def fuse_feat(self, feat2):\n        '''fuse features from reference and test branch'''\n        if len(feat2) == 1:\n            feat2 = feat2[0]\n        '''Step1: pixel-wise correlation'''\n        feat_corr,_ = self.corr_fun(self.ref_kernel, feat2)\n        # print('相关后的特征维度是:',feat_corr.size())#(batch,StxSt,Sr,Sr) (batch,64,16,16)\n        '''Step2: channel attention: Squeeze and Excitation'''\n        feat_ca = self.channel_attention(feat_corr) # 计算通道注意力特征\n        if not self.use_NL:\n            # print('not use non-local')\n            return feat_ca\n        else:\n            '''Step3: spatial attention: Non-local 2D'''\n            feat_sa = self.spatial_attention(feat_ca)\n            return feat_sa\n\n\n    def corr_fun(self, Kernel_tmp, Feature, KERs=None):\n        size = Kernel_tmp.size()\n        CORR = []\n        Kernel = []\n        for i in range(len(Feature)):\n            ker = Kernel_tmp[i:i + 1]\n            fea = Feature[i:i + 1]\n            ker = ker.view(size[1], size[2] * size[3]).transpose(0, 1)\n            ker = ker.unsqueeze(2).unsqueeze(3)\n            if not (type(KERs) == type(None)):\n                ker = torch.cat([ker, KERs[i]], 0)\n            co = F.conv2d(fea, ker.contiguous())\n            CORR.append(co)\n            ker = ker.unsqueeze(0)\n            Kernel.append(ker)\n        corr = torch.cat(CORR, 0)\n        Kernel = torch.cat(Kernel, 0)\n        return corr, Kernel\n"
  },
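# --- Illustrative sketch (not from the repository) ---------------------------
# corr_fun() above loops over the batch; each spatial position of the pooled
# reference kernel acts as a 1x1 filter on the test feature map ("pixel-wise
# correlation"). An equivalent batched form using grouped convolution:
import torch
import torch.nn.functional as F

def pixelwise_corr(kernel, feat):
    # kernel: (B, C, h, w), feat: (B, C, H, W)  ->  (B, h*w, H, W)
    B, C, h, w = kernel.shape
    ker = kernel.reshape(B, C, h * w).permute(0, 2, 1).reshape(B * h * w, C, 1, 1)
    out = F.conv2d(feat.reshape(1, B * C, *feat.shape[-2:]), ker, groups=B)
    return out.reshape(B, h * w, *feat.shape[-2:])
# ------------------------------------------------------------------------------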
  {
    "path": "external/AR/ltr/models/neck/neck_utils.py",
    "content": "import torch.nn as nn\nimport torch\nfrom torch.nn import functional as F\n\ndef conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1):\n    return nn.Sequential(\n            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n                      padding=padding, dilation=dilation, bias=True),\n            nn.BatchNorm2d(out_planes),\n            nn.ReLU(inplace=True))\n\n'''Channel attention module'''\nclass SEModule(nn.Module):\n\n    def __init__(self, channels, reduction=4):\n        super(SEModule, self).__init__()\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1,\n                             padding=0)\n        self.relu = nn.ReLU(inplace=True)\n        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1,\n                             padding=0)\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n        module_input = x\n        x = self.avg_pool(x)\n        x = self.fc1(x)\n        x = self.relu(x)\n        x = self.fc2(x)\n        x = self.sigmoid(x)\n        return module_input * x\n'''Non-local module'''\nclass _NonLocalBlockND(nn.Module):\n    def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):\n        \"\"\"\n        :param in_channels:\n        :param inter_channels:\n        :param dimension:\n        :param sub_sample:\n        :param bn_layer:\n        \"\"\"\n        super(_NonLocalBlockND, self).__init__()\n\n        assert dimension in [1, 2, 3]\n\n        self.dimension = dimension\n        self.sub_sample = sub_sample\n\n        self.in_channels = in_channels\n        self.inter_channels = inter_channels\n\n        if self.inter_channels is None:\n            self.inter_channels = in_channels // 2\n            if self.inter_channels == 0:\n                self.inter_channels = 1\n\n        if dimension == 3:\n            conv_nd = nn.Conv3d\n            max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))\n            bn = nn.BatchNorm3d\n        elif dimension == 2:\n            conv_nd = nn.Conv2d\n            max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))\n            bn = nn.BatchNorm2d\n        else:\n            conv_nd = nn.Conv1d\n            max_pool_layer = nn.MaxPool1d(kernel_size=(2))\n            bn = nn.BatchNorm1d\n\n        self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n                         kernel_size=1, stride=1, padding=0)\n\n        if bn_layer:\n            self.W = nn.Sequential(\n                conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,\n                        kernel_size=1, stride=1, padding=0),\n                bn(self.in_channels)\n            )\n            nn.init.constant_(self.W[1].weight, 0)\n            nn.init.constant_(self.W[1].bias, 0)\n        else:\n            self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,\n                             kernel_size=1, stride=1, padding=0)\n            nn.init.constant_(self.W.weight, 0)\n            nn.init.constant_(self.W.bias, 0)\n\n        self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n                             kernel_size=1, stride=1, padding=0)\n        self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,\n                           kernel_size=1, stride=1, padding=0)\n\n        if sub_sample:\n      
      self.g = nn.Sequential(self.g, max_pool_layer)\n            self.phi = nn.Sequential(self.phi, max_pool_layer)\n\n    def forward(self, x, return_nl_map=False):\n        \"\"\"\n        :param x: (b, c, t, h, w)\n        :param return_nl_map: if True return z, nl_map, else only return z.\n        :return:\n        \"\"\"\n\n        batch_size = x.size(0)\n\n        g_x = self.g(x).view(batch_size, self.inter_channels, -1)\n        g_x = g_x.permute(0, 2, 1)\n\n        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)\n        theta_x = theta_x.permute(0, 2, 1)\n        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)\n        f = torch.matmul(theta_x, phi_x)\n        f_div_C = F.softmax(f, -1)\n\n        y = torch.matmul(f_div_C, g_x)\n        y = y.permute(0, 2, 1).contiguous()\n        y = y.view(batch_size, self.inter_channels, *x.size()[2:])\n        W_y = self.W(y)\n        z = W_y + x\n\n        if return_nl_map:\n            return z, f_div_C\n        return z\n\nclass NONLocalBlock2D(_NonLocalBlockND):\n    def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):\n        super(NONLocalBlock2D, self).__init__(in_channels,\n                                              inter_channels=inter_channels,\n                                              dimension=2, sub_sample=sub_sample,\n                                              bn_layer=bn_layer,)\n\n\n\n"
  },
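# --- Illustrative sketch (not from the repository) ---------------------------
# Both attention blocks used by CorrNL preserve the input shape:
import torch
from ltr.models.neck.neck_utils import SEModule, NONLocalBlock2D

x = torch.randn(2, 64, 16, 16)
x = SEModule(64, reduction=4)(x)        # channel attention
x = NONLocalBlock2D(in_channels=64)(x)  # spatial (non-local) attention
print(x.shape)  # torch.Size([2, 64, 16, 16])
# ------------------------------------------------------------------------------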
  {
    "path": "external/AR/ltr/models/target_classifier/__init__.py",
    "content": "from .linear_filter import LinearFilter\n"
  },
  {
    "path": "external/AR/ltr/models/target_classifier/features.py",
    "content": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision.models.resnet import BasicBlock, Bottleneck\nfrom ltr.models.layers.normalization import InstanceL2Norm\nfrom ltr.models.layers.transform import InterpCat\n\n\ndef residual_basic_block(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,\n                         interp_cat=False, final_relu=False, init_pool=False):\n    \"\"\"Construct a network block based on the BasicBlock used in ResNet 18 and 34.\"\"\"\n    if out_dim is None:\n        out_dim = feature_dim\n    feat_layers = []\n    if interp_cat:\n        feat_layers.append(InterpCat())\n    if init_pool:\n        feat_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\n    for i in range(num_blocks):\n        odim = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim\n        feat_layers.append(BasicBlock(feature_dim, odim))\n    if final_conv:\n        feat_layers.append(nn.Conv2d(feature_dim, out_dim, kernel_size=3, padding=1, bias=False))\n        if final_relu:\n            feat_layers.append(nn.ReLU(inplace=True))\n    if l2norm:\n        feat_layers.append(InstanceL2Norm(scale=norm_scale))\n    return nn.Sequential(*feat_layers)\n\n\ndef residual_basic_block_pool(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,\n                              pool=True):\n    \"\"\"Construct a network block based on the BasicBlock used in ResNet.\"\"\"\n    if out_dim is None:\n        out_dim = feature_dim\n    feat_layers = []\n    for i in range(num_blocks):\n        odim = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim\n        feat_layers.append(BasicBlock(feature_dim, odim))\n    if final_conv:\n        feat_layers.append(nn.Conv2d(feature_dim, out_dim, kernel_size=3, padding=1, bias=False))\n    if pool:\n        feat_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\n    if l2norm:\n        feat_layers.append(InstanceL2Norm(scale=norm_scale))\n\n    return nn.Sequential(*feat_layers)\n\n\ndef residual_bottleneck(feature_dim=256, num_blocks=1, l2norm=True, final_conv=False, norm_scale=1.0, out_dim=None,\n                        interp_cat=False, final_relu=False, final_pool=False):\n    \"\"\"Construct a network block based on the Bottleneck block used in ResNet.\"\"\"\n    if out_dim is None:\n        out_dim = feature_dim\n    feat_layers = []\n    if interp_cat:\n        feat_layers.append(InterpCat())\n    for i in range(num_blocks):\n        planes = feature_dim if i < num_blocks - 1 + int(final_conv) else out_dim // 4\n        feat_layers.append(Bottleneck(4*feature_dim, planes))\n    if final_conv:\n        feat_layers.append(nn.Conv2d(4*feature_dim, out_dim, kernel_size=3, padding=1, bias=False))\n        if final_relu:\n            feat_layers.append(nn.ReLU(inplace=True))\n        if final_pool:\n            feat_layers.append(nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\n    if l2norm:\n        feat_layers.append(InstanceL2Norm(scale=norm_scale))\n    return nn.Sequential(*feat_layers)"
  },
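# --- Illustrative sketch (not from the repository) ---------------------------
# Typical use of residual_bottleneck(): a DiMP-style classification-feature
# head on top of ResNet layer3 (1024 = 4*feature_dim input channels). The
# norm_scale shown is one common choice for a 4x4 filter and 512-d output:
import math
import torch
from ltr.models.target_classifier.features import residual_bottleneck

head = residual_bottleneck(feature_dim=256, num_blocks=0, l2norm=True,
                           final_conv=True, out_dim=512,
                           norm_scale=math.sqrt(1.0 / (512 * 4 * 4)))
print(head(torch.randn(2, 1024, 18, 18)).shape)  # torch.Size([2, 512, 18, 18])
# ------------------------------------------------------------------------------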
  {
    "path": "external/AR/ltr/models/target_classifier/initializer.py",
    "content": "import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom ltr.external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D\nfrom ltr.models.layers.blocks import conv_block\nimport math\n\n\nclass FilterPool(nn.Module):\n    \"\"\"Pool the target region in a feature map.\n    args:\n        filter_size:  Size of the filter.\n        feature_stride:  Input feature stride.\n        pool_square:  Do a square pooling instead of pooling the exact target region.\"\"\"\n\n    def __init__(self, filter_size=1, feature_stride=16, pool_square=False):\n        super().__init__()\n        self.prroi_pool = PrRoIPool2D(filter_size, filter_size, 1/feature_stride)\n        self.pool_square = pool_square\n\n    def forward(self, feat, bb):\n        \"\"\"Pool the regions in bb.\n        args:\n            feat:  Input feature maps. Dims (num_samples, feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (num_samples, 4).\n        returns:\n            pooled_feat:  Pooled features. Dims (num_samples, feat_dim, wH, wW).\"\"\"\n\n        # Add batch_index to rois\n        bb = bb.reshape(-1,4)\n        num_images_total = bb.shape[0]\n        batch_index = torch.arange(num_images_total, dtype=torch.float32).reshape(-1, 1).to(bb.device)\n\n        # input bb is in format xywh, convert it to x0y0x1y1 format\n        pool_bb = bb.clone()\n\n        if self.pool_square:\n            bb_sz = pool_bb[:, 2:4].prod(dim=1, keepdim=True).sqrt()\n            pool_bb[:, :2] += pool_bb[:, 2:]/2 - bb_sz/2\n            pool_bb[:, 2:] = bb_sz\n\n        pool_bb[:, 2:4] = pool_bb[:, 0:2] + pool_bb[:, 2:4]\n        roi1 = torch.cat((batch_index, pool_bb), dim=1)\n\n        return self.prroi_pool(feat, roi1)\n\n\n\nclass FilterInitializer(nn.Module):\n    \"\"\"Initializes a target classification filter by applying a number of conv layers before and after pooling the target region.\n    args:\n        filter_size:  Size of the filter.\n        feature_dim:  Input feature dimentionality.\n        feature_stride:  Input feature stride.\n        pool_square:  Do a square pooling instead of pooling the exact target region.\n        filter_norm:  Normalize the output filter with its size in the end.\n        num_filter_pre_convs:  Conv layers before pooling.\n        num_filter_post_convs:  Conv layers after pooling.\"\"\"\n\n    def __init__(self, filter_size=1, feature_dim=256, feature_stride=16, pool_square=False, filter_norm=True,\n                 num_filter_pre_convs=1, num_filter_post_convs=0):\n        super().__init__()\n\n        self.filter_pool = FilterPool(filter_size=filter_size, feature_stride=feature_stride, pool_square=pool_square)\n        self.filter_norm = filter_norm\n\n        # Make pre conv\n        pre_conv_layers = []\n        for i in range(num_filter_pre_convs):\n            pre_conv_layers.append(conv_block(feature_dim, feature_dim, kernel_size=3, padding=1))\n        self.filter_pre_layers = nn.Sequential(*pre_conv_layers) if pre_conv_layers else None\n\n        # Make post conv\n        post_conv_layers = []\n        for i in range(num_filter_post_convs):\n            post_conv_layers.append(conv_block(feature_dim, feature_dim, kernel_size=1, padding=0))\n        post_conv_layers.append(nn.Conv2d(feature_dim, feature_dim, kernel_size=1, padding=0))\n        self.filter_post_layers = nn.Sequential(*post_conv_layers)\n\n        # Init weights\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n         
       n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n\n    def forward(self, feat, bb):\n        \"\"\"Runs the initializer module.\n        Note that [] denotes an optional dimension.\n        args:\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n        returns:\n            weights:  The output weights. Dims (sequences, feat_dim, wH, wW).\"\"\"\n\n        num_images = bb.shape[0] if bb.dim() == 3 else 1\n\n        if self.filter_pre_layers is not None:\n            feat = self.filter_pre_layers(feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]))\n\n        feat_post = self.filter_pool(feat, bb)\n        weights = self.filter_post_layers(feat_post)\n\n        if num_images > 1:\n            weights = torch.mean(weights.reshape(num_images, -1, weights.shape[-3], weights.shape[-2], weights.shape[-1]), dim=0)\n\n        if self.filter_norm:\n            weights = weights / (weights.shape[1] * weights.shape[2] * weights.shape[3])\n\n        return weights\n\n\nclass FilterInitializerLinear(nn.Module):\n    \"\"\"Initializes a target classification filter by applying a linear conv layer and then pooling the target region.\n    args:\n        filter_size:  Size of the filter.\n        feature_dim:  Input feature dimentionality.\n        feature_stride:  Input feature stride.\n        pool_square:  Do a square pooling instead of pooling the exact target region.\n        filter_norm:  Normalize the output filter with its size in the end.\n        conv_ksz:  Kernel size of the conv layer before pooling.\"\"\"\n\n    def __init__(self, filter_size=1, feature_dim=256, feature_stride=16, pool_square=False, filter_norm=True,\n                 conv_ksz=3, init_weights='default'):\n        super().__init__()\n\n        self.filter_conv = nn.Conv2d(feature_dim, feature_dim, kernel_size=conv_ksz, padding=conv_ksz // 2)\n        self.filter_pool = FilterPool(filter_size=filter_size, feature_stride=feature_stride, pool_square=pool_square)\n        self.filter_norm = filter_norm\n\n        # Init weights\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                if init_weights == 'default':\n                    n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                    m.weight.data.normal_(0, math.sqrt(2. / n))\n                elif init_weights == 'zero':\n                    m.weight.data.zero_()\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n\n    def forward(self, feat, bb):\n        \"\"\"Runs the initializer module.\n        Note that [] denotes an optional dimension.\n        args:\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n        returns:\n            weights:  The output weights. 
Dims (sequences, feat_dim, wH, wW).\"\"\"\n\n        num_images = feat.shape[0]\n\n        feat = self.filter_conv(feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1]))\n\n        weights = self.filter_pool(feat, bb)\n\n        # If multiple input images, compute the initial filter as the average filter.\n        if num_images > 1:\n            weights = torch.mean(weights.reshape(num_images, -1, weights.shape[-3], weights.shape[-2], weights.shape[-1]), dim=0)\n\n        if self.filter_norm:\n            weights = weights / (weights.shape[1] * weights.shape[2] * weights.shape[3])\n\n        return weights\n\n\n\nclass FilterInitializerZero(nn.Module):\n    \"\"\"Initializes a target classification filter with zeros.\n    args:\n        filter_size:  Size of the filter.\n        feature_dim:  Input feature dimentionality.\"\"\"\n\n    def __init__(self, filter_size=1, feature_dim=256):\n        super().__init__()\n\n        self.filter_size = (feature_dim, filter_size, filter_size)\n\n    def forward(self, feat, bb):\n        \"\"\"Runs the initializer module.\n        Note that [] denotes an optional dimension.\n        args:\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n        returns:\n            weights:  The output weights. Dims (sequences, feat_dim, wH, wW).\"\"\"\n\n        num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n\n        return feat.new_zeros(num_sequences, self.filter_size[0], self.filter_size[1], self.filter_size[2])\n\n\nclass FilterInitializerSiamese(nn.Module):\n    \"\"\"Initializes a target classification filter by only pooling the target region (similar to Siamese trackers).\n    args:\n        filter_size:  Size of the filter.\n        feature_stride:  Input feature stride.\n        pool_square:  Do a square pooling instead of pooling the exact target region.\n        filter_norm:  Normalize the output filter with its size in the end.\"\"\"\n\n    def __init__(self, filter_size=1, feature_stride=16, pool_square=False, filter_norm=True):\n        super().__init__()\n\n        self.filter_pool = FilterPool(filter_size=filter_size, feature_stride=feature_stride, pool_square=pool_square)\n        self.filter_norm = filter_norm\n\n        # Init weights\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n\n    def forward(self, feat, bb):\n        \"\"\"Runs the initializer module.\n        Note that [] denotes an optional dimension.\n        args:\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n        returns:\n            weights:  The output weights. 
Dims (sequences, feat_dim, wH, wW).\"\"\"\n\n        num_images = feat.shape[0]\n\n        feat = feat.reshape(-1, feat.shape[-3], feat.shape[-2], feat.shape[-1])\n        weights = self.filter_pool(feat, bb)\n\n        if num_images > 1:\n            weights = torch.mean(weights.reshape(num_images, -1, weights.shape[-3], weights.shape[-2], weights.shape[-1]), dim=0)\n\n        if self.filter_norm:\n            weights = weights / (weights.shape[1] * weights.shape[2] * weights.shape[3])\n\n        return weights\n"
  },
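# --- Illustrative sketch (not from the repository) ---------------------------
# FilterPool converts (x, y, w, h) boxes into the (batch_index, x0, y0, x1, y1)
# rows that PrRoIPool2D expects:
import torch

bb = torch.tensor([[10., 20., 40., 30.]])  # one xywh box
idx = torch.arange(bb.shape[0], dtype=torch.float32).reshape(-1, 1)
roi = torch.cat((idx, bb[:, :2], bb[:, :2] + bb[:, 2:]), dim=1)
print(roi)  # tensor([[ 0., 10., 20., 50., 50.]])
# ------------------------------------------------------------------------------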
  {
    "path": "external/AR/ltr/models/target_classifier/linear_filter.py",
    "content": "import torch.nn as nn\nimport ltr.models.layers.filter as filter_layer\nimport math\n\n\nclass LinearFilter(nn.Module):\n    \"\"\"Target classification filter module.\n    args:\n        filter_size:  Size of filter (int).\n        filter_initialize:  Filter initializer module.\n        filter_optimizer:  Filter optimizer module.\n        feature_extractor:  Feature extractor module applied to the input backbone features.\"\"\"\n\n    def __init__(self, filter_size, filter_initializer, filter_optimizer=None, feature_extractor=None):\n        super().__init__()\n\n        self.filter_size = filter_size\n\n        # Modules\n        self.filter_initializer = filter_initializer\n        self.filter_optimizer = filter_optimizer\n        self.feature_extractor = feature_extractor\n\n        # Init weights\n        for m in self.feature_extractor.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n    def forward(self, train_feat, test_feat, train_bb, *args, **kwargs):\n        \"\"\"Learns a target classification filter based on the train samples and return the resulting classification\n        scores on the test samples.\n        The forward function is ONLY used for training. Call the individual functions during tracking.\n        args:\n            train_feat:  Backbone features for the train samples (4 or 5 dims).\n            test_feat:  Backbone features for the test samples (4 or 5 dims).\n            trian_bb:  Target boxes (x,y,w,h) for the train samples in image coordinates. 
Dims (images, sequences, 4).\n            *args, **kwargs:  These are passed to the optimizer module.\n        returns:\n            test_scores:  Classification scores on the test samples.\"\"\"\n\n        assert train_bb.dim() == 3\n\n        num_sequences = train_bb.shape[1]\n\n        if train_feat.dim() == 5:\n            train_feat = train_feat.reshape(-1, *train_feat.shape[-3:])\n        if test_feat.dim() == 5:\n            test_feat = test_feat.reshape(-1, *test_feat.shape[-3:])\n\n        # Extract features\n        train_feat = self.extract_classification_feat(train_feat, num_sequences)\n        test_feat = self.extract_classification_feat(test_feat, num_sequences)\n\n        # Train filter\n        filter, filter_iter, losses = self.get_filter(train_feat, train_bb, *args, **kwargs)\n\n        # Classify samples using all return filters\n        test_scores = [self.classify(f, test_feat) for f in filter_iter]\n\n        return test_scores\n\n    def extract_classification_feat(self, feat, num_sequences=None):\n        \"\"\"Extract classification features based on the input backbone features.\"\"\"\n        if self.feature_extractor is None:\n            return feat\n        if num_sequences is None:\n            return self.feature_extractor(feat)\n\n        output = self.feature_extractor(feat)\n        return output.reshape(-1, num_sequences, *output.shape[-3:])\n\n    def classify(self, weights, feat):\n        \"\"\"Run classifier (filter) on the features (feat).\"\"\"\n\n        scores = filter_layer.apply_filter(feat, weights)\n\n        return scores\n\n    def get_filter(self, feat, bb, *args, **kwargs):\n        \"\"\"Outputs the learned filter based on the input features (feat) and target boxes (bb) by running the\n        filter initializer and optimizer. Note that [] denotes an optional dimension.\n        args:\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n            *args, **kwargs:  These are passed to the optimizer module.\n        returns:\n            weights:  The final oprimized weights. 
Dims (sequences, feat_dim, wH, wW).\n            weight_iterates:  The weights computed in each iteration (including initial input and final output).\n            losses:  Train losses.\"\"\"\n\n        weights = self.filter_initializer(feat, bb)\n\n        if self.filter_optimizer is not None:\n            weights, weights_iter, losses = self.filter_optimizer(weights, feat=feat, bb=bb, *args, **kwargs)\n        else:\n            weights_iter = [weights]\n            losses = None\n\n        return weights, weights_iter, losses\n\n    def train_classifier(self, backbone_feat, bb):\n        num_sequences = bb.shape[1]\n\n        if backbone_feat.dim() == 5:\n            backbone_feat = backbone_feat.reshape(-1, *backbone_feat.shape[-3:])\n\n        # Extract features\n        train_feat = self.extract_classification_feat(backbone_feat, num_sequences)\n\n        # Get filters from each iteration\n        final_filter, _, train_losses = self.get_filter(train_feat, bb)\n        return final_filter, train_losses\n\n    def track_frame(self, filter_weights, backbone_feat):\n        if backbone_feat.dim() == 5:\n            num_sequences = backbone_feat.shape[1]\n            backbone_feat = backbone_feat.reshape(-1, *backbone_feat.shape[-3:])\n        else:\n            num_sequences = None\n\n        test_feat = self.extract_classification_feat(backbone_feat, num_sequences)\n\n        scores = filter_layer.apply_filter(test_feat, filter_weights)\n\n        return scores"
  },
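# --- Illustrative sketch (not from the repository) ---------------------------
# Minimal LinearFilter assembly: zero filter initializer, no optimizer, and a
# single conv as the feature extractor. Shapes follow the docstrings above;
# all hyper-parameters here are placeholders:
import torch
import torch.nn as nn
from ltr.models.target_classifier.linear_filter import LinearFilter
from ltr.models.target_classifier.initializer import FilterInitializerZero

clf = LinearFilter(filter_size=4,
                   filter_initializer=FilterInitializerZero(filter_size=4, feature_dim=16),
                   feature_extractor=nn.Conv2d(16, 16, kernel_size=3, padding=1))
train_feat = torch.randn(2, 3, 16, 18, 18)  # (images, sequences, C, H, W)
test_feat = torch.randn(2, 3, 16, 18, 18)
train_bb = torch.rand(2, 3, 4) * 50         # (images, sequences, 4), xywh
scores = clf(train_feat, test_feat, train_bb)
print(scores[-1].shape)  # one score map per filter iterate
# ------------------------------------------------------------------------------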
  {
    "path": "external/AR/ltr/models/target_classifier/optimizer.py",
    "content": "import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nimport ltr.models.layers.filter as filter_layer\nimport ltr.models.layers.activation as activation\nfrom ltr.models.layers.distance import DistanceMap\nimport math\n\n\n\nclass DiMPSteepestDescentGN(nn.Module):\n    \"\"\"Optimizer module for DiMP.\n    It unrolls the steepest descent with Gauss-Newton iterations to optimize the target filter.\n    Moreover it learns parameters in the loss itself, as described in the DiMP paper.\n    args:\n        num_iter:  Number of default optimization iterations.\n        feat_stride:  The stride of the input feature.\n        init_step_length:  Initial scaling of the step length (which is then learned).\n        init_filter_reg:  Initial filter regularization weight (which is then learned).\n        init_gauss_sigma:  The standard deviation to use for the initialization of the label function.\n        num_dist_bins:  Number of distance bins used for learning the loss label, mask and weight.\n        bin_displacement:  The displacement of the bins (level of discritization).\n        mask_init_factor:  Parameter controlling the initialization of the target mask.\n        score_act:  Type of score activation (target mask computation) to use. The default 'relu' is what is described in the paper.\n        act_param:  Parameter for the score_act.\n        min_filter_reg:  Enforce a minimum value on the regularization (helps stability sometimes).\n        mask_act:  What activation to do on the output of the mask computation ('sigmoid' or 'linear').\n        detach_length:  Detach the filter every n-th iteration. Default is to never detech, i.e. 'Inf'.\n        alpha_eps:  Term in the denominator of the steepest descent that stabalizes learning.\n    \"\"\"\n    def __init__(self, num_iter=1, feat_stride=16, init_step_length=1.0,\n                 init_filter_reg=1e-2, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0, mask_init_factor=4.0,\n                 score_act='relu', act_param=None, min_filter_reg=1e-3, mask_act='sigmoid',\n                 detach_length=float('Inf'), alpha_eps=0):\n        super().__init__()\n\n        self.num_iter = num_iter\n        self.feat_stride = feat_stride\n        self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1))\n        self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))\n        self.distance_map = DistanceMap(num_dist_bins, bin_displacement)\n        self.min_filter_reg = min_filter_reg\n        self.detach_length = detach_length\n        self.alpha_eps = alpha_eps\n\n        # Distance coordinates\n        d = torch.arange(num_dist_bins, dtype=torch.float32).reshape(1,-1,1,1) * bin_displacement\n        if init_gauss_sigma == 0:\n            init_gauss = torch.zeros_like(d)\n            init_gauss[0,0,0,0] = 1\n        else:\n            init_gauss = torch.exp(-1/2 * (d / init_gauss_sigma)**2)\n\n        # Module that predicts the target label function (y in the paper)\n        self.label_map_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)\n        self.label_map_predictor.weight.data = init_gauss - init_gauss.min()\n\n        # Module that predicts the target mask (m in the paper)\n        mask_layers = [nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)]\n        if mask_act == 'sigmoid':\n            mask_layers.append(nn.Sigmoid())\n            init_bias = 0.0\n        elif mask_act == 'linear':\n            init_bias = 0.5\n        else:\n            
raise ValueError('Unknown activation')\n        self.target_mask_predictor = nn.Sequential(*mask_layers)\n        self.target_mask_predictor[0].weight.data = mask_init_factor * torch.tanh(2.0 - d) + init_bias\n\n        # Module that predicts the residual weights (v in the paper)\n        self.spatial_weight_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)\n        self.spatial_weight_predictor.weight.data.fill_(1.0)\n\n        # The score actvation and its derivative\n        if score_act == 'bentpar':\n            self.score_activation = activation.BentIdentPar(act_param)\n            self.score_activation_deriv = activation.BentIdentParDeriv(act_param)\n        elif score_act == 'relu':\n            self.score_activation = activation.LeakyReluPar()\n            self.score_activation_deriv = activation.LeakyReluParDeriv()\n        else:\n            raise ValueError('Unknown score activation')\n\n\n    def forward(self, weights, feat, bb, sample_weight=None, num_iter=None, compute_losses=True):\n        \"\"\"Runs the optimizer module.\n        Note that [] denotes an optional dimension.\n        args:\n            weights:  Initial weights. Dims (sequences, feat_dim, wH, wW).\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n            sample_weight:  Optional weight for each sample. Dims: (images_in_sequence, [sequences]).\n            num_iter:  Number of iterations to run.\n            compute_losses:  Whether to compute the (train) loss in each iteration.\n        returns:\n            weights:  The final oprimized weights.\n            weight_iterates:  The weights computed in each iteration (including initial input and final output).\n            losses:  Train losses.\"\"\"\n\n        # Sizes\n        num_iter = self.num_iter if num_iter is None else num_iter\n        num_images = feat.shape[0]\n        num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n        filter_sz = (weights.shape[-2], weights.shape[-1])\n        output_sz = (feat.shape[-2] + (weights.shape[-2] + 1) % 2, feat.shape[-1] + (weights.shape[-1] + 1) % 2)\n\n        # Get learnable scalars\n        step_length_factor = torch.exp(self.log_step_length)\n        reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2)\n\n        # Compute distance map\n        dmap_offset = (torch.Tensor(filter_sz).to(bb.device) % 2) / 2.0\n        center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).reshape(-1, 2).flip((1,)) - dmap_offset\n        dist_map = self.distance_map(center, output_sz)\n\n        # Compute label map masks and weight\n        label_map = self.label_map_predictor(dist_map).reshape(num_images, num_sequences, *dist_map.shape[-2:])\n        target_mask = self.target_mask_predictor(dist_map).reshape(num_images, num_sequences, *dist_map.shape[-2:])\n        spatial_weight = self.spatial_weight_predictor(dist_map).reshape(num_images, num_sequences, *dist_map.shape[-2:])\n\n        # Get total sample weights\n        if sample_weight is None:\n            sample_weight = math.sqrt(1.0 / num_images) * spatial_weight\n        elif isinstance(sample_weight, torch.Tensor):\n            sample_weight = sample_weight.sqrt().reshape(num_images, num_sequences, 1, 1) * spatial_weight\n\n        backprop_through_learning = (self.detach_length > 0)\n\n        weight_iterates = [weights]\n        losses = 
[]\n\n        for i in range(num_iter):\n            if not backprop_through_learning or (i > 0 and i % self.detach_length == 0):\n                weights = weights.detach()\n\n            # Compute residuals\n            scores = filter_layer.apply_filter(feat, weights)\n            scores_act = self.score_activation(scores, target_mask)\n            score_mask = self.score_activation_deriv(scores, target_mask)\n            residuals = sample_weight * (scores_act - label_map)\n\n            if compute_losses:\n                losses.append(((residuals**2).sum() + reg_weight * (weights**2).sum())/num_sequences)\n\n            # Compute gradient\n            residuals_mapped = score_mask * (sample_weight * residuals)\n            weights_grad = filter_layer.apply_feat_transpose(feat, residuals_mapped, filter_sz, training=self.training) + \\\n                          reg_weight * weights\n\n            # Map the gradient with the Jacobian\n            scores_grad = filter_layer.apply_filter(feat, weights_grad)\n            scores_grad = sample_weight * (score_mask * scores_grad)\n\n            # Compute optimal step length\n            alpha_num = (weights_grad * weights_grad).sum(dim=(1,2,3))\n            alpha_den = ((scores_grad * scores_grad).reshape(num_images, num_sequences, -1).sum(dim=(0,2)) + (reg_weight + self.alpha_eps) * alpha_num).clamp(1e-8)\n            alpha = alpha_num / alpha_den\n\n            # Update filter\n            weights = weights - (step_length_factor * alpha.reshape(-1, 1, 1, 1)) * weights_grad\n\n            # Add the weight iterate\n            weight_iterates.append(weights)\n\n        if compute_losses:\n            scores = filter_layer.apply_filter(feat, weights)\n            scores = self.score_activation(scores, target_mask)\n            losses.append((((sample_weight * (scores - label_map))**2).sum() + reg_weight * (weights**2).sum())/num_sequences)\n\n        return weights, weight_iterates, losses\n\n\n\nclass DiMPL2SteepestDescentGN(nn.Module):\n    \"\"\"A simpler optimizer module that uses L2 loss.\n    args:\n        num_iter:  Number of default optimization iterations.\n        feat_stride:  The stride of the input feature.\n        init_step_length:  Initial scaling of the step length (which is then learned).\n        gauss_sigma:  The standard deviation of the label function.\n        hinge_threshold:  Threshold for the hinge-based loss (see DiMP paper).\n        init_filter_reg:  Initial filter regularization weight (which is then learned).\n        min_filter_reg:  Enforce a minimum value on the regularization (helps stability sometimes).\n        detach_length:  Detach the filter every n-th iteration. Default is to never detech, i.e. 
'Inf'.\n        alpha_eps:  Term in the denominator of the steepest descent that stabalizes learning.\n    \"\"\"\n    def __init__(self, num_iter=1, feat_stride=16, init_step_length=1.0, gauss_sigma=1.0, hinge_threshold=-999,\n                 init_filter_reg=1e-2, min_filter_reg=1e-3, detach_length=float('Inf'), alpha_eps=0.0):\n        super().__init__()\n\n        self.num_iter = num_iter\n        self.feat_stride = feat_stride\n        self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1))\n        self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))\n        self.min_filter_reg = min_filter_reg\n        self.detach_length = detach_length\n        self.hinge_threshold = hinge_threshold\n        self.gauss_sigma = gauss_sigma\n        self.alpha_eps = alpha_eps\n\n    def get_label(self, center, output_sz):\n        center = center.reshape(center.shape[0], -1, center.shape[-1])\n        k0 = torch.arange(output_sz[0], dtype=torch.float32).reshape(1, 1, -1, 1).to(center.device)\n        k1 = torch.arange(output_sz[1], dtype=torch.float32).reshape(1, 1, 1, -1).to(center.device)\n        g0 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * (k0 - center[:,:,0].reshape(*center.shape[:2], 1, 1)) ** 2)\n        g1 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * (k1 - center[:,:,1].reshape(*center.shape[:2], 1, 1)) ** 2)\n        gauss = g0 * g1\n        return gauss\n\n\n    def forward(self, weights, feat, bb, sample_weight=None, num_iter=None, compute_losses=True):\n        \"\"\"Runs the optimizer module.\n        Note that [] denotes an optional dimension.\n        args:\n            weights:  Initial weights. Dims (sequences, feat_dim, wH, wW).\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n            sample_weight:  Optional weight for each sample. 
Dims: (images_in_sequence, [sequences]).\n            num_iter:  Number of iterations to run.\n            compute_losses:  Whether to compute the (train) loss in each iteration.\n        returns:\n            weights:  The final oprimized weights.\n            weight_iterates:  The weights computed in each iteration (including initial input and final output).\n            losses:  Train losses.\"\"\"\n\n        # Sizes\n        num_iter = self.num_iter if num_iter is None else num_iter\n        num_images = feat.shape[0]\n        num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n        filter_sz = (weights.shape[-2], weights.shape[-1])\n        output_sz = (feat.shape[-2] + (weights.shape[-2] + 1) % 2, feat.shape[-1] + (weights.shape[-1] + 1) % 2)\n\n        # Get learnable scalars\n        step_length_factor = torch.exp(self.log_step_length)\n        reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2)\n\n        # Compute distance map\n        dmap_offset = (torch.Tensor(filter_sz).to(bb.device) % 2) / 2.0\n        center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).flip((-1,)) - dmap_offset\n        label_map = self.get_label(center, output_sz)\n        target_mask = (label_map > self.hinge_threshold).float()\n        label_map *= target_mask\n\n        # Get total sample weights\n        if sample_weight is None:\n            sample_weight = math.sqrt(1.0 / num_images)\n        elif isinstance(sample_weight, torch.Tensor):\n            sample_weight = sample_weight.sqrt().reshape(num_images, num_sequences, 1, 1)\n\n        weight_iterates = [weights]\n        losses = []\n\n        for i in range(num_iter):\n            if i > 0 and i % self.detach_length == 0:\n                weights = weights.detach()\n\n            # Compute residuals\n            scores = filter_layer.apply_filter(feat, weights)\n            scores_act = target_mask * scores + (1.0 - target_mask) * F.relu(scores)\n            score_mask = target_mask + (1.0 - target_mask) * (scores.detach() > 0).float()\n            residuals = sample_weight * (scores_act - label_map)\n\n            if compute_losses:\n                losses.append(((residuals**2).sum() + reg_weight * (weights**2).sum())/num_sequences)\n\n            # Compute gradient\n            residuals_mapped = score_mask * (sample_weight * residuals)\n            weights_grad = filter_layer.apply_feat_transpose(feat, residuals_mapped, filter_sz, training=self.training) + \\\n                          reg_weight * weights\n\n            # Map the gradient with the Jacobian\n            scores_grad = filter_layer.apply_filter(feat, weights_grad)\n            scores_grad = sample_weight * (score_mask * scores_grad)\n\n            # Compute optimal step length\n            alpha_num = (weights_grad * weights_grad).sum(dim=(1,2,3))\n            alpha_den = ((scores_grad * scores_grad).reshape(num_images, num_sequences, -1).sum(dim=(0,2)) + (reg_weight + self.alpha_eps) * alpha_num).clamp(1e-8)\n            alpha = alpha_num / alpha_den\n\n            # Update filter\n            weights = weights - (step_length_factor * alpha.reshape(-1, 1, 1, 1)) * weights_grad\n\n            # Add the weight iterate\n            weight_iterates.append(weights)\n\n        if compute_losses:\n            scores = filter_layer.apply_filter(feat, weights)\n            scores = target_mask * scores + (1.0 - target_mask) * F.relu(scores)\n            losses.append((((sample_weight * (scores - label_map))**2).sum() + reg_weight * 
(weights**2).sum())/num_sequences)\n\n        return weights, weight_iterates, losses\n\n\nclass PrDiMPSteepestDescentNewton(nn.Module):\n    \"\"\"Optimizer module for PrDiMP.\n    It unrolls the steepest descent with Newton iterations to optimize the target filter. See the PrDiMP paper.\n    args:\n        num_iter:  Number of default optimization iterations.\n        feat_stride:  The stride of the input feature.\n        init_step_length:  Initial scaling of the step length (which is then learned).\n        init_filter_reg:  Initial filter regularization weight (which is then learned).\n        gauss_sigma:  The standard deviation to use for the label density function.\n        min_filter_reg:  Enforce a minimum value on the regularization (helps stability sometimes).\n        detach_length:  Detach the filter every n-th iteration. Default is to never detech, i.e. 'Inf'.\n        alpha_eps:  Term in the denominator of the steepest descent that stabalizes learning.\n        init_uni_weight:  Weight of uniform label distribution.\n        normalize_label:  Wheter to normalize the label distribution.\n        label_shrink:  How much to shrink to label distribution.\n        softmax_reg:  Regularization in the denominator of the SoftMax.\n        label_threshold:  Threshold probabilities smaller than this.\n    \"\"\"\n    def __init__(self, num_iter=1, feat_stride=16, init_step_length=1.0,\n                 init_filter_reg=1e-2, gauss_sigma=1.0, min_filter_reg=1e-3, detach_length=float('Inf'),\n                 alpha_eps=0.0, init_uni_weight=None, normalize_label=False, label_shrink=0, softmax_reg=None, label_threshold=0.0):\n        super().__init__()\n\n        self.num_iter = num_iter\n        self.feat_stride = feat_stride\n        self.log_step_length = nn.Parameter(math.log(init_step_length) * torch.ones(1))\n        self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))\n        self.gauss_sigma = gauss_sigma\n        self.min_filter_reg = min_filter_reg\n        self.detach_length = detach_length\n        self.alpha_eps = alpha_eps\n        self.uni_weight = 0 if init_uni_weight is None else init_uni_weight\n        self.normalize_label = normalize_label\n        self.label_shrink = label_shrink\n        self.softmax_reg = softmax_reg\n        self.label_threshold = label_threshold\n\n    def get_label_density(self, center, output_sz):\n        center = center.reshape(center.shape[0], -1, center.shape[-1])\n        k0 = torch.arange(output_sz[0], dtype=torch.float32).reshape(1, 1, -1, 1).to(center.device)\n        k1 = torch.arange(output_sz[1], dtype=torch.float32).reshape(1, 1, 1, -1).to(center.device)\n        dist0 = (k0 - center[:,:,0].reshape(*center.shape[:2], 1, 1)) ** 2\n        dist1 = (k1 - center[:,:,1].reshape(*center.shape[:2], 1, 1)) ** 2\n        if self.gauss_sigma == 0:\n            dist0_view = dist0.reshape(-1, dist0.shape[-2])\n            dist1_view = dist1.reshape(-1, dist1.shape[-1])\n            one_hot0 = torch.zeros_like(dist0_view)\n            one_hot1 = torch.zeros_like(dist1_view)\n            one_hot0[torch.arange(one_hot0.shape[0]), dist0_view.argmin(dim=-1)] = 1.0\n            one_hot1[torch.arange(one_hot1.shape[0]), dist1_view.argmin(dim=-1)] = 1.0\n            gauss = one_hot0.reshape(dist0.shape) * one_hot1.reshape(dist1.shape)\n        else:\n            g0 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * dist0)\n            g1 = torch.exp(-1.0 / (2 * self.gauss_sigma ** 2) * dist1)\n            gauss = (g0 / 
(2*math.pi*self.gauss_sigma**2)) * g1\n        gauss = gauss * (gauss > self.label_threshold).float()\n        if self.normalize_label:\n            gauss /= (gauss.sum(dim=(-2,-1), keepdim=True) + 1e-8)\n        label_dens = (1.0 - self.label_shrink)*((1.0 - self.uni_weight) * gauss + self.uni_weight / (output_sz[0]*output_sz[1]))\n        return label_dens\n\n    def forward(self, weights, feat, bb, sample_weight=None, num_iter=None, compute_losses=True):\n        \"\"\"Runs the optimizer module.\n        Note that [] denotes an optional dimension.\n        args:\n            weights:  Initial weights. Dims (sequences, feat_dim, wH, wW).\n            feat:  Input feature maps. Dims (images_in_sequence, [sequences], feat_dim, H, W).\n            bb:  Target bounding boxes (x, y, w, h) in the image coords. Dims (images_in_sequence, [sequences], 4).\n            sample_weight:  Optional weight for each sample. Dims: (images_in_sequence, [sequences]).\n            num_iter:  Number of iterations to run.\n            compute_losses:  Whether to compute the (train) loss in each iteration.\n        returns:\n            weights:  The final optimized weights.\n            weight_iterates:  The weights computed in each iteration (including initial input and final output).\n            losses:  Train losses.\"\"\"\n\n        # Sizes\n        num_iter = self.num_iter if num_iter is None else num_iter\n        num_images = feat.shape[0]\n        num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n        filter_sz = (weights.shape[-2], weights.shape[-1])\n        output_sz = (feat.shape[-2] + (weights.shape[-2] + 1) % 2, feat.shape[-1] + (weights.shape[-1] + 1) % 2)\n\n        # Get learnable scalars\n        step_length_factor = torch.exp(self.log_step_length)\n        reg_weight = (self.filter_reg*self.filter_reg).clamp(min=self.min_filter_reg**2)\n\n        # Compute label density\n        offset = (torch.Tensor(filter_sz).to(bb.device) % 2) / 2.0\n        center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).flip((-1,)) - offset\n        label_density = self.get_label_density(center, output_sz)\n\n        # Get total sample weights\n        if sample_weight is None:\n            sample_weight = torch.Tensor([1.0 / num_images]).to(feat.device)\n        elif isinstance(sample_weight, torch.Tensor):\n            sample_weight = sample_weight.reshape(num_images, num_sequences, 1, 1)\n\n        exp_reg = 0 if self.softmax_reg is None else math.exp(self.softmax_reg)\n\n        # The loss is the sample-weighted KL divergence (up to an additive constant): the log-partition\n        # of the regularized softmax minus the label-weighted scores, plus the filter regularization term.\n        def _compute_loss(scores, weights):\n            return torch.sum(sample_weight.reshape(sample_weight.shape[0], -1) *\n                             (torch.log(scores.exp().sum(dim=(-2, -1)) + exp_reg) - (label_density * scores).sum(dim=(-2, -1)))) / num_sequences +\\\n                   reg_weight * (weights ** 2).sum() / num_sequences\n\n        weight_iterates = [weights]\n        losses = []\n\n        for i in range(num_iter):\n            if i > 0 and i % self.detach_length == 0:\n                weights = weights.detach()\n\n            # Compute \"residuals\"\n            scores = filter_layer.apply_filter(feat, weights)\n            scores_softmax = activation.softmax_reg(scores.reshape(num_images, num_sequences, -1), dim=2, reg=self.softmax_reg).reshape(scores.shape)\n            res = sample_weight*(scores_softmax - label_density)\n\n            if compute_losses:\n                losses.append(_compute_loss(scores, weights))\n\n            # Compute gradient\n            weights_grad = 
filter_layer.apply_feat_transpose(feat, res, filter_sz, training=self.training) + \\\n                          reg_weight * weights\n\n            # Map the gradient with the Hessian\n            scores_grad = filter_layer.apply_filter(feat, weights_grad)\n            sm_scores_grad = scores_softmax * scores_grad\n            hes_scores_grad = sm_scores_grad - scores_softmax * torch.sum(sm_scores_grad, dim=(-2,-1), keepdim=True)\n            grad_hes_grad = (scores_grad * hes_scores_grad).reshape(num_images, num_sequences, -1).sum(dim=2).clamp(min=0)\n            grad_hes_grad = (sample_weight.reshape(sample_weight.shape[0], -1) * grad_hes_grad).sum(dim=0)\n\n            # Compute optimal step length\n            alpha_num = (weights_grad * weights_grad).sum(dim=(1,2,3))\n            alpha_den = (grad_hes_grad + (reg_weight + self.alpha_eps) * alpha_num).clamp(1e-8)\n            alpha = alpha_num / alpha_den\n\n            # Update filter\n            weights = weights - (step_length_factor * alpha.reshape(-1, 1, 1, 1)) * weights_grad\n\n            # Add the weight iterate\n            weight_iterates.append(weights)\n\n        if compute_losses:\n            scores = filter_layer.apply_filter(feat, weights)\n            losses.append(_compute_loss(scores, weights))\n\n        return weights, weight_iterates, losses\n
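\n\n# Illustrative usage sketch (added note, not part of the original module; the tensor shapes are\n# assumptions chosen to match the documented dims). The optimizer refines per-sequence target\n# filters from an initial estimate, given training features and boxes:\n#\n#     optimizer = PrDiMPSteepestDescentNewton(num_iter=5, feat_stride=16)\n#     weights = torch.randn(2, 512, 4, 4)      # (sequences, feat_dim, wH, wW)\n#     feat = torch.randn(3, 2, 512, 18, 18)    # (images_in_sequence, sequences, feat_dim, H, W)\n#     bb = torch.rand(3, 2, 4) * 288           # (images_in_sequence, sequences, 4), (x, y, w, h) in image coords\n#     weights, weight_iterates, losses = optimizer(weights, feat, bb)\n"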
  },
  {
    "path": "external/AR/ltr/models/target_classifier/residual_modules.py",
    "content": "import torch\nimport torch.nn as nn\nimport math\nimport ltr.models.layers.filter as filter_layer\nimport ltr.models.layers.activation as activation\nfrom ltr.models.layers.distance import DistanceMap\nfrom pytracking import TensorList\n\n\nclass LinearFilterLearnGen(nn.Module):\n    def __init__(self, feat_stride=16, init_filter_reg=1e-2, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0,\n                 mask_init_factor=4.0, score_act='bentpar', act_param=None, mask_act='sigmoid'):\n        super().__init__()\n\n        self.filter_reg = nn.Parameter(init_filter_reg * torch.ones(1))\n        self.feat_stride = feat_stride\n        self.distance_map = DistanceMap(num_dist_bins, bin_displacement)\n\n        # Distance coordinates\n        d = torch.arange(num_dist_bins, dtype=torch.float32).reshape(1,-1,1,1) * bin_displacement\n        if init_gauss_sigma == 0:\n            init_gauss = torch.zeros_like(d)\n            init_gauss[0,0,0,0] = 1\n        else:\n            init_gauss = torch.exp(-1/2 * (d / init_gauss_sigma)**2)\n\n        self.label_map_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)\n        self.label_map_predictor.weight.data = init_gauss - init_gauss.min()\n\n        mask_layers = [nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)]\n        if mask_act == 'sigmoid':\n            mask_layers.append(nn.Sigmoid())\n            init_bias = 0.0\n        elif mask_act == 'linear':\n            init_bias = 0.5\n        else:\n            raise ValueError('Unknown activation')\n        self.target_mask_predictor = nn.Sequential(*mask_layers)\n        self.target_mask_predictor[0].weight.data = mask_init_factor * torch.tanh(2.0 - d) + init_bias\n\n        self.spatial_weight_predictor = nn.Conv2d(num_dist_bins, 1, kernel_size=1, bias=False)\n        self.spatial_weight_predictor.weight.data.fill_(1.0)\n\n        if score_act == 'bentpar':\n            self.score_activation = activation.BentIdentPar(act_param)\n        elif score_act == 'relu':\n            self.score_activation = activation.LeakyReluPar()\n        else:\n            raise ValueError('Unknown activation')\n\n\n    def forward(self, meta_parameter: TensorList, feat, bb, sample_weight=None, is_distractor=None):\n        filter = meta_parameter[0]\n\n        num_images = feat.shape[0]\n        num_sequences = feat.shape[1] if feat.dim() == 5 else 1\n        filter_sz = (filter.shape[-2], filter.shape[-1])\n\n        # Compute scores\n        scores = filter_layer.apply_filter(feat, filter)\n\n        # Compute distance map\n        center = ((bb[..., :2] + bb[..., 2:] / 2) / self.feat_stride).reshape(-1, 2).flip((1,))\n        if is_distractor is not None:\n            center[is_distractor.reshape(-1), :] = 99999\n        dist_map = self.distance_map(center, scores.shape[-2:])\n\n        # Compute label map masks and weight\n        label_map = self.label_map_predictor(dist_map).reshape(num_images, num_sequences, dist_map.shape[-2], dist_map.shape[-1])\n        target_mask = self.target_mask_predictor(dist_map).reshape(num_images, num_sequences, dist_map.shape[-2], dist_map.shape[-1])\n        spatial_weight = self.spatial_weight_predictor(dist_map).reshape(num_images, num_sequences, dist_map.shape[-2], dist_map.shape[-1])\n\n        if sample_weight is None:\n            sample_weight = math.sqrt(1.0 / num_images) * spatial_weight\n        elif isinstance(sample_weight, torch.Tensor):\n            sample_weight = sample_weight.sqrt().reshape(-1, 1, 1, 1) * 
spatial_weight\n\n        # Compute data residual\n        scores_act = self.score_activation(scores, target_mask)\n        data_residual = sample_weight * (scores_act - label_map)\n\n        # Compute regularization residual. Put batch in second dimension\n        reg_residual = self.filter_reg*filter.reshape(1, num_sequences, -1)\n\n        return TensorList([data_residual, reg_residual])\n
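\n\n# Added note (illustrative): this residual module is designed to plug into a Gauss-Newton\n# steepest-descent meta-optimizer (see ltr.models.meta.steepestdescent, imported by\n# ltr/models/tracking/dimpnet.py), which minimizes the stacked\n# TensorList([data_residual, reg_residual]) with respect to the filter in meta_parameter.\n"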
  },
  {
    "path": "external/AR/ltr/models/tracking/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/ltr/models/tracking/dimpnet.py",
    "content": "import math\nimport torch\nimport torch.nn as nn\nfrom collections import OrderedDict\nfrom ltr.models.meta import steepestdescent\nimport ltr.models.target_classifier.linear_filter as target_clf\nimport ltr.models.target_classifier.features as clf_features\nimport ltr.models.target_classifier.initializer as clf_initializer\nimport ltr.models.target_classifier.optimizer as clf_optimizer\nimport ltr.models.bbreg as bbmodels\nimport ltr.models.backbone as backbones\nfrom ltr import model_constructor\n\n\nclass DiMPnet(nn.Module):\n    \"\"\"The DiMP network.\n    args:\n        feature_extractor:  Backbone feature extractor network. Must return a dict of feature maps\n        classifier:  Target classification module.\n        bb_regressor:  Bounding box regression module.\n        classification_layer:  Name of the backbone feature layer to use for classification.\n        bb_regressor_layer:  Names of the backbone layers to use for bounding box regression.\"\"\"\n\n    def __init__(self, feature_extractor, classifier, bb_regressor, classification_layer, bb_regressor_layer):\n        super().__init__()\n\n        self.feature_extractor = feature_extractor\n        self.classifier = classifier\n        self.bb_regressor = bb_regressor\n        self.classification_layer = [classification_layer] if isinstance(classification_layer, str) else classification_layer\n        self.bb_regressor_layer = bb_regressor_layer\n        self.output_layers = sorted(list(set(self.classification_layer + self.bb_regressor_layer)))\n\n\n    def forward(self, train_imgs, test_imgs, train_bb, test_proposals, *args, **kwargs):\n        \"\"\"Runs the DiMP network the way it is applied during training.\n        The forward function is ONLY used for training. Call the individual functions during tracking.\n        args:\n            train_imgs:  Train image samples (images, sequences, 3, H, W).\n            test_imgs:  Test image samples (images, sequences, 3, H, W).\n            trian_bb:  Target boxes (x,y,w,h) for the train images. 
Dims (images, sequences, 4).\n            test_proposals:  Proposal boxes to use for the IoUNet (bb_regressor) module.\n            *args, **kwargs:  These are passed to the classifier module.\n        returns:\n            test_scores:  Classification scores on the test samples.\n            iou_pred:  Predicted IoU scores for the test_proposals.\"\"\"\n\n        assert train_imgs.dim() == 5 and test_imgs.dim() == 5, 'Expect 5 dimensional inputs'\n\n        # Extract backbone features\n        train_feat = self.extract_backbone_features(train_imgs.reshape(-1, *train_imgs.shape[-3:]))\n        test_feat = self.extract_backbone_features(test_imgs.reshape(-1, *test_imgs.shape[-3:]))\n\n        # Classification features\n        train_feat_clf = self.get_backbone_clf_feat(train_feat)\n        test_feat_clf = self.get_backbone_clf_feat(test_feat)\n\n        # Run classifier module\n        target_scores = self.classifier(train_feat_clf, test_feat_clf, train_bb, *args, **kwargs)\n\n        # Get bb_regressor features\n        train_feat_iou = self.get_backbone_bbreg_feat(train_feat)\n        test_feat_iou = self.get_backbone_bbreg_feat(test_feat)\n\n        # Run the IoUNet module\n        iou_pred = self.bb_regressor(train_feat_iou, test_feat_iou, train_bb, test_proposals)\n\n        return target_scores, iou_pred\n\n    def get_backbone_clf_feat(self, backbone_feat):\n        feat = OrderedDict({l: backbone_feat[l] for l in self.classification_layer})\n        if len(self.classification_layer) == 1:\n            return feat[self.classification_layer[0]]\n        return feat\n\n    def get_backbone_bbreg_feat(self, backbone_feat):\n        return [backbone_feat[l] for l in self.bb_regressor_layer]\n\n    def extract_classification_feat(self, backbone_feat):\n        return self.classifier.extract_classification_feat(self.get_backbone_clf_feat(backbone_feat))\n\n    def extract_backbone_features(self, im, layers=None):\n        if layers is None:\n            layers = self.output_layers\n        return self.feature_extractor(im, layers)\n\n    def extract_features(self, im, layers=None):\n        if layers is None:\n            layers = self.bb_regressor_layer + ['classification']\n        if 'classification' not in layers:\n            return self.feature_extractor(im, layers)\n        backbone_layers = sorted(list(set([l for l in layers + self.classification_layer if l != 'classification'])))\n        all_feat = self.feature_extractor(im, backbone_layers)\n        all_feat['classification'] = self.extract_classification_feat(all_feat)\n        return OrderedDict({l: all_feat[l] for l in layers})\n\n\n\n@model_constructor\ndef dimpnet18(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,\n              classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=1,\n              clf_feat_norm=True, init_filter_norm=False, final_conv=True,\n              out_feature_dim=256, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0,\n              mask_init_factor=4.0, iou_input_dim=(256, 256), iou_inter_dim=(256, 256),\n              score_act='relu', act_param=None, target_mask_act='sigmoid',\n              detach_length=float('Inf'), frozen_backbone_layers=()):\n    # Backbone\n    backbone_net = backbones.resnet18(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)\n\n    # Feature normalization\n    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))\n\n    # Classifier features\n    
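# Added note: norm_scale above follows the DiMP feature-normalization scheme; it scales the\n    # L2-normalized classifier features so that the initial score magnitudes stay roughly\n    # independent of the filter size and feature dimensionality.\n    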
clf_feature_extractor = clf_features.residual_basic_block(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,\n                                                              final_conv=final_conv, norm_scale=norm_scale,\n                                                              out_dim=out_feature_dim)\n\n    # Initializer for the DiMP classifier\n    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,\n                                                          feature_dim=out_feature_dim)\n\n    # Optimizer for the DiMP classifier\n    optimizer = clf_optimizer.DiMPSteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,\n                                                    init_step_length=optim_init_step,\n                                                    init_filter_reg=optim_init_reg, init_gauss_sigma=init_gauss_sigma,\n                                                    num_dist_bins=num_dist_bins,\n                                                    bin_displacement=bin_displacement,\n                                                    mask_init_factor=mask_init_factor,\n                                                    score_act=score_act, act_param=act_param, mask_act=target_mask_act,\n                                                    detach_length=detach_length)\n\n    # The classifier module\n    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,\n                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)\n\n    # Bounding box regressor\n    bb_regressor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    # DiMP network\n    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,\n                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])\n    return net\n\n\n@model_constructor\ndef dimpnet50(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,\n              classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=0,\n              clf_feat_norm=True, init_filter_norm=False, final_conv=True,\n              out_feature_dim=512, init_gauss_sigma=1.0, num_dist_bins=5, bin_displacement=1.0,\n              mask_init_factor=4.0, iou_input_dim=(256, 256), iou_inter_dim=(256, 256),\n              score_act='relu', act_param=None, target_mask_act='sigmoid',\n              detach_length=float('Inf'), frozen_backbone_layers=()):\n\n    # Backbone\n    backbone_net = backbones.resnet50(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)\n\n    # Feature normalization\n    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))\n\n    # Classifier features\n    if classification_layer == 'layer3':\n        feature_dim = 256\n    elif classification_layer == 'layer4':\n        feature_dim = 512\n    else:\n        raise Exception\n\n    clf_feature_extractor = clf_features.residual_bottleneck(feature_dim=feature_dim,\n                                                             num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,\n                                                             final_conv=final_conv, norm_scale=norm_scale,\n                                                             out_dim=out_feature_dim)\n\n    # Initializer for the DiMP classifier\n    initializer = 
clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,\n                                                          feature_dim=out_feature_dim)\n\n    # Optimizer for the DiMP classifier\n    optimizer = clf_optimizer.DiMPSteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,\n                                                    init_step_length=optim_init_step,\n                                                    init_filter_reg=optim_init_reg, init_gauss_sigma=init_gauss_sigma,\n                                                    num_dist_bins=num_dist_bins,\n                                                    bin_displacement=bin_displacement,\n                                                    mask_init_factor=mask_init_factor,\n                                                    score_act=score_act, act_param=act_param, mask_act=target_mask_act,\n                                                    detach_length=detach_length)\n\n    # The classifier module\n    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,\n                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)\n\n    # Bounding box regressor\n    bb_regressor = bbmodels.AtomIoUNet(input_dim=(4*128,4*256), pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    # DiMP network\n    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,\n                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])\n    return net\n\n\n\n@model_constructor\ndef L2dimpnet18(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,\n              classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=1,\n              clf_feat_norm=True, init_filter_norm=False, final_conv=True,\n              out_feature_dim=256, iou_input_dim=(256, 256), iou_inter_dim=(256, 256),\n              detach_length=float('Inf'), hinge_threshold=-999, gauss_sigma=1.0, alpha_eps=0):\n    # Backbone\n    backbone_net = backbones.resnet18(pretrained=backbone_pretrained)\n\n    # Feature normalization\n    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))\n\n    # Classifier features\n    clf_feature_extractor = clf_features.residual_basic_block(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,\n                                                              final_conv=final_conv, norm_scale=norm_scale,\n                                                              out_dim=out_feature_dim)\n\n    # Initializer for the DiMP classifier\n    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,\n                                                          feature_dim=out_feature_dim)\n\n    # Optimizer for the DiMP classifier\n    optimizer = clf_optimizer.DiMPL2SteepestDescentGN(num_iter=optim_iter, feat_stride=feat_stride,\n                                                    init_step_length=optim_init_step, hinge_threshold=hinge_threshold,\n                                                    init_filter_reg=optim_init_reg, gauss_sigma=gauss_sigma,\n                                                    detach_length=detach_length, alpha_eps=alpha_eps)\n\n    # The classifier module\n    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,\n                                         
filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)\n\n    # Bounding box regressor\n    bb_regressor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    # DiMP network\n    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,\n                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])\n    return net\n\n\n@model_constructor\ndef klcedimpnet18(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,\n                  classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=1,\n                  clf_feat_norm=True, init_filter_norm=False, final_conv=True,\n                  out_feature_dim=256, gauss_sigma=1.0,\n                  iou_input_dim=(256, 256), iou_inter_dim=(256, 256),\n                  detach_length=float('Inf'), alpha_eps=0.0, train_feature_extractor=True,\n                  init_uni_weight=None, optim_min_reg=1e-3, init_initializer='default', normalize_label=False,\n                  label_shrink=0, softmax_reg=None, label_threshold=0, final_relu=False, init_pool_square=False,\n                  frozen_backbone_layers=()):\n\n    if not train_feature_extractor:\n        frozen_backbone_layers = 'all'\n\n    # Backbone\n    backbone_net = backbones.resnet18(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)\n\n    # Feature normalization\n    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))\n\n    # Classifier features\n    clf_feature_extractor = clf_features.residual_basic_block(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,\n                                                              final_conv=final_conv, norm_scale=norm_scale,\n                                                              out_dim=out_feature_dim, final_relu=final_relu)\n\n    # Initializer for the DiMP classifier\n    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,\n                                                          feature_dim=out_feature_dim, init_weights=init_initializer,\n                                                          pool_square=init_pool_square)\n\n    # Optimizer for the DiMP classifier\n    optimizer = clf_optimizer.PrDiMPSteepestDescentNewton(num_iter=optim_iter, feat_stride=feat_stride,\n                                                          init_step_length=optim_init_step,\n                                                          init_filter_reg=optim_init_reg, gauss_sigma=gauss_sigma,\n                                                          detach_length=detach_length, alpha_eps=alpha_eps,\n                                                          init_uni_weight=init_uni_weight,\n                                                          min_filter_reg=optim_min_reg, normalize_label=normalize_label,\n                                                          label_shrink=label_shrink, softmax_reg=softmax_reg,\n                                                          label_threshold=label_threshold)\n\n    # The classifier module\n    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,\n                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)\n\n    # Bounding box regressor\n    bb_regressor = bbmodels.AtomIoUNet(pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    # 
DiMP network\n    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,\n                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])\n    return net\n\n\n@model_constructor\ndef klcedimpnet50(filter_size=1, optim_iter=5, optim_init_step=1.0, optim_init_reg=0.01,\n                  classification_layer='layer3', feat_stride=16, backbone_pretrained=True, clf_feat_blocks=0,\n                  clf_feat_norm=True, init_filter_norm=False, final_conv=True,\n                  out_feature_dim=512, gauss_sigma=1.0,\n                  iou_input_dim=(256, 256), iou_inter_dim=(256, 256),\n                  detach_length=float('Inf'), alpha_eps=0.0, train_feature_extractor=True,\n                  init_uni_weight=None, optim_min_reg=1e-3, init_initializer='default', normalize_label=False,\n                  label_shrink=0, softmax_reg=None, label_threshold=0, final_relu=False, frozen_backbone_layers=()):\n\n    if not train_feature_extractor:\n        frozen_backbone_layers = 'all'\n\n    # Backbone\n    backbone_net = backbones.resnet50(pretrained=backbone_pretrained, frozen_layers=frozen_backbone_layers)\n\n    # Feature normalization\n    norm_scale = math.sqrt(1.0 / (out_feature_dim * filter_size * filter_size))\n\n    # Classifier features\n    clf_feature_extractor = clf_features.residual_bottleneck(num_blocks=clf_feat_blocks, l2norm=clf_feat_norm,\n                                                             final_conv=final_conv, norm_scale=norm_scale,\n                                                             out_dim=out_feature_dim, final_relu=final_relu)\n\n    # Initializer for the DiMP classifier\n    initializer = clf_initializer.FilterInitializerLinear(filter_size=filter_size, filter_norm=init_filter_norm,\n                                                          feature_dim=out_feature_dim, init_weights=init_initializer)\n\n    # Optimizer for the DiMP classifier\n    optimizer = clf_optimizer.PrDiMPSteepestDescentNewton(num_iter=optim_iter, feat_stride=feat_stride,\n                                                          init_step_length=optim_init_step,\n                                                          init_filter_reg=optim_init_reg, gauss_sigma=gauss_sigma,\n                                                          detach_length=detach_length, alpha_eps=alpha_eps,\n                                                          init_uni_weight=init_uni_weight,\n                                                          min_filter_reg=optim_min_reg, normalize_label=normalize_label,\n                                                          label_shrink=label_shrink, softmax_reg=softmax_reg,\n                                                          label_threshold=label_threshold)\n\n    # The classifier module\n    classifier = target_clf.LinearFilter(filter_size=filter_size, filter_initializer=initializer,\n                                         filter_optimizer=optimizer, feature_extractor=clf_feature_extractor)\n\n    # Bounding box regressor\n    bb_regressor = bbmodels.AtomIoUNet(input_dim=(4*128,4*256), pred_input_dim=iou_input_dim, pred_inter_dim=iou_inter_dim)\n\n    # DiMP network\n    net = DiMPnet(feature_extractor=backbone_net, classifier=classifier, bb_regressor=bb_regressor,\n                  classification_layer=classification_layer, bb_regressor_layer=['layer2', 'layer3'])\n    return net\n"
  },
  {
    "path": "external/AR/ltr/run_training.py",
    "content": "import os\nimport sys\nimport argparse\nimport importlib\nimport multiprocessing\nimport cv2 as cv\nimport torch.backends.cudnn\n\nenv_path = os.path.join(os.path.dirname(__file__), '..')\nif env_path not in sys.path:\n    sys.path.append(env_path)\n\nimport ltr.admin.settings as ws_settings\n\n\ndef run_training(train_module, train_name, cudnn_benchmark=True):\n    \"\"\"Run a train scripts in train_settings.\n    args:\n        train_module: Name of module in the \"train_settings/\" folder.\n        train_name: Name of the train settings file.\n        cudnn_benchmark: Use cudnn benchmark or not (default is True).\n    \"\"\"\n\n    # This is needed to avoid strange crashes related to opencv\n    cv.setNumThreads(0)\n\n    torch.backends.cudnn.benchmark = cudnn_benchmark\n\n    print('Training:  {}  {}'.format(train_module, train_name))\n\n    settings = ws_settings.Settings()\n    settings.module_name = train_module\n    settings.script_name = train_name\n    settings.project_path = 'ltr/{}/{}'.format(train_module, train_name)\n\n    expr_module = importlib.import_module('ltr.train_settings.{}.{}'.format(train_module, train_name))\n    expr_func = getattr(expr_module, 'run')\n\n    expr_func(settings)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Run a train scripts in train_settings.')\n    parser.add_argument('train_module', type=str, help='Name of module in the \"train_settings/\" folder.')\n    parser.add_argument('train_name', type=str, help='Name of the train settings file.')\n    parser.add_argument('--cudnn_benchmark', type=bool, default=True, help='Set cudnn benchmark on (1) or off (0) (default is on).')\n\n    args = parser.parse_args()\n\n    run_training(args.train_module, args.train_name, args.cudnn_benchmark)\n\n\nif __name__ == '__main__':\n    multiprocessing.set_start_method('spawn', force=True)\n    main()\n"
  },
  {
    "path": "external/AR/ltr/train_settings/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/ltr/train_settings/bbreg/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/ltr/train_settings/bbreg/atom.py",
    "content": "import torch.nn as nn\nimport torch.optim as optim\nfrom ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k\nfrom ltr.data import processing, sampler, LTRLoader\nimport ltr.models.bbreg.atom as atom_models\nfrom ltr import actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\n\n\ndef run(settings):\n    # Most common settings are assigned in the settings struct\n    settings.description = 'ATOM IoUNet with default settings, but additionally using GOT10k for training.'\n    settings.batch_size = 64\n    settings.num_workers = 8\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 0, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n    # The joint augmentation transform, that is applied to the pairs jointly\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    # The augmentation transform applied to the training set (individually to each image in the pair)\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The augmentation transform applied to the validation set (individually to each image in the pair)\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # Data processing to do on the training pairs\n    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}\n    data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      transform=transform_train,\n                                                      joint_transform=transform_joint)\n\n    # Data processing to do on the validation pairs\n    data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,\n                                                    output_sz=settings.output_sz,\n                                                    center_jitter_factor=settings.center_jitter_factor,\n                                                    scale_jitter_factor=settings.scale_jitter_factor,\n                                                    mode='sequence',\n                                                    proposal_params=proposal_params,\n   
                                                 transform=transform_val,\n                                                    joint_transform=transform_joint)\n\n    # The sampler for training\n    dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],\n                                samples_per_epoch=1000*settings.batch_size, max_gap=50, processing=data_processing_train)\n\n    # The loader for training\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # The sampler for validation\n    dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50,\n                                      processing=data_processing_val)\n\n    # The loader for validation\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = atom_models.atom_resnet18(backbone_pretrained=True)\n    objective = nn.MSELoss()\n    actor = actors.AtomActor(net=net, objective=objective)\n\n    # Optimizer\n    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    # Create trainer\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    # Run training (set fail_safe=False if you are debugging)\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "external/AR/ltr/train_settings/bbreg/atom_gmm_sampl.py",
    "content": "import torch.nn as nn\nimport torch.optim as optim\nfrom ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k\nfrom ltr.data import processing, sampler, LTRLoader\nimport ltr.models.bbreg.atom as atom_models\nfrom ltr import actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\n\n\ndef run(settings):\n    # Most common settings are assigned in the settings struct\n    settings.description = 'ATOM IoUNet using the baseline ATOM* settings in [https://arxiv.org/abs/1909.12297].' \\\n                           'Unlike standard ATOM, it employs the GMM-based proposal sampling and minor parameter changes.'\n    settings.batch_size = 64\n    settings.num_workers = 8\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 0, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n    # The joint augmentation transform, that is applied to the pairs jointly\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    # The augmentation transform applied to the training set (individually to each image in the pair)\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The augmentation transform applied to the validation set (individually to each image in the pair)\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # Data processing to do on the training pairs\n    proposal_params = {'proposal_method': 'gmm', 'boxes_per_frame': 128, 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}\n    data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      transform=transform_train,\n                                                      joint_transform=transform_joint)\n\n    # Data processing to do on the validation pairs\n    data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,\n                                                    output_sz=settings.output_sz,\n                                                    center_jitter_factor=settings.center_jitter_factor,\n                                                    scale_jitter_factor=settings.scale_jitter_factor,\n              
                                      mode='sequence',\n                                                    proposal_params=proposal_params,\n                                                    transform=transform_val,\n                                                    joint_transform=transform_joint)\n\n    # The sampler for training\n    dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],\n                                samples_per_epoch=1000*settings.batch_size, max_gap=200, processing=data_processing_train)\n\n    # The loader for training\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # The sampler for validation\n    dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=200,\n                                      processing=data_processing_val)\n\n    # The loader for validation\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = atom_models.atom_resnet18(backbone_pretrained=True)\n    objective = nn.MSELoss()\n    actor = actors.AtomActor(net=net, objective=objective)\n\n    # Optimizer\n    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    # Create trainer\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    # Run training (set fail_safe=False if you are debugging)\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "external/AR/ltr/train_settings/bbreg/atom_paper.py",
    "content": "import torch.nn as nn\nimport torch.optim as optim\nfrom ltr.dataset import Lasot, TrackingNet, MSCOCOSeq\nfrom ltr.data import processing, sampler, LTRLoader\nimport ltr.models.bbreg.atom as atom_models\nfrom ltr import actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\n\n\ndef run(settings):\n    # Most common settings are assigned in the settings struct\n    settings.description = 'ATOM IoUNet with default settings according to the paper.'\n    settings.batch_size = 64\n    settings.num_workers = 8\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 0, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(11)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    trackingnet_val = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(11,12)))\n\n    # The joint augmentation transform, that is applied to the pairs jointly\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    # The augmentation transform applied to the training set (individually to each image in the pair)\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The augmentation transform applied to the validation set (individually to each image in the pair)\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # Data processing to do on the training pairs\n    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 16, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}\n    data_processing_train = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      transform=transform_train,\n                                                      joint_transform=transform_joint)\n\n    # Data processing to do on the validation pairs\n    data_processing_val = processing.ATOMProcessing(search_area_factor=settings.search_area_factor,\n                                                    output_sz=settings.output_sz,\n                                                    center_jitter_factor=settings.center_jitter_factor,\n                                                    scale_jitter_factor=settings.scale_jitter_factor,\n                                                    mode='sequence',\n                                                    proposal_params=proposal_params,\n                                                    
transform=transform_val,\n                                                    joint_transform=transform_joint)\n\n    # The sampler for training\n    dataset_train = sampler.ATOMSampler([lasot_train, trackingnet_train, coco_train], [1,1,1],\n                                samples_per_epoch=1000*settings.batch_size, max_gap=50, processing=data_processing_train)\n\n    # The loader for training\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # The sampler for validation\n    dataset_val = sampler.ATOMSampler([trackingnet_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=50,\n                              processing=data_processing_val)\n\n    # The loader for validation\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = atom_models.atom_resnet18(backbone_pretrained=True)\n    objective = nn.MSELoss()\n    actor = actors.AtomActor(net=net, objective=objective)\n\n    # Optimizer\n    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    # Create trainer\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    # Run training (set fail_safe=False if you are debugging)\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "external/AR/ltr/train_settings/bbreg/atom_prob_ml.py",
    "content": "import torch.optim as optim\nfrom ltr.dataset import Lasot, TrackingNet, MSCOCOSeq, Got10k\nfrom ltr.data import processing, sampler, LTRLoader\nimport ltr.models.bbreg.atom as atom_models\nimport ltr.models.loss.kl_regression as klreg_losses\nimport ltr.actors.bbreg as bbreg_actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\n\n\ndef run(settings):\n    # Most common settings are assigned in the settings struct\n    settings.description = 'ATOM using the probabilistic maximum likelihood trained regression model for bounding-box' \\\n                           'regression presented in [https://arxiv.org/abs/1909.12297].'\n    settings.batch_size = 64\n    settings.num_workers = 8\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 0, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0, 'test': 0.5}\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n    # The joint augmentation transform, that is applied to the pairs jointly\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    # The augmentation transform applied to the training set (individually to each image in the pair)\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The augmentation transform applied to the validation set (individually to each image in the pair)\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # Data processing to do on the training pairs\n    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0, 0), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)],\n                       'add_mean_box': True}\n    data_processing_train = processing.KLBBregProcessing(search_area_factor=settings.search_area_factor,\n                                                         output_sz=settings.output_sz,\n                                                         center_jitter_factor=settings.center_jitter_factor,\n                                                         scale_jitter_factor=settings.scale_jitter_factor,\n                                                         mode='sequence',\n                                                         proposal_params=proposal_params,\n                                                         transform=transform_train,\n                                                         joint_transform=transform_joint)\n\n    # Data processing to do on the validation pairs\n    data_processing_val = processing.KLBBregProcessing(search_area_factor=settings.search_area_factor,\n                                                       output_sz=settings.output_sz,\n                                                       center_jitter_factor=settings.center_jitter_factor,\n                         
                              scale_jitter_factor=settings.scale_jitter_factor,\n                                                       mode='sequence',\n                                                       proposal_params=proposal_params,\n                                                       transform=transform_val,\n                                                       joint_transform=transform_joint)\n\n    # The sampler for training\n    dataset_train = sampler.ATOMSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],\n                                samples_per_epoch=1000*settings.batch_size, max_gap=200, processing=data_processing_train)\n\n    # The loader for training\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # The sampler for validation\n    dataset_val = sampler.ATOMSampler([got10k_val], [1], samples_per_epoch=500*settings.batch_size, max_gap=200,\n                                      processing=data_processing_val)\n\n    # The loader for validation\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = atom_models.atom_resnet18(backbone_pretrained=True)\n    objective = klreg_losses.MLRegression()\n    actor = bbreg_actors.AtomBBKLActor(net=net, objective=objective)\n\n    # Optimizer\n    optimizer = optim.Adam(actor.net.bb_regressor.parameters(), lr=1e-3)\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    # Create trainer\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    # Run training (set fail_safe=False if you are debugging)\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
  {
    "path": "external/AR/ltr/train_settings/dimp/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/ltr/train_settings/dimp/dimp18.py",
    "content": "import torch.nn as nn\nimport torch.optim as optim\nfrom ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq\nfrom ltr.data import processing, sampler, LTRLoader\nfrom ltr.models.tracking import dimpnet\nimport ltr.models.loss as ltr_losses\nfrom ltr import actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\nfrom ltr import MultiGPU\n\n\ndef run(settings):\n    settings.description = 'Default train settings for DiMP with ResNet18 as backbone.'\n    settings.batch_size = 26\n    settings.num_workers = 8\n    settings.multi_gpu = False\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.output_sigma_factor = 1/4\n    settings.target_filter_sz = 4\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 3, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}\n    settings.hinge_threshold = 0.05\n    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/init_loss', 'ClfTrain/test_loss']\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n\n    # Data transform\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The tracking pairs processing module\n    output_sigma = settings.output_sigma_factor / settings.search_area_factor\n    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 8, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}\n    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}\n    data_processing_train = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      label_function_params=label_params,\n                                                      transform=transform_train,\n                                                      joint_transform=transform_joint)\n\n    data_processing_val = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                    output_sz=settings.output_sz,\n                                                    center_jitter_factor=settings.center_jitter_factor,\n                                                    
scale_jitter_factor=settings.scale_jitter_factor,\n                                                    mode='sequence',\n                                                    proposal_params=proposal_params,\n                                                    label_function_params=label_params,\n                                                    transform=transform_val,\n                                                    joint_transform=transform_joint)\n\n    # Train sampler and loader\n    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],\n                                        samples_per_epoch=26000, max_gap=30, num_test_frames=3, num_train_frames=3,\n                                        processing=data_processing_train)\n\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # Validation samplers and loaders\n    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=30,\n                                      num_test_frames=3, num_train_frames=3,\n                                      processing=data_processing_val)\n\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = dimpnet.dimpnet18(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,\n                            clf_feat_norm=True, final_conv=True, optim_init_step=0.9, optim_init_reg=0.1,\n                            init_gauss_sigma=output_sigma * settings.feature_sz, num_dist_bins=100,\n                            bin_displacement=0.1, mask_init_factor=3.0, target_mask_act='sigmoid', score_act='relu')\n\n    # Wrap the network for multi GPU training\n    if settings.multi_gpu:\n        net = MultiGPU(net, dim=1)\n\n    objective = {'iou': nn.MSELoss(), 'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)}\n\n    loss_weight = {'iou': 1, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}\n\n    actor = actors.DiMPActor(net=net, objective=objective, loss_weight=loss_weight)\n\n    # Optimizer\n    optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-5},\n                            {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 5e-4},\n                            {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-5},\n                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},\n                            {'params': actor.net.feature_extractor.parameters()}],\n                           lr=2e-4)\n\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
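The optimizers in these train-settings files rely on PyTorch parameter groups to give each sub-module its own learning rate. A minimal, self-contained sketch of that pattern (the module names are toy stand-ins, not the real DiMP network):

```python
import torch.nn as nn
import torch.optim as optim

# Toy stand-ins for the classifier / bb_regressor / backbone split used
# in the optimizer above; the modules are illustrative only.
net = nn.ModuleDict({
    'classifier': nn.Linear(8, 8),
    'bb_regressor': nn.Linear(8, 4),
    'feature_extractor': nn.Linear(16, 8),
})

# Parameter groups: a group with an explicit 'lr' uses it; a group
# without one falls back to the default lr passed to the optimizer.
optimizer = optim.Adam([
    {'params': net['classifier'].parameters(), 'lr': 5e-5},
    {'params': net['bb_regressor'].parameters(), 'lr': 1e-3},
    {'params': net['feature_extractor'].parameters()},  # default lr
], lr=2e-4)

for group in optimizer.param_groups:
    print(group['lr'])  # 5e-05, 0.001, 0.0002
```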
  {
    "path": "external/AR/ltr/train_settings/dimp/dimp50.py",
    "content": "import torch.nn as nn\nimport torch.optim as optim\nfrom ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq\nfrom ltr.data import processing, sampler, LTRLoader\nfrom ltr.models.tracking import dimpnet\nimport ltr.models.loss as ltr_losses\nfrom ltr import actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\nfrom ltr import MultiGPU\n\n\ndef run(settings):\n    settings.description = 'Default train settings for DiMP with ResNet50 as backbone.'\n    settings.batch_size = 10\n    settings.num_workers = 8\n    settings.multi_gpu = False\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.output_sigma_factor = 1/4\n    settings.target_filter_sz = 4\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 3, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}\n    settings.hinge_threshold = 0.05\n    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/clf_ce', 'ClfTrain/test_loss']\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n\n    # Data transform\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The tracking pairs processing module\n    output_sigma = settings.output_sigma_factor / settings.search_area_factor\n    proposal_params = {'min_iou': 0.1, 'boxes_per_frame': 8, 'sigma_factor': [0.01, 0.05, 0.1, 0.2, 0.3]}\n    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}\n    data_processing_train = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      label_function_params=label_params,\n                                                      transform=transform_train,\n                                                      joint_transform=transform_joint)\n\n    data_processing_val = processing.DiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                    output_sz=settings.output_sz,\n                                                    center_jitter_factor=settings.center_jitter_factor,\n                                                    
scale_jitter_factor=settings.scale_jitter_factor,\n                                                    mode='sequence',\n                                                    proposal_params=proposal_params,\n                                                    label_function_params=label_params,\n                                                    transform=transform_val,\n                                                    joint_transform=transform_joint)\n\n    # Train sampler and loader\n    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],\n                                        samples_per_epoch=26000, max_gap=30, num_test_frames=3, num_train_frames=3,\n                                        processing=data_processing_train)\n\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # Validation samplers and loaders\n    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=30,\n                                      num_test_frames=3, num_train_frames=3,\n                                      processing=data_processing_val)\n\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = dimpnet.dimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,\n                            clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512,\n                            optim_init_step=0.9, optim_init_reg=0.1,\n                            init_gauss_sigma=output_sigma * settings.feature_sz, num_dist_bins=100,\n                            bin_displacement=0.1, mask_init_factor=3.0, target_mask_act='sigmoid', score_act='relu')\n\n    # Wrap the network for multi GPU training\n    if settings.multi_gpu:\n        net = MultiGPU(net, dim=1)\n\n    objective = {'iou': nn.MSELoss(), 'test_clf': ltr_losses.LBHinge(threshold=settings.hinge_threshold)}\n\n    loss_weight = {'iou': 1, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}\n\n    actor = actors.DiMPActor(net=net, objective=objective, loss_weight=loss_weight)\n\n    # Optimizer\n    optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-5},\n                            {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 5e-4},\n                            {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-5},\n                            {'params': actor.net.bb_regressor.parameters()},\n                            {'params': actor.net.feature_extractor.parameters(), 'lr': 2e-5}],\n                           lr=2e-4)\n\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
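The `[0.25, 1, 1, 1]` list passed to `DiMPSampler` weights how often each training dataset is drawn. A hypothetical sketch of just that weighting step (the real sampler additionally picks train/test frames within the chosen sequence):

```python
import random

# Illustrative stand-in for the weighted dataset choice made with
# p_datasets=[0.25, 1, 1, 1]; dataset names are labels only.
datasets = ['lasot', 'got10k', 'trackingnet', 'coco']
weights = [0.25, 1, 1, 1]

# random.choices normalizes the weights, so LaSOT is drawn with
# probability 0.25 / 3.25 ~ 7.7% and each other set with ~30.8%.
counts = {name: 0 for name in datasets}
for _ in range(10000):
    name = random.choices(datasets, weights=weights, k=1)[0]
    counts[name] += 1
print(counts)
```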
  {
    "path": "external/AR/ltr/train_settings/dimp/prdimp18.py",
    "content": "import torch.optim as optim\nfrom ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq\nfrom ltr.data import processing, sampler, LTRLoader\nfrom ltr.models.tracking import dimpnet\nimport ltr.models.loss as ltr_losses\nimport ltr.models.loss.kl_regression as klreg_losses\nimport ltr.actors.tracking as tracking_actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\nfrom ltr import MultiGPU\n\n\ndef run(settings):\n    settings.description = 'Default train settings for PrDiMP with ResNet18 as backbone.'\n    settings.batch_size = 26\n    settings.num_workers = 8\n    settings.multi_gpu = False\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.output_sigma_factor = 1/4\n    settings.target_filter_sz = 4\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 3, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}\n    settings.hinge_threshold = 0.05\n    settings.print_stats = ['Loss/total', 'Loss/bb_ce', 'ClfTrain/clf_ce']\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n\n    # Data transform\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The tracking pairs processing module\n    output_sigma = settings.output_sigma_factor / settings.search_area_factor\n    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}\n    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}\n    label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz, 'normalize': True}\n\n    data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                        output_sz=settings.output_sz,\n                                                        center_jitter_factor=settings.center_jitter_factor,\n                                                        scale_jitter_factor=settings.scale_jitter_factor,\n                                                        mode='sequence',\n                                                        proposal_params=proposal_params,\n                                                        label_function_params=label_params,\n                                                        label_density_params=label_density_params,\n                                                        transform=transform_train,\n                                                        joint_transform=transform_joint)\n\n    data_processing_val = 
processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      label_function_params=label_params,\n                                                      label_density_params=label_density_params,\n                                                      transform=transform_val,\n                                                      joint_transform=transform_joint)\n\n    # Train sampler and loader\n    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],\n                                        samples_per_epoch=26000, max_gap=200, num_test_frames=3, num_train_frames=3,\n                                        processing=data_processing_train)\n\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # Validation samplers and loaders\n    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=200,\n                                      num_test_frames=3, num_train_frames=3,\n                                      processing=data_processing_val)\n\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = dimpnet.klcedimpnet18(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,\n                            clf_feat_norm=True, final_conv=True, optim_init_step=1.0, optim_init_reg=0.05, optim_min_reg=0.05,\n                            gauss_sigma=output_sigma * settings.feature_sz, alpha_eps=0.05, normalize_label=True, init_initializer='zero')\n\n    # Wrap the network for multi GPU training\n    if settings.multi_gpu:\n        net = MultiGPU(net, dim=1)\n\n    objective = {'bb_ce': klreg_losses.KLRegression(), 'clf_ce': klreg_losses.KLRegressionGrid()}\n\n    loss_weight = {'bb_ce': 0.0025, 'clf_ce': 0.25, 'clf_ce_init': 0.25, 'clf_ce_iter': 1.0}\n\n    actor = tracking_actors.KLDiMPActor(net=net, objective=objective, loss_weight=loss_weight)\n\n    # Optimizer\n    optimizer = optim.Adam([{'params': actor.net.classifier.parameters(), 'lr': 1e-3},\n                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},\n                            {'params': actor.net.feature_extractor.parameters()}],\n                           lr=2e-4)\n\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
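Note how `output_sigma` is expressed as a fraction of the search area, so the effective Gaussian width on the feature grid is `output_sigma * feature_sz` cells, which is exactly what `gauss_sigma` receives above. A minimal sketch of such a Gaussian score label for a centered target on the 18x18 grid (an illustration, not the library's own label generator):

```python
import torch

# sigma_factor = (1/4) / 5.0 in search-area units, i.e. a standard
# deviation of 0.05 * 18 = 0.9 cells on the 18x18 feature grid.
feature_sz = 18
sigma = (0.25 / 5.0) * feature_sz

def gaussian_label(center, sz=feature_sz, sigma=sigma):
    # center = (row, col) of the target in feature-grid coordinates
    ks = torch.arange(sz, dtype=torch.float32)
    d_row = (ks - center[0]).view(-1, 1)
    d_col = (ks - center[1]).view(1, -1)
    return torch.exp(-0.5 * (d_row ** 2 + d_col ** 2) / sigma ** 2)

label = gaussian_label((9.0, 9.0))
print(label.shape, label.max().item())  # torch.Size([18, 18]) 1.0
```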
  {
    "path": "external/AR/ltr/train_settings/dimp/prdimp50.py",
    "content": "import torch.optim as optim\nfrom ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq\nfrom ltr.data import processing, sampler, LTRLoader\nfrom ltr.models.tracking import dimpnet\nimport ltr.models.loss as ltr_losses\nimport ltr.models.loss.kl_regression as klreg_losses\nimport ltr.actors.tracking as tracking_actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\nfrom ltr import MultiGPU\n\n\ndef run(settings):\n    settings.description = 'Default train settings for PrDiMP with ResNet50 as backbone.'\n    settings.batch_size = 10\n    settings.num_workers = 8\n    settings.multi_gpu = False\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 5.0\n    settings.output_sigma_factor = 1/4\n    settings.target_filter_sz = 4\n    settings.feature_sz = 18\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 3, 'test': 4.5}\n    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}\n    settings.hinge_threshold = 0.05\n    settings.print_stats = ['Loss/total', 'Loss/bb_ce', 'ClfTrain/clf_ce']\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n\n    # Data transform\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05))\n\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The tracking pairs processing module\n    output_sigma = settings.output_sigma_factor / settings.search_area_factor\n    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}\n    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}\n    label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz, 'normalize': True}\n\n    data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                        output_sz=settings.output_sz,\n                                                        center_jitter_factor=settings.center_jitter_factor,\n                                                        scale_jitter_factor=settings.scale_jitter_factor,\n                                                        mode='sequence',\n                                                        proposal_params=proposal_params,\n                                                        label_function_params=label_params,\n                                                        label_density_params=label_density_params,\n                                                        transform=transform_train,\n                                                        joint_transform=transform_joint)\n\n    data_processing_val = 
processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      label_function_params=label_params,\n                                                      label_density_params=label_density_params,\n                                                      transform=transform_val,\n                                                      joint_transform=transform_joint)\n\n    # Train sampler and loader\n    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [0.25,1,1,1],\n                                        samples_per_epoch=26000, max_gap=200, num_test_frames=3, num_train_frames=3,\n                                        processing=data_processing_train)\n\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # Validation samplers and loaders\n    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=5000, max_gap=200,\n                                      num_test_frames=3, num_train_frames=3,\n                                      processing=data_processing_val)\n\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = dimpnet.klcedimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,\n                                clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512,\n                                optim_init_step=1.0, optim_init_reg=0.05, optim_min_reg=0.05,\n                                gauss_sigma=output_sigma * settings.feature_sz, alpha_eps=0.05, normalize_label=True, init_initializer='zero')\n\n    # Wrap the network for multi GPU training\n    if settings.multi_gpu:\n        net = MultiGPU(net, dim=1)\n\n    objective = {'bb_ce': klreg_losses.KLRegression(), 'clf_ce': klreg_losses.KLRegressionGrid()}\n\n    loss_weight = {'bb_ce': 0.0025, 'clf_ce': 0.25, 'clf_ce_init': 0.25, 'clf_ce_iter': 1.0}\n\n    actor = tracking_actors.KLDiMPActor(net=net, objective=objective, loss_weight=loss_weight)\n\n    # Optimizer\n    optimizer = optim.Adam([{'params': actor.net.classifier.parameters(), 'lr': 1e-3},\n                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},\n                            {'params': actor.net.feature_extractor.parameters(), 'lr': 2e-5}],\n                           lr=2e-4)\n\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
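All of these configs share the same `StepLR` schedule. A short sketch of how the default learning rate decays over the 50-epoch run:

```python
import torch
import torch.optim as optim

# StepLR with step_size=15, gamma=0.2: starting from lr=2e-4, the rate
# is multiplied by 0.2 after every 15 epochs.
param = torch.nn.Parameter(torch.zeros(1))
opt = optim.Adam([param], lr=2e-4)
sched = optim.lr_scheduler.StepLR(opt, step_size=15, gamma=0.2)

for epoch in range(1, 51):
    opt.step()          # a real epoch of training would happen here
    sched.step()
    if epoch in (15, 30, 45):
        print(epoch, opt.param_groups[0]['lr'])
# approx: 15 -> 4e-05, 30 -> 8e-06, 45 -> 1.6e-06
```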
  {
    "path": "external/AR/ltr/train_settings/dimp/super_dimp.py",
    "content": "import torch.optim as optim\nfrom ltr.dataset import Lasot, Got10k, TrackingNet, MSCOCOSeq\nfrom ltr.data import processing, sampler, LTRLoader\nfrom ltr.models.tracking import dimpnet\nimport ltr.models.loss as ltr_losses\nimport ltr.models.loss.kl_regression as klreg_losses\nimport ltr.actors.tracking as tracking_actors\nfrom ltr.trainers import LTRTrainer\nimport ltr.data.transforms as tfm\nfrom ltr import MultiGPU\n\n\ndef run(settings):\n    settings.description = 'SuperDiMP: Combines the DiMP classifier with the PrDiMP bounding box regressor and better' \\\n                           'training settings (larger batch size, inside_major cropping, and flipping augmentation.' \\\n                           'Gives results significantly better than both DiMP-50 and PrDiMP-50.'\n    settings.batch_size = 20\n    settings.num_workers = 8\n    settings.multi_gpu = False\n    settings.print_interval = 1\n    settings.normalize_mean = [0.485, 0.456, 0.406]\n    settings.normalize_std = [0.229, 0.224, 0.225]\n    settings.search_area_factor = 6.0\n    settings.output_sigma_factor = 1/4\n    settings.target_filter_sz = 4\n    settings.feature_sz = 22\n    settings.output_sz = settings.feature_sz * 16\n    settings.center_jitter_factor = {'train': 3, 'test': 5.5}\n    settings.scale_jitter_factor = {'train': 0.25, 'test': 0.5}\n    settings.hinge_threshold = 0.05\n    # settings.print_stats = ['Loss/total', 'Loss/iou', 'ClfTrain/init_loss', 'ClfTrain/test_loss']\n\n    # Train datasets\n    lasot_train = Lasot(settings.env.lasot_dir, split='train')\n    got10k_train = Got10k(settings.env.got10k_dir, split='vottrain')\n    trackingnet_train = TrackingNet(settings.env.trackingnet_dir, set_ids=list(range(4)))\n    coco_train = MSCOCOSeq(settings.env.coco_dir)\n\n    # Validation datasets\n    got10k_val = Got10k(settings.env.got10k_dir, split='votval')\n\n\n    # Data transform\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05),\n                                    tfm.RandomHorizontalFlip(probability=0.5))\n\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.RandomHorizontalFlip(probability=0.5),\n                                    tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=settings.normalize_mean, std=settings.normalize_std))\n\n    # The tracking pairs processing module\n    output_sigma = settings.output_sigma_factor / settings.search_area_factor\n    proposal_params = {'boxes_per_frame': 128, 'gt_sigma': (0.05, 0.05), 'proposal_sigma': [(0.05, 0.05), (0.5, 0.5)]}\n    label_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}\n    label_density_params = {'feature_sz': settings.feature_sz, 'sigma_factor': output_sigma, 'kernel_sz': settings.target_filter_sz}\n\n    data_processing_train = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                        output_sz=settings.output_sz,\n                                                        center_jitter_factor=settings.center_jitter_factor,\n                                                        scale_jitter_factor=settings.scale_jitter_factor,\n                                                        crop_type='inside_major',\n                                                        
max_scale_change=1.5,\n                                                        mode='sequence',\n                                                        proposal_params=proposal_params,\n                                                        label_function_params=label_params,\n                                                        label_density_params=label_density_params,\n                                                        transform=transform_train,\n                                                        joint_transform=transform_joint)\n\n    data_processing_val = processing.KLDiMPProcessing(search_area_factor=settings.search_area_factor,\n                                                      output_sz=settings.output_sz,\n                                                      center_jitter_factor=settings.center_jitter_factor,\n                                                      scale_jitter_factor=settings.scale_jitter_factor,\n                                                      crop_type='inside_major',\n                                                      max_scale_change=1.5,\n                                                      mode='sequence',\n                                                      proposal_params=proposal_params,\n                                                      label_function_params=label_params,\n                                                      label_density_params=label_density_params,\n                                                      transform=transform_val,\n                                                      joint_transform=transform_joint)\n\n    # Train sampler and loader\n    dataset_train = sampler.DiMPSampler([lasot_train, got10k_train, trackingnet_train, coco_train], [1,1,1,1],\n                                        samples_per_epoch=40000, max_gap=200, num_test_frames=3, num_train_frames=3,\n                                        processing=data_processing_train)\n\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                             shuffle=True, drop_last=True, stack_dim=1)\n\n    # Validation samplers and loaders\n    dataset_val = sampler.DiMPSampler([got10k_val], [1], samples_per_epoch=10000, max_gap=200,\n                                      num_test_frames=3, num_train_frames=3,\n                                      processing=data_processing_val)\n\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=settings.batch_size, num_workers=settings.num_workers,\n                           shuffle=False, drop_last=True, epoch_interval=5, stack_dim=1)\n\n    # Create network and actor\n    net = dimpnet.dimpnet50(filter_size=settings.target_filter_sz, backbone_pretrained=True, optim_iter=5,\n                            clf_feat_norm=True, clf_feat_blocks=0, final_conv=True, out_feature_dim=512,\n                            optim_init_step=0.9, optim_init_reg=0.1,\n                            init_gauss_sigma=output_sigma * settings.feature_sz, num_dist_bins=100,\n                            bin_displacement=0.1, mask_init_factor=3.0, target_mask_act='sigmoid', score_act='relu',\n                            frozen_backbone_layers=['conv1', 'bn1', 'layer1', 'layer2'])\n\n    # Wrap the network for multi GPU training\n    if settings.multi_gpu:\n        net = MultiGPU(net, dim=1)\n\n    objective = {'bb_ce': klreg_losses.KLRegression(), 'test_clf': 
ltr_losses.LBHinge(threshold=settings.hinge_threshold)}\n\n    loss_weight = {'bb_ce': 0.01, 'test_clf': 100, 'test_init_clf': 100, 'test_iter_clf': 400}\n\n    actor = tracking_actors.KLDiMPActor(net=net, objective=objective, loss_weight=loss_weight)\n\n    # Optimizer\n    optimizer = optim.Adam([{'params': actor.net.classifier.filter_initializer.parameters(), 'lr': 5e-5},\n                            {'params': actor.net.classifier.filter_optimizer.parameters(), 'lr': 5e-4},\n                            {'params': actor.net.classifier.feature_extractor.parameters(), 'lr': 5e-5},\n                            {'params': actor.net.bb_regressor.parameters(), 'lr': 1e-3},\n                            {'params': actor.net.feature_extractor.layer3.parameters(), 'lr': 2e-5}],\n                           lr=2e-4)\n\n    lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.2)\n\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler)\n\n    trainer.train(50, load_latest=True, fail_safe=True)\n"
  },
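`frozen_backbone_layers` names the ResNet stages whose weights stay fixed during SuperDiMP training. A sketch of the effect using a plain torchvision ResNet-50 as a stand-in for the DiMP backbone (the real implementation typically also keeps frozen BatchNorm layers in eval mode):

```python
import torchvision.models as models

# Disable gradients for the named ResNet-50 children so only
# layer3/layer4 (plus the tracking heads) would keep training.
resnet = models.resnet50()
frozen = ['conv1', 'bn1', 'layer1', 'layer2']

for name, module in resnet.named_children():
    if name in frozen:
        for p in module.parameters():
            p.requires_grad_(False)

trainable = sum(p.numel() for p in resnet.parameters() if p.requires_grad)
total = sum(p.numel() for p in resnet.parameters())
print('trainable: {:.1f}M of {:.1f}M params'.format(trainable / 1e6, total / 1e6))
```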
  {
    "path": "external/AR/ltr/trainers/__init__.py",
    "content": "from .base_trainer import BaseTrainer\nfrom .ltr_trainer import LTRTrainer"
  },
  {
    "path": "external/AR/ltr/trainers/base_trainer.py",
    "content": "import os\nimport glob\nimport torch\nimport traceback\nfrom ltr.admin import loading, multigpu\n\n\nclass BaseTrainer:\n    \"\"\"Base trainer class. Contains functions for training and saving/loading chackpoints.\n    Trainer classes should inherit from this one and overload the train_epoch function.\"\"\"\n\n    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):\n        \"\"\"\n        args:\n            actor - The actor for training the network\n            loaders - list of dataset loaders, e.g. [train_loader, val_loader]. In each epoch, the trainer runs one\n                        epoch for each loader.\n            optimizer - The optimizer used for training, e.g. Adam\n            settings - Training settings\n            lr_scheduler - Learning rate scheduler\n        \"\"\"\n        self.actor = actor\n        self.optimizer = optimizer\n        self.lr_scheduler = lr_scheduler\n        self.loaders = loaders\n\n        self.update_settings(settings)\n\n        self.epoch = 0\n        self.stats = {}\n\n        self.device = getattr(settings, 'device', None)\n        if self.device is None:\n            self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() and settings.use_gpu else \"cpu\")\n\n        self.actor.to(self.device)\n\n    def update_settings(self, settings=None):\n        \"\"\"Updates the trainer settings. Must be called to update internal settings.\"\"\"\n        if settings is not None:\n            self.settings = settings\n\n        if self.settings.env.workspace_dir is not None:\n            self.settings.env.workspace_dir = os.path.expanduser(self.settings.env.workspace_dir)\n            self._checkpoint_dir = os.path.join(self.settings.env.workspace_dir, 'checkpoints')\n            if not os.path.exists(self._checkpoint_dir):\n                os.makedirs(self._checkpoint_dir)\n        else:\n            self._checkpoint_dir = None\n\n\n    def train(self, max_epochs, load_latest=False, fail_safe=True):\n        \"\"\"Do training for the given number of epochs.\n        args:\n            max_epochs - Max number of training epochs,\n            load_latest - Bool indicating whether to resume from latest epoch.\n            fail_safe - Bool indicating whether the training to automatically restart in case of any crashes.\n        \"\"\"\n\n        epoch = -1\n        num_tries = 10\n        for i in range(num_tries):\n            try:\n                if load_latest:\n                    self.load_checkpoint()\n\n                for epoch in range(self.epoch+1, max_epochs+1):\n                    self.epoch = epoch\n\n                    self.train_epoch()\n\n                    if self.lr_scheduler is not None:\n                        self.lr_scheduler.step()\n\n                    if self._checkpoint_dir:\n                        self.save_checkpoint()\n            except:\n                print('Training crashed at epoch {}'.format(epoch))\n                if fail_safe:\n                    self.epoch -= 1\n                    load_latest = True\n                    print('Traceback for the error!')\n                    print(traceback.format_exc())\n                    print('Restarting training from last epoch ...')\n                else:\n                    raise\n\n        print('Finished training!')\n\n\n    def train_epoch(self):\n        raise NotImplementedError\n\n\n    def save_checkpoint(self):\n        \"\"\"Saves a checkpoint of the network and other variables.\"\"\"\n\n        net = 
self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net\n\n        actor_type = type(self.actor).__name__\n        net_type = type(net).__name__\n        state = {\n            'epoch': self.epoch,\n            'actor_type': actor_type,\n            'net_type': net_type,\n            'net': net.state_dict(),\n            'net_info': getattr(net, 'info', None),\n            'constructor': getattr(net, 'constructor', None),\n            'optimizer': self.optimizer.state_dict(),\n            'stats': self.stats,\n            'settings': self.settings\n        }\n\n\n        directory = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path)\n        if not os.path.exists(directory):\n            os.makedirs(directory)\n\n        # First save as a tmp file\n        tmp_file_path = '{}/{}_ep{:04d}.tmp'.format(directory, net_type, self.epoch)\n        torch.save(state, tmp_file_path)\n\n        file_path = '{}/{}_ep{:04d}.pth.tar'.format(directory, net_type, self.epoch)\n\n        # Now rename to actual checkpoint. os.rename seems to be atomic if files are on same filesystem. Not 100% sure\n        os.rename(tmp_file_path, file_path)\n\n\n    def load_checkpoint(self, checkpoint = None, fields = None, ignore_fields = None, load_constructor = False):\n        \"\"\"Loads a network checkpoint file.\n\n        Can be called in three different ways:\n            load_checkpoint():\n                Loads the latest epoch from the workspace. Use this to continue training.\n            load_checkpoint(epoch_num):\n                Loads the network at the given epoch number (int).\n            load_checkpoint(path_to_checkpoint):\n                Loads the file from the given absolute path (str).\n        \"\"\"\n\n        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net\n\n        actor_type = type(self.actor).__name__\n        net_type = type(net).__name__\n\n        if checkpoint is None:\n            # Load most recent checkpoint\n            checkpoint_list = sorted(glob.glob('{}/{}/{}_ep*.pth.tar'.format(self._checkpoint_dir,\n                                                                             self.settings.project_path, net_type)))\n            if checkpoint_list:\n                checkpoint_path = checkpoint_list[-1]\n            else:\n                print('No matching checkpoint file found')\n                return\n        elif isinstance(checkpoint, int):\n            # Checkpoint is the epoch number\n            checkpoint_path = '{}/{}/{}_ep{:04d}.pth.tar'.format(self._checkpoint_dir, self.settings.project_path,\n                                                                 net_type, checkpoint)\n        elif isinstance(checkpoint, str):\n            # checkpoint is the path\n            if os.path.isdir(checkpoint):\n                checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint)))\n                if checkpoint_list:\n                    checkpoint_path = checkpoint_list[-1]\n                else:\n                    raise Exception('No checkpoint found')\n            else:\n                checkpoint_path = os.path.expanduser(checkpoint)\n        else:\n            raise TypeError\n\n        # Load network\n        checkpoint_dict = loading.torch_load_legacy(checkpoint_path)\n\n        assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'\n\n        if fields is None:\n            fields = checkpoint_dict.keys()\n        if ignore_fields is 
None:\n            ignore_fields = ['settings']\n\n            # Never load the scheduler. It exists in older checkpoints.\n        ignore_fields.extend(['lr_scheduler', 'constructor', 'net_type', 'actor_type', 'net_info'])\n\n        # Load all fields\n        for key in fields:\n            if key in ignore_fields:\n                continue\n            if key == 'net':\n                net.load_state_dict(checkpoint_dict[key])\n            elif key == 'optimizer':\n                self.optimizer.load_state_dict(checkpoint_dict[key])\n            else:\n                setattr(self, key, checkpoint_dict[key])\n\n        # Set the net info\n        if load_constructor and 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None:\n            net.constructor = checkpoint_dict['constructor']\n        if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:\n            net.info = checkpoint_dict['net_info']\n\n        # Update the epoch in lr scheduler\n        if 'epoch' in fields:\n            self.lr_scheduler.last_epoch = self.epoch\n\n        return True\n"
  },
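`save_checkpoint()` above writes to a `.tmp` file first and then renames it, so a crash mid-save cannot leave a truncated file under the final checkpoint name (on POSIX, a rename within one filesystem is atomic). A minimal standalone sketch of that pattern:

```python
import os
import tempfile
import torch

# Serialize to a temporary file, then atomically rename into place.
def save_checkpoint_atomic(state, directory, net_type, epoch):
    os.makedirs(directory, exist_ok=True)
    tmp_path = os.path.join(directory, '{}_ep{:04d}.tmp'.format(net_type, epoch))
    final_path = os.path.join(directory, '{}_ep{:04d}.pth.tar'.format(net_type, epoch))
    torch.save(state, tmp_path)
    os.rename(tmp_path, final_path)   # readers never see a partial file
    return final_path

with tempfile.TemporaryDirectory() as d:
    print(save_checkpoint_atomic({'epoch': 3}, d, 'DiMPnet', 3))
```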
  {
    "path": "external/AR/ltr/trainers/ltr_trainer.py",
    "content": "import os\nfrom collections import OrderedDict\nfrom ltr.trainers import BaseTrainer\nfrom ltr.admin.stats import AverageMeter, StatValue\nfrom ltr.admin.tensorboard import TensorboardWriter\nimport torch\nimport time\n\n\nclass LTRTrainer(BaseTrainer):\n    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):\n        \"\"\"\n        args:\n            actor - The actor for training the network\n            loaders - list of dataset loaders, e.g. [train_loader, val_loader]. In each epoch, the trainer runs one\n                        epoch for each loader.\n            optimizer - The optimizer used for training, e.g. Adam\n            settings - Training settings\n            lr_scheduler - Learning rate scheduler\n        \"\"\"\n        super().__init__(actor, loaders, optimizer, settings, lr_scheduler)\n\n        self._set_default_settings()\n\n        # Initialize statistics variables\n        self.stats = OrderedDict({loader.name: None for loader in self.loaders})\n\n        # Initialize tensorboard\n        tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path)\n        self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders])\n\n        self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True)\n\n    def _set_default_settings(self):\n        # Dict of all default values\n        default = {'print_interval': 10,\n                   'print_stats': None,\n                   'description': ''}\n\n        for param, default_value in default.items():\n            if getattr(self.settings, param, None) is None:\n                setattr(self.settings, param, default_value)\n\n    def cycle_dataset(self, loader):\n        \"\"\"Do a cycle of training or validation.\"\"\"\n\n        self.actor.train(loader.training)\n        torch.set_grad_enabled(loader.training)\n\n        self._init_timing()\n\n        for i, data in enumerate(loader, 1):\n            # get inputs\n            if self.move_data_to_gpu:\n                data = data.to(self.device)\n\n            data['epoch'] = self.epoch\n            data['settings'] = self.settings\n\n            # forward pass\n            loss, stats = self.actor(data)\n\n            # backward pass and update weights\n            if loader.training:\n                self.optimizer.zero_grad()\n                loss.backward()\n                self.optimizer.step()\n\n            # update statistics\n            batch_size = data['train_images'].shape[loader.stack_dim]\n            self._update_stats(stats, batch_size, loader)\n\n            # print statistics\n            self._print_stats(i, loader, batch_size)\n\n    def train_epoch(self):\n        \"\"\"Do one epoch for each loader.\"\"\"\n        for loader in self.loaders:\n            if self.epoch % loader.epoch_interval == 0:\n                self.cycle_dataset(loader)\n\n        self._stats_new_epoch()\n        self._write_tensorboard()\n\n    def _init_timing(self):\n        self.num_frames = 0\n        self.start_time = time.time()\n        self.prev_time = self.start_time\n\n    def _update_stats(self, new_stats: OrderedDict, batch_size, loader):\n        # Initialize stats if not initialized yet\n        if loader.name not in self.stats.keys() or self.stats[loader.name] is None:\n            self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()})\n\n        for name, val in new_stats.items():\n            if name not in 
self.stats[loader.name].keys():\n                self.stats[loader.name][name] = AverageMeter()\n            self.stats[loader.name][name].update(val, batch_size)\n\n    def _print_stats(self, i, loader, batch_size):\n        self.num_frames += batch_size\n        current_time = time.time()\n        batch_fps = batch_size / (current_time - self.prev_time)\n        average_fps = self.num_frames / (current_time - self.start_time)\n        self.prev_time = current_time\n        if i % self.settings.print_interval == 0 or i == loader.__len__():\n            print_str = '[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__())\n            print_str += 'FPS: %.1f (%.1f)  ,  ' % (average_fps, batch_fps)\n            for name, val in self.stats[loader.name].items():\n                if (self.settings.print_stats is None or name in self.settings.print_stats) and hasattr(val, 'avg'):\n                    print_str += '%s: %.5f  ,  ' % (name, val.avg)\n            print(print_str[:-5])\n\n    def _stats_new_epoch(self):\n        # Record learning rate\n        for loader in self.loaders:\n            if loader.training:\n                lr_list = self.lr_scheduler.get_last_lr()  # get_lr() is deprecated outside step() and can return an already-decayed value\n                for i, lr in enumerate(lr_list):\n                    var_name = 'LearningRate/group{}'.format(i)\n                    if var_name not in self.stats[loader.name].keys():\n                        self.stats[loader.name][var_name] = StatValue()\n                    self.stats[loader.name][var_name].update(lr)\n\n        for loader_stats in self.stats.values():\n            if loader_stats is None:\n                continue\n            for stat_value in loader_stats.values():\n                if hasattr(stat_value, 'new_epoch'):\n                    stat_value.new_epoch()\n\n    def _write_tensorboard(self):\n        if self.epoch == 1:\n            self.tensorboard_writer.write_info(self.settings.module_name, self.settings.script_name, self.settings.description)\n\n        self.tensorboard_writer.write_epoch(self.stats, self.epoch)"
  },
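`_update_stats()` and `_print_stats()` lean on `ltr.admin.stats.AverageMeter`. A minimal sketch of the interface they assume, i.e. `update(val, n)` plus an `avg` attribute, not the library's exact implementation:

```python
# update(val, n) accumulates a running mean weighted by batch size;
# .avg is what _print_stats() reports.
class AverageMeter:
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)

meter = AverageMeter()
meter.update(0.5, n=26)     # batch 1: mean loss 0.5 over 26 samples
meter.update(0.3, n=26)     # batch 2: mean loss 0.3
print(round(meter.avg, 2))  # 0.4
```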
  {
    "path": "external/AR/pytracking/ARcm_seg.py",
    "content": "import os\nimport sys\nimport torch\nimport numpy as np\nimport cv2\nimport torch.nn as nn\nfrom external.AR.pytracking.utils.loading import load_network\nfrom external.AR.ltr.data.processing_utils_SE import sample_target_SE, transform_image_to_crop_SE, map_mask_back\nenv_path = os.path.join(os.path.dirname(__file__), '..')\nif env_path not in sys.path:\n    sys.path.append(env_path)\n\n\ndef mask_torch2numpy(Pmask):\n    Pmask_arr = np.array(Pmask.squeeze().cpu())  # (H,W) (0,1)\n    return Pmask_arr\n\n\nclass ARcm_seg(object):\n    def __init__(self, refine_net_dir, search_factor=2.0, input_sz=256):\n        self.refine_network = self.get_network(refine_net_dir)\n        self.search_factor = search_factor\n        self.input_sz = input_sz\n        self.mean = np.array([0.485, 0.456, 0.406]).reshape((1,1,3))\n        self.std = np.array([0.229, 0.224, 0.225]).reshape((1,1,3))\n\n    def initialize(self, frame1, bbox1):\n        '''\n        :param frame1: cv array (H,W,3)\n        :param bbox1: ndarray (4,)\n        :return:\n        '''\n        '''Step1: get cropped patch(tensor)'''\n        patch1, h_f, w_f = sample_target_SE(frame1, bbox1, self.search_factor, self.input_sz, mode=cv2.BORDER_CONSTANT)\n        patch1_tensor = self.img_preprocess(patch1)\n        '''Step2: get GT's cooridinate on the cropped patch(tensor)'''\n        crop_sz = torch.Tensor((self.input_sz, self.input_sz))\n        bbox1_tensor = self.gt_preprocess(bbox1) # (4,)\n        bbox1_crop_tensor = transform_image_to_crop_SE(bbox1_tensor, bbox1_tensor, h_f, w_f, crop_sz).cuda()\n        '''Step3: forward prop (reference branch)'''\n        with torch.no_grad():\n            self.refine_network.forward_ref(patch1_tensor, bbox1_crop_tensor)\n\n    '''refine'''\n    def get_mask(self, Cframe, Cbbox, dtm=None, vis=False):\n        '''\n        :param Cframe: Current frame(cv2 array)\n        :param Cbbox: Current bbox (ndarray) (x1,y1,w,h)\n        :return: mask\n        '''\n        '''Step1: get cropped patch(tensor)'''\n        Cpatch, h_f, w_f = sample_target_SE(Cframe, Cbbox, self.search_factor, self.input_sz, mode=cv2.BORDER_CONSTANT)\n        Cpatch_tensor = self.img_preprocess(Cpatch)\n\n        '''Step2: forward prop (test branch)'''\n        with torch.no_grad():\n            if dtm is not None:\n                '''2020.4.26 support input dtm'''\n                pred = self.refine_network.forward_test(Cpatch_tensor, dtm, mode='mask')\n            else:\n                pred = self.refine_network.forward_test(Cpatch_tensor,mode='mask')\n            Pmask_arr = mask_torch2numpy(pred)\n            mask_arr = map_mask_back(Cframe, Cbbox, self.search_factor, Pmask_arr,\n                                     mode=cv2.BORDER_CONSTANT)\n            if vis:\n                return mask_arr, Cpatch, Pmask_arr\n            else:\n                return mask_arr\n\n    def get_network(self,checkpoint_dir):\n        network = load_network(checkpoint_dir)\n        network.cuda()\n        network.eval()\n        return network\n\n    def img_preprocess(self,img_arr):\n        '''---> Pytorch tensor(RGB),Normal(-1 to 1,subtract mean, divide std)\n        input img_arr (H,W,3)\n        output (1,1,3,H,W)\n        '''\n        norm_img = ((img_arr/255.0) - self.mean)/(self.std)\n        img_f32 = norm_img.astype(np.float32)\n        img_tensor = torch.from_numpy(img_f32).cuda()\n        img_tensor = img_tensor.permute((2,0,1))\n        return img_tensor.unsqueeze(dim=0).unsqueeze(dim=0)\n\n    def 
gt_preprocess(self,gt_arr):\n        '''\n        :param gt: ndarray (4,)\n        :return: torch tensor (4,)\n        '''\n        return torch.from_numpy(gt_arr.astype(np.float32))\n\n\ndef add_frame_mask(frame, mask, threshold=0.5):\n    mask_new = (mask>threshold)*255 #(H,W)\n    frame_new = frame.copy().astype(np.float)\n    frame_new[...,1] += 0.3*mask_new\n    frame_new = frame_new.clip(0,255).astype(np.uint8)\n    return frame_new\n\n\ndef add_frame_bbox(frame, refined_box, color):\n    x1, y1, w, h = refined_box.tolist()\n    cv2.rectangle(frame, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color, 2)\n    return frame\n"
  },
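A standalone version of `img_preprocess` above is handy for checking the `(1,1,3,H,W)` layout the refinement network expects; the `.cuda()` call is omitted here so the sketch also runs on CPU:

```python
import numpy as np
import torch

# ImageNet-normalize an (H,W,3) uint8 RGB crop and reshape it to
# (1,1,3,H,W): batch and sequence dims of size 1, then channels.
mean = np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3))
std = np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3))

def img_preprocess(img_arr):
    norm_img = ((img_arr / 255.0) - mean) / std
    img_tensor = torch.from_numpy(norm_img.astype(np.float32))
    img_tensor = img_tensor.permute((2, 0, 1))      # (3,H,W)
    return img_tensor.unsqueeze(0).unsqueeze(0)     # (1,1,3,H,W)

dummy = np.zeros((384, 384, 3), dtype=np.uint8)
print(img_preprocess(dummy).shape)  # torch.Size([1, 1, 3, 384, 384])
```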
  {
    "path": "external/AR/pytracking/VOT/tracker_DiMP.m",
    "content": "% Set path to the python in the pytracking conda environment\npython_path = 'PATH_TO_CONDA_INSTALLATION/envs/pytracking/bin/python';\n\n% Set path to pytracking\npytracking_path = 'PATH_TO_VISIONML/pytracking';\n\n% Set path to trax installation. Check\n% https://trax.readthedocs.io/en/latest/tutorial_compiling.html for\n% compilation information\ntrax_path = 'PATH_TO_VOT_TOOLKIT/native/trax';\n\ntracker_name = 'dimp';          % Name of the tracker to evaluate\nrunfile_name = 'dimp18_vot';    % Name of the parameter file to use\ndebug = 0;\n\n%%\ntracker_label = [tracker_name, '_', runfile_name];\n\n% Generate python command\ntracker_command = sprintf(['%s -c \"import sys; sys.path.append(''%s'');', ...\n                           'sys.path.append(''%s/support/python'');', ...\n                           'import run_vot;', ...\n                           'run_vot.run_vot(''%s'', ''%s'', debug=%d)\"'],...\n                           python_path, pytracking_path, trax_path, ...\n                           tracker_name, runfile_name, debug);\n\n\ntracker_interpreter = python_path;\n\ntracker_linkpath = {[trax_path, '/build'],...\n\t\t[trax_path, '/build/support/client'],...\n\t\t[trax_path, '/build/support/opencv']};\n"
  },
  {
    "path": "external/AR/pytracking/VOT/trackers.ini",
    "content": "[DiMP]  # <tracker-name>\nlabel = DiMP\nprotocol = traxpython\n\ncommand = run_vot; run_vot.run_vot2020('dimp', 'dimp50')  # Set the tracker name and the parameter name\n\n# Specify a path to trax python wrapper if it is not visible (separate by ; if using multiple paths)\npaths = PATH_TO_PYTRACKING\n\n# Additional environment paths\n#env_PATH = <additional-env-paths>;${PATH}\n\n"
  },
  {
    "path": "external/AR/pytracking/VOT/vot.py",
    "content": "\"\"\"\n\\file vot.py\n\n@brief Python utility functions for VOT integration\n\n@author Luka Cehovin, Alessio Dore\n\n@date 2016, 2019\n\n\"\"\"\n\nimport sys\nimport copy\nimport collections\n\ntry:\n    import trax\nexcept ImportError:\n    raise Exception('TraX support not found. Please add trax module to Python path.')\n\nRectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])\nPoint = collections.namedtuple('Point', ['x', 'y'])\nPolygon = collections.namedtuple('Polygon', ['points'])\n\nclass VOT(object):\n    \"\"\" Base class for Python VOT integration \"\"\"\n    def __init__(self, region_format, channels=None):\n        \"\"\" Constructor\n\n        Args:\n            region_format: Region format options\n        \"\"\"\n        assert(region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON])\n\n        if channels is None:\n            channels = ['color']\n        elif channels == 'rgbd':\n            channels = ['color', 'depth']\n        elif channels == 'rgbt':\n            channels = ['color', 'ir']\n        elif channels == 'ir':\n            channels = ['ir']\n        else:\n            raise Exception('Illegal configuration {}.'.format(channels))\n\n        self._trax = trax.Server([region_format], [trax.Image.PATH], channels)\n\n        request = self._trax.wait()\n        assert(request.type == 'initialize')\n        if isinstance(request.region, trax.Polygon):\n            self._region = Polygon([Point(x[0], x[1]) for x in request.region])\n        else:\n            self._region = Rectangle(*request.region.bounds())\n        self._image = [str(x) for k, x in request.image.items()]\n        if len(self._image) == 1:\n            self._image = self._image[0]\n        self._trax.status(request.region)\n\n    def region(self):\n        \"\"\"\n        Send configuration message to the client and receive the initialization\n        region and the path of the first image\n\n        Returns:\n            initialization region\n        \"\"\"\n\n        return self._region\n\n    def report(self, region, confidence = None):\n        \"\"\"\n        Report the tracking results to the client\n\n        Arguments:\n            region: region for the frame\n        \"\"\"\n        assert(isinstance(region, Rectangle) or isinstance(region, Polygon))\n        if isinstance(region, Polygon):\n            tregion = trax.Polygon.create([(x.x, x.y) for x in region.points])\n        else:\n            tregion = trax.Rectangle.create(region.x, region.y, region.width, region.height)\n        properties = {}\n        if not confidence is None:\n            properties['confidence'] = confidence\n        self._trax.status(tregion, properties)\n\n    def frame(self):\n        \"\"\"\n        Get a frame (image path) from client\n\n        Returns:\n            absolute path of the image\n        \"\"\"\n        if hasattr(self, \"_image\"):\n            image = self._image\n            del self._image\n            return tuple(image)\n\n        request = self._trax.wait()\n\n        if request.type == 'frame':\n            image = [str(x) for k, x in request.image.items()]\n            if len(image) == 1:\n                image = image[0]\n            return tuple(image)\n        else:\n            return None\n\n\n    def quit(self):\n        if hasattr(self, '_trax'):\n            self._trax.quit()\n\n    def __del__(self):\n        self.quit()\n\n"
  },
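A hedged usage sketch of this helper: the loop below only runs inside a VOT toolkit session, since the `VOT` constructor starts a TraX server and blocks until the toolkit sends the initialize request. The "tracker" here is a no-op that re-reports the initial rectangle on every frame:

```python
import trax
import vot

# Handshake with the toolkit: rectangle region format, color channel.
handle = vot.VOT(trax.Region.RECTANGLE)
region = handle.region()          # initialization region from the toolkit

while True:
    imagefile = handle.frame()    # image path, or None when the sequence ends
    if not imagefile:
        break
    # A real tracker would load the image and update `region` here.
    handle.report(region, confidence=1.0)
```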
  {
    "path": "external/AR/pytracking/VOT2020_super_only_mask_384_HP/dimp_alpha_065.py",
    "content": "from pytracking.VOT2020_super_only_mask_384_HP.dimp_alpha_seg_class import run_vot_exp\nimport os\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n# run_vot_exp('dimp','dimp50_vot19','SEbcm',0.60,VIS=False)\nrun_vot_exp('dimp','super_dimp','ARcm_coco_seg_only_mask_384',0.65,VIS=False)\n# run_vot_exp('dimp','super_dimp','ARcm_coco_seg_only_mask_384',0.65,VIS=True)"
  },
  {
    "path": "external/AR/pytracking/VOT2020_super_only_mask_384_HP/dimp_alpha_seg_class.py",
    "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport cv2\nimport torch\nimport vot\nimport sys\nimport time\n\n'''Refine module & Pytracking base trackers'''\nimport os\nfrom pytracking.evaluation import Tracker\nfrom pytracking.ARcm_seg import ARcm_seg\nfrom pytracking.vot20_utils import *\n\n''''''\n'''DiMP-alpha class'''\n\n\nclass DIMP_ALPHA(object):\n    def __init__(self, tracker_name='dimp', para_name='dimp50_vot19',\n                 refine_model_name='ARcm_coco_seg', threshold=0.15):\n        self.THRES = threshold\n        '''create tracker'''\n        '''DIMP'''\n        tracker_info = Tracker(tracker_name, para_name, None)\n        params = tracker_info.get_parameters()\n        params.visualization = False\n        params.debug = False\n        params.visdom_info = {'use_visdom': False, 'server': '127.0.0.1', 'port': 8097}\n        self.dimp = tracker_info.tracker_class(params)\n        '''Alpha-Refine'''\n        project_path = os.path.join(os.path.dirname(__file__), '..', '..')\n        refine_root = os.path.join(project_path, 'ltr/checkpoints/ltr/ARcm_seg/')\n        refine_path = os.path.join(refine_root, refine_model_name)\n        '''2020.4.25 input size: 384x384'''\n        self.alpha = ARcm_seg(refine_path, input_sz=384)\n\n    def initialize(self, img_RGB, mask):\n        region = rect_from_mask(mask)\n        self.H, self.W, _ = img_RGB.shape\n        gt_bbox_np = np.array(region).astype(np.float32)\n        '''Initialize dimp for specific video'''\n        gt_bbox_torch = torch.from_numpy(gt_bbox_np)\n        init_info = {}\n        init_info['init_bbox'] = gt_bbox_torch\n        _ = self.dimp.initialize(img_RGB, init_info)\n        '''initilize refinement module for specific video'''\n        self.alpha.initialize(img_RGB, np.array(gt_bbox_np))\n\n    def track(self, img_RGB):\n        '''TRACK'''\n        '''base tracker'''\n        outputs = self.dimp.track(img_RGB)\n        pred_bbox = outputs['target_bbox']\n        '''Step1: Post-Process'''\n        x1, y1, w, h = pred_bbox\n        # add boundary and min size limit\n        x1, y1, x2, y2 = bbox_clip(x1, y1, x1 + w, y1 + h, (self.H, self.W))\n        w = x2 - x1\n        h = y2 - y1\n        new_pos = torch.from_numpy(np.array([y1 + h / 2, x1 + w / 2]).astype(np.float32))\n        new_target_sz = torch.from_numpy(np.array([h, w]).astype(np.float32))\n        new_scale = torch.sqrt(new_target_sz.prod() / self.dimp.base_target_sz.prod())\n        ##### update\n        self.dimp.pos = new_pos.clone()\n        self.dimp.target_sz = new_target_sz\n        self.dimp.target_scale = new_scale\n        bbox_new = [x1, y1, w, h]\n        '''Step2: Mask report'''\n        pred_mask, search, search_mask = self.alpha.get_mask(img_RGB, np.array(bbox_new), vis=True)\n        final_mask = (pred_mask > self.THRES).astype(np.uint8)\n        search_region = search.astype(np.uint8)\n        search_mask = (search_mask > self.THRES).astype(np.uint8)\n        return bbox_new, final_mask, search_region, search_mask\n\n\ndef run_vot_exp(tracker_name, para_name, refine_model_name, threshold, VIS=False):\n    torch.set_num_threads(1)\n    # torch.cuda.set_device(CUDA_ID)  # set GPU id\n    save_root = os.path.join('<SAVE_DIR>', para_name)\n    if VIS and (not os.path.exists(save_root)):\n        os.mkdir(save_root)\n    tracker = DIMP_ALPHA(tracker_name=tracker_name, para_name=para_name,\n                         
refine_model_name=refine_model_name, threshold=threshold)\n    handle = vot.VOT(\"mask\")\n    selection = handle.region()\n    imagefile = handle.frame()\n    if not imagefile:\n        sys.exit(0)\n    if VIS:\n        '''for vis'''\n        seq_name = imagefile.split('/')[-3]\n        save_v_dir = os.path.join(save_root, seq_name)\n        if not os.path.exists(save_v_dir):\n            os.mkdir(save_v_dir)\n        cur_time = int(time.time() % 10000)\n        save_dir = os.path.join(save_v_dir, str(cur_time))\n        if not os.path.exists(save_dir):\n            os.makedirs(save_dir)\n\n    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right\n    # mask given by the toolkit ends with the target (zero-padding to the right and down is needed)\n    mask = make_full_size(selection, (image.shape[1], image.shape[0]))\n    tracker.initialize(image, mask)\n\n    while True:\n        imagefile = handle.frame()\n        if not imagefile:\n            break\n        image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right\n        b1, m, search, search_m = tracker.track(image)\n        handle.report(m)\n        if VIS:\n            '''Visualization'''\n            # original image\n            image_ori = image[:, :, ::-1].copy()  # RGB --> BGR\n            image_name = imagefile.split('/')[-1]\n            save_path = os.path.join(save_dir, image_name)\n            cv2.imwrite(save_path, image_ori)\n            # dimp box\n            image_b = image_ori.copy()\n            cv2.rectangle(image_b, (int(b1[0]), int(b1[1])),\n                          (int(b1[0] + b1[2]), int(b1[1] + b1[3])), (0, 0, 255), 2)\n            image_b_name = image_name.replace('.jpg', '_bbox.jpg')\n            save_path = os.path.join(save_dir, image_b_name)\n            cv2.imwrite(save_path, image_b)\n            # search region\n            search_bgr = search[:, :, ::-1].copy()\n            search_name = image_name.replace('.jpg', '_search.jpg')\n            save_path = os.path.join(save_dir, search_name)\n            cv2.imwrite(save_path, search_bgr)\n            # search region mask\n            search_bgr_m = search_bgr.astype(np.float32)\n            search_bgr_m[:, :, 1] += 127.0 * search_m\n            search_bgr_m[:, :, 2] += 127.0 * search_m\n            contours, _ = cv2.findContours(search_m, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n            search_bgr_m = cv2.drawContours(search_bgr_m, contours, -1, (0, 255, 255), 4)\n            search_bgr_m = search_bgr_m.clip(0, 255).astype(np.uint8)\n            search_name_m = image_name.replace('.jpg', '_search_mask.jpg')\n            save_path = os.path.join(save_dir, search_name_m)\n            cv2.imwrite(save_path, search_bgr_m)\n            # original image + mask\n            image_m = image_ori.copy().astype(np.float32)\n            image_m[:, :, 1] += 127.0 * m\n            image_m[:, :, 2] += 127.0 * m\n            contours, _ = cv2.findContours(m, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n            image_m = cv2.drawContours(image_m, contours, -1, (0, 255, 255), 2)\n            image_m = image_m.clip(0, 255).astype(np.uint8)\n            image_mask_name_m = image_name.replace('.jpg', '_mask.jpg')\n            save_path = os.path.join(save_dir, image_mask_name_m)\n            cv2.imwrite(save_path, image_m)\n"
  },
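  {
    "path": "external/AR/pytracking/VOT2020_super_only_mask_384_HP/EXAMPLE_dimp_alpha_stub.py",
    "content": "# Editor's note: hedged usage sketch, not part of the original repository.\n# VOT workspaces launch trackers through a small entry-point script; this is\n# a minimal sketch of such a stub for the run_vot_exp() defined above. The\n# module name 'dimp_alpha_seg_class' and the parameter values below are\n# assumptions -- substitute the actual module and parameter names used in\n# your workspace.\nimport os\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'  # pick the GPU before torch initializes\n\nfrom dimp_alpha_seg_class import run_vot_exp  # hypothetical module name\n\n# Creates the DiMP + Alpha-Refine tracker, then loops over frames supplied by\n# the VOT/TraX handle, reporting one segmentation mask per frame.\nrun_vot_exp(tracker_name='dimp', para_name='dimp50_vot19',\n            refine_model_name='ARcm_coco_seg', threshold=0.15, VIS=False)\n"
  },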
  {
    "path": "external/AR/pytracking/VOT2020_super_only_mask_384_HP/mixformer_alpha_seg_class.py",
    "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport cv2\nimport torch\nimport vot\nimport sys\nimport time\nimport os\nimport numpy as np\nfrom lib.test.tracker.mixformer_online import MixFormerOnline\nfrom pytracking.ARcm_seg import ARcm_seg\nfrom pytracking.vot20_utils import *\n\nimport lib.test.parameter.mixformer_online as vot_params\n\nclass MIXFORMER_ALPHA_SEG(object):\n    def __init__(self, tracker,\n                 refine_model_name='ARcm_coco_seg', threshold=0.6):\n        self.THRES = threshold\n        self.tracker = tracker\n        '''create tracker'''\n        '''Alpha-Refine'''\n        project_path = os.path.join(os.path.dirname(__file__), '..', '..')\n        refine_root = os.path.join(project_path, 'ltr/checkpoints/ltr/ARcm_seg/')\n        refine_path = os.path.join(refine_root, refine_model_name)\n        '''2020.4.25 input size: 384x384'''\n        self.alpha = ARcm_seg(refine_path, input_sz=384)\n\n    def initialize(self, image, mask):\n        region = rect_from_mask(mask)\n        # init_info = {'init_bbox': region}\n        # self.tracker.initialize(image, init_info)\n\n        self.H, self.W, _ = image.shape\n        gt_bbox_np = np.array(region).astype(np.float32)\n        '''Initialize STARK for specific video'''\n        init_info = {'init_bbox': list(gt_bbox_np)}\n        self.tracker.initialize(image, init_info)\n        '''initilize refinement module for specific video'''\n        self.alpha.initialize(image, np.array(gt_bbox_np))\n\n    def track(self, img_RGB):\n        '''TRACK'''\n        '''base tracker'''\n        outputs = self.tracker.track(img_RGB)\n        pred_bbox = outputs['target_bbox']\n        '''Step2: Mask report'''\n        pred_mask, search, search_mask = self.alpha.get_mask(img_RGB, np.array(pred_bbox), vis=True)\n        final_mask = (pred_mask > self.THRES).astype(np.uint8)\n        return final_mask, 1\n\n\ndef make_full_size(x, output_sz):\n    '''\n    zero-pad input x (right and down) to match output_sz\n    x: numpy array e.g., binary mask\n    output_sz: size of the output [width, height]\n    '''\n    if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]:\n        return x\n    pad_x = output_sz[0] - x.shape[1]\n    if pad_x < 0:\n        x = x[:, :x.shape[1] + pad_x]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_x = 0\n    pad_y = output_sz[1] - x.shape[0]\n    if pad_y < 0:\n        x = x[:x.shape[0] + pad_y, :]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_y = 0\n    return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0)\n\n\n\nrefine_model_name = 'ARcm_coco_seg_only_mask_384'\nparams = vot_params.parameters(\"baseline\", model=\"mixformer_online_22k.pth.tar\")\n# params = vot_params.parameters(\"baseline\")\nmixformer = MixFormerOnline(params, \"VOT20\")\ntracker = MIXFORMER_ALPHA_SEG(tracker=mixformer, refine_model_name=refine_model_name)\nhandle = vot.VOT(\"mask\")\nselection = handle.region()\nimagefile = handle.frame()\n\nif not imagefile:\n    sys.exit(0)\n\nimage = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right\n# mask given by the toolkit ends with the target (zero-padding to the right and down is needed)\nmask = make_full_size(selection, (image.shape[1], image.shape[0]))\n\ntracker.H = image.shape[0]\ntracker.W = image.shape[1]\n\ntracker.initialize(image, mask)\n\nwhile 
True:\n    imagefile = handle.frame()\n    if not imagefile:\n        break\n    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right\n    region, confidence = tracker.track(image)\n    handle.report(region, confidence)\n"
  },
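  {
    "path": "external/AR/pytracking/VOT2020_super_only_mask_384_HP/EXAMPLE_make_full_size_demo.py",
    "content": "# Editor's note: hedged illustration, not part of the original repository.\n# It demonstrates the zero-padding contract documented in make_full_size()\n# above: the VOT toolkit hands over an init mask cropped to the target, and\n# the mask must be padded to the right and down (or cropped) to the full\n# image size. The function body is copied from the script above so the demo\n# is self-contained.\nimport numpy as np\n\n\ndef make_full_size(x, output_sz):\n    # output_sz is [width, height]\n    if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]:\n        return x\n    pad_x = output_sz[0] - x.shape[1]\n    if pad_x < 0:\n        x = x[:, :x.shape[1] + pad_x]\n        pad_x = 0\n    pad_y = output_sz[1] - x.shape[0]\n    if pad_y < 0:\n        x = x[:x.shape[0] + pad_y, :]\n        pad_y = 0\n    return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0)\n\n\nif __name__ == '__main__':\n    selection = np.ones((3, 4), dtype=np.uint8)  # toolkit mask covering a 4x3 target\n    mask = make_full_size(selection, (8, 6))     # image is 8 px wide, 6 px high\n    assert mask.shape == (6, 8)                  # (rows, cols) of the full image\n    assert mask[:3, :4].all()                    # original mask preserved\n    assert mask[3:, :].sum() == 0 and mask[:, 4:].sum() == 0  # zero padding\n"
  },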
  {
    "path": "external/AR/pytracking/VOT2020_super_only_mask_384_HP/mixformer_large_alpha_seg_class.py",
    "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport cv2\nimport torch\nimport vot\nimport sys\nimport time\nimport os\nimport numpy as np\nfrom lib.test.tracker.mixformer_online import MixFormerOnline\nfrom pytracking.ARcm_seg import ARcm_seg\nfrom pytracking.vot20_utils import *\n\nimport lib.test.parameter.mixformer_online as vot_params\n\nclass MIXFORMER_ALPHA_SEG(object):\n    def __init__(self, tracker,\n                 refine_model_name='ARcm_coco_seg', threshold=0.6):\n        self.THRES = threshold\n        self.tracker = tracker\n        '''create tracker'''\n        '''Alpha-Refine'''\n        project_path = os.path.join(os.path.dirname(__file__), '..', '..')\n        refine_root = os.path.join(project_path, 'ltr/checkpoints/ltr/ARcm_seg/')\n        refine_path = os.path.join(refine_root, refine_model_name)\n        '''2020.4.25 input size: 384x384'''\n        self.alpha = ARcm_seg(refine_path, input_sz=384)\n\n    def initialize(self, image, mask):\n        region = rect_from_mask(mask)\n        # init_info = {'init_bbox': region}\n        # self.tracker.initialize(image, init_info)\n\n        self.H, self.W, _ = image.shape\n        gt_bbox_np = np.array(region).astype(np.float32)\n        '''Initialize STARK for specific video'''\n        init_info = {'init_bbox': list(gt_bbox_np)}\n        self.tracker.initialize(image, init_info)\n        '''initilize refinement module for specific video'''\n        self.alpha.initialize(image, np.array(gt_bbox_np))\n\n    def track(self, img_RGB):\n        '''TRACK'''\n        '''base tracker'''\n        outputs = self.tracker.track(img_RGB)\n        pred_bbox = outputs['target_bbox']\n        '''Step2: Mask report'''\n        pred_mask, search, search_mask = self.alpha.get_mask(img_RGB, np.array(pred_bbox), vis=True)\n        final_mask = (pred_mask > self.THRES).astype(np.uint8)\n        return final_mask, 1\n\n\ndef make_full_size(x, output_sz):\n    '''\n    zero-pad input x (right and down) to match output_sz\n    x: numpy array e.g., binary mask\n    output_sz: size of the output [width, height]\n    '''\n    if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]:\n        return x\n    pad_x = output_sz[0] - x.shape[1]\n    if pad_x < 0:\n        x = x[:, :x.shape[1] + pad_x]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_x = 0\n    pad_y = output_sz[1] - x.shape[0]\n    if pad_y < 0:\n        x = x[:x.shape[0] + pad_y, :]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_y = 0\n    return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0)\n\n\n\nrefine_model_name = 'ARcm_coco_seg_only_mask_384'\n# params = vot_params.parameters(\"baseline_large\")\nparams = vot_params.parameters(\"baseline_large\", model=\"mixformerL_online_22k.pth.tar\")\nmixformer = MixFormerOnline(params, \"VOT20\")\ntracker = MIXFORMER_ALPHA_SEG(tracker=mixformer, refine_model_name=refine_model_name)\nhandle = vot.VOT(\"mask\")\nselection = handle.region()\nimagefile = handle.frame()\n\nif not imagefile:\n    sys.exit(0)\n\nimage = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right\n# mask given by the toolkit ends with the target (zero-padding to the right and down is needed)\nmask = make_full_size(selection, (image.shape[1], image.shape[0]))\n\ntracker.H = image.shape[0]\ntracker.W = image.shape[1]\n\ntracker.initialize(image, 
mask)\n\nwhile True:\n    imagefile = handle.frame()\n    if not imagefile:\n        break\n    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)  # Right\n    region, confidence = tracker.track(image)\n    handle.report(region, confidence)\n"
  },
  {
    "path": "external/AR/pytracking/VOT2020_super_only_mask_384_HP/vot.py",
    "content": "\"\"\"\n\\file vot.py\n@brief Python utility functions for VOT integration\n@author Luka Cehovin, Alessio Dore\n@date 2016\n\"\"\"\n\nimport sys\nimport copy\nimport collections\nimport numpy as np\n\ntry:\n    import trax\nexcept ImportError:\n    raise Exception('TraX support not found. Please add trax module to Python path.')\n\nRectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])\nPoint = collections.namedtuple('Point', ['x', 'y'])\nPolygon = collections.namedtuple('Polygon', ['points'])\n\nclass VOT(object):\n    \"\"\" Base class for Python VOT integration \"\"\"\n    def __init__(self, region_format, channels=None):\n        \"\"\" Constructor\n        Args:\n            region_format: Region format options\n        \"\"\"\n        assert(region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON, trax.Region.MASK])\n\n        if channels is None:\n            channels = ['color']\n        elif channels == 'rgbd':\n            channels = ['color', 'depth']\n        elif channels == 'rgbt':\n            channels = ['color', 'ir']\n        elif channels == 'ir':\n            channels = ['ir']\n        else:\n            raise Exception('Illegal configuration {}.'.format(channels))\n\n        self._trax = trax.Server([region_format], [trax.Image.PATH], channels, customMetadata=dict(vot=\"python\"))\n\n        request = self._trax.wait()\n        assert(request.type == 'initialize')\n        if isinstance(request.region, trax.Polygon):\n            self._region = Polygon([Point(x[0], x[1]) for x in request.region])\n        elif isinstance(request.region, trax.Mask):\n            self._region = request.region.array(True)\n        else:\n            self._region = Rectangle(*request.region.bounds())\n        self._image = [x.path() for k, x in request.image.items()]\n        if len(self._image) == 1:\n            self._image = self._image[0]\n\n        self._trax.status(request.region)\n\n    def region(self):\n        \"\"\"\n        Send configuration message to the client and receive the initialization\n        region and the path of the first image\n        Returns:\n            initialization region\n        \"\"\"\n\n        return self._region\n\n    def report(self, region, confidence = None):\n        \"\"\"\n        Report the tracking results to the client\n        Arguments:\n            region: region for the frame\n        \"\"\"\n        assert(isinstance(region, (Rectangle, Polygon, np.ndarray)))\n        if isinstance(region, Polygon):\n            tregion = trax.Polygon.create([(x.x, x.y) for x in region.points])\n        elif isinstance(region, np.ndarray):\n            tregion = trax.Mask.create(region)\n        else:\n            tregion = trax.Rectangle.create(region.x, region.y, region.width, region.height)\n        properties = {}\n        if not confidence is None:\n            properties['confidence'] = confidence\n        self._trax.status(tregion, properties)\n\n    def frame(self):\n        \"\"\"\n        Get a frame (image path) from client\n        Returns:\n            absolute path of the image\n        \"\"\"\n        if hasattr(self, \"_image\"):\n            image = self._image\n            del self._image\n            return image\n\n        request = self._trax.wait()\n\n        if request.type == 'frame':\n            image = [x.path() for k, x in request.image.items()]\n            if len(image) == 1:\n                return image[0]\n            return image\n        else:\n            return None\n\n\n 
   def quit(self):\n        if hasattr(self, '_trax'):\n            self._trax.quit()\n\n    def __del__(self):\n        self.quit()\n"
  },
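  {
    "path": "external/AR/pytracking/VOT2020_super_only_mask_384_HP/EXAMPLE_vot_integration_stub.py",
    "content": "# Editor's note: hedged sketch, not part of the original repository. It shows\n# the generic client-side loop that the VOT class above supports, here using\n# the 'rectangle' region format. StaticTracker is a toy placeholder; a real\n# integration would call an actual tracker's initialize/track methods, as the\n# mask-based scripts above do.\nimport cv2\nimport vot\n\n\nclass StaticTracker(object):\n    # toy tracker: always reports the initial box\n    def initialize(self, image, bbox):\n        self.bbox = bbox\n\n    def track(self, image):\n        return self.bbox\n\n\nhandle = vot.VOT('rectangle')      # handshake with the TraX server\nregion = handle.region()           # Rectangle(x, y, width, height)\nimagefile = handle.frame()         # absolute path of the first frame\nif imagefile:\n    image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)\n    tracker = StaticTracker()\n    tracker.initialize(image, region)\n    while True:\n        imagefile = handle.frame()\n        if not imagefile:          # sequence finished\n            break\n        image = cv2.cvtColor(cv2.imread(imagefile), cv2.COLOR_BGR2RGB)\n        x, y, w, h = tracker.track(image)\n        handle.report(vot.Rectangle(x, y, w, h), confidence=1.0)\n"
  },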
  {
    "path": "external/AR/pytracking/__init__.py",
    "content": "from pytracking.libs import TensorList, TensorDict\n"
  },
  {
    "path": "external/AR/pytracking/analysis/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/pytracking/analysis/evaluate_vos.py",
    "content": "import os\nimport numpy as np\nimport torch\nimport pandas as pd\nfrom collections import OrderedDict\nfrom ltr.data.image_loader import imread_indexed\nfrom pytracking.evaluation import get_dataset\nfrom pathlib import Path\nfrom pytracking.analysis.plot_results import generate_formatted_report\n\nimport pytracking.analysis.vos_utils as utils\n\n# Originally db_eval_sequence() in the davis challenge toolkit:\ndef evaluate_sequence(seq_name, segmentations, annotations, object_info, measure='J'):\n    \"\"\"\n    Evaluate video sequence results.\n\n      Arguments:\n          segmentations (dict of ndarray): segmentation labels.\n          annotations   (dict of ndarray): ground-truth labels.\n          object_info   dict: {object_id: first_frame_index}\n\n      measure       evaluation metric (J,F)\n    \"\"\"\n\n    results = dict(raw=OrderedDict())\n\n    _measures = {'J': utils.davis_jaccard_measure, 'F': utils.davis_f_measure}\n    _statistics = {'decay': utils.decay, 'mean': utils.mean, 'recall': utils.recall, 'std': utils.std}\n\n    for obj_id, first_frame in object_info.items():\n\n        r = np.ones((len(annotations))) * np.nan\n\n        for i, (an, sg) in enumerate(zip(annotations, segmentations)):\n            if list(annotations.keys()).index(first_frame) < i < len(annotations) - 1:\n                r[i] = _measures[measure](annotations[an] == obj_id, segmentations[sg] == obj_id)\n\n        results['raw'][obj_id] = r\n\n    for stat, stat_fn in _statistics.items():\n        results[stat] = [float(stat_fn(r)) for r in results['raw'].values()]\n\n    return results\n\n\ndef evaluate_dataset(results_path, dset_name, measure='J', to_file=True, scores=False, sequences=None, quiet=False):\n    dset = get_dataset(dset_name)\n    results = OrderedDict()\n    dset_scores = []\n    dset_decay = []\n    dset_recall = []\n\n    if to_file:\n        f = open(results_path / (\"evaluation-%s.txt\" % measure), \"w\")\n\n    def _print(msg):\n        if not quiet:\n            print(msg)\n        if to_file:\n            print(msg, file=f)\n\n    if sequences is not None:\n        sequences = [sequences] if not isinstance(sequences, (list, tuple)) else sequences\n\n    target_names = []\n    for j, sequence in enumerate(dset):\n        if (sequences is not None) and (sequence.name not in sequences):\n            continue\n\n        # Load all frames\n        frames = sequence.ground_truth_seg\n\n        annotations = OrderedDict()\n        segmentations = OrderedDict()\n\n        for f in frames:\n            if f is None:\n                continue\n\n            file = Path(f)\n            annotations[file.name] = imread_indexed(file)\n            if not scores:\n                segmentations[file.name] = imread_indexed(os.path.join(results_path, sequence.name, file.name))\n            else:\n                raise NotImplementedError\n        # Find object ids and starting frames\n\n        object_info = dict()\n\n        for f_id, d in sequence.init_data.items():\n            for obj_id in d['object_ids']:\n                object_info[int(obj_id)] = Path(d['mask']).name\n\n        if 0 in object_info:  # Remove background\n            object_info.pop(0)\n\n        # Evaluate\n        n_seqs = len(dset)\n        n_objs = len(object_info)\n        seq_name = sequence.name\n\n        _print(\"%d/%d: %s: %d object%s\" % (j + 1, n_seqs, seq_name, n_objs, \"s\" if n_objs > 1 else \"\"))\n        r = evaluate_sequence(seq_name, segmentations, annotations, object_info, 
measure=measure)\n        results[seq_name] = r\n\n        # Print scores, per frame and object, ignoring NaNs\n\n        per_obj_score = []  # Per-object accuracies, averaged over the sequence\n        per_frame_score = []  # Per-frame accuracies, averaged over the objects\n\n        for obj_id, score in r['raw'].items():\n            target_names.append('{}_{}'.format(seq_name, obj_id))\n            per_frame_score.append(score)\n            s = utils.mean(score)  # Sequence average for one object\n            per_obj_score.append(s)\n            if n_objs > 1:\n                _print(\"joint {obj}: acc {score:.3f} ┊{apf}┊\".format(obj=obj_id, score=s, apf=utils.text_bargraph(score)))\n\n        # Print mean object score per frame and final score\n        dset_decay.extend(r['decay'])\n        dset_recall.extend(r['recall'])\n        dset_scores.extend(per_obj_score)\n\n        seq_score = utils.mean(per_obj_score)  # Final score\n        seq_mean_score = utils.nanmean(np.array(per_frame_score), axis=0)  # Mean object score per frame\n\n        # Print sequence results\n        _print(\"final  : acc {seq:.3f} ({dset:.3f}) ┊{apf}┊\".format(\n            seq=seq_score, dset=np.mean(dset_scores), apf=utils.text_bargraph(seq_mean_score)))\n\n    _print(\"%s: %.3f, recall: %.3f, decay: %.3f\" % (measure, utils.mean(dset_scores), utils.mean(dset_recall), utils.mean(dset_decay)))\n\n    if to_file:\n        f.close()\n\n    return target_names, dset_scores, dset_recall, dset_decay\n\n\ndef evaluate_vos(trackers, dataset='yt2019_jjval', force=False):\n    \"\"\" evaluate a list of trackers on a vos dataset.\n\n    args:\n        trackers - list of trackers to evaluate\n        dataset - name of the dataset\n        force - Force re-evaluation. If False, the pre-computed results are loaded if available\n    \"\"\"\n    csv_name_global = f'{dataset}_global_results.csv'\n    csv_name_per_sequence = f'{dataset}_per-sequence_results.csv'\n\n    table_g_all = []\n    table_seq_all = []\n    scores = {'J-Mean': [], 'J-Recall': [], 'J-Decay': []}\n    display_names = []\n    for t in trackers:\n        if t.display_name is not None:\n            disp_name = t.display_name\n        elif t.run_id is not None:\n            disp_name = '{} {}_{:03d}'.format(t.name, t.parameter_name, t.run_id)\n        else:\n            disp_name = '{} {}'.format(t.name, t.parameter_name)\n\n        display_names.append(disp_name)\n        results_path = t.segmentation_dir\n\n        csv_name_global_path = os.path.join(results_path, csv_name_global)\n        csv_name_per_sequence_path = os.path.join(results_path, csv_name_per_sequence)\n        if os.path.exists(csv_name_global_path) and os.path.exists(csv_name_per_sequence_path) and not force:\n            table_g = pd.read_csv(csv_name_global_path)\n            table_seq = pd.read_csv(csv_name_per_sequence_path)\n        else:\n            seq_names, dset_scores, dset_recall, dset_decay = evaluate_dataset(results_path, dataset, measure='J',\n                                                                               to_file=False, scores=False,\n                                                                               sequences=None)\n            g_measures = ['J-Mean', 'J-Recall', 'J-Decay']\n            g_res = np.array([utils.mean(dset_scores), utils.mean(dset_recall), utils.mean(dset_decay)])\n            g_res = np.reshape(g_res, [1, len(g_res)])\n\n            table_g = pd.DataFrame(data=g_res, columns=g_measures)\n            with open(csv_name_global_path, 
'w') as f:\n                table_g.to_csv(f, index=False, float_format=\"%.3f\")\n\n            seq_measures = ['Sequence', 'J-Mean', 'J-Recall', 'J-Decay']\n\n            table_seq = pd.DataFrame(data=list(zip(seq_names, dset_scores, dset_recall, dset_decay)), columns=seq_measures)\n            with open(csv_name_per_sequence_path, 'w') as f:\n                table_seq.to_csv(f, index=False, float_format=\"%.3f\")\n\n        scores['J-Mean'].append(table_g['J-Mean'].values[0]*100)\n        scores['J-Recall'].append(table_g['J-Recall'].values[0]*100)\n        scores['J-Decay'].append(table_g['J-Decay'].values[0]*100)\n\n        table_g_all.append(table_g)\n        table_seq_all.append(table_seq)\n\n    report = generate_formatted_report(display_names, scores)\n    print(report)\n\n    return table_g_all, table_seq_all\n"
  },
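  {
    "path": "external/AR/pytracking/analysis/EXAMPLE_evaluate_vos_usage.py",
    "content": "# Editor's note: hedged usage sketch, not part of the original repository.\n# Runs the VOS evaluation defined in evaluate_vos.py above. The tracker name,\n# parameter name and run ids below are placeholders; the dataset tag must be\n# one registered in pytracking.evaluation.get_dataset that provides\n# ground-truth segmentation annotations.\nfrom pytracking.evaluation import trackerlist\nfrom pytracking.analysis.evaluate_vos import evaluate_vos\n\ntrackers = trackerlist('dimp', 'dimp50_vot', range(1))  # placeholder tracker\n\n# Prints a formatted J-Mean / J-Recall / J-Decay table per tracker and returns\n# the global and per-sequence tables; force=True recomputes cached CSV results.\ntable_g, table_seq = evaluate_vos(trackers, dataset='yt2019_jjval', force=False)\n"
  },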
  {
    "path": "external/AR/pytracking/analysis/extract_results.py",
    "content": "import os\nimport sys\nimport importlib\nimport numpy as np\nfrom pytracking.utils.load_text import load_text\nimport torch\nimport pickle\nfrom tqdm import tqdm\n\nenv_path = os.path.join(os.path.dirname(__file__), '../..')\nif env_path not in sys.path:\n    sys.path.append(env_path)\n\nfrom pytracking.evaluation.environment import env_settings\n\n\ndef calc_err_center(pred_bb, anno_bb, normalized=False):\n    pred_center = pred_bb[:, :2] + 0.5 * (pred_bb[:, 2:] - 1.0)\n    anno_center = anno_bb[:, :2] + 0.5 * (anno_bb[:, 2:] - 1.0)\n\n    if normalized:\n        pred_center = pred_center / anno_bb[:, 2:]\n        anno_center = anno_center / anno_bb[:, 2:]\n\n    err_center = ((pred_center - anno_center)**2).sum(1).sqrt()\n    return err_center\n\n\ndef calc_iou_overlap(pred_bb, anno_bb):\n    tl = torch.max(pred_bb[:, :2], anno_bb[:, :2])\n    br = torch.min(pred_bb[:, :2] + pred_bb[:, 2:] - 1.0, anno_bb[:, :2] + anno_bb[:, 2:] - 1.0)\n    sz = (br - tl + 1.0).clamp(0)\n\n    # Area\n    intersection = sz.prod(dim=1)\n    union = pred_bb[:, 2:].prod(dim=1) + anno_bb[:, 2:].prod(dim=1) - intersection\n\n    return intersection / union\n\n\ndef calc_seq_err_robust(pred_bb, anno_bb, dataset, target_visible=None):\n    pred_bb = pred_bb.clone()\n\n    # Check if invalid values are present\n    if torch.isnan(pred_bb).any() or (pred_bb[:, 2:] < 0.0).any():\n        raise Exception('Error: Invalid results')\n\n    if torch.isnan(anno_bb).any():\n        if dataset == 'uav':\n            pass\n        else:\n            raise Exception('Warning: NaNs in annotation')\n\n    if (pred_bb[:, 2:] == 0.0).any():\n        for i in range(1, pred_bb.shape[0]):\n            if (pred_bb[i, 2:] == 0.0).any() and not torch.isnan(anno_bb[i, :]).any():\n                pred_bb[i, :] = pred_bb[i-1, :]\n\n    if pred_bb.shape[0] != anno_bb.shape[0]:\n        if dataset == 'lasot':\n            if pred_bb.shape[0] > anno_bb.shape[0]:\n                # For monkey-17, there is a mismatch for some trackers.\n                pred_bb = pred_bb[:anno_bb.shape[0], :]\n            else:\n                raise Exception('Mis-match in tracker prediction and GT lengths')\n        else:\n            # print('Warning: Mis-match in tracker prediction and GT lengths')\n            if pred_bb.shape[0] > anno_bb.shape[0]:\n                pred_bb = pred_bb[:anno_bb.shape[0], :]\n            else:\n                pad = torch.zeros((anno_bb.shape[0] - pred_bb.shape[0], 4)).type_as(pred_bb)\n                pred_bb = torch.cat((pred_bb, pad), dim=0)\n\n    pred_bb[0, :] = anno_bb[0, :]\n\n    if target_visible is not None:\n        target_visible = target_visible.bool()\n        valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2) & target_visible\n    else:\n        valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2)\n\n    err_center = calc_err_center(pred_bb, anno_bb)\n    err_center_normalized = calc_err_center(pred_bb, anno_bb, normalized=True)\n    err_overlap = calc_iou_overlap(pred_bb, anno_bb)\n\n    # handle invalid anno cases\n    if dataset in ['uav']:\n        err_center[~valid] = -1.0\n    else:\n        err_center[~valid] = float(\"Inf\")\n    err_center_normalized[~valid] = -1.0\n    err_overlap[~valid] = -1.0\n\n    if dataset == 'lasot':\n        err_center_normalized[~target_visible] = float(\"Inf\")\n        err_center[~target_visible] = float(\"Inf\")\n\n    if torch.isnan(err_overlap).any():\n        raise Exception('Nans in calculated overlap')\n    return err_overlap, err_center, err_center_normalized, 
valid\n\n\ndef extract_results(trackers, dataset, report_name, skip_missing_seq=False, plot_bin_gap=0.05,\n                    exclude_invalid_frames=False):\n    settings = env_settings()\n    eps = 1e-16\n\n    result_plot_path = os.path.join(settings.result_plot_path, report_name)\n\n    if not os.path.exists(result_plot_path):\n        os.makedirs(result_plot_path)\n\n    threshold_set_overlap = torch.arange(0.0, 1.0 + plot_bin_gap, plot_bin_gap, dtype=torch.float64)\n    threshold_set_center = torch.arange(0, 51, dtype=torch.float64)\n    threshold_set_center_norm = torch.arange(0, 51, dtype=torch.float64) / 100.0\n\n    avg_overlap_all = torch.zeros((len(dataset), len(trackers)), dtype=torch.float64)\n    ave_success_rate_plot_overlap = torch.zeros((len(dataset), len(trackers), threshold_set_overlap.numel()),\n                                                dtype=torch.float32)\n    ave_success_rate_plot_center = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),\n                                               dtype=torch.float32)\n    ave_success_rate_plot_center_norm = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),\n                                                    dtype=torch.float32)\n\n    valid_sequence = torch.ones(len(dataset), dtype=torch.uint8)\n\n    for seq_id, seq in enumerate(tqdm(dataset)):\n        # Load anno\n        anno_bb = torch.tensor(seq.ground_truth_rect)\n        target_visible = torch.tensor(seq.target_visible, dtype=torch.uint8) if seq.target_visible is not None else None\n        for trk_id, trk in enumerate(trackers):\n            # Load results\n            base_results_path = '{}/{}'.format(trk.results_dir, seq.name)\n            results_path = '{}.txt'.format(base_results_path)\n\n            if os.path.isfile(results_path):\n                pred_bb = torch.tensor(load_text(str(results_path), delimiter=('\\t', ','), dtype=np.float64))\n            else:\n                if skip_missing_seq:\n                    valid_sequence[seq_id] = 0\n                    break\n                else:\n                    raise Exception('Result not found. 
{}'.format(results_path))\n\n            # Calculate measures\n            err_overlap, err_center, err_center_normalized, valid_frame = calc_seq_err_robust(\n                pred_bb, anno_bb, seq.dataset, target_visible)\n\n            avg_overlap_all[seq_id, trk_id] = err_overlap[valid_frame].mean()\n\n            if exclude_invalid_frames:\n                seq_length = valid_frame.long().sum()\n            else:\n                seq_length = anno_bb.shape[0]\n\n            if seq_length <= 0:\n                raise Exception('Seq length zero')\n\n            ave_success_rate_plot_overlap[seq_id, trk_id, :] = (err_overlap.view(-1, 1) > threshold_set_overlap.view(1, -1)).sum(0).float() / seq_length\n            ave_success_rate_plot_center[seq_id, trk_id, :] = (err_center.view(-1, 1) <= threshold_set_center.view(1, -1)).sum(0).float() / seq_length\n            ave_success_rate_plot_center_norm[seq_id, trk_id, :] = (err_center_normalized.view(-1, 1) <= threshold_set_center_norm.view(1, -1)).sum(0).float() / seq_length\n\n    print('\\n\\nComputed results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))\n\n    # Prepare dictionary for saving data\n    seq_names = [s.name for s in dataset]\n    tracker_names = [{'name': t.name, 'param': t.parameter_name, 'run_id': t.run_id, 'disp_name': t.display_name}\n                     for t in trackers]\n\n    eval_data = {'sequences': seq_names, 'trackers': tracker_names,\n                 'valid_sequence': valid_sequence.tolist(),\n                 'ave_success_rate_plot_overlap': ave_success_rate_plot_overlap.tolist(),\n                 'ave_success_rate_plot_center': ave_success_rate_plot_center.tolist(),\n                 'ave_success_rate_plot_center_norm': ave_success_rate_plot_center_norm.tolist(),\n                 'avg_overlap_all': avg_overlap_all.tolist(),\n                 'threshold_set_overlap': threshold_set_overlap.tolist(),\n                 'threshold_set_center': threshold_set_center.tolist(),\n                 'threshold_set_center_norm': threshold_set_center_norm.tolist()}\n\n    with open(result_plot_path + '/eval_data.pkl', 'wb') as fh:\n        pickle.dump(eval_data, fh)\n\n    return eval_data\n"
  },
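  {
    "path": "external/AR/pytracking/analysis/EXAMPLE_extract_results_usage.py",
    "content": "# Editor's note: hedged usage sketch, not part of the original repository.\n# extract_results() above computes per-sequence overlap and center-error\n# statistics for every tracker and caches them as eval_data.pkl under\n# result_plot_path/<report_name>. Tracker and dataset names are placeholders.\nfrom pytracking.evaluation import get_dataset, trackerlist\nfrom pytracking.analysis.extract_results import extract_results\n\ntrackers = trackerlist('dimp', 'dimp50', range(1))\ndataset = get_dataset('otb')\n\n# skip_missing_seq=True flags sequences without result files as invalid\n# instead of raising, so partially finished runs can still be summarized.\neval_data = extract_results(trackers, dataset, report_name='otb_report',\n                            skip_missing_seq=True)\n"
  },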
  {
    "path": "external/AR/pytracking/analysis/playback_results.py",
    "content": "import os\nimport sys\nimport importlib\nimport numpy as np\nimport torch\nimport time\nimport matplotlib.patches as patches\nimport cv2 as cv\nimport matplotlib.pyplot as plt\nfrom pytracking.analysis.plot_results import get_plot_draw_styles\nfrom pytracking.utils.plotting import draw_figure\nfrom pytracking.evaluation import get_dataset, trackerlist\n\nenv_path = os.path.join(os.path.dirname(__file__), '../..')\nif env_path not in sys.path:\n    sys.path.append(env_path)\n\n\nclass Display:\n    def __init__(self, sequence_length, plot_draw_styles, sequence_name):\n        self.active = True\n        self.frame_number = 0\n        self.pause_mode = True\n        self.step_size = 0\n        self.step_direction = 'forward'\n        self.fig, self.ax = plt.subplots(1)\n        self.fig.canvas.mpl_connect('key_press_event', self.key_callback_fn)\n        plt.tight_layout()\n\n        self.sequence_length = sequence_length\n        self.sequence_name = sequence_name\n        self.plot_draw_styles = plot_draw_styles\n\n    def key_callback_fn(self, event):\n        if event.key == ' ':\n            self.pause_mode = not self.pause_mode\n            self.step_size = 0\n            self.step_direction = 'forward'\n        elif event.key == 'right':\n            if self.pause_mode:\n                self.frame_number += 1\n\n                if self.frame_number >= self.sequence_length:\n                    self.frame_number = self.sequence_length - 1\n            elif self.step_direction == 'stop':\n                self.step_direction = 'forward'\n                self.step_size = 0\n            elif self.step_direction == 'backward' and self.step_size == 0:\n                self.step_direction = 'stop'\n            else:\n                self.step_size += 1\n        elif event.key == 'left':\n            if self.pause_mode:\n                self.frame_number -= 1\n\n                if self.frame_number < 0:\n                    self.frame_number = 0\n            elif self.step_direction == 'stop':\n                self.step_direction = 'backward'\n                self.step_size = 0\n            elif self.step_direction == 'forward' and self.step_size == 0:\n                self.step_direction = 'stop'\n            else:\n                self.step_size -= 1\n        elif event.key == 'escape' or event.key == 'q':\n            self.active = False\n\n    def _get_speed(self):\n        delta = 0\n        if self.step_direction == 'forward':\n            delta = 2 ** abs(self.step_size)\n        elif self.step_direction == 'backward':\n            delta = -1 * 2 ** abs(self.step_size)\n\n        return delta\n\n    def step(self):\n        delta = self._get_speed()\n\n        self.frame_number += delta\n        if self.frame_number < 0:\n            self.frame_number = 0\n        elif self.frame_number >= self.sequence_length:\n            self.frame_number = self.sequence_length - 1\n\n    def show(self, image, bb_list, trackers, gt=None):\n        self.ax.cla()\n        self.ax.imshow(image)\n\n        # Draw rects\n        rect_handles = []\n        for i, bb in enumerate(bb_list):\n            rect = patches.Rectangle((bb[0], bb[1]), bb[2], bb[3], linewidth=1,\n                                     edgecolor=self.plot_draw_styles[i]['color'], facecolor='none')\n            self.ax.add_patch(rect)\n\n            rect_handles.append(patches.Rectangle((bb[0], bb[1]), bb[2], bb[3], linewidth=1,\n                                     edgecolor=self.plot_draw_styles[i]['color'],\n           
                                       facecolor=self.plot_draw_styles[i]['color'],\n                                                  label=trackers[i]))\n\n        if gt is not None:\n            rect = patches.Rectangle((gt[0], gt[1]), gt[2], gt[3], linewidth=2, edgecolor='g',\n                                     facecolor='none')\n            self.ax.add_patch(rect)\n            rect_handles.append(rect)\n\n        self.ax.set_axis_off()\n        self.ax.axis('equal')\n        plt.legend(handles=rect_handles, loc=4, borderaxespad=0.)\n        mode = 'manual' if self.pause_mode else 'auto     '\n        speed = self._get_speed()\n        self.fig.suptitle('Sequence: {}    Mode: {}    Speed: {:d}x'.format(self.sequence_name, mode, speed),\n                          fontsize=14)\n        draw_figure(self.fig)\n\n\ndef read_image(image_file: str):\n    im = cv.imread(image_file)\n    return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n\n\ndef _get_display_name(tracker):\n    if tracker.display_name is None:\n        if tracker.run_id is not None:\n            return '{}_{}_{:03d}'.format(tracker.name, tracker.parameter_name, tracker.run_id)\n        else:\n            return '{}_{}'.format(tracker.name, tracker.parameter_name)\n    else:\n        return tracker.display_name\n\n\ndef playback_results(trackers, sequence):\n    \"\"\"\n    Playback saved results of input trackers for a particular sequence. You can navigate the sequence using left/right\n    arrow keys. You can also change to 'auto' mode by pressing space bar, in which case the sequence will be replayed\n    at a particular speed. The speed for playback in 'auto' mode can be controlled using the left/right arrow keys.\n    You can exit the application using escape or q keys.\n    \"\"\"\n    plot_draw_styles = get_plot_draw_styles()\n\n    tracker_results = []\n    # Load results\n    for trk_id, trk in enumerate(trackers):\n        # Load results\n        base_results_path = '{}/{}'.format(trk.results_dir, sequence.name)\n        results_path = '{}.txt'.format(base_results_path)\n\n        if os.path.isfile(results_path):\n            try:\n                pred_bb = torch.tensor(np.loadtxt(str(results_path), dtype=np.float64))\n            except:\n                pred_bb = torch.tensor(np.loadtxt(str(results_path), delimiter=',', dtype=np.float64))\n        else:\n            raise Exception('Result not found. {}'.format(results_path))\n\n        tracker_results.append(pred_bb)\n\n    # Convert to list of shape seq_length * num_trackers * 4\n    tracker_results = torch.stack(tracker_results, dim=1).tolist()\n    tracker_names = [_get_display_name(t) for t in trackers]\n\n    display = Display(len(tracker_results), plot_draw_styles, sequence.name)\n\n    while display.active:\n        frame_number = display.frame_number\n        image = read_image(sequence.frames[frame_number])\n\n        display.show(image, tracker_results[frame_number], tracker_names)\n\n        time.sleep(0.01)\n        if display.pause_mode and display.frame_number == frame_number:\n            time.sleep(0.1)\n        elif not display.pause_mode:\n            display.step()\n\n"
  },
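  {
    "path": "external/AR/pytracking/analysis/EXAMPLE_playback_results_usage.py",
    "content": "# Editor's note: hedged usage sketch, not part of the original repository.\n# Replays saved results with playback_results() above: left/right arrow keys\n# step through frames, space toggles 'auto' playback, q or escape quits.\n# Tracker, dataset tag and sequence name are placeholders; the lookup by\n# sequence name assumes the dataset object supports string indexing.\nfrom pytracking.evaluation import get_dataset, trackerlist\nfrom pytracking.analysis.playback_results import playback_results\n\ntrackers = trackerlist('dimp', 'dimp50', range(1))\ndataset = get_dataset('otb')\nsequence = dataset['Soccer']  # placeholder sequence name\nplayback_results(trackers, sequence)\n"
  },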
  {
    "path": "external/AR/pytracking/analysis/plot_results.py",
    "content": "import tikzplotlib\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport os\nimport torch\nimport pickle\nimport json\nfrom pytracking.evaluation.environment import env_settings\nfrom pytracking.analysis.extract_results import extract_results\n\n\ndef get_plot_draw_styles():\n    plot_draw_style = [{'color': (1.0, 0.0, 0.0), 'line_style': '-'},\n                       {'color': (0.0, 1.0, 0.0), 'line_style': '-'},\n                       {'color': (0.0, 0.0, 1.0), 'line_style': '-'},\n                       {'color': (0.0, 0.0, 0.0), 'line_style': '-'},\n                       {'color': (1.0, 0.0, 1.0), 'line_style': '-'},\n                       {'color': (0.0, 1.0, 1.0), 'line_style': '-'},\n                       {'color': (0.5, 0.5, 0.5), 'line_style': '-'},\n                       {'color': (136.0 / 255.0, 0.0, 21.0 / 255.0), 'line_style': '-'},\n                       {'color': (1.0, 127.0 / 255.0, 39.0 / 255.0), 'line_style': '-'},\n                       {'color': (0.0, 162.0 / 255.0, 232.0 / 255.0), 'line_style': '-'},\n                       {'color': (0.0, 0.5, 0.0), 'line_style': '-'},\n                       {'color': (1.0, 0.5, 0.2), 'line_style': '-'},\n                       {'color': (0.1, 0.4, 0.0), 'line_style': '-'},\n                       {'color': (0.6, 0.3, 0.9), 'line_style': '-'},\n                       {'color': (0.4, 0.7, 0.1), 'line_style': '-'},\n                       {'color': (0.2, 0.1, 0.7), 'line_style': '-'},\n                       {'color': (0.7, 0.6, 0.2), 'line_style': '-'}]\n\n    return plot_draw_style\n\n\ndef check_eval_data_is_valid(eval_data, trackers, dataset):\n    \"\"\" Checks if the pre-computed results are valid\"\"\"\n    seq_names = [s.name for s in dataset]\n    seq_names_saved = eval_data['sequences']\n\n    tracker_names_f = [(t.name, t.parameter_name, t.run_id) for t in trackers]\n    tracker_names_f_saved = [(t['name'], t['param'], t['run_id']) for t in eval_data['trackers']]\n\n    return seq_names == seq_names_saved and tracker_names_f == tracker_names_f_saved\n\n\ndef merge_multiple_runs(eval_data):\n    new_tracker_names = []\n    ave_success_rate_plot_overlap_merged = []\n    ave_success_rate_plot_center_merged = []\n    ave_success_rate_plot_center_norm_merged = []\n    avg_overlap_all_merged = []\n\n    ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap'])\n    ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center'])\n    ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm'])\n    avg_overlap_all = torch.tensor(eval_data['avg_overlap_all'])\n\n    trackers = eval_data['trackers']\n    merged = torch.zeros(len(trackers), dtype=torch.uint8)\n    for i in range(len(trackers)):\n        if merged[i]:\n            continue\n        base_tracker = trackers[i]\n        new_tracker_names.append(base_tracker)\n\n        match = [t['name'] == base_tracker['name'] and t['param'] == base_tracker['param'] for t in trackers]\n        match = torch.tensor(match)\n\n        ave_success_rate_plot_overlap_merged.append(ave_success_rate_plot_overlap[:, match, :].mean(1))\n        ave_success_rate_plot_center_merged.append(ave_success_rate_plot_center[:, match, :].mean(1))\n        ave_success_rate_plot_center_norm_merged.append(ave_success_rate_plot_center_norm[:, match, :].mean(1))\n        avg_overlap_all_merged.append(avg_overlap_all[:, match].mean(1))\n\n        merged[match] = 1\n\n    
ave_success_rate_plot_overlap_merged = torch.stack(ave_success_rate_plot_overlap_merged, dim=1)\n    ave_success_rate_plot_center_merged = torch.stack(ave_success_rate_plot_center_merged, dim=1)\n    ave_success_rate_plot_center_norm_merged = torch.stack(ave_success_rate_plot_center_norm_merged, dim=1)\n    avg_overlap_all_merged = torch.stack(avg_overlap_all_merged, dim=1)\n\n    eval_data['trackers'] = new_tracker_names\n    eval_data['ave_success_rate_plot_overlap'] = ave_success_rate_plot_overlap_merged.tolist()\n    eval_data['ave_success_rate_plot_center'] = ave_success_rate_plot_center_merged.tolist()\n    eval_data['ave_success_rate_plot_center_norm'] = ave_success_rate_plot_center_norm_merged.tolist()\n    eval_data['avg_overlap_all'] = avg_overlap_all_merged.tolist()\n\n    return eval_data\n\n\ndef get_tracker_display_name(tracker):\n    if tracker['disp_name'] is None:\n        if tracker['run_id'] is None:\n            disp_name = '{}_{}'.format(tracker['name'], tracker['param'])\n        else:\n            disp_name = '{}_{}_{:03d}'.format(tracker['name'], tracker['param'],\n                                              tracker['run_id'])\n    else:\n        disp_name = tracker['disp_name']\n\n    return  disp_name\n\n\ndef plot_draw_save(y, x, scores, trackers, plot_draw_styles, result_plot_path, plot_opts):\n    # Plot settings\n    font_size = plot_opts.get('font_size', 12)\n    font_size_axis = plot_opts.get('font_size_axis', 13)\n    line_width = plot_opts.get('line_width', 2)\n    font_size_legend = plot_opts.get('font_size_legend', 13)\n\n    plot_type = plot_opts['plot_type']\n    legend_loc = plot_opts['legend_loc']\n\n    xlabel = plot_opts['xlabel']\n    ylabel = plot_opts['ylabel']\n    xlim = plot_opts['xlim']\n    ylim = plot_opts['ylim']\n\n    title = plot_opts['title']\n\n    matplotlib.rcParams.update({'font.size': font_size})\n    matplotlib.rcParams.update({'axes.titlesize': font_size_axis})\n    matplotlib.rcParams.update({'axes.titleweight': 'black'})\n    matplotlib.rcParams.update({'axes.labelsize': font_size_axis})\n\n    fig, ax = plt.subplots()\n\n    index_sort = scores.argsort(descending=False)\n\n    plotted_lines = []\n    legend_text = []\n\n    for id, id_sort in enumerate(index_sort):\n        line = ax.plot(x.tolist(), y[id_sort, :].tolist(),\n                       linewidth=line_width,\n                       color=plot_draw_styles[index_sort.numel() - id - 1]['color'],\n                       linestyle=plot_draw_styles[index_sort.numel() - id - 1]['line_style'])\n\n        plotted_lines.append(line[0])\n\n        tracker = trackers[id_sort]\n        disp_name = get_tracker_display_name(tracker)\n\n        legend_text.append('{} [{:.1f}]'.format(disp_name, scores[id_sort]))\n\n    ax.legend(plotted_lines[::-1], legend_text[::-1], loc=legend_loc, fancybox=False, edgecolor='black',\n              fontsize=font_size_legend, framealpha=1.0)\n\n    ax.set(xlabel=xlabel,\n           ylabel=ylabel,\n           xlim=xlim, ylim=ylim,\n           title=title)\n\n    ax.grid(True, linestyle='-.')\n    fig.tight_layout()\n\n    tikzplotlib.save('{}/{}_plot.tex'.format(result_plot_path, plot_type))\n    fig.savefig('{}/{}_plot.pdf'.format(result_plot_path, plot_type), dpi=300, format='pdf', transparent=True)\n    plt.draw()\n\n\ndef check_and_load_precomputed_results(trackers, dataset, report_name, force_evaluation=False, **kwargs):\n    # Load data\n    settings = env_settings()\n\n    # Load pre-computed results\n    result_plot_path = 
os.path.join(settings.result_plot_path, report_name)\n    eval_data_path = os.path.join(result_plot_path, 'eval_data.pkl')\n\n    if os.path.isfile(eval_data_path) and not force_evaluation:\n        with open(eval_data_path, 'rb') as fh:\n            eval_data = pickle.load(fh)\n    else:\n        # print('Pre-computed evaluation data not found. Computing results!')\n        eval_data = extract_results(trackers, dataset, report_name, **kwargs)\n\n    if not check_eval_data_is_valid(eval_data, trackers, dataset):\n        # print('Pre-computed evaluation data invalid. Re-computing results!')\n        eval_data = extract_results(trackers, dataset, report_name, **kwargs)\n    else:\n        # Update display names\n        tracker_names = [{'name': t.name, 'param': t.parameter_name, 'run_id': t.run_id, 'disp_name': t.display_name}\n                         for t in trackers]\n        eval_data['trackers'] = tracker_names\n\n    return eval_data\n\n\ndef get_auc_curve(ave_success_rate_plot_overlap, valid_sequence):\n    ave_success_rate_plot_overlap = ave_success_rate_plot_overlap[valid_sequence, :, :]\n    auc_curve = ave_success_rate_plot_overlap.mean(0) * 100.0\n    auc = auc_curve.mean(-1)\n\n    return auc_curve, auc\n\n\ndef get_prec_curve(ave_success_rate_plot_center, valid_sequence):\n    ave_success_rate_plot_center = ave_success_rate_plot_center[valid_sequence, :, :]\n    prec_curve = ave_success_rate_plot_center.mean(0) * 100.0\n    prec_score = prec_curve[:, 20]\n\n    return prec_curve, prec_score\n\n\ndef plot_results(trackers, dataset, report_name, merge_results=False,\n                 plot_types=('success'), force_evaluation=False, **kwargs):\n    \"\"\"\n    Plot results for the given trackers\n\n    args:\n        trackers - List of trackers to evaluate\n        dataset - List of sequences to evaluate\n        report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved\n        merge_results - If True, multiple random runs for a non-deterministic trackers are averaged\n        plot_types - List of scores to display. 
Can contain 'success',\n                    'prec' (precision), and 'norm_prec' (normalized precision)\n    \"\"\"\n    # Load data\n    settings = env_settings()\n\n    plot_draw_styles = get_plot_draw_styles()\n\n    # Load pre-computed results\n    result_plot_path = os.path.join(settings.result_plot_path, report_name)\n    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, force_evaluation, **kwargs)\n\n    # Merge results from multiple runs\n    if merge_results:\n        eval_data = merge_multiple_runs(eval_data)\n\n    tracker_names = eval_data['trackers']\n\n    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)\n\n    print('\\nPlotting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))\n\n    print('\\nGenerating plots for: {}'.format(report_name))\n\n    # ********************************  Success Plot **************************************\n    if 'success' in plot_types:\n        ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap'])\n\n        # Index out valid sequences\n        auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence)\n        threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap'])\n\n        success_plot_opts = {'plot_type': 'success', 'legend_loc': 'lower left', 'xlabel': 'Overlap threshold',\n                             'ylabel': 'Overlap Precision [%]', 'xlim': (0, 1.0), 'ylim': (0, 100), 'title': 'Success plot'}\n        plot_draw_save(auc_curve, threshold_set_overlap, auc, tracker_names, plot_draw_styles, result_plot_path, success_plot_opts)\n\n    # ********************************  Precision Plot **************************************\n    if 'prec' in plot_types:\n        ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center'])\n\n        # Index out valid sequences\n        prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence)\n        threshold_set_center = torch.tensor(eval_data['threshold_set_center'])\n\n        precision_plot_opts = {'plot_type': 'precision', 'legend_loc': 'lower right',\n                               'xlabel': 'Location error threshold [pixels]', 'ylabel': 'Distance Precision [%]',\n                               'xlim': (0, 50), 'ylim': (0, 100), 'title': 'Precision plot'}\n        plot_draw_save(prec_curve, threshold_set_center, prec_score, tracker_names, plot_draw_styles, result_plot_path,\n                       precision_plot_opts)\n\n    # ********************************  Norm Precision Plot **************************************\n    if 'norm_prec' in plot_types:\n        ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm'])\n\n        # Index out valid sequences\n        prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center_norm, valid_sequence)\n        threshold_set_center_norm = torch.tensor(eval_data['threshold_set_center_norm'])\n\n        norm_precision_plot_opts = {'plot_type': 'norm_precision', 'legend_loc': 'lower right',\n                                    'xlabel': 'Location error threshold', 'ylabel': 'Distance Precision [%]',\n                                    'xlim': (0, 0.5), 'ylim': (0, 100), 'title': 'Normalized Precision plot'}\n        plot_draw_save(prec_curve, threshold_set_center_norm, prec_score, tracker_names, plot_draw_styles, result_plot_path,\n                       norm_precision_plot_opts)\n\n 
   plt.show()\n\n\ndef generate_formatted_report(row_labels, scores, table_name=''):\n    name_width = max([len(d) for d in row_labels] + [len(table_name)]) + 5\n    min_score_width = 10\n\n    report_text = '\\n{label: <{width}} |'.format(label=table_name, width=name_width)\n\n    score_widths = [max(min_score_width, len(k) + 3) for k in scores.keys()]\n\n    for s, s_w in zip(scores.keys(), score_widths):\n        report_text = '{prev} {s: <{width}} |'.format(prev=report_text, s=s, width=s_w)\n\n    report_text = '{prev}\\n'.format(prev=report_text)\n\n    for trk_id, d_name in enumerate(row_labels):\n        # display name\n        report_text = '{prev}{tracker: <{width}} |'.format(prev=report_text, tracker=d_name,\n                                                           width=name_width)\n        for (score_type, score_value), s_w in zip(scores.items(), score_widths):\n            report_text = '{prev} {score: <{width}} |'.format(prev=report_text,\n                                                              score='{:0.2f}'.format(score_value[trk_id].item()),\n                                                              width=s_w)\n        report_text = '{prev}\\n'.format(prev=report_text)\n\n    return report_text\n\n\ndef print_results(trackers, dataset, report_name, merge_results=False,\n                  plot_types=('success'), **kwargs):\n    \"\"\" Print the results for the given trackers in a formatted table\n    args:\n        trackers - List of trackers to evaluate\n        dataset - List of sequences to evaluate\n        report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved\n        merge_results - If True, multiple random runs for a non-deterministic trackers are averaged\n        plot_types - List of scores to display. 
Can contain 'success' (prints AUC, OP50, and OP75 scores),\n                    'prec' (prints precision score), and 'norm_prec' (prints normalized precision score)\n    \"\"\"\n    # Load pre-computed results\n    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs)\n\n    # Merge results from multiple runs\n    if merge_results:\n        eval_data = merge_multiple_runs(eval_data)\n\n    tracker_names = eval_data['trackers']\n    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)\n\n    print('\\nReporting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))\n\n    scores = {}\n\n    # ********************************  Success Plot **************************************\n    if 'success' in plot_types:\n        threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap'])\n        ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap'])\n\n        # Index out valid sequences\n        auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence)\n        scores['AUC'] = auc\n        scores['OP50'] = auc_curve[:, threshold_set_overlap == 0.50]\n        scores['OP75'] = auc_curve[:, threshold_set_overlap == 0.75]\n\n    # ********************************  Precision Plot **************************************\n    if 'prec' in plot_types:\n        ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center'])\n\n        # Index out valid sequences\n        prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence)\n        scores['Precision'] = prec_score\n\n    # ********************************  Norm Precision Plot *********************************\n    if 'norm_prec' in plot_types:\n        ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm'])\n\n        # Index out valid sequences\n        norm_prec_curve, norm_prec_score = get_prec_curve(ave_success_rate_plot_center_norm, valid_sequence)\n        scores['Norm Precision'] = norm_prec_score\n\n    # Print\n    tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names]\n    report_text = generate_formatted_report(tracker_disp_names, scores, table_name=report_name)\n    print(report_text)\n\n\ndef plot_got_success(trackers, report_name):\n    \"\"\" Plot success plot for GOT-10k dataset using the json reports.\n    Save the json reports from http://got-10k.aitestunion.com/leaderboard in the directory set to\n    env_settings.got_reports_path\n\n    The tracker name in the experiment file should be set to the name of the report file for that tracker,\n    e.g. 
DiMP50_report_2019_09_02_15_44_25 if the report is name DiMP50_report_2019_09_02_15_44_25.json\n\n    args:\n        trackers - List of trackers to evaluate\n        report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved\n    \"\"\"\n    # Load data\n    settings = env_settings()\n    plot_draw_styles = get_plot_draw_styles()\n\n    result_plot_path = os.path.join(settings.result_plot_path, report_name)\n\n    auc_curve = torch.zeros((len(trackers), 101))\n    scores = torch.zeros(len(trackers))\n\n    # Load results\n    tracker_names = []\n    for trk_id, trk in enumerate(trackers):\n        json_path = '{}/{}.json'.format(settings.got_reports_path, trk.name)\n\n        if os.path.isfile(json_path):\n            with open(json_path, 'r') as f:\n                eval_data = json.load(f)\n        else:\n            raise Exception('Report not found {}'.format(json_path))\n\n        if len(eval_data.keys()) > 1:\n            raise Exception\n\n        # First field is the tracker name. Index it out\n        eval_data = eval_data[list(eval_data.keys())[0]]\n        if 'succ_curve' in eval_data.keys():\n            curve = eval_data['succ_curve']\n            ao = eval_data['ao']\n        elif 'overall' in eval_data.keys() and 'succ_curve' in eval_data['overall'].keys():\n            curve = eval_data['overall']['succ_curve']\n            ao = eval_data['overall']['ao']\n        else:\n            raise Exception('Invalid JSON file {}'.format(json_path))\n\n        auc_curve[trk_id, :] = torch.tensor(curve) * 100.0\n        scores[trk_id] = ao * 100.0\n\n        tracker_names.append({'name': trk.name, 'param': trk.parameter_name, 'run_id': trk.run_id,\n                              'disp_name': trk.display_name})\n\n    threshold_set_overlap = torch.arange(0.0, 1.01, 0.01, dtype=torch.float64)\n\n    success_plot_opts = {'plot_type': 'success', 'legend_loc': 'lower left', 'xlabel': 'Overlap threshold',\n                         'ylabel': 'Overlap Precision [%]', 'xlim': (0, 1.0), 'ylim': (0, 100), 'title': 'Success plot'}\n    plot_draw_save(auc_curve, threshold_set_overlap, scores, tracker_names, plot_draw_styles, result_plot_path,\n                   success_plot_opts)\n    plt.show()\n\n\ndef print_per_sequence_results(trackers, dataset, report_name, merge_results=False,\n                               filter_criteria=None, **kwargs):\n    \"\"\" Print per-sequence results for the given trackers. Additionally, the sequences to list can be filtered using\n    the filter criteria.\n\n    args:\n        trackers - List of trackers to evaluate\n        dataset - List of sequences to evaluate\n        report_name - Name of the folder in env_settings.perm_mat_path where the computed results and plots are saved\n        merge_results - If True, multiple random runs for a non-deterministic trackers are averaged\n        filter_criteria - Filter sequence results which are reported. Following modes are supported\n                        None: No filtering. Display results for all sequences in dataset\n                        'ao_min': Only display sequences for which the minimum average overlap (AO) score over the\n                                  trackers is less than a threshold filter_criteria['threshold']. 
This mode can\n                                  be used to select sequences where at least one tracker performs poorly.\n                        'ao_max': Only display sequences for which the maximum average overlap (AO) score over the\n                                  trackers is less than a threshold filter_criteria['threshold']. This mode can\n                                  be used to select sequences all tracker performs poorly.\n                        'delta_ao': Only display sequences for which the performance of different trackers vary by at\n                                    least filter_criteria['threshold'] in average overlap (AO) score. This mode can\n                                    be used to select sequences where the behaviour of the trackers greatly differ\n                                    between each other.\n    \"\"\"\n    # Load pre-computed results\n    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs)\n\n    # Merge results from multiple runs\n    if merge_results:\n        eval_data = merge_multiple_runs(eval_data)\n\n    tracker_names = eval_data['trackers']\n    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)\n    sequence_names = eval_data['sequences']\n    avg_overlap_all = torch.tensor(eval_data['avg_overlap_all']) * 100.0\n\n    # Filter sequences\n    if filter_criteria is not None:\n        if filter_criteria['mode'] == 'ao_min':\n            min_ao = avg_overlap_all.min(dim=1)[0]\n            valid_sequence = valid_sequence & (min_ao < filter_criteria['threshold'])\n        elif filter_criteria['mode'] == 'ao_max':\n            max_ao = avg_overlap_all.max(dim=1)[0]\n            valid_sequence = valid_sequence & (max_ao < filter_criteria['threshold'])\n        elif filter_criteria['mode'] == 'delta_ao':\n            min_ao = avg_overlap_all.min(dim=1)[0]\n            max_ao = avg_overlap_all.max(dim=1)[0]\n            valid_sequence = valid_sequence & ((max_ao - min_ao) > filter_criteria['threshold'])\n        else:\n            raise Exception\n\n    avg_overlap_all = avg_overlap_all[valid_sequence, :]\n    sequence_names = [s + ' (ID={})'.format(i) for i, (s, v) in enumerate(zip(sequence_names, valid_sequence.tolist())) if v]\n\n    tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names]\n\n    scores_per_tracker = {k: avg_overlap_all[:, i] for i, k in enumerate(tracker_disp_names)}\n    report_text = generate_formatted_report(sequence_names, scores_per_tracker)\n\n    print(report_text)\n"
  },
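  {
    "path": "external/AR/pytracking/analysis/example_print_results.py",
    "content": "# Illustrative usage sketch, added for documentation; not part of the original\n# code base. It assumes the analysis helpers above live in\n# pytracking.analysis.plot_results (as in upstream pytracking), that the dataset\n# paths in local.py are configured, and that tracking results for the two\n# placeholder configurations ('dimp'/'dimp18' and 'dimp'/'dimp50') already\n# exist on disk so that check_and_load_precomputed_results can find them.\nfrom pytracking.evaluation import get_dataset, trackerlist\nfrom pytracking.analysis.plot_results import print_per_sequence_results\n\ntrackers = trackerlist('dimp', 'dimp18') + trackerlist('dimp', 'dimp50')  # placeholder configs\ndataset = get_dataset('otb')\n\n# List only sequences where the two trackers' AO scores differ by more than 10 points.\nprint_per_sequence_results(trackers, dataset, report_name='example_report',\n                           filter_criteria={'mode': 'delta_ao', 'threshold': 10.0})\n"
  },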
  {
    "path": "external/AR/pytracking/analysis/vos_utils.py",
    "content": "import warnings\nimport numpy as np\nfrom skimage.morphology import binary_dilation, disk\nfrom math import floor\n\n\ndef text_bargraph(values):\n\n    blocks = np.array(('u', ' ', '▁', '▂', '▃', '▄', '▅', '▆', '▇', '█', 'o'))\n    nsteps = len(blocks)-2-1\n    hstep = 1 / (2*nsteps)\n    values = np.array(values)\n    nans = np.isnan(values)\n    values[nans] = 0  # '░'\n    indices = ((values + hstep) * nsteps + 1).astype(np.int)\n    indices[values < 0] = 0\n    indices[values > 1] = len(blocks)-1\n    graph = blocks[indices]\n    graph[nans] = '░'\n    graph = str.join('', graph)\n    return graph\n\n\n# ----------------------------------------------------------------------------\n# The 2017 DAVIS Challenge on Video Object Segmentation\n# -----------------------------------------------------------------------------\n# Copyright (c) 2017 Federico Perazzi\n# Licensed under the BSD License [see LICENSE for details]\n# Written by Federico Perazzi (federico@disneyresearch.com)\n# Adapted from DAVIS 2016 (Federico Perazzi)\n# ----------------------------------------------------------------------------\n\n# Originally db_eval_iou() in the davis challenge toolkit:\ndef davis_jaccard_measure(fg_mask, gt_mask):\n    \"\"\" Compute region similarity as the Jaccard Index.\n\n    :param fg_mask: (ndarray): binary segmentation map.\n    :param gt_mask: (ndarray): binary annotation map.\n    :return: jaccard (float): region similarity\n    \"\"\"\n\n    gt_mask = gt_mask.astype(np.bool)\n    fg_mask = fg_mask.astype(np.bool)\n\n    if np.isclose(np.sum(gt_mask), 0) and np.isclose(np.sum(fg_mask), 0):\n        return 1\n    else:\n        return np.sum((gt_mask & fg_mask)) / \\\n               np.sum((gt_mask | fg_mask), dtype=np.float32)\n\n\ndef davis_jaccard_measure_torch(fg_mask, gt_mask):\n    \"\"\" Compute region similarity as the Jaccard Index.\n\n    :param fg_mask: (ndarray): binary segmentation map.\n    :param gt_mask: (ndarray): binary annotation map.\n    :return: jaccard (float): region similarity\n    \"\"\"\n\n    #gt_mask = gt_mask.astype(np.bool)\n    #fg_mask = fg_mask.astype(np.bool)\n\n    if gt_mask.sum() == 0 and fg_mask.sum() == 0:\n        return 1\n    else:\n        return (gt_mask & fg_mask).sum() / \\\n               (gt_mask | fg_mask).sum().float()\n\n# Originally db_eval_boundary() in the davis challenge toolkit:\ndef davis_f_measure(foreground_mask, gt_mask, bound_th=0.008):\n    \"\"\"\n    Compute mean,recall and decay from per-frame evaluation.\n    Calculates precision/recall for boundaries between foreground_mask and\n    gt_mask using morphological operators to speed it up.\n\n    Arguments:\n        foreground_mask (ndarray): binary segmentation image.\n        gt_mask         (ndarray): binary annotated image.\n\n    Returns:\n        F (float): boundaries F-measure\n        P (float): boundaries precision\n        R (float): boundaries recall\n    \"\"\"\n    assert np.atleast_3d(foreground_mask).shape[2] == 1\n\n    bound_pix = bound_th if bound_th >= 1 else \\\n        np.ceil(bound_th * np.linalg.norm(foreground_mask.shape))\n\n    # Get the pixel boundaries of both masks\n    fg_boundary = seg2bmap(foreground_mask)\n    gt_boundary = seg2bmap(gt_mask)\n\n    fg_dil = binary_dilation(fg_boundary, disk(bound_pix))\n    gt_dil = binary_dilation(gt_boundary, disk(bound_pix))\n\n    # Get the intersection\n    gt_match = gt_boundary * fg_dil\n    fg_match = fg_boundary * gt_dil\n\n    # Area of the intersection\n    n_fg = 
np.sum(fg_boundary)\n    n_gt = np.sum(gt_boundary)\n\n    # Compute precision and recall\n    if n_fg == 0 and n_gt > 0:\n        precision = 1\n        recall = 0\n    elif n_fg > 0 and n_gt == 0:\n        precision = 0\n        recall = 1\n    elif n_fg == 0 and n_gt == 0:\n        precision = 1\n        recall = 1\n    else:\n        precision = np.sum(fg_match) / float(n_fg)\n        recall = np.sum(gt_match) / float(n_gt)\n\n    # Compute F measure\n    if precision + recall == 0:\n        F = 0\n    else:\n        F = 2 * precision * recall / (precision + recall)\n\n    return F\n\n\ndef seg2bmap(seg, width=None, height=None):\n    \"\"\"\n    From a segmentation, compute a binary boundary map with 1 pixel wide\n    boundaries.  The boundary pixels are offset by 1/2 pixel towards the\n    origin from the actual segment boundary.\n\n    Arguments:\n        seg     : Segments labeled from 1..k.\n        width   : Width of desired bmap  <= seg.shape[1]\n        height  : Height of desired bmap <= seg.shape[0]\n\n    Returns:\n        bmap (ndarray): Binary boundary map.\n\n    David Martin <dmartin@eecs.berkeley.edu>\n    January 2003\n    \"\"\"\n\n    seg = seg.astype(bool)\n    seg[seg > 0] = 1\n\n    assert np.atleast_3d(seg).shape[2] == 1\n\n    width = seg.shape[1] if width is None else width\n    height = seg.shape[0] if height is None else height\n\n    h, w = seg.shape[:2]\n\n    ar1 = float(width) / float(height)\n    ar2 = float(w) / float(h)\n\n    # 'or' rather than bitwise '|': with '|' the expression chains the comparisons\n    # incorrectly due to operator precedence and never performs the intended check.\n    assert not (width > w or height > h or abs(ar1 - ar2) > 0.01), \\\n        \"Can't convert %dx%d seg to %dx%d bmap.\" % (w, h, width, height)\n\n    e = np.zeros_like(seg)\n    s = np.zeros_like(seg)\n    se = np.zeros_like(seg)\n\n    e[:, :-1] = seg[:, 1:]\n    s[:-1, :] = seg[1:, :]\n    se[:-1, :-1] = seg[1:, 1:]\n\n    b = seg ^ e | seg ^ s | seg ^ se\n    b[-1, :] = seg[-1, :] ^ e[-1, :]\n    b[:, -1] = seg[:, -1] ^ s[:, -1]\n    b[-1, -1] = 0\n\n    if w == width and h == height:\n        bmap = b\n    else:\n        bmap = np.zeros((height, width))\n        for x in range(w):\n            for y in range(h):\n                if b[y, x]:\n                    # Rescale boundary pixel coordinates to the requested bmap size\n                    # (multiplicative scaling, as in David Martin's original code).\n                    j = 1 + floor((y - 1) * height / h)\n                    i = 1 + floor((x - 1) * width / w)\n                    bmap[j, i] = 1\n\n    return bmap\n\n\ndef nanmean(*args, **kwargs):\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n        return np.nanmean(*args, **kwargs)\n\n\ndef mean(X):\n    \"\"\"\n    Compute average ignoring NaN values.\n    \"\"\"\n    return np.nanmean(X)\n\n\ndef recall(X, threshold=0.5):\n    \"\"\"\n    Fraction of values of X scoring higher than 'threshold'\n    \"\"\"\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n        x = X[~np.isnan(X)]\n        x = mean(x > threshold)\n    return x\n\n\ndef decay(X, n_bins=4):\n    \"\"\"\n    Performance loss over time.\n    \"\"\"\n    X = X[~np.isnan(X)]\n    ids = np.round(np.linspace(1, len(X), n_bins + 1) + 1e-10) - 1\n    ids = ids.astype(int)  # uint8 would overflow for sequences longer than 255 frames\n\n    D_bins = [X[ids[i]:ids[i + 1] + 1] for i in range(n_bins)]\n\n    with warnings.catch_warnings():\n        warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n        D = np.nanmean(D_bins[0]) - np.nanmean(D_bins[-1])\n    return D\n\n\ndef std(X):\n    \"\"\"\n    Compute standard deviation, ignoring NaN values.\n    \"\"\"\n    return np.nanstd(X)\n"
  },
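  {
    "path": "external/AR/pytracking/analysis/vos_utils_example.py",
    "content": "# Minimal sketch (added for illustration; not shipped with the toolkit) showing\n# the DAVIS J (region) and F (boundary) measures from vos_utils on toy masks.\nimport numpy as np\nfrom pytracking.analysis.vos_utils import davis_jaccard_measure, davis_f_measure\n\ngt = np.zeros((64, 64), dtype=np.uint8)\ngt[16:48, 16:48] = 1                 # ground-truth square\npred = np.zeros_like(gt)\npred[20:48, 16:48] = 1               # prediction shifted down by 4 pixels\n\nj = davis_jaccard_measure(pred, gt)  # intersection-over-union of the two masks\nf = davis_f_measure(pred, gt)        # F-score of boundary precision/recall\nprint('J = %.3f, F = %.3f' % (j, f))\n"
  },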
  {
    "path": "external/AR/pytracking/evaluation/__init__.py",
    "content": "from .data import Sequence\nfrom .tracker import Tracker, trackerlist\nfrom .datasets import get_dataset"
  },
  {
    "path": "external/AR/pytracking/evaluation/data.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.environment import env_settings\nfrom ltr.data.image_loader import imread_indexed\nfrom collections import OrderedDict\n\n\nclass BaseDataset:\n    \"\"\"Base class for all datasets.\"\"\"\n    def __init__(self):\n        self.env_settings = env_settings()\n\n    def __len__(self):\n        \"\"\"Overload this function in your dataset. This should return number of sequences in the dataset.\"\"\"\n        raise NotImplementedError\n\n    def get_sequence_list(self):\n        \"\"\"Overload this in your dataset. Should return the list of sequences in the dataset.\"\"\"\n        raise NotImplementedError\n\n\nclass Sequence:\n    \"\"\"Class for the sequence in an evaluation.\"\"\"\n    def __init__(self, name, frames, dataset, ground_truth_rect, ground_truth_seg=None, init_data=None,\n                 object_class=None, target_visible=None, object_ids=None, multiobj_mode=False):\n        self.name = name\n        self.frames = frames\n        self.dataset = dataset\n        self.ground_truth_rect = ground_truth_rect\n        self.ground_truth_seg = ground_truth_seg\n        self.object_class = object_class\n        self.target_visible = target_visible\n        self.object_ids = object_ids\n        self.multiobj_mode = multiobj_mode\n        self.init_data = self._construct_init_data(init_data)\n        self._ensure_start_frame()\n\n    def _ensure_start_frame(self):\n        # Ensure start frame is 0\n        start_frame = min(list(self.init_data.keys()))\n        if start_frame > 0:\n            self.frames = self.frames[start_frame:]\n            if self.ground_truth_rect is not None:\n                if isinstance(self.ground_truth_rect, (dict, OrderedDict)):\n                    for obj_id, gt in self.ground_truth_rect.items():\n                        self.ground_truth_rect[obj_id] = gt[start_frame:,:]\n                else:\n                    self.ground_truth_rect = self.ground_truth_rect[start_frame:,:]\n            if self.ground_truth_seg is not None:\n                self.ground_truth_seg = self.ground_truth_seg[start_frame:]\n                assert len(self.frames) == len(self.ground_truth_seg)\n\n            if self.target_visible is not None:\n                self.target_visible = self.target_visible[start_frame:]\n            self.init_data = {frame-start_frame: val for frame, val in self.init_data.items()}\n\n    def _construct_init_data(self, init_data):\n        if init_data is not None:\n            if not self.multiobj_mode:\n                assert self.object_ids is None or len(self.object_ids) == 1\n                for frame, init_val in init_data.items():\n                    if 'bbox' in init_val and isinstance(init_val['bbox'], (dict, OrderedDict)):\n                        init_val['bbox'] = init_val['bbox'][self.object_ids[0]]\n            # convert to list\n            for frame, init_val in init_data.items():\n                if 'bbox' in init_val:\n                    if isinstance(init_val['bbox'], (dict, OrderedDict)):\n                        init_val['bbox'] = OrderedDict({obj_id: list(init) for obj_id, init in init_val['bbox'].items()})\n                    else:\n                        init_val['bbox'] = list(init_val['bbox'])\n        else:\n            init_data = {0: dict()}     # Assume start from frame 0\n\n            if self.object_ids is not None:\n                init_data[0]['object_ids'] = self.object_ids\n\n            if self.ground_truth_rect is not None:\n                if 
self.multiobj_mode:\n                    assert isinstance(self.ground_truth_rect, (dict, OrderedDict))\n                    init_data[0]['bbox'] = OrderedDict({obj_id: list(gt[0,:]) for obj_id, gt in self.ground_truth_rect.items()})\n                else:\n                    assert self.object_ids is None or len(self.object_ids) == 1\n                    if isinstance(self.ground_truth_rect, (dict, OrderedDict)):\n                        init_data[0]['bbox'] = list(self.ground_truth_rect[self.object_ids[0]][0, :])\n                    else:\n                        init_data[0]['bbox'] = list(self.ground_truth_rect[0,:])\n\n            if self.ground_truth_seg is not None:\n                init_data[0]['mask'] = self.ground_truth_seg[0]\n\n        return init_data\n\n    def init_info(self):\n        info = self.frame_info(frame_num=0)\n        return info\n\n    def frame_info(self, frame_num):\n        info = self.object_init_data(frame_num=frame_num)\n        return info\n\n    def init_bbox(self, frame_num=0):\n        return self.object_init_data(frame_num=frame_num).get('init_bbox')\n\n    def init_mask(self, frame_num=0):\n        return self.object_init_data(frame_num=frame_num).get('init_mask')\n\n    def get_info(self, keys, frame_num=None):\n        info = dict()\n        for k in keys:\n            val = self.get(k, frame_num=frame_num)\n            if val is not None:\n                info[k] = val\n        return info\n\n    def object_init_data(self, frame_num=None) -> dict:\n        if frame_num is None:\n            frame_num = 0\n        if frame_num not in self.init_data:\n            return dict()\n\n        init_data = dict()\n        for key, val in self.init_data[frame_num].items():\n            if val is None:\n                continue\n            init_data['init_'+key] = val\n\n        if 'init_mask' in init_data and init_data['init_mask'] is not None:\n            anno = imread_indexed(init_data['init_mask'])\n            if not self.multiobj_mode and self.object_ids is not None:\n                assert len(self.object_ids) == 1\n                anno = (anno == int(self.object_ids[0])).astype(np.uint8)\n            init_data['init_mask'] = anno\n\n        if self.object_ids is not None:\n            init_data['object_ids'] = self.object_ids\n            init_data['sequence_object_ids'] = self.object_ids\n\n        return init_data\n\n    def target_class(self, frame_num=None):\n        return self.object_class\n\n    def get(self, name, frame_num=None):\n        return getattr(self, name)(frame_num)\n\n    def __repr__(self):\n        return \"{self.__class__.__name__} {self.name}, length={len} frames\".format(self=self, len=len(self.frames))\n\n\n\nclass SequenceList(list):\n    \"\"\"List of sequences. 
Supports the addition operator to concatenate sequence lists.\"\"\"\n    def __getitem__(self, item):\n        if isinstance(item, str):\n            for seq in self:\n                if seq.name == item:\n                    return seq\n            raise IndexError('Sequence name not in the dataset.')\n        elif isinstance(item, int):\n            return super(SequenceList, self).__getitem__(item)\n        elif isinstance(item, (tuple, list)):\n            return SequenceList([super(SequenceList, self).__getitem__(i) for i in item])\n        else:\n            return SequenceList(super(SequenceList, self).__getitem__(item))\n\n    def __add__(self, other):\n        return SequenceList(super(SequenceList, self).__add__(other))\n\n    def copy(self):\n        return SequenceList(super(SequenceList, self).copy())"
  },
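  {
    "path": "external/AR/pytracking/evaluation/data_example.py",
    "content": "# Illustration only (not part of the original code base): building a Sequence by\n# hand and querying its frame-0 init info the way the evaluation loop does.\n# Run from within the external/AR tree so that pytracking and ltr are importable.\nimport numpy as np\nfrom pytracking.evaluation.data import Sequence\n\nframes = ['frame_%04d.jpg' % i for i in range(5)]        # dummy frame paths\ngt = np.array([[10, 20, 50, 40]] * 5, dtype=np.float64)  # one x,y,w,h box per frame\n\nseq = Sequence('toy_seq', frames, 'toy_dataset', gt)\n\nprint(seq)              # Sequence toy_seq, length=5 frames\nprint(seq.init_bbox())  # [10.0, 20.0, 50.0, 40.0], taken from gt[0] by _construct_init_data\nprint(seq.init_info())  # {'init_bbox': [10.0, 20.0, 50.0, 40.0]}\n"
  },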
  {
    "path": "external/AR/pytracking/evaluation/datasets.py",
    "content": "from collections import namedtuple\nimport importlib\nfrom pytracking.evaluation.data import SequenceList\n\nDatasetInfo = namedtuple('DatasetInfo', ['module', 'class_name', 'kwargs'])\n\npt = \"pytracking.evaluation.%sdataset\"  # Useful abbreviations to reduce the clutter\n\ndataset_dict = dict(\n    otb=DatasetInfo(module=pt % \"otb\", class_name=\"OTBDataset\", kwargs=dict()),\n    nfs=DatasetInfo(module=pt % \"nfs\", class_name=\"NFSDataset\", kwargs=dict()),\n    uav=DatasetInfo(module=pt % \"uav\", class_name=\"UAVDataset\", kwargs=dict()),\n    tpl=DatasetInfo(module=pt % \"tpl\", class_name=\"TPLDataset\", kwargs=dict()),\n    tpl_nootb=DatasetInfo(module=pt % \"tpl\", class_name=\"TPLDataset\", kwargs=dict(exclude_otb=True)),\n    vot=DatasetInfo(module=pt % \"vot\", class_name=\"VOTDataset\", kwargs=dict()),\n    trackingnet=DatasetInfo(module=pt % \"trackingnet\", class_name=\"TrackingNetDataset\", kwargs=dict()),\n    got10k_test=DatasetInfo(module=pt % \"got10k\", class_name=\"GOT10KDataset\", kwargs=dict(split='test')),\n    got10k_val=DatasetInfo(module=pt % \"got10k\", class_name=\"GOT10KDataset\", kwargs=dict(split='val')),\n    got10k_ltrval=DatasetInfo(module=pt % \"got10k\", class_name=\"GOT10KDataset\", kwargs=dict(split='ltrval')),\n    lasot=DatasetInfo(module=pt % \"lasot\", class_name=\"LaSOTDataset\", kwargs=dict()),\n    dv2017_val=DatasetInfo(module=\"ltr.dataset.davis\", class_name=\"Davis\", kwargs=dict(version='2017', split='val')),\n    dv2016_val=DatasetInfo(module=\"ltr.dataset.davis\", class_name=\"Davis\", kwargs=dict(version='2016', split='val')),\n    dv2017_test_dev=DatasetInfo(module=\"ltr.dataset.davis\", class_name=\"Davis\",\n                                kwargs=dict(version='2017', split='test-dev')),\n    dv2017_test_chal=DatasetInfo(module=\"ltr.dataset.davis\", class_name=\"Davis\",\n                                 kwargs=dict(version='2017', split='test-challenge')),\n    yt2019_test=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                            kwargs=dict(version='2019', split='test')),\n    yt2019_valid=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                             kwargs=dict(version='2019', split='valid')),\n    yt2019_valid_all=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                                 kwargs=dict(version='2019', split='valid', all_frames=True)),\n    yt2018_valid_all=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                                 kwargs=dict(version='2018', split='valid', all_frames=True)),\n    yt2018_jjval=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                             kwargs=dict(version='2018', split='jjvalid')),\n    yt2019_jjval=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                             kwargs=dict(version='2019', split='jjvalid', cleanup=['starts'])),\n    yt2019_jjval_all=DatasetInfo(module=\"ltr.dataset.youtubevos\", class_name=\"YouTubeVOS\",\n                                 kwargs=dict(version='2019', split='jjvalid', all_frames=True, cleanup=['starts'])),\n)\n\n\ndef load_dataset(name: str):\n    \"\"\" Import and load a single dataset.\"\"\"\n    name = name.lower()\n    dset_info = dataset_dict.get(name)\n    if dset_info is None:\n        raise ValueError('Unknown dataset \\'%s\\'' % name)\n\n    m = importlib.import_module(dset_info.module)\n    
dataset = getattr(m, dset_info.class_name)(**dset_info.kwargs)  # Call the constructor\n    return dataset.get_sequence_list()\n\n\ndef get_dataset(*args):\n    \"\"\" Get a single or set of datasets.\"\"\"\n    dset = SequenceList()\n    for name in args:\n        dset.extend(load_dataset(name))\n    return dset"
  },
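  {
    "path": "external/AR/pytracking/evaluation/datasets_example.py",
    "content": "# Sketch (illustrative; assumes otb_path and nfs_path are set in local.py):\n# get_dataset concatenates one SequenceList per requested benchmark, and the\n# combined list also supports lookup by sequence name.\nfrom pytracking.evaluation import get_dataset\n\ndataset = get_dataset('otb', 'nfs')  # OTBDataset sequences followed by NFSDataset ones\nprint(len(dataset))\n\nseq = dataset['nfs_Gymnastics']      # SequenceList.__getitem__ accepts sequence names\nprint(seq.name, len(seq.frames))\n"
  },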
  {
    "path": "external/AR/pytracking/evaluation/environment.py",
    "content": "import importlib\nimport os\n\n\nclass EnvSettings:\n    def __init__(self):\n        pytracking_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n\n        self.results_path = '{}/tracking_results/'.format(pytracking_path)\n        self.segmentation_path = '{}/segmentation_results/'.format(pytracking_path)\n        self.network_path = '{}/networks/'.format(pytracking_path)\n        self.result_plot_path = '{}/result_plots/'.format(pytracking_path)\n        self.otb_path = ''\n        self.nfs_path = ''\n        self.uav_path = ''\n        self.tpl_path = ''\n        self.vot_path = ''\n        self.got10k_path = ''\n        self.lasot_path = ''\n        self.trackingnet_path = ''\n        self.davis_dir = ''\n        self.youtubevos_dir = ''\n\n        self.got_packed_results_path = ''\n        self.got_reports_path = ''\n        self.tn_packed_results_path = ''\n\n\ndef create_default_local_file():\n    comment = {'results_path': 'Where to store tracking results',\n               'network_path': 'Where tracking networks are stored.'}\n\n    path = os.path.join(os.path.dirname(__file__), 'local.py')\n    with open(path, 'w') as f:\n        settings = EnvSettings()\n\n        f.write('from pytracking.evaluation.environment import EnvSettings\\n\\n')\n        f.write('def local_env_settings():\\n')\n        f.write('    settings = EnvSettings()\\n\\n')\n        f.write('    # Set your local paths here.\\n\\n')\n\n        for attr in dir(settings):\n            comment_str = None\n            if attr in comment:\n                comment_str = comment[attr]\n            attr_val = getattr(settings, attr)\n            if not attr.startswith('__') and not callable(attr_val):\n                if comment_str is None:\n                    f.write('    settings.{} = \\'{}\\'\\n'.format(attr, attr_val))\n                else:\n                    f.write('    settings.{} = \\'{}\\'    # {}\\n'.format(attr, attr_val, comment_str))\n        f.write('\\n    return settings\\n\\n')\n\n\ndef env_settings():\n    env_module_name = 'pytracking.evaluation.local'\n    try:\n        env_module = importlib.import_module(env_module_name)\n        return env_module.local_env_settings()\n    except:\n        env_file = os.path.join(os.path.dirname(__file__), 'local.py')\n\n        # Create a default file\n        create_default_local_file()\n        raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\\n Go to \"{}\" and set all the paths you need. '\n                           'Then try to run again.'.format(env_file))"
  },
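  {
    "path": "external/AR/pytracking/evaluation/local.py.example",
    "content": "# Example of the local.py that env_settings() imports. A template of this form is\n# auto-generated by create_default_local_file(); the paths below are placeholders\n# and must be replaced with your own.\nfrom pytracking.evaluation.environment import EnvSettings\n\ndef local_env_settings():\n    settings = EnvSettings()\n\n    # Set your local paths here.\n    settings.results_path = '/data/tracking_results/'    # Where to store tracking results\n    settings.network_path = '/data/networks/'            # Where tracking networks are stored.\n    settings.got10k_path = '/data/got10k/'\n    settings.lasot_path = '/data/lasot/'\n\n    return settings\n"
  },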
  {
    "path": "external/AR/pytracking/evaluation/got10kdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom pytracking.utils.load_text import load_text\nimport os\n\n\nclass GOT10KDataset(BaseDataset):\n    \"\"\" GOT-10k dataset.\n\n    Publication:\n        GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n        Lianghua Huang, Xin Zhao, and Kaiqi Huang\n        arXiv:1810.11981, 2018\n        https://arxiv.org/pdf/1810.11981.pdf\n\n    Download dataset from http://got-10k.aitestunion.com/downloads\n    \"\"\"\n    def __init__(self, split):\n        super().__init__()\n        # Split can be test, val, or ltrval (a validation split consisting of videos from the official train set)\n        if split == 'test' or split == 'val':\n            self.base_path = os.path.join(self.env_settings.got10k_path, split)\n        else:\n            self.base_path = os.path.join(self.env_settings.got10k_path, 'train')\n\n        self.sequence_list = self._get_sequence_list(split)\n        self.split = split\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n    def _construct_sequence(self, sequence_name):\n        anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64)\n\n        frames_path = '{}/{}'.format(self.base_path, sequence_name)\n        frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(\".jpg\")]\n        frame_list.sort(key=lambda f: int(f[:-4]))\n        frames_list = [os.path.join(frames_path, frame) for frame in frame_list]\n\n        return Sequence(sequence_name, frames_list, 'got10k', ground_truth_rect.reshape(-1, 4))\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _get_sequence_list(self, split):\n        with open('{}/list.txt'.format(self.base_path)) as f:\n            sequence_list = f.read().splitlines()\n\n        if split == 'ltrval':\n            with open('{}/got10k_val_split.txt'.format(self.env_settings.dataspec_path)) as f:\n                seq_ids = f.read().splitlines()\n\n            sequence_list = [sequence_list[int(x)] for x in seq_ids]\n        return sequence_list\n"
  },
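  {
    "path": "external/AR/pytracking/evaluation/got10kdataset_example.py",
    "content": "# Illustrative sketch (requires env_settings().got10k_path to point at GOT-10k).\n# Frames are discovered on disk and sorted by their integer file name, so the\n# sort key guarantees '2.jpg' precedes '10.jpg'.\nfrom pytracking.evaluation.got10kdataset import GOT10KDataset\n\nval_set = GOT10KDataset(split='val').get_sequence_list()\nprint(len(val_set))                 # number of validation sequences\n\nseq = val_set[0]\nprint(seq.name, seq.frames[0])      # first frame path of the first sequence\nprint(seq.ground_truth_rect.shape)  # (num_frames, 4) boxes in x, y, w, h\n"
  },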
  {
    "path": "external/AR/pytracking/evaluation/lasotdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom pytracking.utils.load_text import load_text\n\n\nclass LaSOTDataset(BaseDataset):\n    \"\"\"\n    LaSOT test set consisting of 280 videos (see Protocol-II in the LaSOT paper)\n\n    Publication:\n        LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n        Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n        CVPR, 2019\n        https://arxiv.org/pdf/1809.07845.pdf\n\n    Download the dataset from https://cis.temple.edu/lasot/download.html\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.lasot_path\n        self.sequence_list = self._get_sequence_list()\n        self.clean_list = self.clean_seq_list()\n\n    def clean_seq_list(self):\n        clean_lst = []\n        for i in range(len(self.sequence_list)):\n            cls, _ = self.sequence_list[i].split('-')\n            clean_lst.append(cls)\n        return  clean_lst\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n    def _construct_sequence(self, sequence_name):\n        class_name = sequence_name.split('-')[0]\n        anno_path = '{}/{}/{}/groundtruth.txt'.format(self.base_path, class_name, sequence_name)\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64)\n\n        occlusion_label_path = '{}/{}/{}/full_occlusion.txt'.format(self.base_path, class_name, sequence_name)\n\n        # NOTE: pandas backed seems super super slow for loading occlusion/oov masks\n        full_occlusion = load_text(str(occlusion_label_path), delimiter=',', dtype=np.float64, backend='numpy')\n\n        out_of_view_label_path = '{}/{}/{}/out_of_view.txt'.format(self.base_path, class_name, sequence_name)\n        out_of_view = load_text(str(out_of_view_label_path), delimiter=',', dtype=np.float64, backend='numpy')\n\n        target_visible = np.logical_and(full_occlusion == 0, out_of_view == 0)\n\n        frames_path = '{}/{}/{}/img'.format(self.base_path, class_name, sequence_name)\n\n        frames_list = ['{}/{:08d}.jpg'.format(frames_path, frame_number) for frame_number in range(1, ground_truth_rect.shape[0] + 1)]\n\n        target_class = class_name\n        return Sequence(sequence_name, frames_list, 'lasot', ground_truth_rect.reshape(-1, 4),\n                        object_class=target_class, target_visible=target_visible)\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _get_sequence_list(self):\n        sequence_list = ['airplane-1',\n                         'airplane-9',\n                         'airplane-13',\n                         'airplane-15',\n                         'basketball-1',\n                         'basketball-6',\n                         'basketball-7',\n                         'basketball-11',\n                         'bear-2',\n                         'bear-4',\n                         'bear-6',\n                         'bear-17',\n                         'bicycle-2',\n                         'bicycle-7',\n                         'bicycle-9',\n                         'bicycle-18',\n                         'bird-2',\n                         'bird-3',\n                         'bird-15',\n                         'bird-17',\n                         'boat-3',\n                         'boat-4',\n               
          'boat-12',\n                         'boat-17',\n                         'book-3',\n                         'book-10',\n                         'book-11',\n                         'book-19',\n                         'bottle-1',\n                         'bottle-12',\n                         'bottle-14',\n                         'bottle-18',\n                         'bus-2',\n                         'bus-5',\n                         'bus-17',\n                         'bus-19',\n                         'car-2',\n                         'car-6',\n                         'car-9',\n                         'car-17',\n                         'cat-1',\n                         'cat-3',\n                         'cat-18',\n                         'cat-20',\n                         'cattle-2',\n                         'cattle-7',\n                         'cattle-12',\n                         'cattle-13',\n                         'spider-14',\n                         'spider-16',\n                         'spider-18',\n                         'spider-20',\n                         'coin-3',\n                         'coin-6',\n                         'coin-7',\n                         'coin-18',\n                         'crab-3',\n                         'crab-6',\n                         'crab-12',\n                         'crab-18',\n                         'surfboard-12',\n                         'surfboard-4',\n                         'surfboard-5',\n                         'surfboard-8',\n                         'cup-1',\n                         'cup-4',\n                         'cup-7',\n                         'cup-17',\n                         'deer-4',\n                         'deer-8',\n                         'deer-10',\n                         'deer-14',\n                         'dog-1',\n                         'dog-7',\n                         'dog-15',\n                         'dog-19',\n                         'guitar-3',\n                         'guitar-8',\n                         'guitar-10',\n                         'guitar-16',\n                         'person-1',\n                         'person-5',\n                         'person-10',\n                         'person-12',\n                         'pig-2',\n                         'pig-10',\n                         'pig-13',\n                         'pig-18',\n                         'rubicCube-1',\n                         'rubicCube-6',\n                         'rubicCube-14',\n                         'rubicCube-19',\n                         'swing-10',\n                         'swing-14',\n                         'swing-17',\n                         'swing-20',\n                         'drone-13',\n                         'drone-15',\n                         'drone-2',\n                         'drone-7',\n                         'pool-12',\n                         'pool-15',\n                         'pool-3',\n                         'pool-7',\n                         'rabbit-10',\n                         'rabbit-13',\n                         'rabbit-17',\n                         'rabbit-19',\n                         'racing-10',\n                         'racing-15',\n                         'racing-16',\n                         'racing-20',\n                         'robot-1',\n                         'robot-19',\n                         'robot-5',\n                         'robot-8',\n                         'sepia-13',\n          
               'sepia-16',\n                         'sepia-6',\n                         'sepia-8',\n                         'sheep-3',\n                         'sheep-5',\n                         'sheep-7',\n                         'sheep-9',\n                         'skateboard-16',\n                         'skateboard-19',\n                         'skateboard-3',\n                         'skateboard-8',\n                         'tank-14',\n                         'tank-16',\n                         'tank-6',\n                         'tank-9',\n                         'tiger-12',\n                         'tiger-18',\n                         'tiger-4',\n                         'tiger-6',\n                         'train-1',\n                         'train-11',\n                         'train-20',\n                         'train-7',\n                         'truck-16',\n                         'truck-3',\n                         'truck-6',\n                         'truck-7',\n                         'turtle-16',\n                         'turtle-5',\n                         'turtle-8',\n                         'turtle-9',\n                         'umbrella-17',\n                         'umbrella-19',\n                         'umbrella-2',\n                         'umbrella-9',\n                         'yoyo-15',\n                         'yoyo-17',\n                         'yoyo-19',\n                         'yoyo-7',\n                         'zebra-10',\n                         'zebra-14',\n                         'zebra-16',\n                         'zebra-17',\n                         'elephant-1',\n                         'elephant-12',\n                         'elephant-16',\n                         'elephant-18',\n                         'goldfish-3',\n                         'goldfish-7',\n                         'goldfish-8',\n                         'goldfish-10',\n                         'hat-1',\n                         'hat-2',\n                         'hat-5',\n                         'hat-18',\n                         'kite-4',\n                         'kite-6',\n                         'kite-10',\n                         'kite-15',\n                         'motorcycle-1',\n                         'motorcycle-3',\n                         'motorcycle-9',\n                         'motorcycle-18',\n                         'mouse-1',\n                         'mouse-8',\n                         'mouse-9',\n                         'mouse-17',\n                         'flag-3',\n                         'flag-9',\n                         'flag-5',\n                         'flag-2',\n                         'frog-3',\n                         'frog-4',\n                         'frog-20',\n                         'frog-9',\n                         'gametarget-1',\n                         'gametarget-2',\n                         'gametarget-7',\n                         'gametarget-13',\n                         'hand-2',\n                         'hand-3',\n                         'hand-9',\n                         'hand-16',\n                         'helmet-5',\n                         'helmet-11',\n                         'helmet-19',\n                         'helmet-13',\n                         'licenseplate-6',\n                         'licenseplate-12',\n                         'licenseplate-13',\n                         'licenseplate-15',\n                         'electricfan-1',\n                 
        'electricfan-10',\n                         'electricfan-18',\n                         'electricfan-20',\n                         'chameleon-3',\n                         'chameleon-6',\n                         'chameleon-11',\n                         'chameleon-20',\n                         'crocodile-3',\n                         'crocodile-4',\n                         'crocodile-10',\n                         'crocodile-14',\n                         'gecko-1',\n                         'gecko-5',\n                         'gecko-16',\n                         'gecko-19',\n                         'fox-2',\n                         'fox-3',\n                         'fox-5',\n                         'fox-20',\n                         'giraffe-2',\n                         'giraffe-10',\n                         'giraffe-13',\n                         'giraffe-15',\n                         'gorilla-4',\n                         'gorilla-6',\n                         'gorilla-9',\n                         'gorilla-13',\n                         'hippo-1',\n                         'hippo-7',\n                         'hippo-9',\n                         'hippo-20',\n                         'horse-1',\n                         'horse-4',\n                         'horse-12',\n                         'horse-15',\n                         'kangaroo-2',\n                         'kangaroo-5',\n                         'kangaroo-11',\n                         'kangaroo-14',\n                         'leopard-1',\n                         'leopard-7',\n                         'leopard-16',\n                         'leopard-20',\n                         'lion-1',\n                         'lion-5',\n                         'lion-12',\n                         'lion-20',\n                         'lizard-1',\n                         'lizard-3',\n                         'lizard-6',\n                         'lizard-13',\n                         'microphone-2',\n                         'microphone-6',\n                         'microphone-14',\n                         'microphone-16',\n                         'monkey-3',\n                         'monkey-4',\n                         'monkey-9',\n                         'monkey-17',\n                         'shark-2',\n                         'shark-3',\n                         'shark-5',\n                         'shark-6',\n                         'squirrel-8',\n                         'squirrel-11',\n                         'squirrel-13',\n                         'squirrel-19',\n                         'volleyball-1',\n                         'volleyball-13',\n                         'volleyball-18',\n                         'volleyball-19']\n        return sequence_list\n"
  },
  {
    "path": "external/AR/pytracking/evaluation/mobifacedataset.py",
    "content": "from pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nimport glob\nimport numpy as np\nimport os.path as osp\nfrom collections import OrderedDict\nimport pandas as pd\n\n\nclass MobifaceDataset(BaseDataset):\n    \"\"\" Mobiface dataset.\n        Publication:\n            MobiFace: A Novel Dataset for Mobile Face Tracking in the Wild\n            Yiming Lin, Shiyang Cheng, Jie Shen, Maja Pantic\n            arXiv:1805.09749, 2018\n            https://arxiv.org/pdf/1805.09749v2\n\n        Download dataset from https://mobiface.github.io/\n    \"\"\"\n    def __init__(self, split):\n        \"\"\"\n        args:\n            split - Split to use. Can be i) 'train': official training set, ii) 'test': official test set, iii) 'all': whole dataset.\n        \"\"\"\n        super().__init__()\n        self.base_path = self.env_settings.mobiface_path\n        self.sequence_list = self._get_sequence_list(split)\n        self.split = split\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n    def _get_sequence_list(self, split):\n\n        self.train_meta_fn = osp.join(self.base_path, 'train.meta.csv')\n        self.test_meta_fn = osp.join(self.base_path, 'test.meta.csv')\n        self.train_meta = pd.read_csv(self.train_meta_fn,index_col=0).transpose().to_dict()\n        self.test_meta = pd.read_csv(self.test_meta_fn,index_col=0).transpose().to_dict()\n        if split == 'train':\n            self.meta = self.train_meta\n        elif split == 'test':\n            self.meta = self.test_meta\n        else:\n            self.meta = {**self.train_meta, **self.test_meta} # In Python 3.5 or greater\n        self.meta = OrderedDict(sorted(self.meta.items(), key=lambda t: t[0]))\n        self.anno_files = []\n        for k,v in self.meta.items():\n            if k in self.train_meta.keys():\n                self.anno_files.append(osp.abspath(osp.join(self.base_path,'train', k+'.annot.csv')))\n            else:\n                self.anno_files.append(osp.abspath(osp.join(self.base_path,'test', k+'.annot.csv')))\n        self.seq_names = sorted(list(self.meta.keys()))\n        self.seq_dirs = [fn[:-len('.annot.csv')] for fn in self.anno_files]\n        return self.seq_names\n\n    def _construct_sequence(self, sequence_name):\n        index = self.seq_names.index(sequence_name)\n        img_files = sorted(glob.glob(self.seq_dirs[index]+'/*.jpg'))\n        if len(img_files) == 0:\n            img_files = sorted(glob.glob(self.seq_dirs[index]+'.png'))\n        with open(self.anno_files[index], 'r') as f:\n            anno = np.loadtxt(f, delimiter=',', skiprows=1, dtype=int)\n        anno = anno[:,1:]\n        assert anno.shape[1] == 4\n\n        return Sequence(sequence_name, img_files, anno.reshape(-1, 4))\n\n    def __len__(self):\n        return len(self.sequence_list)\n"
  },
  {
    "path": "external/AR/pytracking/evaluation/multi_object_wrapper.py",
    "content": "import numpy as np\nfrom collections import OrderedDict\nimport time\nimport copy\n\n\nclass MultiObjectWrapper:\n    def __init__(self, base_tracker_class, params, visdom=None, fast_load=False):\n        self.base_tracker_class = base_tracker_class\n        self.params = params\n        self.visdom = visdom\n\n        self.initialized_ids = []\n        self.trackers = OrderedDict()\n\n        self.fast_load = fast_load\n        if self.fast_load:\n            self.tracker_copy = self.base_tracker_class(self.params)\n            if hasattr(self.tracker_copy, 'initialize_features'):\n                self.tracker_copy.initialize_features()\n\n    def create_tracker(self):\n        tracker = None\n        if self.fast_load:\n            try:\n                tracker = copy.deepcopy(self.tracker_copy)\n            except:\n                pass\n        if tracker is None:\n            tracker = self.base_tracker_class(self.params)\n        tracker.visdom = self.visdom\n        return tracker\n\n    def _split_info(self, info):\n        info_split = OrderedDict()\n        init_other = OrderedDict()              # Init other contains init info for all other objects\n        for obj_id in info['init_object_ids']:\n            info_split[obj_id] = dict()\n            init_other[obj_id] = dict()\n            info_split[obj_id]['object_ids'] = [obj_id]\n            info_split[obj_id]['sequence_object_ids'] = info['sequence_object_ids']\n            if 'init_bbox' in info:\n                info_split[obj_id]['init_bbox'] = info['init_bbox'][obj_id]\n                init_other[obj_id]['init_bbox'] = info['init_bbox'][obj_id]\n            if 'init_mask' in info:\n                info_split[obj_id]['init_mask'] = (info['init_mask'] == int(obj_id)).astype(np.uint8)\n                init_other[obj_id]['init_mask'] = info_split[obj_id]['init_mask']\n        for obj_info in info_split.values():\n            obj_info['init_other'] = init_other\n        return info_split\n\n    def _set_defaults(self, tracker_out: dict, defaults=None):\n        defaults = {} if defaults is None else defaults\n\n        for key, val in defaults.items():\n            if tracker_out.get(key) is None:\n                tracker_out[key] = val\n\n        return tracker_out\n\n    def default_merge(self, out_all):\n        out_merged = OrderedDict()\n\n        out_first = list(out_all.values())[0]\n        out_types = out_first.keys()\n\n        # Merge segmentation mask\n        if 'segmentation' in out_types and out_first['segmentation'] is not None:\n            # Stack all masks\n            # If a tracker outputs soft segmentation mask, use that. 
Else use the binary segmentation\n            segmentation_maps = [out.get('segmentation_soft', out['segmentation']) for out in out_all.values()]\n            segmentation_maps = np.stack(segmentation_maps)\n\n            obj_ids = np.array([0, *map(int, out_all.keys())], dtype=np.uint8)\n            segm_threshold = getattr(self.params, 'segmentation_threshold', 0.5)\n            merged_segmentation = obj_ids[np.where(segmentation_maps.max(axis=0) > segm_threshold,\n                                                   segmentation_maps.argmax(axis=0) + 1, 0)]\n\n            out_merged['segmentation'] = merged_segmentation\n\n        # Merge other fields\n        for key in out_types:\n            if key == 'segmentation':\n                pass\n            else:\n                out_merged[key] = {obj_id: out[key] for obj_id, out in out_all.items()}\n\n        return out_merged\n\n    def merge_outputs(self, out_all):\n        if hasattr(self.base_tracker_class, 'merge_results'):\n            out_merged = self.base_tracker_class.merge_results(out_all)\n        else:\n            out_merged = self.default_merge(out_all)\n\n        return out_merged\n\n    def initialize(self, image, info: dict) -> dict:\n        self.initialized_ids = []\n        self.trackers = OrderedDict()\n\n        if len(info['init_object_ids']) == 0:\n            return None\n\n        object_ids = info['object_ids']\n\n        init_info_split = self._split_info(info)\n        self.trackers = OrderedDict({obj_id: self.create_tracker() for obj_id in object_ids})\n\n        out_all = OrderedDict()\n        # Run individual trackers for each object\n        for obj_id in info['init_object_ids']:\n            start_time = time.time()\n            out = self.trackers[obj_id].initialize(image, init_info_split[obj_id])\n            if out is None:\n                out = {}\n\n            init_default = {'target_bbox': init_info_split[obj_id].get('init_bbox'),\n                            'time': time.time() - start_time,\n                            'segmentation': init_info_split[obj_id].get('init_mask')}\n\n            out = self._set_defaults(out, init_default)\n            out_all[obj_id] = out\n\n        # Merge results\n        out_merged = self.merge_outputs(out_all)\n\n        self.initialized_ids = info['init_object_ids'].copy()\n        return out_merged\n\n    def track(self, image, info: dict = None) -> dict:\n        if info is None:\n            info = {}\n\n        prev_output = info.get('previous_output', OrderedDict())\n\n        if info.get('init_object_ids', False):\n            init_info_split = self._split_info(info)\n            for obj_init_info in init_info_split.values():\n                obj_init_info['previous_output'] = prev_output\n\n            info['init_other'] = list(init_info_split.values())[0]['init_other']\n\n        out_all = OrderedDict()\n        for obj_id in self.initialized_ids:\n            start_time = time.time()\n\n            out = self.trackers[obj_id].track(image, info)\n\n            default = {'time': time.time() - start_time}\n            out = self._set_defaults(out, default)\n            out_all[obj_id] = out\n\n        # Initialize new\n        if info.get('init_object_ids', False):\n            for obj_id in info['init_object_ids']:\n                if not obj_id in self.trackers:\n                    self.trackers[obj_id] = self.create_tracker()\n\n                start_time = time.time()\n                out = self.trackers[obj_id].initialize(image, 
init_info_split[obj_id])\n                if out is None:\n                    out = {}\n\n                init_default = {'target_bbox': init_info_split[obj_id].get('init_bbox'),\n                                'time': time.time() - start_time,\n                                'segmentation': init_info_split[obj_id].get('init_mask')}\n\n                out = self._set_defaults(out, init_default)\n                out_all[obj_id] = out\n\n            self.initialized_ids.extend(info['init_object_ids'])\n\n        # Merge results\n        out_merged = self.merge_outputs(out_all)\n\n        return out_merged\n\n    def visdom_draw_tracking(self, image, box, segmentation):\n        if isinstance(box, (OrderedDict, dict)):\n            box = [v for k, v in box.items()]\n        else:\n            box = (box,)\n        if segmentation is None:\n            self.visdom.register((image, *box), 'Tracking', 1, 'Tracking')\n        else:\n            self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking')\n"
  },
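  {
    "path": "external/AR/pytracking/evaluation/multi_object_merge_example.py",
    "content": "# Standalone sketch (illustration, not shipped code) of the label-merging rule in\n# MultiObjectWrapper.default_merge: stack the per-object soft masks, take the\n# per-pixel argmax, and keep label 0 wherever no object clears the threshold.\nimport numpy as np\n\nsoft_masks = {'1': np.array([[0.9, 0.2], [0.1, 0.4]]),\n              '2': np.array([[0.3, 0.8], [0.2, 0.4]])}\n\nsegmentation_maps = np.stack(list(soft_masks.values()))\nobj_ids = np.array([0, *map(int, soft_masks.keys())], dtype=np.uint8)\nsegm_threshold = 0.5\n\nmerged = np.where(segmentation_maps.max(axis=0) > segm_threshold,\n                  segmentation_maps.argmax(axis=0) + 1, 0)\nmerged = obj_ids[merged]\nprint(merged)  # [[1 2]\n               #  [0 0]]  -> bottom row stays background: all scores are <= 0.5\n"
  },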
  {
    "path": "external/AR/pytracking/evaluation/nfsdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom pytracking.utils.load_text import load_text\n\n\nclass NFSDataset(BaseDataset):\n    \"\"\" NFS dataset.\n\n    Publication:\n        Need for Speed: A Benchmark for Higher Frame Rate Object Tracking\n        H. Kiani Galoogahi, A. Fagg, C. Huang, D. Ramanan, and S.Lucey\n        ICCV, 2017\n        http://openaccess.thecvf.com/content_ICCV_2017/papers/Galoogahi_Need_for_Speed_ICCV_2017_paper.pdf\n\n    Download the dataset from http://ci2cv.net/nfs/index.html\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.nfs_path\n        self.sequence_info_list = self._get_sequence_info_list()\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])\n\n    def _construct_sequence(self, sequence_info):\n        sequence_path = sequence_info['path']\n        nz = sequence_info['nz']\n        ext = sequence_info['ext']\n        start_frame = sequence_info['startFrame']\n        end_frame = sequence_info['endFrame']\n\n        init_omit = 0\n        if 'initOmit' in sequence_info:\n            init_omit = sequence_info['initOmit']\n\n        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, \n        sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]\n\n        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])\n\n        ground_truth_rect = load_text(str(anno_path), delimiter='\\t', dtype=np.float64)\n\n        return Sequence(sequence_info['name'], frames, 'nfs', ground_truth_rect[init_omit:,:],\n                        object_class=sequence_info['object_class'])\n\n    def __len__(self):\n        return len(self.sequence_info_list)\n\n    def _get_sequence_info_list(self):\n        sequence_info_list = [\n            {\"name\": \"nfs_Gymnastics\", \"path\": \"sequences/Gymnastics\", \"startFrame\": 1, \"endFrame\": 368, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_Gymnastics.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_MachLoop_jet\", \"path\": \"sequences/MachLoop_jet\", \"startFrame\": 1, \"endFrame\": 99, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_MachLoop_jet.txt\", \"object_class\": \"aircraft\", 'occlusion': False},\n            {\"name\": \"nfs_Skiing_red\", \"path\": \"sequences/Skiing_red\", \"startFrame\": 1, \"endFrame\": 69, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_Skiing_red.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_Skydiving\", \"path\": \"sequences/Skydiving\", \"startFrame\": 1, \"endFrame\": 196, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_Skydiving.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_airboard_1\", \"path\": \"sequences/airboard_1\", \"startFrame\": 1, \"endFrame\": 425, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_airboard_1.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_airplane_landing\", \"path\": \"sequences/airplane_landing\", \"startFrame\": 1, \"endFrame\": 81, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_airplane_landing.txt\", \"object_class\": \"aircraft\", 'occlusion': False},\n            {\"name\": \"nfs_airtable_3\", \"path\": \"sequences/airtable_3\", 
\"startFrame\": 1, \"endFrame\": 482, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_airtable_3.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_1\", \"path\": \"sequences/basketball_1\", \"startFrame\": 1, \"endFrame\": 282, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_1.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_2\", \"path\": \"sequences/basketball_2\", \"startFrame\": 1, \"endFrame\": 102, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_2.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_3\", \"path\": \"sequences/basketball_3\", \"startFrame\": 1, \"endFrame\": 421, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_3.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_6\", \"path\": \"sequences/basketball_6\", \"startFrame\": 1, \"endFrame\": 224, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_6.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_7\", \"path\": \"sequences/basketball_7\", \"startFrame\": 1, \"endFrame\": 240, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_7.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_basketball_player\", \"path\": \"sequences/basketball_player\", \"startFrame\": 1, \"endFrame\": 369, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_player.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_basketball_player_2\", \"path\": \"sequences/basketball_player_2\", \"startFrame\": 1, \"endFrame\": 437, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_player_2.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_beach_flipback_person\", \"path\": \"sequences/beach_flipback_person\", \"startFrame\": 1, \"endFrame\": 61, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_beach_flipback_person.txt\", \"object_class\": \"person head\", 'occlusion': False},\n            {\"name\": \"nfs_bee\", \"path\": \"sequences/bee\", \"startFrame\": 1, \"endFrame\": 45, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bee.txt\", \"object_class\": \"insect\", 'occlusion': False},\n            {\"name\": \"nfs_biker_acrobat\", \"path\": \"sequences/biker_acrobat\", \"startFrame\": 1, \"endFrame\": 128, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_acrobat.txt\", \"object_class\": \"bicycle\", 'occlusion': False},\n            {\"name\": \"nfs_biker_all_1\", \"path\": \"sequences/biker_all_1\", \"startFrame\": 1, \"endFrame\": 113, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_all_1.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_biker_head_2\", \"path\": \"sequences/biker_head_2\", \"startFrame\": 1, \"endFrame\": 132, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_head_2.txt\", \"object_class\": \"person head\", 'occlusion': False},\n            {\"name\": \"nfs_biker_head_3\", \"path\": \"sequences/biker_head_3\", \"startFrame\": 1, \"endFrame\": 254, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_head_3.txt\", \"object_class\": \"person head\", 'occlusion': False},\n            {\"name\": \"nfs_biker_upper_body\", \"path\": \"sequences/biker_upper_body\", \"startFrame\": 1, \"endFrame\": 194, 
\"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_upper_body.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_biker_whole_body\", \"path\": \"sequences/biker_whole_body\", \"startFrame\": 1, \"endFrame\": 572, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_whole_body.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_billiard_2\", \"path\": \"sequences/billiard_2\", \"startFrame\": 1, \"endFrame\": 604, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_2.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_billiard_3\", \"path\": \"sequences/billiard_3\", \"startFrame\": 1, \"endFrame\": 698, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_3.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_billiard_6\", \"path\": \"sequences/billiard_6\", \"startFrame\": 1, \"endFrame\": 771, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_6.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_billiard_7\", \"path\": \"sequences/billiard_7\", \"startFrame\": 1, \"endFrame\": 724, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_7.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_billiard_8\", \"path\": \"sequences/billiard_8\", \"startFrame\": 1, \"endFrame\": 778, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_8.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_bird_2\", \"path\": \"sequences/bird_2\", \"startFrame\": 1, \"endFrame\": 476, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bird_2.txt\", \"object_class\": \"bird\", 'occlusion': False},\n            {\"name\": \"nfs_book\", \"path\": \"sequences/book\", \"startFrame\": 1, \"endFrame\": 288, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_book.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_bottle\", \"path\": \"sequences/bottle\", \"startFrame\": 1, \"endFrame\": 2103, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bottle.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_bowling_1\", \"path\": \"sequences/bowling_1\", \"startFrame\": 1, \"endFrame\": 303, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_1.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_bowling_2\", \"path\": \"sequences/bowling_2\", \"startFrame\": 1, \"endFrame\": 710, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_2.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_bowling_3\", \"path\": \"sequences/bowling_3\", \"startFrame\": 1, \"endFrame\": 271, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_3.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_bowling_6\", \"path\": \"sequences/bowling_6\", \"startFrame\": 1, \"endFrame\": 260, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_6.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_bowling_ball\", \"path\": \"sequences/bowling_ball\", \"startFrame\": 1, \"endFrame\": 275, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_ball.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_bunny\", \"path\": \"sequences/bunny\", \"startFrame\": 1, 
\"endFrame\": 705, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bunny.txt\", \"object_class\": \"mammal\", 'occlusion': False},\n            {\"name\": \"nfs_car\", \"path\": \"sequences/car\", \"startFrame\": 1, \"endFrame\": 2020, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car.txt\", \"object_class\": \"car\", 'occlusion': True},\n            {\"name\": \"nfs_car_camaro\", \"path\": \"sequences/car_camaro\", \"startFrame\": 1, \"endFrame\": 36, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_camaro.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_drifting\", \"path\": \"sequences/car_drifting\", \"startFrame\": 1, \"endFrame\": 173, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_drifting.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_jumping\", \"path\": \"sequences/car_jumping\", \"startFrame\": 1, \"endFrame\": 22, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_jumping.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_rc_rolling\", \"path\": \"sequences/car_rc_rolling\", \"startFrame\": 1, \"endFrame\": 62, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_rc_rolling.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_rc_rotating\", \"path\": \"sequences/car_rc_rotating\", \"startFrame\": 1, \"endFrame\": 80, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_rc_rotating.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_side\", \"path\": \"sequences/car_side\", \"startFrame\": 1, \"endFrame\": 108, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_side.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_white\", \"path\": \"sequences/car_white\", \"startFrame\": 1, \"endFrame\": 2063, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_white.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_cheetah\", \"path\": \"sequences/cheetah\", \"startFrame\": 1, \"endFrame\": 167, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_cheetah.txt\", \"object_class\": \"mammal\", 'occlusion': True},\n            {\"name\": \"nfs_cup\", \"path\": \"sequences/cup\", \"startFrame\": 1, \"endFrame\": 1281, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_cup.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_cup_2\", \"path\": \"sequences/cup_2\", \"startFrame\": 1, \"endFrame\": 182, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_cup_2.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_dog\", \"path\": \"sequences/dog\", \"startFrame\": 1, \"endFrame\": 1030, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dog.txt\", \"object_class\": \"dog\", 'occlusion': True},\n            {\"name\": \"nfs_dog_1\", \"path\": \"sequences/dog_1\", \"startFrame\": 1, \"endFrame\": 168, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dog_1.txt\", \"object_class\": \"dog\", 'occlusion': False},\n            {\"name\": \"nfs_dog_2\", \"path\": \"sequences/dog_2\", \"startFrame\": 1, \"endFrame\": 594, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dog_2.txt\", \"object_class\": \"dog\", 'occlusion': True},\n            {\"name\": \"nfs_dog_3\", \"path\": \"sequences/dog_3\", \"startFrame\": 1, \"endFrame\": 200, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": 
\"anno/nfs_dog_3.txt\", \"object_class\": \"dog\", 'occlusion': False},\n            {\"name\": \"nfs_dogs\", \"path\": \"sequences/dogs\", \"startFrame\": 1, \"endFrame\": 198, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dogs.txt\", \"object_class\": \"dog\", 'occlusion': True},\n            {\"name\": \"nfs_dollar\", \"path\": \"sequences/dollar\", \"startFrame\": 1, \"endFrame\": 1426, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dollar.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_drone\", \"path\": \"sequences/drone\", \"startFrame\": 1, \"endFrame\": 70, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_drone.txt\", \"object_class\": \"aircraft\", 'occlusion': False},\n            {\"name\": \"nfs_ducks_lake\", \"path\": \"sequences/ducks_lake\", \"startFrame\": 1, \"endFrame\": 107, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_ducks_lake.txt\", \"object_class\": \"bird\", 'occlusion': False},\n            {\"name\": \"nfs_exit\", \"path\": \"sequences/exit\", \"startFrame\": 1, \"endFrame\": 359, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_exit.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_first\", \"path\": \"sequences/first\", \"startFrame\": 1, \"endFrame\": 435, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_first.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_flower\", \"path\": \"sequences/flower\", \"startFrame\": 1, \"endFrame\": 448, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_flower.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_footbal_skill\", \"path\": \"sequences/footbal_skill\", \"startFrame\": 1, \"endFrame\": 131, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_footbal_skill.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_helicopter\", \"path\": \"sequences/helicopter\", \"startFrame\": 1, \"endFrame\": 310, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_helicopter.txt\", \"object_class\": \"aircraft\", 'occlusion': False},\n            {\"name\": \"nfs_horse_jumping\", \"path\": \"sequences/horse_jumping\", \"startFrame\": 1, \"endFrame\": 117, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_horse_jumping.txt\", \"object_class\": \"horse\", 'occlusion': True},\n            {\"name\": \"nfs_horse_running\", \"path\": \"sequences/horse_running\", \"startFrame\": 1, \"endFrame\": 139, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_horse_running.txt\", \"object_class\": \"horse\", 'occlusion': False},\n            {\"name\": \"nfs_iceskating_6\", \"path\": \"sequences/iceskating_6\", \"startFrame\": 1, \"endFrame\": 603, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_iceskating_6.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_jellyfish_5\", \"path\": \"sequences/jellyfish_5\", \"startFrame\": 1, \"endFrame\": 746, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_jellyfish_5.txt\", \"object_class\": \"invertebrate\", 'occlusion': False},\n            {\"name\": \"nfs_kid_swing\", \"path\": \"sequences/kid_swing\", \"startFrame\": 1, \"endFrame\": 169, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_kid_swing.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_motorcross\", \"path\": \"sequences/motorcross\", \"startFrame\": 1, \"endFrame\": 39, \"nz\": 5, \"ext\": \"jpg\", 
\"anno_path\": \"anno/nfs_motorcross.txt\", \"object_class\": \"vehicle\", 'occlusion': True},\n            {\"name\": \"nfs_motorcross_kawasaki\", \"path\": \"sequences/motorcross_kawasaki\", \"startFrame\": 1, \"endFrame\": 65, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_motorcross_kawasaki.txt\", \"object_class\": \"vehicle\", 'occlusion': False},\n            {\"name\": \"nfs_parkour\", \"path\": \"sequences/parkour\", \"startFrame\": 1, \"endFrame\": 58, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_parkour.txt\", \"object_class\": \"person head\", 'occlusion': False},\n            {\"name\": \"nfs_person_scooter\", \"path\": \"sequences/person_scooter\", \"startFrame\": 1, \"endFrame\": 413, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_person_scooter.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_pingpong_2\", \"path\": \"sequences/pingpong_2\", \"startFrame\": 1, \"endFrame\": 1277, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_pingpong_2.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_pingpong_7\", \"path\": \"sequences/pingpong_7\", \"startFrame\": 1, \"endFrame\": 1290, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_pingpong_7.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_pingpong_8\", \"path\": \"sequences/pingpong_8\", \"startFrame\": 1, \"endFrame\": 296, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_pingpong_8.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_purse\", \"path\": \"sequences/purse\", \"startFrame\": 1, \"endFrame\": 968, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_purse.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_rubber\", \"path\": \"sequences/rubber\", \"startFrame\": 1, \"endFrame\": 1328, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_rubber.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_running\", \"path\": \"sequences/running\", \"startFrame\": 1, \"endFrame\": 677, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_running.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_running_100_m\", \"path\": \"sequences/running_100_m\", \"startFrame\": 1, \"endFrame\": 313, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_running_100_m.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_running_100_m_2\", \"path\": \"sequences/running_100_m_2\", \"startFrame\": 1, \"endFrame\": 337, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_running_100_m_2.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_running_2\", \"path\": \"sequences/running_2\", \"startFrame\": 1, \"endFrame\": 363, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_running_2.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_shuffleboard_1\", \"path\": \"sequences/shuffleboard_1\", \"startFrame\": 1, \"endFrame\": 42, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_1.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffleboard_2\", \"path\": \"sequences/shuffleboard_2\", \"startFrame\": 1, \"endFrame\": 41, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_2.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": 
\"nfs_shuffleboard_4\", \"path\": \"sequences/shuffleboard_4\", \"startFrame\": 1, \"endFrame\": 62, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_4.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffleboard_5\", \"path\": \"sequences/shuffleboard_5\", \"startFrame\": 1, \"endFrame\": 32, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_5.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffleboard_6\", \"path\": \"sequences/shuffleboard_6\", \"startFrame\": 1, \"endFrame\": 52, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_6.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffletable_2\", \"path\": \"sequences/shuffletable_2\", \"startFrame\": 1, \"endFrame\": 372, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffletable_2.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffletable_3\", \"path\": \"sequences/shuffletable_3\", \"startFrame\": 1, \"endFrame\": 368, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffletable_3.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffletable_4\", \"path\": \"sequences/shuffletable_4\", \"startFrame\": 1, \"endFrame\": 101, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffletable_4.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_ski_long\", \"path\": \"sequences/ski_long\", \"startFrame\": 1, \"endFrame\": 274, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_ski_long.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_ball\", \"path\": \"sequences/soccer_ball\", \"startFrame\": 1, \"endFrame\": 163, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_ball.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_ball_2\", \"path\": \"sequences/soccer_ball_2\", \"startFrame\": 1, \"endFrame\": 1934, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_ball_2.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_ball_3\", \"path\": \"sequences/soccer_ball_3\", \"startFrame\": 1, \"endFrame\": 1381, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_ball_3.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_player_2\", \"path\": \"sequences/soccer_player_2\", \"startFrame\": 1, \"endFrame\": 475, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_player_2.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_player_3\", \"path\": \"sequences/soccer_player_3\", \"startFrame\": 1, \"endFrame\": 319, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_player_3.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_stop_sign\", \"path\": \"sequences/stop_sign\", \"startFrame\": 1, \"endFrame\": 302, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_stop_sign.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_suv\", \"path\": \"sequences/suv\", \"startFrame\": 1, \"endFrame\": 2584, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_suv.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_tiger\", \"path\": \"sequences/tiger\", \"startFrame\": 1, \"endFrame\": 1556, 
\"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_tiger.txt\", \"object_class\": \"mammal\", 'occlusion': False},\n            {\"name\": \"nfs_walking\", \"path\": \"sequences/walking\", \"startFrame\": 1, \"endFrame\": 555, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_walking.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_walking_3\", \"path\": \"sequences/walking_3\", \"startFrame\": 1, \"endFrame\": 1427, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_walking_3.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_water_ski_2\", \"path\": \"sequences/water_ski_2\", \"startFrame\": 1, \"endFrame\": 47, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_water_ski_2.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_yoyo\", \"path\": \"sequences/yoyo\", \"startFrame\": 1, \"endFrame\": 67, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_yoyo.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_zebra_fish\", \"path\": \"sequences/zebra_fish\", \"startFrame\": 1, \"endFrame\": 671, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_zebra_fish.txt\", \"object_class\": \"fish\", 'occlusion': False},\n        ]\n\n        return sequence_info_list\n"
  },
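  {
    "path": "external/AR/pytracking/evaluation/_frame_path_example.py",
    "content": "# Illustrative sketch, not part of the original repository: shows how the\n# sequence_info dicts in the dataset files here expand into per-frame image\n# paths via the zero-padded '{frame:0{nz}}' format spec ('nz' digits, frames\n# startFrame..endFrame inclusive). The base path below is hypothetical; the\n# real dataset classes read it from self.env_settings.\n\nseq_info = {'name': 'nfs_bee', 'path': 'sequences/bee', 'startFrame': 1, 'endFrame': 45, 'nz': 5, 'ext': 'jpg'}\nbase_path = '/data/nfs'  # hypothetical dataset root\n\nframes = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=base_path, sequence_path=seq_info['path'], frame=frame_num, nz=seq_info['nz'], ext=seq_info['ext']) for frame_num in range(seq_info['startFrame'], seq_info['endFrame'] + 1)]\n\n# frame 1 is zero-padded to nz=5 digits\nassert frames[0] == '/data/nfs/sequences/bee/00001.jpg'\nassert frames[-1] == '/data/nfs/sequences/bee/00045.jpg'\n"
  },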
  {
    "path": "external/AR/pytracking/evaluation/otbdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom pytracking.utils.load_text import load_text\n\n\nclass OTBDataset(BaseDataset):\n    \"\"\" OTB-2015 dataset\n\n    Publication:\n        Object Tracking Benchmark\n        Wu, Yi, Jongwoo Lim, and Ming-hsuan Yan\n        TPAMI, 2015\n        http://faculty.ucmerced.edu/mhyang/papers/pami15_tracking_benchmark.pdf\n\n    Download the dataset from http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.otb_path\n        self.sequence_info_list = self._get_sequence_info_list()\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])\n\n    def _construct_sequence(self, sequence_info):\n        sequence_path = sequence_info['path']\n        nz = sequence_info['nz']\n        ext = sequence_info['ext']\n        start_frame = sequence_info['startFrame']\n        end_frame = sequence_info['endFrame']\n\n        init_omit = 0\n        if 'initOmit' in sequence_info:\n            init_omit = sequence_info['initOmit']\n\n        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, \n        sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]\n\n        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])\n\n        # NOTE: OTB has some weird annos which panda cannot handle\n        ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy')\n\n        return Sequence(sequence_info['name'], frames, 'otb', ground_truth_rect[init_omit:,:],\n                        object_class=sequence_info['object_class'])\n\n    def __len__(self):\n        return len(self.sequence_info_list)\n\n    def _get_sequence_info_list(self):\n        sequence_info_list = [\n            {\"name\": \"Basketball\", \"path\": \"Basketball/img\", \"startFrame\": 1, \"endFrame\": 725, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Basketball/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Biker\", \"path\": \"Biker/img\", \"startFrame\": 1, \"endFrame\": 142, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Biker/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Bird1\", \"path\": \"Bird1/img\", \"startFrame\": 1, \"endFrame\": 408, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Bird1/groundtruth_rect.txt\",\n             \"object_class\": \"bird\"},\n            {\"name\": \"Bird2\", \"path\": \"Bird2/img\", \"startFrame\": 1, \"endFrame\": 99, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Bird2/groundtruth_rect.txt\",\n             \"object_class\": \"bird\"},\n            {\"name\": \"BlurBody\", \"path\": \"BlurBody/img\", \"startFrame\": 1, \"endFrame\": 334, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurBody/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"BlurCar1\", \"path\": \"BlurCar1/img\", \"startFrame\": 247, \"endFrame\": 988, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurCar1/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"BlurCar2\", \"path\": \"BlurCar2/img\", \"startFrame\": 1, \"endFrame\": 585, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": 
\"BlurCar2/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"BlurCar3\", \"path\": \"BlurCar3/img\", \"startFrame\": 3, \"endFrame\": 359, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurCar3/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"BlurCar4\", \"path\": \"BlurCar4/img\", \"startFrame\": 18, \"endFrame\": 397, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurCar4/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"BlurFace\", \"path\": \"BlurFace/img\", \"startFrame\": 1, \"endFrame\": 493, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurFace/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"BlurOwl\", \"path\": \"BlurOwl/img\", \"startFrame\": 1, \"endFrame\": 631, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurOwl/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Board\", \"path\": \"Board/img\", \"startFrame\": 1, \"endFrame\": 698, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"Board/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Bolt\", \"path\": \"Bolt/img\", \"startFrame\": 1, \"endFrame\": 350, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Bolt/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Bolt2\", \"path\": \"Bolt2/img\", \"startFrame\": 1, \"endFrame\": 293, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Bolt2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Box\", \"path\": \"Box/img\", \"startFrame\": 1, \"endFrame\": 1161, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Box/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Boy\", \"path\": \"Boy/img\", \"startFrame\": 1, \"endFrame\": 602, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Boy/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Car1\", \"path\": \"Car1/img\", \"startFrame\": 1, \"endFrame\": 1020, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Car1/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"Car2\", \"path\": \"Car2/img\", \"startFrame\": 1, \"endFrame\": 913, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Car2/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"Car24\", \"path\": \"Car24/img\", \"startFrame\": 1, \"endFrame\": 3059, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Car24/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"Car4\", \"path\": \"Car4/img\", \"startFrame\": 1, \"endFrame\": 659, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Car4/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"CarDark\", \"path\": \"CarDark/img\", \"startFrame\": 1, \"endFrame\": 393, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"CarDark/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"CarScale\", \"path\": \"CarScale/img\", \"startFrame\": 1, \"endFrame\": 252, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"CarScale/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"ClifBar\", \"path\": \"ClifBar/img\", \"startFrame\": 1, \"endFrame\": 472, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"ClifBar/groundtruth_rect.txt\",\n             
\"object_class\": \"other\"},\n            {\"name\": \"Coke\", \"path\": \"Coke/img\", \"startFrame\": 1, \"endFrame\": 291, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Coke/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Couple\", \"path\": \"Couple/img\", \"startFrame\": 1, \"endFrame\": 140, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Couple/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Coupon\", \"path\": \"Coupon/img\", \"startFrame\": 1, \"endFrame\": 327, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Coupon/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Crossing\", \"path\": \"Crossing/img\", \"startFrame\": 1, \"endFrame\": 120, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Crossing/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Crowds\", \"path\": \"Crowds/img\", \"startFrame\": 1, \"endFrame\": 347, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Crowds/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Dancer\", \"path\": \"Dancer/img\", \"startFrame\": 1, \"endFrame\": 225, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dancer/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Dancer2\", \"path\": \"Dancer2/img\", \"startFrame\": 1, \"endFrame\": 150, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dancer2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"David\", \"path\": \"David/img\", \"startFrame\": 300, \"endFrame\": 770, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"David/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"David2\", \"path\": \"David2/img\", \"startFrame\": 1, \"endFrame\": 537, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"David2/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"David3\", \"path\": \"David3/img\", \"startFrame\": 1, \"endFrame\": 252, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"David3/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Deer\", \"path\": \"Deer/img\", \"startFrame\": 1, \"endFrame\": 71, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Deer/groundtruth_rect.txt\",\n             \"object_class\": \"mammal\"},\n            {\"name\": \"Diving\", \"path\": \"Diving/img\", \"startFrame\": 1, \"endFrame\": 215, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Diving/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Dog\", \"path\": \"Dog/img\", \"startFrame\": 1, \"endFrame\": 127, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dog/groundtruth_rect.txt\",\n             \"object_class\": \"dog\"},\n            {\"name\": \"Dog1\", \"path\": \"Dog1/img\", \"startFrame\": 1, \"endFrame\": 1350, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dog1/groundtruth_rect.txt\",\n             \"object_class\": \"dog\"},\n            {\"name\": \"Doll\", \"path\": \"Doll/img\", \"startFrame\": 1, \"endFrame\": 3872, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Doll/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"DragonBaby\", \"path\": \"DragonBaby/img\", \"startFrame\": 1, \"endFrame\": 113, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"DragonBaby/groundtruth_rect.txt\",\n             \"object_class\": 
\"face\"},\n            {\"name\": \"Dudek\", \"path\": \"Dudek/img\", \"startFrame\": 1, \"endFrame\": 1145, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dudek/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"FaceOcc1\", \"path\": \"FaceOcc1/img\", \"startFrame\": 1, \"endFrame\": 892, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"FaceOcc1/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"FaceOcc2\", \"path\": \"FaceOcc2/img\", \"startFrame\": 1, \"endFrame\": 812, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"FaceOcc2/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Fish\", \"path\": \"Fish/img\", \"startFrame\": 1, \"endFrame\": 476, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Fish/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"FleetFace\", \"path\": \"FleetFace/img\", \"startFrame\": 1, \"endFrame\": 707, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"FleetFace/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Football\", \"path\": \"Football/img\", \"startFrame\": 1, \"endFrame\": 362, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Football/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Football1\", \"path\": \"Football1/img\", \"startFrame\": 1, \"endFrame\": 74, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Football1/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Freeman1\", \"path\": \"Freeman1/img\", \"startFrame\": 1, \"endFrame\": 326, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Freeman1/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Freeman3\", \"path\": \"Freeman3/img\", \"startFrame\": 1, \"endFrame\": 460, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Freeman3/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Freeman4\", \"path\": \"Freeman4/img\", \"startFrame\": 1, \"endFrame\": 283, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Freeman4/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Girl\", \"path\": \"Girl/img\", \"startFrame\": 1, \"endFrame\": 500, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Girl/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Girl2\", \"path\": \"Girl2/img\", \"startFrame\": 1, \"endFrame\": 1500, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Girl2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Gym\", \"path\": \"Gym/img\", \"startFrame\": 1, \"endFrame\": 767, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Gym/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human2\", \"path\": \"Human2/img\", \"startFrame\": 1, \"endFrame\": 1128, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human3\", \"path\": \"Human3/img\", \"startFrame\": 1, \"endFrame\": 1698, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human3/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human4_2\", \"path\": \"Human4/img\", \"startFrame\": 1, \"endFrame\": 667, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human4/groundtruth_rect.2.txt\",\n             
\"object_class\": \"person\"},\n            {\"name\": \"Human5\", \"path\": \"Human5/img\", \"startFrame\": 1, \"endFrame\": 713, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human5/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human6\", \"path\": \"Human6/img\", \"startFrame\": 1, \"endFrame\": 792, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human6/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human7\", \"path\": \"Human7/img\", \"startFrame\": 1, \"endFrame\": 250, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human7/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human8\", \"path\": \"Human8/img\", \"startFrame\": 1, \"endFrame\": 128, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human8/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human9\", \"path\": \"Human9/img\", \"startFrame\": 1, \"endFrame\": 305, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human9/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Ironman\", \"path\": \"Ironman/img\", \"startFrame\": 1, \"endFrame\": 166, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Ironman/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Jogging_1\", \"path\": \"Jogging/img\", \"startFrame\": 1, \"endFrame\": 307, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Jogging/groundtruth_rect.1.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Jogging_2\", \"path\": \"Jogging/img\", \"startFrame\": 1, \"endFrame\": 307, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Jogging/groundtruth_rect.2.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Jump\", \"path\": \"Jump/img\", \"startFrame\": 1, \"endFrame\": 122, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Jump/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Jumping\", \"path\": \"Jumping/img\", \"startFrame\": 1, \"endFrame\": 313, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Jumping/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"KiteSurf\", \"path\": \"KiteSurf/img\", \"startFrame\": 1, \"endFrame\": 84, \"nz\": 4, \"ext\": \"png\", \"anno_path\": \"KiteSurf/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Lemming\", \"path\": \"Lemming/img\", \"startFrame\": 1, \"endFrame\": 1336, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Lemming/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Liquor\", \"path\": \"Liquor/img\", \"startFrame\": 1, \"endFrame\": 1741, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Liquor/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Man\", \"path\": \"Man/img\", \"startFrame\": 1, \"endFrame\": 134, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Man/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Matrix\", \"path\": \"Matrix/img\", \"startFrame\": 1, \"endFrame\": 100, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Matrix/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Mhyang\", \"path\": \"Mhyang/img\", \"startFrame\": 1, \"endFrame\": 1490, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": 
\"Mhyang/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"MotorRolling\", \"path\": \"MotorRolling/img\", \"startFrame\": 1, \"endFrame\": 164, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"MotorRolling/groundtruth_rect.txt\",\n             \"object_class\": \"vehicle\"},\n            {\"name\": \"MountainBike\", \"path\": \"MountainBike/img\", \"startFrame\": 1, \"endFrame\": 228, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"MountainBike/groundtruth_rect.txt\",\n             \"object_class\": \"bicycle\"},\n            {\"name\": \"Panda\", \"path\": \"Panda/img\", \"startFrame\": 1, \"endFrame\": 1000, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Panda/groundtruth_rect.txt\",\n             \"object_class\": \"mammal\"},\n            {\"name\": \"RedTeam\", \"path\": \"RedTeam/img\", \"startFrame\": 1, \"endFrame\": 1918, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"RedTeam/groundtruth_rect.txt\",\n             \"object_class\": \"vehicle\"},\n            {\"name\": \"Rubik\", \"path\": \"Rubik/img\", \"startFrame\": 1, \"endFrame\": 1997, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Rubik/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Shaking\", \"path\": \"Shaking/img\", \"startFrame\": 1, \"endFrame\": 365, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Shaking/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Singer1\", \"path\": \"Singer1/img\", \"startFrame\": 1, \"endFrame\": 351, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Singer1/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Singer2\", \"path\": \"Singer2/img\", \"startFrame\": 1, \"endFrame\": 366, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Singer2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skater\", \"path\": \"Skater/img\", \"startFrame\": 1, \"endFrame\": 160, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skater/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skater2\", \"path\": \"Skater2/img\", \"startFrame\": 1, \"endFrame\": 435, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skater2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skating1\", \"path\": \"Skating1/img\", \"startFrame\": 1, \"endFrame\": 400, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skating1/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skating2_1\", \"path\": \"Skating2/img\", \"startFrame\": 1, \"endFrame\": 473, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skating2/groundtruth_rect.1.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skating2_2\", \"path\": \"Skating2/img\", \"startFrame\": 1, \"endFrame\": 473, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skating2/groundtruth_rect.2.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skiing\", \"path\": \"Skiing/img\", \"startFrame\": 1, \"endFrame\": 81, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skiing/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Soccer\", \"path\": \"Soccer/img\", \"startFrame\": 1, \"endFrame\": 392, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Soccer/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Subway\", \"path\": \"Subway/img\", \"startFrame\": 
1, \"endFrame\": 175, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Subway/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Surfer\", \"path\": \"Surfer/img\", \"startFrame\": 1, \"endFrame\": 376, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Surfer/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Suv\", \"path\": \"Suv/img\", \"startFrame\": 1, \"endFrame\": 945, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Suv/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"Sylvester\", \"path\": \"Sylvester/img\", \"startFrame\": 1, \"endFrame\": 1345, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Sylvester/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Tiger1\", \"path\": \"Tiger1/img\", \"startFrame\": 1, \"endFrame\": 354, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Tiger1/groundtruth_rect.txt\", \"initOmit\": 5,\n             \"object_class\": \"other\"},\n            {\"name\": \"Tiger2\", \"path\": \"Tiger2/img\", \"startFrame\": 1, \"endFrame\": 365, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Tiger2/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Toy\", \"path\": \"Toy/img\", \"startFrame\": 1, \"endFrame\": 271, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Toy/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Trans\", \"path\": \"Trans/img\", \"startFrame\": 1, \"endFrame\": 124, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Trans/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Trellis\", \"path\": \"Trellis/img\", \"startFrame\": 1, \"endFrame\": 569, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Trellis/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Twinnings\", \"path\": \"Twinnings/img\", \"startFrame\": 1, \"endFrame\": 472, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Twinnings/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Vase\", \"path\": \"Vase/img\", \"startFrame\": 1, \"endFrame\": 271, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Vase/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Walking\", \"path\": \"Walking/img\", \"startFrame\": 1, \"endFrame\": 412, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Walking/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Walking2\", \"path\": \"Walking2/img\", \"startFrame\": 1, \"endFrame\": 500, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Walking2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Woman\", \"path\": \"Woman/img\", \"startFrame\": 1, \"endFrame\": 597, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Woman/groundtruth_rect.txt\",\n             \"object_class\": \"person\"}\n        ]\n    \n        return sequence_info_list\n"
  },
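  {
    "path": "external/AR/pytracking/evaluation/_otb_anno_example.py",
    "content": "# Illustrative sketch, not part of the original repository: OTB ground-truth\n# files mix delimiters (some comma-separated, some whitespace-separated),\n# which is presumably why OTBDataset calls load_text with delimiter=(',', None):\n# try a comma first, then fall back to numpy's default whitespace splitting.\n# A minimal stand-in with the same fallback behaviour:\nimport io\nimport numpy as np\n\n\ndef load_text_fallback(fh, delimiters=(',', None)):\n    for d in delimiters:\n        try:\n            return np.loadtxt(fh, delimiter=d, dtype=np.float64)\n        except Exception:\n            fh.seek(0)  # rewind and retry with the next delimiter\n    raise ValueError('could not parse annotation file')\n\n\n# both styles parse to an (N, 4) array of x, y, w, h boxes\ncomma = io.StringIO('198,214,34,81\\n197,214,34,81\\n')\nspace = io.StringIO('198 214 34 81\\n197 214 34 81\\n')\nassert load_text_fallback(comma).shape == (2, 4)\nassert load_text_fallback(space).shape == (2, 4)\n"
  },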
  {
    "path": "external/AR/pytracking/evaluation/running.py",
    "content": "import numpy as np\nimport multiprocessing\nimport os\nimport sys\nfrom itertools import product\nfrom collections import OrderedDict\nfrom pytracking.evaluation import Sequence, Tracker\nfrom ltr.data.image_loader import imwrite_indexed\n\n\ndef _save_tracker_output(seq: Sequence, tracker: Tracker, output: dict):\n    \"\"\"Saves the output of the tracker.\"\"\"\n\n    if not os.path.exists(tracker.results_dir):\n        os.makedirs(tracker.results_dir)\n\n    base_results_path = os.path.join(tracker.results_dir, seq.name)\n    segmentation_path = os.path.join(tracker.segmentation_dir, seq.name)\n\n    frame_names = [os.path.splitext(os.path.basename(f))[0] for f in seq.frames]\n\n    def save_bb(file, data):\n        tracked_bb = np.array(data).astype(int)\n        np.savetxt(file, tracked_bb, delimiter='\\t', fmt='%d')\n\n    def save_time(file, data):\n        exec_times = np.array(data).astype(float)\n        np.savetxt(file, exec_times, delimiter='\\t', fmt='%f')\n\n    def _convert_dict(input_dict):\n        data_dict = {}\n        for elem in input_dict:\n            for k, v in elem.items():\n                if k in data_dict.keys():\n                    data_dict[k].append(v)\n                else:\n                    data_dict[k] = [v, ]\n        return data_dict\n\n    for key, data in output.items():\n        # If data is empty\n        if not data:\n            continue\n\n        if key == 'target_bbox':\n            if isinstance(data[0], (dict, OrderedDict)):\n                data_dict = _convert_dict(data)\n\n                for obj_id, d in data_dict.items():\n                    bbox_file = '{}_{}.txt'.format(base_results_path, obj_id)\n                    save_bb(bbox_file, d)\n            else:\n                # Single-object mode\n                bbox_file = '{}.txt'.format(base_results_path)\n                save_bb(bbox_file, data)\n\n        elif key == 'time':\n            if isinstance(data[0], dict):\n                data_dict = _convert_dict(data)\n\n                for obj_id, d in data_dict.items():\n                    timings_file = '{}_{}_time.txt'.format(base_results_path, obj_id)\n                    save_time(timings_file, d)\n            else:\n                timings_file = '{}_time.txt'.format(base_results_path)\n                save_time(timings_file, data)\n\n        elif key == 'segmentation':\n            assert len(frame_names) == len(data)\n            if not os.path.exists(segmentation_path):\n                os.makedirs(segmentation_path)\n            for frame_name, frame_seg in zip(frame_names, data):\n                imwrite_indexed(os.path.join(segmentation_path, '{}.png'.format(frame_name)), frame_seg)\n\n\ndef run_sequence(seq: Sequence, tracker: Tracker, debug=False, visdom_info=None):\n    \"\"\"Runs a tracker on a sequence.\"\"\"\n\n    def _results_exist():\n        if seq.object_ids is None:\n            bbox_file = '{}/{}.txt'.format(tracker.results_dir, seq.name)\n            return os.path.isfile(bbox_file)\n        else:\n            bbox_files = ['{}/{}_{}.txt'.format(tracker.results_dir, seq.name, obj_id) for obj_id in seq.object_ids]\n            missing = [not os.path.isfile(f) for f in bbox_files]\n            return sum(missing) == 0\n\n    visdom_info = {} if visdom_info is None else visdom_info\n\n    if _results_exist() and not debug:\n        print('FPS: {}'.format(-1))\n        return\n\n    print('Tracker: {} {} {} ,  Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, 
seq.name))\n\n    if debug:\n        output = tracker.run_sequence(seq, debug=debug, visdom_info=visdom_info)\n    else:\n        try:\n            output = tracker.run_sequence(seq, debug=debug, visdom_info=visdom_info)\n        except Exception as e:\n            print(e)\n            return\n\n    sys.stdout.flush()\n\n    num_frames = len(output['time'])\n    if isinstance(output['time'][0], (dict, OrderedDict)):\n        # Multi-object mode: each entry holds a dict of per-object times.\n        exec_time = sum([sum(times.values()) for times in output['time']])\n    else:\n        exec_time = sum(output['time'])\n\n    print('FPS: {}'.format(num_frames / exec_time))\n\n    if not debug:\n        _save_tracker_output(seq, tracker, output)\n\n\ndef run_dataset(dataset, trackers, debug=False, threads=0, visdom_info=None):\n    \"\"\"Runs a list of trackers on a dataset.\n    args:\n        dataset: List of Sequence instances, forming a dataset.\n        trackers: List of Tracker instances.\n        debug: Debug level.\n        threads: Number of threads to use (default 0).\n        visdom_info: Dict containing information about the server for visdom\n    \"\"\"\n    # 'spawn' is required when worker processes use CUDA; set it once here.\n    multiprocessing.set_start_method('spawn', force=True)\n\n    print('Evaluating {:4d} trackers on {:5d} sequences'.format(len(trackers), len(dataset)))\n\n    visdom_info = {} if visdom_info is None else visdom_info\n\n    if threads == 0:\n        mode = 'sequential'\n    else:\n        mode = 'parallel'\n\n    if mode == 'sequential':\n        for seq in dataset:\n            for tracker_info in trackers:\n                run_sequence(seq, tracker_info, debug=debug, visdom_info=visdom_info)\n    elif mode == 'parallel':\n        param_list = [(seq, tracker_info, debug, visdom_info) for seq, tracker_info in product(dataset, trackers)]\n        with multiprocessing.Pool(processes=threads) as pool:\n            pool.starmap(run_sequence, param_list)\n    print('Done')\n"
  },
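  {
    "path": "external/AR/pytracking/evaluation/_running_example.py",
    "content": "# Illustrative sketch, not part of the original repository: the dispatch\n# pattern used by run_dataset above. Every (sequence, tracker) pair from\n# itertools.product is run either sequentially (threads == 0) or through a\n# multiprocessing Pool; tracker and sequence names below are hypothetical.\nimport multiprocessing\nfrom itertools import product\n\n\ndef run_pair(seq_name, tracker_name):\n    print('running {} on {}'.format(tracker_name, seq_name))\n\n\nif __name__ == '__main__':\n    # 'spawn' matches run_dataset; it is the start method that works with CUDA\n    multiprocessing.set_start_method('spawn', force=True)\n    dataset = ['Basketball', 'Biker']\n    trackers = ['artrack_seq', 'dimp']\n    threads = 2\n    param_list = [(s, t) for s, t in product(dataset, trackers)]\n    if threads == 0:\n        for s, t in param_list:\n            run_pair(s, t)\n    else:\n        with multiprocessing.Pool(processes=threads) as pool:\n            pool.starmap(run_pair, param_list)\n"
  },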
  {
    "path": "external/AR/pytracking/evaluation/tpldataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom pytracking.utils.load_text import load_text\n\n\nclass TPLDataset(BaseDataset):\n    \"\"\"\n    Temple Color 128 dataset\n\n    Publication:\n        Encoding Color Information for Visual Tracking: Algorithms and Benchmark\n        P. Liang, E. Blasch, and H. Ling\n        TIP, 2015\n        http://www.dabi.temple.edu/~hbling/publication/TColor-128.pdf\n\n    Download the dataset from http://www.dabi.temple.edu/~hbling/data/TColor-128/TColor-128.html\n    \"\"\"\n    def __init__(self, exclude_otb=False):\n        \"\"\"\n        args:\n            exclude_otb (bool) - If True, sequences overlapping with the OTB dataset are excluded\n        \"\"\"\n        super().__init__()\n        self.base_path = self.env_settings.tpl_path\n        self.sequence_info_list = self._get_sequence_info_list(exclude_otb)\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])\n\n    def _construct_sequence(self, sequence_info):\n        sequence_path = sequence_info['path']\n        nz = sequence_info['nz']\n        ext = sequence_info['ext']\n        start_frame = sequence_info['startFrame']\n        end_frame = sequence_info['endFrame']\n\n        init_omit = 0\n        if 'initOmit' in sequence_info:\n            init_omit = sequence_info['initOmit']\n\n        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, \n        sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]\n\n        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy')\n\n        return Sequence(sequence_info['name'], frames, 'tpl', ground_truth_rect[init_omit:,:])\n\n    def __len__(self):\n        return len(self.sequence_info_list)\n\n    def _get_sequence_info_list(self, exclude_otb=False):\n        sequence_info_list = [\n            {\"name\": \"tpl_Skating2\", \"path\": \"tpl_Skating2/img\", \"startFrame\": 1, \"endFrame\": 707, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Skating2/Skating2_gt.txt\"},\n            {\"name\": \"tpl_Pool_ce3\", \"path\": \"tpl_Pool_ce3/img\", \"startFrame\": 1, \"endFrame\": 124, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Pool_ce3/Pool_ce3_gt.txt\"},\n            {\"name\": \"tpl_Microphone_ce1\", \"path\": \"tpl_Microphone_ce1/img\", \"startFrame\": 1, \"endFrame\": 204, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Microphone_ce1/Microphone_ce1_gt.txt\"},\n            {\"name\": \"tpl_Torus\", \"path\": \"tpl_Torus/img\", \"startFrame\": 1, \"endFrame\": 264, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Torus/Torus_gt.txt\"},\n            {\"name\": \"tpl_Lemming\", \"path\": \"tpl_Lemming/img\", \"startFrame\": 1, \"endFrame\": 1336, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Lemming/Lemming_gt.txt\"},\n            {\"name\": \"tpl_Eagle_ce\", \"path\": \"tpl_Eagle_ce/img\", \"startFrame\": 1, \"endFrame\": 112, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Eagle_ce/Eagle_ce_gt.txt\"},\n            {\"name\": \"tpl_Skating_ce2\", \"path\": \"tpl_Skating_ce2/img\", \"startFrame\": 1, \"endFrame\": 497, \"nz\": 4,\n             \"ext\": \"jpg\", 
\"anno_path\": \"tpl_Skating_ce2/Skating_ce2_gt.txt\"},\n            {\"name\": \"tpl_Yo_yos_ce3\", \"path\": \"tpl_Yo_yos_ce3/img\", \"startFrame\": 1, \"endFrame\": 201, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Yo_yos_ce3/Yo-yos_ce3_gt.txt\"},\n            {\"name\": \"tpl_Board\", \"path\": \"tpl_Board/img\", \"startFrame\": 1, \"endFrame\": 598, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Board/Board_gt.txt\"},\n            {\"name\": \"tpl_Tennis_ce3\", \"path\": \"tpl_Tennis_ce3/img\", \"startFrame\": 1, \"endFrame\": 204, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Tennis_ce3/Tennis_ce3_gt.txt\"},\n            {\"name\": \"tpl_SuperMario_ce\", \"path\": \"tpl_SuperMario_ce/img\", \"startFrame\": 1, \"endFrame\": 146, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_SuperMario_ce/SuperMario_ce_gt.txt\"},\n            {\"name\": \"tpl_Yo_yos_ce1\", \"path\": \"tpl_Yo_yos_ce1/img\", \"startFrame\": 1, \"endFrame\": 235, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Yo_yos_ce1/Yo-yos_ce1_gt.txt\"},\n            {\"name\": \"tpl_Soccer\", \"path\": \"tpl_Soccer/img\", \"startFrame\": 1, \"endFrame\": 392, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Soccer/Soccer_gt.txt\"},\n            {\"name\": \"tpl_Fish_ce2\", \"path\": \"tpl_Fish_ce2/img\", \"startFrame\": 1, \"endFrame\": 573, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Fish_ce2/Fish_ce2_gt.txt\"},\n            {\"name\": \"tpl_Liquor\", \"path\": \"tpl_Liquor/img\", \"startFrame\": 1, \"endFrame\": 1741, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Liquor/Liquor_gt.txt\"},\n            {\"name\": \"tpl_Plane_ce2\", \"path\": \"tpl_Plane_ce2/img\", \"startFrame\": 1, \"endFrame\": 653, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Plane_ce2/Plane_ce2_gt.txt\"},\n            {\"name\": \"tpl_Couple\", \"path\": \"tpl_Couple/img\", \"startFrame\": 1, \"endFrame\": 140, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Couple/Couple_gt.txt\"},\n            {\"name\": \"tpl_Logo_ce\", \"path\": \"tpl_Logo_ce/img\", \"startFrame\": 1, \"endFrame\": 610, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Logo_ce/Logo_ce_gt.txt\"},\n            {\"name\": \"tpl_Hand_ce2\", \"path\": \"tpl_Hand_ce2/img\", \"startFrame\": 1, \"endFrame\": 251, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Hand_ce2/Hand_ce2_gt.txt\"},\n            {\"name\": \"tpl_Kite_ce2\", \"path\": \"tpl_Kite_ce2/img\", \"startFrame\": 1, \"endFrame\": 658, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Kite_ce2/Kite_ce2_gt.txt\"},\n            {\"name\": \"tpl_Walking\", \"path\": \"tpl_Walking/img\", \"startFrame\": 1, \"endFrame\": 412, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Walking/Walking_gt.txt\"},\n            {\"name\": \"tpl_David\", \"path\": \"tpl_David/img\", \"startFrame\": 300, \"endFrame\": 770, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_David/David_gt.txt\"},\n            {\"name\": \"tpl_Boat_ce1\", \"path\": \"tpl_Boat_ce1/img\", \"startFrame\": 1, \"endFrame\": 377, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Boat_ce1/Boat_ce1_gt.txt\"},\n            {\"name\": \"tpl_Airport_ce\", \"path\": \"tpl_Airport_ce/img\", \"startFrame\": 1, \"endFrame\": 148, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Airport_ce/Airport_ce_gt.txt\"},\n          
  {\"name\": \"tpl_Tiger2\", \"path\": \"tpl_Tiger2/img\", \"startFrame\": 1, \"endFrame\": 365, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Tiger2/Tiger2_gt.txt\"},\n            {\"name\": \"tpl_Suitcase_ce\", \"path\": \"tpl_Suitcase_ce/img\", \"startFrame\": 1, \"endFrame\": 184, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Suitcase_ce/Suitcase_ce_gt.txt\"},\n            {\"name\": \"tpl_TennisBall_ce\", \"path\": \"tpl_TennisBall_ce/img\", \"startFrame\": 1, \"endFrame\": 288, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_TennisBall_ce/TennisBall_ce_gt.txt\"},\n            {\"name\": \"tpl_Singer_ce1\", \"path\": \"tpl_Singer_ce1/img\", \"startFrame\": 1, \"endFrame\": 214, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Singer_ce1/Singer_ce1_gt.txt\"},\n            {\"name\": \"tpl_Pool_ce2\", \"path\": \"tpl_Pool_ce2/img\", \"startFrame\": 1, \"endFrame\": 133, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Pool_ce2/Pool_ce2_gt.txt\"},\n            {\"name\": \"tpl_Surf_ce3\", \"path\": \"tpl_Surf_ce3/img\", \"startFrame\": 1, \"endFrame\": 279, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Surf_ce3/Surf_ce3_gt.txt\"},\n            {\"name\": \"tpl_Bird\", \"path\": \"tpl_Bird/img\", \"startFrame\": 1, \"endFrame\": 99, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Bird/Bird_gt.txt\"},\n            {\"name\": \"tpl_Crossing\", \"path\": \"tpl_Crossing/img\", \"startFrame\": 1, \"endFrame\": 120, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Crossing/Crossing_gt.txt\"},\n            {\"name\": \"tpl_Plate_ce1\", \"path\": \"tpl_Plate_ce1/img\", \"startFrame\": 1, \"endFrame\": 142, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Plate_ce1/Plate_ce1_gt.txt\"},\n            {\"name\": \"tpl_Cup\", \"path\": \"tpl_Cup/img\", \"startFrame\": 1, \"endFrame\": 303, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Cup/Cup_gt.txt\"},\n            {\"name\": \"tpl_Surf_ce2\", \"path\": \"tpl_Surf_ce2/img\", \"startFrame\": 1, \"endFrame\": 391, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Surf_ce2/Surf_ce2_gt.txt\"},\n            {\"name\": \"tpl_Busstation_ce2\", \"path\": \"tpl_Busstation_ce2/img\", \"startFrame\": 6, \"endFrame\": 400, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Busstation_ce2/Busstation_ce2_gt.txt\"},\n            {\"name\": \"tpl_Charger_ce\", \"path\": \"tpl_Charger_ce/img\", \"startFrame\": 1, \"endFrame\": 298, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Charger_ce/Charger_ce_gt.txt\"},\n            {\"name\": \"tpl_Pool_ce1\", \"path\": \"tpl_Pool_ce1/img\", \"startFrame\": 1, \"endFrame\": 166, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Pool_ce1/Pool_ce1_gt.txt\"},\n            {\"name\": \"tpl_MountainBike\", \"path\": \"tpl_MountainBike/img\", \"startFrame\": 1, \"endFrame\": 228, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_MountainBike/MountainBike_gt.txt\"},\n            {\"name\": \"tpl_Guitar_ce1\", \"path\": \"tpl_Guitar_ce1/img\", \"startFrame\": 1, \"endFrame\": 268, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Guitar_ce1/Guitar_ce1_gt.txt\"},\n            {\"name\": \"tpl_Busstation_ce1\", \"path\": \"tpl_Busstation_ce1/img\", \"startFrame\": 1, \"endFrame\": 363, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Busstation_ce1/Busstation_ce1_gt.txt\"},\n 
           {\"name\": \"tpl_Diving\", \"path\": \"tpl_Diving/img\", \"startFrame\": 1, \"endFrame\": 231, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Diving/Diving_gt.txt\"},\n            {\"name\": \"tpl_Skating_ce1\", \"path\": \"tpl_Skating_ce1/img\", \"startFrame\": 1, \"endFrame\": 409, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Skating_ce1/Skating_ce1_gt.txt\"},\n            {\"name\": \"tpl_Hurdle_ce2\", \"path\": \"tpl_Hurdle_ce2/img\", \"startFrame\": 27, \"endFrame\": 330, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Hurdle_ce2/Hurdle_ce2_gt.txt\"},\n            {\"name\": \"tpl_Plate_ce2\", \"path\": \"tpl_Plate_ce2/img\", \"startFrame\": 1, \"endFrame\": 181, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Plate_ce2/Plate_ce2_gt.txt\"},\n            {\"name\": \"tpl_CarDark\", \"path\": \"tpl_CarDark/img\", \"startFrame\": 1, \"endFrame\": 393, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_CarDark/CarDark_gt.txt\"},\n            {\"name\": \"tpl_Singer_ce2\", \"path\": \"tpl_Singer_ce2/img\", \"startFrame\": 1, \"endFrame\": 999, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Singer_ce2/Singer_ce2_gt.txt\"},\n            {\"name\": \"tpl_Shaking\", \"path\": \"tpl_Shaking/img\", \"startFrame\": 1, \"endFrame\": 365, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Shaking/Shaking_gt.txt\"},\n            {\"name\": \"tpl_Iceskater\", \"path\": \"tpl_Iceskater/img\", \"startFrame\": 1, \"endFrame\": 500, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Iceskater/Iceskater_gt.txt\"},\n            {\"name\": \"tpl_Badminton_ce2\", \"path\": \"tpl_Badminton_ce2/img\", \"startFrame\": 1, \"endFrame\": 705, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Badminton_ce2/Badminton_ce2_gt.txt\"},\n            {\"name\": \"tpl_Spiderman_ce\", \"path\": \"tpl_Spiderman_ce/img\", \"startFrame\": 1, \"endFrame\": 351, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Spiderman_ce/Spiderman_ce_gt.txt\"},\n            {\"name\": \"tpl_Kite_ce1\", \"path\": \"tpl_Kite_ce1/img\", \"startFrame\": 1, \"endFrame\": 484, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Kite_ce1/Kite_ce1_gt.txt\"},\n            {\"name\": \"tpl_Skyjumping_ce\", \"path\": \"tpl_Skyjumping_ce/img\", \"startFrame\": 1, \"endFrame\": 938, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Skyjumping_ce/Skyjumping_ce_gt.txt\"},\n            {\"name\": \"tpl_Ball_ce1\", \"path\": \"tpl_Ball_ce1/img\", \"startFrame\": 1, \"endFrame\": 391, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Ball_ce1/Ball_ce1_gt.txt\"},\n            {\"name\": \"tpl_Yo_yos_ce2\", \"path\": \"tpl_Yo_yos_ce2/img\", \"startFrame\": 1, \"endFrame\": 454, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Yo_yos_ce2/Yo-yos_ce2_gt.txt\"},\n            {\"name\": \"tpl_Ironman\", \"path\": \"tpl_Ironman/img\", \"startFrame\": 1, \"endFrame\": 166, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Ironman/Ironman_gt.txt\"},\n            {\"name\": \"tpl_FaceOcc1\", \"path\": \"tpl_FaceOcc1/img\", \"startFrame\": 1, \"endFrame\": 892, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_FaceOcc1/FaceOcc1_gt.txt\"},\n            {\"name\": \"tpl_Surf_ce1\", \"path\": \"tpl_Surf_ce1/img\", \"startFrame\": 1, \"endFrame\": 404, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": 
\"tpl_Surf_ce1/Surf_ce1_gt.txt\"},\n            {\"name\": \"tpl_Ring_ce\", \"path\": \"tpl_Ring_ce/img\", \"startFrame\": 1, \"endFrame\": 201, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Ring_ce/Ring_ce_gt.txt\"},\n            {\"name\": \"tpl_Surf_ce4\", \"path\": \"tpl_Surf_ce4/img\", \"startFrame\": 1, \"endFrame\": 135, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Surf_ce4/Surf_ce4_gt.txt\"},\n            {\"name\": \"tpl_Ball_ce4\", \"path\": \"tpl_Ball_ce4/img\", \"startFrame\": 1, \"endFrame\": 538, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Ball_ce4/Ball_ce4_gt.txt\"},\n            {\"name\": \"tpl_Bikeshow_ce\", \"path\": \"tpl_Bikeshow_ce/img\", \"startFrame\": 1, \"endFrame\": 361, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Bikeshow_ce/Bikeshow_ce_gt.txt\"},\n            {\"name\": \"tpl_Kobe_ce\", \"path\": \"tpl_Kobe_ce/img\", \"startFrame\": 1, \"endFrame\": 582, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Kobe_ce/Kobe_ce_gt.txt\"},\n            {\"name\": \"tpl_Tiger1\", \"path\": \"tpl_Tiger1/img\", \"startFrame\": 1, \"endFrame\": 354, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Tiger1/Tiger1_gt.txt\"},\n            {\"name\": \"tpl_Skiing\", \"path\": \"tpl_Skiing/img\", \"startFrame\": 1, \"endFrame\": 81, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Skiing/Skiing_gt.txt\"},\n            {\"name\": \"tpl_Tennis_ce1\", \"path\": \"tpl_Tennis_ce1/img\", \"startFrame\": 1, \"endFrame\": 454, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Tennis_ce1/Tennis_ce1_gt.txt\"},\n            {\"name\": \"tpl_Carchasing_ce4\", \"path\": \"tpl_Carchasing_ce4/img\", \"startFrame\": 1, \"endFrame\": 442, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Carchasing_ce4/Carchasing_ce4_gt.txt\"},\n            {\"name\": \"tpl_Walking2\", \"path\": \"tpl_Walking2/img\", \"startFrame\": 1, \"endFrame\": 500, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Walking2/Walking2_gt.txt\"},\n            {\"name\": \"tpl_Sailor_ce\", \"path\": \"tpl_Sailor_ce/img\", \"startFrame\": 1, \"endFrame\": 402, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Sailor_ce/Sailor_ce_gt.txt\"},\n            {\"name\": \"tpl_Railwaystation_ce\", \"path\": \"tpl_Railwaystation_ce/img\", \"startFrame\": 1, \"endFrame\": 413,\n             \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"tpl_Railwaystation_ce/Railwaystation_ce_gt.txt\"},\n            {\"name\": \"tpl_Bee_ce\", \"path\": \"tpl_Bee_ce/img\", \"startFrame\": 1, \"endFrame\": 90, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Bee_ce/Bee_ce_gt.txt\"},\n            {\"name\": \"tpl_Girl\", \"path\": \"tpl_Girl/img\", \"startFrame\": 1, \"endFrame\": 500, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Girl/Girl_gt.txt\"},\n            {\"name\": \"tpl_Subway\", \"path\": \"tpl_Subway/img\", \"startFrame\": 1, \"endFrame\": 175, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Subway/Subway_gt.txt\"},\n            {\"name\": \"tpl_David3\", \"path\": \"tpl_David3/img\", \"startFrame\": 1, \"endFrame\": 252, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_David3/David3_gt.txt\"},\n            {\"name\": \"tpl_Electricalbike_ce\", \"path\": \"tpl_Electricalbike_ce/img\", \"startFrame\": 1, \"endFrame\": 818,\n             \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": 
\"tpl_Electricalbike_ce/Electricalbike_ce_gt.txt\"},\n            {\"name\": \"tpl_Michaeljackson_ce\", \"path\": \"tpl_Michaeljackson_ce/img\", \"startFrame\": 1, \"endFrame\": 393,\n             \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"tpl_Michaeljackson_ce/Michaeljackson_ce_gt.txt\"},\n            {\"name\": \"tpl_Woman\", \"path\": \"tpl_Woman/img\", \"startFrame\": 1, \"endFrame\": 597, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Woman/Woman_gt.txt\"},\n            {\"name\": \"tpl_TableTennis_ce\", \"path\": \"tpl_TableTennis_ce/img\", \"startFrame\": 1, \"endFrame\": 198, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_TableTennis_ce/TableTennis_ce_gt.txt\"},\n            {\"name\": \"tpl_Motorbike_ce\", \"path\": \"tpl_Motorbike_ce/img\", \"startFrame\": 1, \"endFrame\": 563, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Motorbike_ce/Motorbike_ce_gt.txt\"},\n            {\"name\": \"tpl_Baby_ce\", \"path\": \"tpl_Baby_ce/img\", \"startFrame\": 1, \"endFrame\": 296, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Baby_ce/Baby_ce_gt.txt\"},\n            {\"name\": \"tpl_Gym\", \"path\": \"tpl_Gym/img\", \"startFrame\": 1, \"endFrame\": 766, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Gym/Gym_gt.txt\"},\n            {\"name\": \"tpl_Matrix\", \"path\": \"tpl_Matrix/img\", \"startFrame\": 1, \"endFrame\": 100, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Matrix/Matrix_gt.txt\"},\n            {\"name\": \"tpl_Kite_ce3\", \"path\": \"tpl_Kite_ce3/img\", \"startFrame\": 1, \"endFrame\": 528, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Kite_ce3/Kite_ce3_gt.txt\"},\n            {\"name\": \"tpl_Fish_ce1\", \"path\": \"tpl_Fish_ce1/img\", \"startFrame\": 1, \"endFrame\": 401, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Fish_ce1/Fish_ce1_gt.txt\"},\n            {\"name\": \"tpl_Hand_ce1\", \"path\": \"tpl_Hand_ce1/img\", \"startFrame\": 1, \"endFrame\": 401, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Hand_ce1/Hand_ce1_gt.txt\"},\n            {\"name\": \"tpl_Doll\", \"path\": \"tpl_Doll/img\", \"startFrame\": 1, \"endFrame\": 3872, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Doll/Doll_gt.txt\"},\n            {\"name\": \"tpl_Carchasing_ce3\", \"path\": \"tpl_Carchasing_ce3/img\", \"startFrame\": 1, \"endFrame\": 572, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Carchasing_ce3/Carchasing_ce3_gt.txt\"},\n            {\"name\": \"tpl_Thunder_ce\", \"path\": \"tpl_Thunder_ce/img\", \"startFrame\": 1, \"endFrame\": 375, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Thunder_ce/Thunder_ce_gt.txt\"},\n            {\"name\": \"tpl_Singer2\", \"path\": \"tpl_Singer2/img\", \"startFrame\": 1, \"endFrame\": 366, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Singer2/Singer2_gt.txt\"},\n            {\"name\": \"tpl_Basketball\", \"path\": \"tpl_Basketball/img\", \"startFrame\": 1, \"endFrame\": 725, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Basketball/Basketball_gt.txt\"},\n            {\"name\": \"tpl_Hand\", \"path\": \"tpl_Hand/img\", \"startFrame\": 1, \"endFrame\": 244, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Hand/Hand_gt.txt\"},\n            {\"name\": \"tpl_Cup_ce\", \"path\": \"tpl_Cup_ce/img\", \"startFrame\": 1, \"endFrame\": 338, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": 
\"tpl_Cup_ce/Cup_ce_gt.txt\"},\n            {\"name\": \"tpl_MotorRolling\", \"path\": \"tpl_MotorRolling/img\", \"startFrame\": 1, \"endFrame\": 164, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_MotorRolling/MotorRolling_gt.txt\"},\n            {\"name\": \"tpl_Boat_ce2\", \"path\": \"tpl_Boat_ce2/img\", \"startFrame\": 1, \"endFrame\": 412, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Boat_ce2/Boat_ce2_gt.txt\"},\n            {\"name\": \"tpl_CarScale\", \"path\": \"tpl_CarScale/img\", \"startFrame\": 1, \"endFrame\": 252, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_CarScale/CarScale_gt.txt\"},\n            {\"name\": \"tpl_Sunshade\", \"path\": \"tpl_Sunshade/img\", \"startFrame\": 1, \"endFrame\": 172, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Sunshade/Sunshade_gt.txt\"},\n            {\"name\": \"tpl_Football1\", \"path\": \"tpl_Football1/img\", \"startFrame\": 1, \"endFrame\": 74, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Football1/Football1_gt.txt\"},\n            {\"name\": \"tpl_Singer1\", \"path\": \"tpl_Singer1/img\", \"startFrame\": 1, \"endFrame\": 351, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Singer1/Singer1_gt.txt\"},\n            {\"name\": \"tpl_Hurdle_ce1\", \"path\": \"tpl_Hurdle_ce1/img\", \"startFrame\": 1, \"endFrame\": 300, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Hurdle_ce1/Hurdle_ce1_gt.txt\"},\n            {\"name\": \"tpl_Basketball_ce3\", \"path\": \"tpl_Basketball_ce3/img\", \"startFrame\": 1, \"endFrame\": 441, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Basketball_ce3/Basketball_ce3_gt.txt\"},\n            {\"name\": \"tpl_Toyplane_ce\", \"path\": \"tpl_Toyplane_ce/img\", \"startFrame\": 1, \"endFrame\": 405, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Toyplane_ce/Toyplane_ce_gt.txt\"},\n            {\"name\": \"tpl_Skating1\", \"path\": \"tpl_Skating1/img\", \"startFrame\": 1, \"endFrame\": 400, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Skating1/Skating1_gt.txt\"},\n            {\"name\": \"tpl_Juice\", \"path\": \"tpl_Juice/img\", \"startFrame\": 1, \"endFrame\": 404, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Juice/Juice_gt.txt\"},\n            {\"name\": \"tpl_Biker\", \"path\": \"tpl_Biker/img\", \"startFrame\": 1, \"endFrame\": 180, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Biker/Biker_gt.txt\"},\n            {\"name\": \"tpl_Boy\", \"path\": \"tpl_Boy/img\", \"startFrame\": 1, \"endFrame\": 602, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Boy/Boy_gt.txt\"},\n            {\"name\": \"tpl_Jogging1\", \"path\": \"tpl_Jogging1/img\", \"startFrame\": 1, \"endFrame\": 307, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Jogging1/Jogging1_gt.txt\"},\n            {\"name\": \"tpl_Deer\", \"path\": \"tpl_Deer/img\", \"startFrame\": 1, \"endFrame\": 71, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Deer/Deer_gt.txt\"},\n            {\"name\": \"tpl_Panda\", \"path\": \"tpl_Panda/img\", \"startFrame\": 1, \"endFrame\": 241, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Panda/Panda_gt.txt\"},\n            {\"name\": \"tpl_Coke\", \"path\": \"tpl_Coke/img\", \"startFrame\": 1, \"endFrame\": 291, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Coke/Coke_gt.txt\"},\n            {\"name\": \"tpl_Carchasing_ce1\", \"path\": 
\"tpl_Carchasing_ce1/img\", \"startFrame\": 1, \"endFrame\": 501, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Carchasing_ce1/Carchasing_ce1_gt.txt\"},\n            {\"name\": \"tpl_Badminton_ce1\", \"path\": \"tpl_Badminton_ce1/img\", \"startFrame\": 1, \"endFrame\": 579, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Badminton_ce1/Badminton_ce1_gt.txt\"},\n            {\"name\": \"tpl_Trellis\", \"path\": \"tpl_Trellis/img\", \"startFrame\": 1, \"endFrame\": 569, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Trellis/Trellis_gt.txt\"},\n            {\"name\": \"tpl_Face_ce2\", \"path\": \"tpl_Face_ce2/img\", \"startFrame\": 1, \"endFrame\": 148, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Face_ce2/Face_ce2_gt.txt\"},\n            {\"name\": \"tpl_Ball_ce2\", \"path\": \"tpl_Ball_ce2/img\", \"startFrame\": 1, \"endFrame\": 603, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Ball_ce2/Ball_ce2_gt.txt\"},\n            {\"name\": \"tpl_Skiing_ce\", \"path\": \"tpl_Skiing_ce/img\", \"startFrame\": 1, \"endFrame\": 511, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Skiing_ce/Skiing_ce_gt.txt\"},\n            {\"name\": \"tpl_Jogging2\", \"path\": \"tpl_Jogging2/img\", \"startFrame\": 1, \"endFrame\": 307, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Jogging2/Jogging2_gt.txt\"},\n            {\"name\": \"tpl_Bike_ce1\", \"path\": \"tpl_Bike_ce1/img\", \"startFrame\": 1, \"endFrame\": 801, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Bike_ce1/Bike_ce1_gt.txt\"},\n            {\"name\": \"tpl_Bike_ce2\", \"path\": \"tpl_Bike_ce2/img\", \"startFrame\": 1, \"endFrame\": 812, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Bike_ce2/Bike_ce2_gt.txt\"},\n            {\"name\": \"tpl_Ball_ce3\", \"path\": \"tpl_Ball_ce3/img\", \"startFrame\": 1, \"endFrame\": 273, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Ball_ce3/Ball_ce3_gt.txt\"},\n            {\"name\": \"tpl_Girlmov\", \"path\": \"tpl_Girlmov/img\", \"startFrame\": 1, \"endFrame\": 1500, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Girlmov/Girlmov_gt.txt\"},\n            {\"name\": \"tpl_Bolt\", \"path\": \"tpl_Bolt/img\", \"startFrame\": 1, \"endFrame\": 350, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Bolt/Bolt_gt.txt\"},\n            {\"name\": \"tpl_Basketball_ce2\", \"path\": \"tpl_Basketball_ce2/img\", \"startFrame\": 1, \"endFrame\": 455, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Basketball_ce2/Basketball_ce2_gt.txt\"},\n            {\"name\": \"tpl_Bicycle\", \"path\": \"tpl_Bicycle/img\", \"startFrame\": 1, \"endFrame\": 271, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Bicycle/Bicycle_gt.txt\"},\n            {\"name\": \"tpl_Face_ce\", \"path\": \"tpl_Face_ce/img\", \"startFrame\": 1, \"endFrame\": 620, \"nz\": 4, \"ext\": \"jpg\",\n             \"anno_path\": \"tpl_Face_ce/Face_ce_gt.txt\"},\n            {\"name\": \"tpl_Basketball_ce1\", \"path\": \"tpl_Basketball_ce1/img\", \"startFrame\": 1, \"endFrame\": 496, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Basketball_ce1/Basketball_ce1_gt.txt\"},\n            {\"name\": \"tpl_Messi_ce\", \"path\": \"tpl_Messi_ce/img\", \"startFrame\": 1, \"endFrame\": 272, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Messi_ce/Messi_ce_gt.txt\"},\n            {\"name\": \"tpl_Tennis_ce2\", \"path\": 
\"tpl_Tennis_ce2/img\", \"startFrame\": 1, \"endFrame\": 305, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Tennis_ce2/Tennis_ce2_gt.txt\"},\n            {\"name\": \"tpl_Microphone_ce2\", \"path\": \"tpl_Microphone_ce2/img\", \"startFrame\": 1, \"endFrame\": 103, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Microphone_ce2/Microphone_ce2_gt.txt\"},\n            {\"name\": \"tpl_Guitar_ce2\", \"path\": \"tpl_Guitar_ce2/img\", \"startFrame\": 1, \"endFrame\": 313, \"nz\": 4,\n             \"ext\": \"jpg\", \"anno_path\": \"tpl_Guitar_ce2/Guitar_ce2_gt.txt\"}\n\n        ]\n\n        otb_sequences = ['tpl_Skating2', 'tpl_Lemming', 'tpl_Board', 'tpl_Soccer', 'tpl_Liquor', 'tpl_Couple', 'tpl_Walking', 'tpl_David', 'tpl_Tiger2', 'tpl_Bird', 'tpl_Crossing', 'tpl_MountainBike',\n                         'tpl_Diving', 'tpl_CarDark', 'tpl_Shaking', 'tpl_Ironman', 'tpl_FaceOcc1', 'tpl_Tiger1', 'tpl_Skiing', 'tpl_Walking2', 'tpl_Girl', 'tpl_Girlmov', 'tpl_Subway', 'tpl_David3', 'tpl_Woman',\n                         'tpl_Gym', 'tpl_Matrix', 'tpl_Doll', 'tpl_Singer2', 'tpl_Basketball', 'tpl_MotorRolling', 'tpl_CarScale', 'tpl_Football1', 'tpl_Singer1', 'tpl_Skating1', 'tpl_Biker',\n                         'tpl_Boy', 'tpl_Jogging1', 'tpl_Deer', 'tpl_Panda', 'tpl_Coke', 'tpl_Trellis', 'tpl_Jogging2', 'tpl_Bolt', ]\n        if exclude_otb:\n            sequence_info_list_nootb = []\n            for seq in sequence_info_list:\n                if seq['name'] not in otb_sequences:\n                    sequence_info_list_nootb.append(seq)\n\n            sequence_info_list = sequence_info_list_nootb\n\n        return sequence_info_list\n"
  },
  {
    "path": "external/AR/pytracking/evaluation/tracker.py",
    "content": "import importlib\nimport os\nimport numpy as np\nfrom collections import OrderedDict\nfrom pytracking.evaluation.environment import env_settings\nimport time\nimport cv2 as cv\nfrom pytracking.utils.visdom import Visdom\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\nfrom pytracking.utils.plotting import draw_figure, overlay_mask\nfrom pytracking.utils.convert_vot_anno_to_rect import convert_vot_anno_to_rect\nfrom ltr.data.bounding_box_utils import masks_to_bboxes\nfrom pytracking.evaluation.multi_object_wrapper import MultiObjectWrapper\nimport torch\n\n\n_tracker_disp_colors = {1: (0, 255, 0), 2: (0, 0, 255), 3: (255, 0, 0),\n                        4: (255, 255, 255), 5: (0, 0, 0), 6: (0, 255, 128),\n                        7: (123, 123, 123), 8: (255, 128, 0), 9: (128, 0, 255)}\n\n\ndef trackerlist(name: str, parameter_name: str, run_ids = None, display_name: str = None):\n    \"\"\"Generate list of trackers.\n    args:\n        name: Name of tracking method.\n        parameter_name: Name of parameter file.\n        run_ids: A single or list of run_ids.\n        display_name: Name to be displayed in the result plots.\n    \"\"\"\n    if run_ids is None or isinstance(run_ids, int):\n        run_ids = [run_ids]\n    return [Tracker(name, parameter_name, run_id, display_name) for run_id in run_ids]\n\n\nclass Tracker:\n    \"\"\"Wraps the tracker for evaluation and running purposes.\n    args:\n        name: Name of tracking method.\n        parameter_name: Name of parameter file.\n        run_id: The run id.\n        display_name: Name to be displayed in the result plots.\n    \"\"\"\n\n    def __init__(self, name: str, parameter_name: str, run_id: int = None, display_name: str = None):\n        assert run_id is None or isinstance(run_id, int)\n\n        self.name = name\n        self.parameter_name = parameter_name\n        self.run_id = run_id\n        self.display_name = display_name\n\n        env = env_settings()\n        if self.run_id is None:\n            self.results_dir = '{}/{}/{}'.format(env.results_path, self.name, self.parameter_name)\n            self.segmentation_dir = '{}/{}/{}'.format(env.segmentation_path, self.name, self.parameter_name)\n        else:\n            self.results_dir = '{}/{}/{}_{:03d}'.format(env.results_path, self.name, self.parameter_name, self.run_id)\n            self.segmentation_dir = '{}/{}/{}_{:03d}'.format(env.segmentation_path, self.name, self.parameter_name, self.run_id)\n\n        tracker_module_abspath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'tracker', self.name))\n        if os.path.isdir(tracker_module_abspath):\n            tracker_module = importlib.import_module('pytracking.tracker.{}'.format(self.name))\n            self.tracker_class = tracker_module.get_tracker_class()\n        else:\n            self.tracker_class = None\n\n        self.visdom = None\n\n\n    def _init_visdom(self, visdom_info, debug):\n        visdom_info = {} if visdom_info is None else visdom_info\n        self.pause_mode = False\n        self.step = False\n        if debug > 0 and visdom_info.get('use_visdom', True):\n            try:\n                self.visdom = Visdom(debug, {'handler': self._visdom_ui_handler, 'win_id': 'Tracking'},\n                                     visdom_info=visdom_info)\n\n                # Show help\n                help_text = 'You can pause/unpause the tracker by pressing ''space'' with the ''Tracking'' window ' \\\n                            'selected. 
During paused mode, you can track for one frame by pressing the right arrow key. ' \\\n                            'To enable/disable plotting of a data block, tick/untick the corresponding entry in ' \\\n                            'block list.'\n                self.visdom.register(help_text, 'text', 1, 'Help')\n            except Exception:\n                time.sleep(0.5)\n                print('!!! WARNING: Visdom could not start, so using matplotlib visualization instead !!!\\n'\n                      '!!! Start Visdom in a separate terminal window by typing \\'visdom\\' !!!')\n\n    def _visdom_ui_handler(self, data):\n        if data['event_type'] == 'KeyPress':\n            if data['key'] == ' ':\n                self.pause_mode = not self.pause_mode\n\n            elif data['key'] == 'ArrowRight' and self.pause_mode:\n                self.step = True\n\n\n    def create_tracker(self, params):\n        tracker = self.tracker_class(params)\n        tracker.visdom = self.visdom\n        return tracker\n\n    def run_sequence(self, seq, visualization=None, debug=None, visdom_info=None, multiobj_mode=None):\n        \"\"\"Run tracker on sequence.\n        args:\n            seq: Sequence to run the tracker on.\n            visualization: Set visualization flag (None means default value specified in the parameters).\n            debug: Set debug level (None means default value specified in the parameters).\n            visdom_info: Visdom info.\n            multiobj_mode: Which mode to use for multiple objects.\n        \"\"\"\n        params = self.get_parameters()\n        visualization_ = visualization\n\n        debug_ = debug\n        if debug is None:\n            debug_ = getattr(params, 'debug', 0)\n        if visualization is None:\n            if debug is None:\n                visualization_ = getattr(params, 'visualization', False)\n            else:\n                visualization_ = True if debug else False\n\n        params.visualization = visualization_\n        params.debug = debug_\n\n        self._init_visdom(visdom_info, debug_)\n        if visualization_ and self.visdom is None:\n            self.init_visualization()\n\n        # Get init information\n        init_info = seq.init_info()\n        is_single_object = not seq.multiobj_mode\n\n        if multiobj_mode is None:\n            multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default'))\n\n        if multiobj_mode == 'default' or is_single_object:\n            tracker = self.create_tracker(params)\n        elif multiobj_mode == 'parallel':\n            tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom)\n        else:\n            raise ValueError('Unknown multi object mode {}'.format(multiobj_mode))\n\n        output = self._track_sequence(tracker, seq, init_info)\n        return output\n\n    def _track_sequence(self, tracker, seq, init_info):\n        # Define outputs\n        # Each field in output is a list containing tracker prediction for each frame.\n\n        # In case of single object tracking mode:\n        # target_bbox[i] is the predicted bounding box for frame i\n        # time[i] is the processing time for frame i\n        # segmentation[i] is the segmentation mask for frame i (numpy array)\n\n        # In case of multi object tracking mode:\n        # target_bbox[i] is an OrderedDict, where target_bbox[i][obj_id] is the predicted box for target obj_id in\n        # frame i\n        # time[i] is either the processing time for frame i, or an OrderedDict containing processing times for each\n        # object in frame i\n        # segmentation[i] is the multi-label segmentation mask for frame i (numpy array)\n
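        #\n        # Illustrative sketch (hypothetical values, not produced by a real run): for a three-frame\n        # single-object sequence the collected output could look like\n        #   {'target_bbox': [[12, 30, 50, 80], [14, 31, 50, 81], [15, 33, 51, 80]],\n        #    'time': [0.09, 0.03, 0.03]}\n        # ('segmentation' is dropped at the end of this method if no mask was ever produced).\n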
\n        output = {'target_bbox': [],\n                  'time': [],\n                  'segmentation': []}\n\n        def _store_outputs(tracker_out: dict, defaults=None):\n            defaults = {} if defaults is None else defaults\n            for key in output.keys():\n                val = tracker_out.get(key, defaults.get(key, None))\n                if key in tracker_out or val is not None:\n                    output[key].append(val)\n\n        # Initialize\n        image = self._read_image(seq.frames[0])\n\n        if tracker.params.visualization and self.visdom is None:\n            self.visualize(image, init_info.get('init_bbox'))\n\n        start_time = time.time()\n        out = tracker.initialize(image, init_info)\n        if out is None:\n            out = {}\n\n        prev_output = OrderedDict(out)\n\n        init_default = {'target_bbox': init_info.get('init_bbox'),\n                        'time': time.time() - start_time,\n                        'segmentation': init_info.get('init_mask')}\n\n        _store_outputs(out, init_default)\n\n        for frame_num, frame_path in enumerate(seq.frames[1:], start=1):\n            while True:\n                if not self.pause_mode:\n                    break\n                elif self.step:\n                    self.step = False\n                    break\n                else:\n                    time.sleep(0.1)\n\n            image = self._read_image(frame_path)\n\n            start_time = time.time()\n\n            info = seq.frame_info(frame_num)\n            info['previous_output'] = prev_output\n\n            out = tracker.track(image, info)\n            prev_output = OrderedDict(out)\n            _store_outputs(out, {'time': time.time() - start_time})\n\n            segmentation = out['segmentation'] if 'segmentation' in out else None\n            if self.visdom is not None:\n                tracker.visdom_draw_tracking(image, out['target_bbox'], segmentation)\n            elif tracker.params.visualization:\n                self.visualize(image, out['target_bbox'], segmentation)\n\n        for key in ['target_bbox', 'segmentation']:\n            if key in output and len(output[key]) <= 1:\n                output.pop(key)\n\n        return output\n\n    def run_video(self, videofilepath, optional_box=None, debug=None, visdom_info=None):\n        \"\"\"Run the tracker on a video file.\n        args:\n            videofilepath: Path to a video file.\n            optional_box: Optional initial target box in [x, y, w, h] format.\n            debug: Debug level.\n        \"\"\"\n\n        params = self.get_parameters()\n\n        debug_ = debug\n        if debug is None:\n            debug_ = getattr(params, 'debug', 0)\n        params.debug = debug_\n\n        params.tracker_name = self.name\n        params.param_name = self.parameter_name\n        self._init_visdom(visdom_info, debug_)\n\n        multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default'))\n\n        if multiobj_mode == 'default':\n            tracker = self.create_tracker(params)\n            if hasattr(tracker, 'initialize_features'):\n                tracker.initialize_features()\n\n        elif multiobj_mode == 'parallel':\n            tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom, fast_load=True)\n        else:\n            raise ValueError('Unknown multi object mode {}'.format(multiobj_mode))\n\n        assert os.path.isfile(videofilepath), \"Invalid param {}, videofilepath must be a valid video file\".format(videofilepath)\n
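\n        # Illustrative call (tracker, parameter, and file names below are hypothetical):\n        #   Tracker('artrack', 'artrack_256').run_video('/path/to/clip.mp4', optional_box=[100, 150, 60, 80])\n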
\"Invalid param {}\".format(videofilepath)\n        \", videofilepath must be a valid videofile\"\n\n        cap = cv.VideoCapture(videofilepath)\n        display_name = 'Display: ' + tracker.params.tracker_name\n        cv.namedWindow(display_name, cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO)\n        cv.resizeWindow(display_name, 960, 720)\n        success, frame = cap.read()\n        cv.imshow(display_name, frame)\n\n        def _build_init_info(box):\n            return {'init_bbox': OrderedDict({1: box}), 'init_object_ids': [1, ], 'object_ids': [1, ],\n                    'sequence_object_ids': [1, ]}\n\n        if success is not True:\n            print(\"Read frame from {} failed.\".format(videofilepath))\n            exit(-1)\n        if optional_box is not None:\n            assert isinstance(optional_box, list, tuple)\n            assert len(optional_box) == 4, \"valid box's foramt is [x,y,w,h]\"\n            tracker.initialize(frame, _build_init_info(optional_box))\n        else:\n            while True:\n                # cv.waitKey()\n                frame_disp = frame.copy()\n\n                cv.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL,\n                           1.5, (0, 0, 0), 1)\n\n                x, y, w, h = cv.selectROI(display_name, frame_disp, fromCenter=False)\n                init_state = [x, y, w, h]\n                tracker.initialize(frame, _build_init_info(init_state))\n                break\n\n        while True:\n            ret, frame = cap.read()\n\n            if frame is None:\n                return\n\n            frame_disp = frame.copy()\n\n            # Draw box\n            out = tracker.track(frame)\n            state = [int(s) for s in out['target_bbox'][1]]\n            cv.rectangle(frame_disp, (state[0], state[1]), (state[2] + state[0], state[3] + state[1]),\n                         (0, 255, 0), 5)\n\n            font_color = (0, 0, 0)\n            cv.putText(frame_disp, 'Tracking!', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n            cv.putText(frame_disp, 'Press r to reset', (20, 55), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n            cv.putText(frame_disp, 'Press q to quit', (20, 80), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n\n            # Display the resulting frame\n            cv.imshow(display_name, frame_disp)\n            key = cv.waitKey(1)\n            if key == ord('q'):\n                break\n            elif key == ord('r'):\n                ret, frame = cap.read()\n                frame_disp = frame.copy()\n\n                cv.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1.5,\n                           (0, 0, 0), 1)\n\n                cv.imshow(display_name, frame_disp)\n                x, y, w, h = cv.selectROI(display_name, frame_disp, fromCenter=False)\n                init_state = [x, y, w, h]\n                tracker.initialize(frame, _build_init_info(init_state))\n\n        # When everything done, release the capture\n        cap.release()\n        cv.destroyAllWindows()\n\n\n    def run_webcam(self, debug=None, visdom_info=None):\n        \"\"\"Run the tracker with the webcam.\n        args:\n            debug: Debug level.\n        \"\"\"\n\n        params = self.get_parameters()\n\n        debug_ = debug\n        if debug is None:\n            debug_ = getattr(params, 'debug', 0)\n        params.debug = 
debug_\n\n        params.tracker_name = self.name\n        params.param_name = self.parameter_name\n\n        self._init_visdom(visdom_info, debug_)\n\n        multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default'))\n\n        if multiobj_mode == 'default':\n            tracker = self.create_tracker(params)\n        elif multiobj_mode == 'parallel':\n            tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom, fast_load=True)\n        else:\n            raise ValueError('Unknown multi object mode {}'.format(multiobj_mode))\n\n        class UIControl:\n            def __init__(self):\n                self.mode = 'init'  # init, select, track\n                self.target_tl = (-1, -1)\n                self.target_br = (-1, -1)\n                self.new_init = False\n\n            def mouse_callback(self, event, x, y, flags, param):\n                if event == cv.EVENT_LBUTTONDOWN and self.mode == 'init':\n                    self.target_tl = (x, y)\n                    self.target_br = (x, y)\n                    self.mode = 'select'\n                elif event == cv.EVENT_MOUSEMOVE and self.mode == 'select':\n                    self.target_br = (x, y)\n                elif event == cv.EVENT_LBUTTONDOWN and self.mode == 'select':\n                    self.target_br = (x, y)\n                    self.mode = 'init'\n                    self.new_init = True\n\n            def get_tl(self):\n                return self.target_tl if self.target_tl[0] < self.target_br[0] else self.target_br\n\n            def get_br(self):\n                return self.target_br if self.target_tl[0] < self.target_br[0] else self.target_tl\n\n            def get_bb(self):\n                tl = self.get_tl()\n                br = self.get_br()\n\n                bb = [min(tl[0], br[0]), min(tl[1], br[1]), abs(br[0] - tl[0]), abs(br[1] - tl[1])]\n                return bb\n\n        ui_control = UIControl()\n        cap = cv.VideoCapture(0)\n        display_name = 'Display: ' + self.name\n        cv.namedWindow(display_name, cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO)\n        cv.resizeWindow(display_name, 960, 720)\n        cv.setMouseCallback(display_name, ui_control.mouse_callback)\n\n        next_object_id = 1\n        sequence_object_ids = []\n        prev_output = OrderedDict()\n        while True:\n            # Capture frame-by-frame\n            ret, frame = cap.read()\n            frame_disp = frame.copy()\n\n            info = OrderedDict()\n            info['previous_output'] = prev_output\n\n            if ui_control.new_init:\n                ui_control.new_init = False\n                init_state = ui_control.get_bb()\n\n                info['init_object_ids'] = [next_object_id, ]\n                info['init_bbox'] = OrderedDict({next_object_id: init_state})\n                sequence_object_ids.append(next_object_id)\n\n                next_object_id += 1\n\n            # Draw box\n            if ui_control.mode == 'select':\n                cv.rectangle(frame_disp, ui_control.get_tl(), ui_control.get_br(), (255, 0, 0), 2)\n\n            if len(sequence_object_ids) > 0:\n                info['sequence_object_ids'] = sequence_object_ids\n                out = tracker.track(frame, info)\n                prev_output = OrderedDict(out)\n\n                if 'segmentation' in out:\n                    frame_disp = overlay_mask(frame_disp, out['segmentation'])\n\n                if 'target_bbox' in out:\n                    for obj_id, state 
in out['target_bbox'].items():\n                        state = [int(s) for s in state]\n                        cv.rectangle(frame_disp, (state[0], state[1]), (state[2] + state[0], state[3] + state[1]),\n                                     _tracker_disp_colors[obj_id], 5)\n\n            # Put text\n            font_color = (0, 0, 0)\n            cv.putText(frame_disp, 'Select target', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1, font_color, 1)\n            cv.putText(frame_disp, 'Press r to reset', (20, 55), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n            cv.putText(frame_disp, 'Press q to quit', (20, 85), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n\n            # Display the resulting frame\n            cv.imshow(display_name, frame_disp)\n            key = cv.waitKey(1)\n            if key == ord('q'):\n                break\n            elif key == ord('r'):\n                next_object_id = 1\n                sequence_object_ids = []\n                prev_output = OrderedDict()\n\n                info = OrderedDict()\n\n                info['object_ids'] = []\n                info['init_object_ids'] = []\n                info['init_bbox'] = OrderedDict()\n                tracker.initialize(frame, info)\n                ui_control.mode = 'init'\n\n        # When everything done, release the capture\n        cap.release()\n        cv.destroyAllWindows()\n\n    def run_vot2020(self, debug=None, visdom_info=None):\n        params = self.get_parameters()\n        params.tracker_name = self.name\n        params.param_name = self.parameter_name\n        params.run_id = self.run_id\n\n        debug_ = debug\n        if debug is None:\n            debug_ = getattr(params, 'debug', 0)\n\n        if debug is None:\n            visualization_ = getattr(params, 'visualization', False)\n        else:\n            visualization_ = True if debug else False\n\n        params.visualization = visualization_\n        params.debug = debug_\n\n        self._init_visdom(visdom_info, debug_)\n\n        tracker = self.create_tracker(params)\n        tracker.initialize_features()\n\n        output_segmentation = tracker.predicts_segmentation_mask()\n\n        import pytracking.evaluation.vot2020 as vot\n\n        def _convert_anno_to_list(vot_anno):\n            vot_anno = [vot_anno[0], vot_anno[1], vot_anno[2], vot_anno[3]]\n            return vot_anno\n\n        def _convert_image_path(image_path):\n            return image_path\n\n        \"\"\"Run tracker on VOT.\"\"\"\n\n        if output_segmentation:\n            handle = vot.VOT(\"mask\")\n        else:\n            handle = vot.VOT(\"rectangle\")\n\n        vot_anno = handle.region()\n\n        image_path = handle.frame()\n        if not image_path:\n            return\n        image_path = _convert_image_path(image_path)\n\n        image = self._read_image(image_path)\n\n        if output_segmentation:\n            vot_anno_mask = vot.make_full_size(vot_anno, (image.shape[1], image.shape[0]))\n            bbox = masks_to_bboxes(torch.from_numpy(vot_anno_mask), fmt='t').squeeze().tolist()\n        else:\n            bbox = _convert_anno_to_list(vot_anno)\n            vot_anno_mask = None\n\n        out = tracker.initialize(image, {'init_mask': vot_anno_mask, 'init_bbox': bbox})\n\n        if out is None:\n            out = {}\n        prev_output = OrderedDict(out)\n\n        # Track\n        while True:\n            image_path = handle.frame()\n            if not image_path:\n       
         break\n            image_path = _convert_image_path(image_path)\n\n            image = self._read_image(image_path)\n\n            info = OrderedDict()\n            info['previous_output'] = prev_output\n\n            out = tracker.track(image, info)\n            prev_output = OrderedDict(out)\n\n            if output_segmentation:\n                pred = out['segmentation'].astype(np.uint8)\n            else:\n                state = out['target_bbox']\n                pred = vot.Rectangle(*state)\n            handle.report(pred, 1.0)\n\n            segmentation = out['segmentation'] if 'segmentation' in out else None\n            if self.visdom is not None:\n                tracker.visdom_draw_tracking(image, out['target_bbox'], segmentation)\n            elif tracker.params.visualization:\n                self.visualize(image, out['target_bbox'], segmentation)\n\n\n    def run_vot(self, debug=None, visdom_info=None):\n        params = self.get_parameters()\n        params.tracker_name = self.name\n        params.param_name = self.parameter_name\n        params.run_id = self.run_id\n\n        debug_ = debug\n        if debug is None:\n            debug_ = getattr(params, 'debug', 0)\n\n        if debug is None:\n            visualization_ = getattr(params, 'visualization', False)\n        else:\n            visualization_ = True if debug else False\n\n        params.visualization = visualization_\n        params.debug = debug_\n\n        self._init_visdom(visdom_info, debug_)\n\n        tracker = self.create_tracker(params)\n        tracker.initialize_features()\n\n        import pytracking.evaluation.vot as vot\n\n        def _convert_anno_to_list(vot_anno):\n            vot_anno = [vot_anno[0][0][0], vot_anno[0][0][1], vot_anno[0][1][0], vot_anno[0][1][1],\n                        vot_anno[0][2][0], vot_anno[0][2][1], vot_anno[0][3][0], vot_anno[0][3][1]]\n            return vot_anno\n\n        def _convert_image_path(image_path):\n            image_path_new = image_path[20:- 2]\n            return \"\".join(image_path_new)\n\n        \"\"\"Run tracker on VOT.\"\"\"\n\n        handle = vot.VOT(\"polygon\")\n\n        vot_anno_polygon = handle.region()\n        vot_anno_polygon = _convert_anno_to_list(vot_anno_polygon)\n\n        init_state = convert_vot_anno_to_rect(vot_anno_polygon, tracker.params.vot_anno_conversion_type)\n\n        image_path = handle.frame()\n        if not image_path:\n            return\n        image_path = _convert_image_path(image_path)\n\n        image = self._read_image(image_path)\n        tracker.initialize(image, {'init_bbox': init_state})\n\n        # Track\n        while True:\n            image_path = handle.frame()\n            if not image_path:\n                break\n            image_path = _convert_image_path(image_path)\n\n            image = self._read_image(image_path)\n            out = tracker.track(image)\n            state = out['target_bbox']\n\n            handle.report(vot.Rectangle(state[0], state[1], state[2], state[3]))\n\n            segmentation = out['segmentation'] if 'segmentation' in out else None\n            if self.visdom is not None:\n                tracker.visdom_draw_tracking(image, out['target_bbox'], segmentation)\n            elif tracker.params.visualization:\n                self.visualize(image, out['target_bbox'], segmentation)\n\n    def get_parameters(self):\n        \"\"\"Get parameters.\"\"\"\n        param_module = importlib.import_module('pytracking.parameter.{}.{}'.format(self.name, 
self.parameter_name))\n        params = param_module.parameters()\n        return params\n\n\n    def init_visualization(self):\n        self.pause_mode = False\n        self.fig, self.ax = plt.subplots(1)\n        self.fig.canvas.mpl_connect('key_press_event', self.press)\n        plt.tight_layout()\n\n\n    def visualize(self, image, state, segmentation=None):\n        self.ax.cla()\n        self.ax.imshow(image)\n        if segmentation is not None:\n            self.ax.imshow(segmentation, alpha=0.5)\n\n        if isinstance(state, (OrderedDict, dict)):\n            boxes = [v for k, v in state.items()]\n        else:\n            boxes = (state,)\n\n        for i, box in enumerate(boxes, start=1):\n            col = _tracker_disp_colors[i]\n            col = [float(c) / 255.0 for c in col]\n            rect = patches.Rectangle((box[0], box[1]), box[2], box[3], linewidth=1, edgecolor=col, facecolor='none')\n            self.ax.add_patch(rect)\n\n        if getattr(self, 'gt_state', None) is not None:\n            gt_state = self.gt_state\n            rect = patches.Rectangle((gt_state[0], gt_state[1]), gt_state[2], gt_state[3], linewidth=1, edgecolor='g', facecolor='none')\n            self.ax.add_patch(rect)\n        self.ax.set_axis_off()\n        self.ax.axis('equal')\n        draw_figure(self.fig)\n\n        if self.pause_mode:\n            keypress = False\n            while not keypress:\n                keypress = plt.waitforbuttonpress()\n\n    def reset_tracker(self):\n        pass\n\n    def press(self, event):\n        if event.key == 'p':\n            self.pause_mode = not self.pause_mode\n            print(\"Switching pause mode!\")\n        elif event.key == 'r':\n            self.reset_tracker()\n            print(\"Resetting target pos to gt!\")\n\n    def _read_image(self, image_file: str):\n        # Read with OpenCV (BGR) and convert to RGB; fail loudly on unreadable files.\n        im = cv.imread(image_file)\n        if im is None:\n            raise ValueError('Could not read image: {}'.format(image_file))\n        return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n"
  },
  {
    "path": "external/AR/pytracking/evaluation/trackingnetdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nimport os\nfrom pytracking.utils.load_text import load_text\n\n\nclass TrackingNetDataset(BaseDataset):\n    \"\"\" TrackingNet test set.\n\n    Publication:\n        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n        Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n        ECCV, 2018\n        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.trackingnet_path\n\n        sets = 'TEST'\n        if not isinstance(sets, (list, tuple)):\n            if sets == 'TEST':\n                sets = ['TEST']\n            elif sets == 'TRAIN':\n                sets = ['TRAIN_{}'.format(i) for i in range(5)]\n\n        self.sequence_list = self._list_sequences(self.base_path, sets)\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(set, seq_name) for set, seq_name in self.sequence_list])\n\n    def _construct_sequence(self, set, sequence_name):\n        anno_path = '{}/{}/anno/{}.txt'.format(self.base_path, set, sequence_name)\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy')\n\n        frames_path = '{}/{}/frames/{}'.format(self.base_path, set, sequence_name)\n        frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(\".jpg\")]\n        frame_list.sort(key=lambda f: int(f[:-4]))\n        frames_list = [os.path.join(frames_path, frame) for frame in frame_list]\n\n        return Sequence(sequence_name, frames_list, 'trackingnet', ground_truth_rect.reshape(-1, 4))\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _list_sequences(self, root, set_ids):\n        sequence_list = []\n\n        for s in set_ids:\n            anno_dir = os.path.join(root, s, \"anno\")\n            sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')]\n\n            sequence_list += sequences_cur_set\n\n        return sequence_list\n"
  },
  {
    "path": "external/AR/pytracking/evaluation/uavdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom pytracking.utils.load_text import load_text\n\n\nclass UAVDataset(BaseDataset):\n    \"\"\" UAV123 dataset.\n\n    Publication:\n        A Benchmark and Simulator for UAV Tracking.\n        Matthias Mueller, Neil Smith and Bernard Ghanem\n        ECCV, 2016\n        https://ivul.kaust.edu.sa/Documents/Publications/2016/A%20Benchmark%20and%20Simulator%20for%20UAV%20Tracking.pdf\n\n    Download the dataset from https://ivul.kaust.edu.sa/Pages/pub-benchmark-simulator-uav.aspx\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.uav_path\n        self.sequence_info_list = self._get_sequence_info_list()\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])\n\n    def _construct_sequence(self, sequence_info):\n        sequence_path = sequence_info['path']\n        nz = sequence_info['nz']\n        ext = sequence_info['ext']\n        start_frame = sequence_info['startFrame']\n        end_frame = sequence_info['endFrame']\n\n        init_omit = 0\n        if 'initOmit' in sequence_info:\n            init_omit = sequence_info['initOmit']\n\n        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, \n        sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]\n\n        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy')\n\n        return Sequence(sequence_info['name'], frames, 'uav', ground_truth_rect[init_omit:,:],\n                        object_class=sequence_info['object_class'])\n\n    def __len__(self):\n        return len(self.sequence_info_list)\n\n    def _get_sequence_info_list(self):\n        sequence_info_list = [\n            {\"name\": \"uav_bike1\", \"path\": \"data_seq/UAV123/bike1\", \"startFrame\": 1, \"endFrame\": 3085, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bike1.txt\", \"object_class\": \"vehicle\"},\n            {\"name\": \"uav_bike2\", \"path\": \"data_seq/UAV123/bike2\", \"startFrame\": 1, \"endFrame\": 553, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bike2.txt\", \"object_class\": \"vehicle\"},\n            {\"name\": \"uav_bike3\", \"path\": \"data_seq/UAV123/bike3\", \"startFrame\": 1, \"endFrame\": 433, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bike3.txt\", \"object_class\": \"vehicle\"},\n            {\"name\": \"uav_bird1_1\", \"path\": \"data_seq/UAV123/bird1\", \"startFrame\": 1, \"endFrame\": 253, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bird1_1.txt\", \"object_class\": \"bird\"},\n            {\"name\": \"uav_bird1_2\", \"path\": \"data_seq/UAV123/bird1\", \"startFrame\": 775, \"endFrame\": 1477, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bird1_2.txt\", \"object_class\": \"bird\"},\n            {\"name\": \"uav_bird1_3\", \"path\": \"data_seq/UAV123/bird1\", \"startFrame\": 1573, \"endFrame\": 2437, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bird1_3.txt\", \"object_class\": \"bird\"},\n            {\"name\": \"uav_boat1\", \"path\": \"data_seq/UAV123/boat1\", \"startFrame\": 1, \"endFrame\": 901, \"nz\": 6,\n  
           \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat1.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat2\", \"path\": \"data_seq/UAV123/boat2\", \"startFrame\": 1, \"endFrame\": 799, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat2.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat3\", \"path\": \"data_seq/UAV123/boat3\", \"startFrame\": 1, \"endFrame\": 901, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat3.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat4\", \"path\": \"data_seq/UAV123/boat4\", \"startFrame\": 1, \"endFrame\": 553, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat4.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat5\", \"path\": \"data_seq/UAV123/boat5\", \"startFrame\": 1, \"endFrame\": 505, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat5.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat6\", \"path\": \"data_seq/UAV123/boat6\", \"startFrame\": 1, \"endFrame\": 805, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat6.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat7\", \"path\": \"data_seq/UAV123/boat7\", \"startFrame\": 1, \"endFrame\": 535, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat7.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat8\", \"path\": \"data_seq/UAV123/boat8\", \"startFrame\": 1, \"endFrame\": 685, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat8.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat9\", \"path\": \"data_seq/UAV123/boat9\", \"startFrame\": 1, \"endFrame\": 1399, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat9.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_building1\", \"path\": \"data_seq/UAV123/building1\", \"startFrame\": 1, \"endFrame\": 469, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building1.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_building2\", \"path\": \"data_seq/UAV123/building2\", \"startFrame\": 1, \"endFrame\": 577, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building2.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_building3\", \"path\": \"data_seq/UAV123/building3\", \"startFrame\": 1, \"endFrame\": 829, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building3.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_building4\", \"path\": \"data_seq/UAV123/building4\", \"startFrame\": 1, \"endFrame\": 787, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building4.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_building5\", \"path\": \"data_seq/UAV123/building5\", \"startFrame\": 1, \"endFrame\": 481, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building5.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_car1_1\", \"path\": \"data_seq/UAV123/car1\", \"startFrame\": 1, \"endFrame\": 751, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car1_1.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car1_2\", \"path\": \"data_seq/UAV123/car1\", \"startFrame\": 751, \"endFrame\": 1627, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": 
\"anno/UAV123/car1_2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car1_3\", \"path\": \"data_seq/UAV123/car1\", \"startFrame\": 1627, \"endFrame\": 2629, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car1_3.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car10\", \"path\": \"data_seq/UAV123/car10\", \"startFrame\": 1, \"endFrame\": 1405, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car10.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car11\", \"path\": \"data_seq/UAV123/car11\", \"startFrame\": 1, \"endFrame\": 337, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car11.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car12\", \"path\": \"data_seq/UAV123/car12\", \"startFrame\": 1, \"endFrame\": 499, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car12.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car13\", \"path\": \"data_seq/UAV123/car13\", \"startFrame\": 1, \"endFrame\": 415, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car13.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car14\", \"path\": \"data_seq/UAV123/car14\", \"startFrame\": 1, \"endFrame\": 1327, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car14.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car15\", \"path\": \"data_seq/UAV123/car15\", \"startFrame\": 1, \"endFrame\": 469, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car15.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car16_1\", \"path\": \"data_seq/UAV123/car16\", \"startFrame\": 1, \"endFrame\": 415, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car16_1.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car16_2\", \"path\": \"data_seq/UAV123/car16\", \"startFrame\": 415, \"endFrame\": 1993, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car16_2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car17\", \"path\": \"data_seq/UAV123/car17\", \"startFrame\": 1, \"endFrame\": 1057, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car17.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car18\", \"path\": \"data_seq/UAV123/car18\", \"startFrame\": 1, \"endFrame\": 1207, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car18.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car1_s\", \"path\": \"data_seq/UAV123/car1_s\", \"startFrame\": 1, \"endFrame\": 1475, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car1_s.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car2\", \"path\": \"data_seq/UAV123/car2\", \"startFrame\": 1, \"endFrame\": 1321, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car2_s\", \"path\": \"data_seq/UAV123/car2_s\", \"startFrame\": 1, \"endFrame\": 320, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car2_s.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car3\", \"path\": \"data_seq/UAV123/car3\", \"startFrame\": 1, \"endFrame\": 1717, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car3.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car3_s\", \"path\": 
\"data_seq/UAV123/car3_s\", \"startFrame\": 1, \"endFrame\": 1300, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car3_s.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car4\", \"path\": \"data_seq/UAV123/car4\", \"startFrame\": 1, \"endFrame\": 1345, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car4.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car4_s\", \"path\": \"data_seq/UAV123/car4_s\", \"startFrame\": 1, \"endFrame\": 830, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car4_s.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car5\", \"path\": \"data_seq/UAV123/car5\", \"startFrame\": 1, \"endFrame\": 745, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car5.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_1\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 1, \"endFrame\": 487, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_1.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_2\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 487, \"endFrame\": 1807, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_3\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 1807, \"endFrame\": 2953, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_3.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_4\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 2953, \"endFrame\": 3925, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_4.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_5\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 3925, \"endFrame\": 4861, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_5.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car7\", \"path\": \"data_seq/UAV123/car7\", \"startFrame\": 1, \"endFrame\": 1033, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car7.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car8_1\", \"path\": \"data_seq/UAV123/car8\", \"startFrame\": 1, \"endFrame\": 1357, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car8_1.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car8_2\", \"path\": \"data_seq/UAV123/car8\", \"startFrame\": 1357, \"endFrame\": 2575, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car8_2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car9\", \"path\": \"data_seq/UAV123/car9\", \"startFrame\": 1, \"endFrame\": 1879, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car9.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_group1_1\", \"path\": \"data_seq/UAV123/group1\", \"startFrame\": 1, \"endFrame\": 1333, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group1_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group1_2\", \"path\": \"data_seq/UAV123/group1\", \"startFrame\": 1333, \"endFrame\": 2515, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group1_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group1_3\", \"path\": \"data_seq/UAV123/group1\", \"startFrame\": 2515, \"endFrame\": 3925, \"nz\": 6,\n             
\"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group1_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group1_4\", \"path\": \"data_seq/UAV123/group1\", \"startFrame\": 3925, \"endFrame\": 4873, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group1_4.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group2_1\", \"path\": \"data_seq/UAV123/group2\", \"startFrame\": 1, \"endFrame\": 907, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group2_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group2_2\", \"path\": \"data_seq/UAV123/group2\", \"startFrame\": 907, \"endFrame\": 1771, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group2_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group2_3\", \"path\": \"data_seq/UAV123/group2\", \"startFrame\": 1771, \"endFrame\": 2683, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group2_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group3_1\", \"path\": \"data_seq/UAV123/group3\", \"startFrame\": 1, \"endFrame\": 1567, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group3_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group3_2\", \"path\": \"data_seq/UAV123/group3\", \"startFrame\": 1567, \"endFrame\": 2827, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group3_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group3_3\", \"path\": \"data_seq/UAV123/group3\", \"startFrame\": 2827, \"endFrame\": 4369, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group3_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group3_4\", \"path\": \"data_seq/UAV123/group3\", \"startFrame\": 4369, \"endFrame\": 5527, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group3_4.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person1\", \"path\": \"data_seq/UAV123/person1\", \"startFrame\": 1, \"endFrame\": 799, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person10\", \"path\": \"data_seq/UAV123/person10\", \"startFrame\": 1, \"endFrame\": 1021, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person10.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person11\", \"path\": \"data_seq/UAV123/person11\", \"startFrame\": 1, \"endFrame\": 721, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person11.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person12_1\", \"path\": \"data_seq/UAV123/person12\", \"startFrame\": 1, \"endFrame\": 601, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person12_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person12_2\", \"path\": \"data_seq/UAV123/person12\", \"startFrame\": 601, \"endFrame\": 1621, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person12_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person13\", \"path\": \"data_seq/UAV123/person13\", \"startFrame\": 1, \"endFrame\": 883, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person13.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person14_1\", \"path\": \"data_seq/UAV123/person14\", 
\"startFrame\": 1, \"endFrame\": 847, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person14_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person14_2\", \"path\": \"data_seq/UAV123/person14\", \"startFrame\": 847, \"endFrame\": 1813, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person14_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person14_3\", \"path\": \"data_seq/UAV123/person14\", \"startFrame\": 1813, \"endFrame\": 2923,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person14_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person15\", \"path\": \"data_seq/UAV123/person15\", \"startFrame\": 1, \"endFrame\": 1339, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person15.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person16\", \"path\": \"data_seq/UAV123/person16\", \"startFrame\": 1, \"endFrame\": 1147, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person16.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person17_1\", \"path\": \"data_seq/UAV123/person17\", \"startFrame\": 1, \"endFrame\": 1501, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person17_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person17_2\", \"path\": \"data_seq/UAV123/person17\", \"startFrame\": 1501, \"endFrame\": 2347,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person17_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person18\", \"path\": \"data_seq/UAV123/person18\", \"startFrame\": 1, \"endFrame\": 1393, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person18.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person19_1\", \"path\": \"data_seq/UAV123/person19\", \"startFrame\": 1, \"endFrame\": 1243, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person19_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person19_2\", \"path\": \"data_seq/UAV123/person19\", \"startFrame\": 1243, \"endFrame\": 2791,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person19_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person19_3\", \"path\": \"data_seq/UAV123/person19\", \"startFrame\": 2791, \"endFrame\": 4357,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person19_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person1_s\", \"path\": \"data_seq/UAV123/person1_s\", \"startFrame\": 1, \"endFrame\": 1600, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person1_s.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person2_1\", \"path\": \"data_seq/UAV123/person2\", \"startFrame\": 1, \"endFrame\": 1189, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person2_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person2_2\", \"path\": \"data_seq/UAV123/person2\", \"startFrame\": 1189, \"endFrame\": 2623, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person2_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person20\", \"path\": \"data_seq/UAV123/person20\", \"startFrame\": 1, \"endFrame\": 1783, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person20.txt\", 
\"object_class\": \"person\"},\n            {\"name\": \"uav_person21\", \"path\": \"data_seq/UAV123/person21\", \"startFrame\": 1, \"endFrame\": 487, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person21.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person22\", \"path\": \"data_seq/UAV123/person22\", \"startFrame\": 1, \"endFrame\": 199, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person22.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person23\", \"path\": \"data_seq/UAV123/person23\", \"startFrame\": 1, \"endFrame\": 397, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person23.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person2_s\", \"path\": \"data_seq/UAV123/person2_s\", \"startFrame\": 1, \"endFrame\": 250, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person2_s.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person3\", \"path\": \"data_seq/UAV123/person3\", \"startFrame\": 1, \"endFrame\": 643, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person3_s\", \"path\": \"data_seq/UAV123/person3_s\", \"startFrame\": 1, \"endFrame\": 505, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person3_s.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person4_1\", \"path\": \"data_seq/UAV123/person4\", \"startFrame\": 1, \"endFrame\": 1501, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person4_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person4_2\", \"path\": \"data_seq/UAV123/person4\", \"startFrame\": 1501, \"endFrame\": 2743, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person4_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person5_1\", \"path\": \"data_seq/UAV123/person5\", \"startFrame\": 1, \"endFrame\": 877, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person5_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person5_2\", \"path\": \"data_seq/UAV123/person5\", \"startFrame\": 877, \"endFrame\": 2101, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person5_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person6\", \"path\": \"data_seq/UAV123/person6\", \"startFrame\": 1, \"endFrame\": 901, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person6.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person7_1\", \"path\": \"data_seq/UAV123/person7\", \"startFrame\": 1, \"endFrame\": 1249, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person7_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person7_2\", \"path\": \"data_seq/UAV123/person7\", \"startFrame\": 1249, \"endFrame\": 2065, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person7_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person8_1\", \"path\": \"data_seq/UAV123/person8\", \"startFrame\": 1, \"endFrame\": 1075, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person8_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person8_2\", \"path\": \"data_seq/UAV123/person8\", \"startFrame\": 1075, \"endFrame\": 1525, \"nz\": 6,\n             
\"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person8_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person9\", \"path\": \"data_seq/UAV123/person9\", \"startFrame\": 1, \"endFrame\": 661, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person9.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_truck1\", \"path\": \"data_seq/UAV123/truck1\", \"startFrame\": 1, \"endFrame\": 463, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck1.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_truck2\", \"path\": \"data_seq/UAV123/truck2\", \"startFrame\": 1, \"endFrame\": 385, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck2.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_truck3\", \"path\": \"data_seq/UAV123/truck3\", \"startFrame\": 1, \"endFrame\": 535, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck3.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_truck4_1\", \"path\": \"data_seq/UAV123/truck4\", \"startFrame\": 1, \"endFrame\": 577, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck4_1.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_truck4_2\", \"path\": \"data_seq/UAV123/truck4\", \"startFrame\": 577, \"endFrame\": 1261, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck4_2.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_uav1_1\", \"path\": \"data_seq/UAV123/uav1\", \"startFrame\": 1, \"endFrame\": 1555, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav1_1.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav1_2\", \"path\": \"data_seq/UAV123/uav1\", \"startFrame\": 1555, \"endFrame\": 2377, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav1_2.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav1_3\", \"path\": \"data_seq/UAV123/uav1\", \"startFrame\": 2473, \"endFrame\": 3469, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav1_3.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav2\", \"path\": \"data_seq/UAV123/uav2\", \"startFrame\": 1, \"endFrame\": 133, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav2.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav3\", \"path\": \"data_seq/UAV123/uav3\", \"startFrame\": 1, \"endFrame\": 265, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav3.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav4\", \"path\": \"data_seq/UAV123/uav4\", \"startFrame\": 1, \"endFrame\": 157, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav4.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav5\", \"path\": \"data_seq/UAV123/uav5\", \"startFrame\": 1, \"endFrame\": 139, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav5.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav6\", \"path\": \"data_seq/UAV123/uav6\", \"startFrame\": 1, \"endFrame\": 109, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav6.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav7\", \"path\": \"data_seq/UAV123/uav7\", \"startFrame\": 1, \"endFrame\": 373, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav7.txt\", 
\"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav8\", \"path\": \"data_seq/UAV123/uav8\", \"startFrame\": 1, \"endFrame\": 301, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav8.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_wakeboard1\", \"path\": \"data_seq/UAV123/wakeboard1\", \"startFrame\": 1, \"endFrame\": 421, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard10\", \"path\": \"data_seq/UAV123/wakeboard10\", \"startFrame\": 1, \"endFrame\": 469,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard10.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard2\", \"path\": \"data_seq/UAV123/wakeboard2\", \"startFrame\": 1, \"endFrame\": 733, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard3\", \"path\": \"data_seq/UAV123/wakeboard3\", \"startFrame\": 1, \"endFrame\": 823, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard4\", \"path\": \"data_seq/UAV123/wakeboard4\", \"startFrame\": 1, \"endFrame\": 697, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard4.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard5\", \"path\": \"data_seq/UAV123/wakeboard5\", \"startFrame\": 1, \"endFrame\": 1675, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard5.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard6\", \"path\": \"data_seq/UAV123/wakeboard6\", \"startFrame\": 1, \"endFrame\": 1165, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard6.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard7\", \"path\": \"data_seq/UAV123/wakeboard7\", \"startFrame\": 1, \"endFrame\": 199, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard7.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard8\", \"path\": \"data_seq/UAV123/wakeboard8\", \"startFrame\": 1, \"endFrame\": 1543, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard8.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard9\", \"path\": \"data_seq/UAV123/wakeboard9\", \"startFrame\": 1, \"endFrame\": 355, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard9.txt\", \"object_class\": \"person\"}\n        ]\n\n        return sequence_info_list\n"
  },
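The `sequence_info_list` entries above are consumed by the benchmark loader to build per-frame image paths: frame numbers run inclusively from `startFrame` to `endFrame` and are zero-padded to `nz` digits. A minimal sketch of that expansion (the `frames_from_info` helper and the `/data/UAV123` base path are illustrative, not part of the repository):

```python
# Hypothetical helper showing how one sequence_info entry expands to frame paths.
def frames_from_info(base_path, seq_info):
    return ['{}/{}/{:0{nz}d}.{}'.format(base_path, seq_info['path'], frame_num,
                                        seq_info['ext'], nz=seq_info['nz'])
            for frame_num in range(seq_info['startFrame'], seq_info['endFrame'] + 1)]

info = {"name": "uav_uav2", "path": "data_seq/UAV123/uav2", "startFrame": 1,
        "endFrame": 133, "nz": 6, "ext": "jpg",
        "anno_path": "anno/UAV123/uav2.txt", "object_class": "aircraft"}
print(frames_from_info('/data/UAV123', info)[0])
# -> /data/UAV123/data_seq/UAV123/uav2/000001.jpg
```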
  {
    "path": "external/AR/pytracking/evaluation/vot.py",
    "content": "\"\"\"\n\\file vot.py\n\n@brief Python utility functions for VOT integration\n\n@author Luka Cehovin, Alessio Dore\n\n@date 2016\n\n\"\"\"\n\nimport sys\nimport copy\nimport collections\n\ntry:\n    import trax\n    import trax.server\n    TRAX = True\nexcept ImportError:\n    TRAX = False\n\nRectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])\nPoint = collections.namedtuple('Point', ['x', 'y'])\nPolygon = collections.namedtuple('Polygon', ['points'])\n\ndef parse_region(string):\n    tokens = map(float, string.split(','))\n    if len(tokens) == 4:\n        return Rectangle(tokens[0], tokens[1], tokens[2], tokens[3])\n    elif len(tokens) % 2 == 0 and len(tokens) > 4:\n        return Polygon([Point(tokens[i],tokens[i+1]) for i in xrange(0,len(tokens),2)])\n    return None\n\ndef encode_region(region):\n    if isinstance(region, Polygon):\n        return ','.join(['{},{}'.format(p.x,p.y) for p in region.points])\n    elif isinstance(region, Rectangle):\n        return '{},{},{},{}'.format(region.x, region.y, region.width, region.height)\n    else:\n        return \"\"\n\ndef convert_region(region, to):\n\n    if to == 'rectangle':\n\n        if isinstance(region, Rectangle):\n            return copy.copy(region)\n        elif isinstance(region, Polygon):\n            top = sys.float_info.max\n            bottom = sys.float_info.min\n            left = sys.float_info.max\n            right = sys.float_info.min\n\n            for point in region.points:\n                top = min(top, point.y)\n                bottom = max(bottom, point.y)\n                left = min(left, point.x)\n                right = max(right, point.x)\n\n            return Rectangle(left, top, right - left, bottom - top)\n\n        else:\n            return None\n    if to == 'polygon':\n\n        if isinstance(region, Rectangle):\n            points = []\n            points.append((region.x, region.y))\n            points.append((region.x + region.width, region.y))\n            points.append((region.x + region.width, region.y + region.height))\n            points.append((region.x, region.y + region.height))\n            return Polygon(points)\n\n        elif isinstance(region, Polygon):\n            return copy.copy(region)\n        else:\n            return None\n\n    return None\n\nclass VOT(object):\n    \"\"\" Base class for Python VOT integration \"\"\"\n    def __init__(self, region_format):\n        \"\"\" Constructor\n\n        Args:\n            region_format: Region format options\n        \"\"\"\n        assert(region_format in ['rectangle', 'polygon'])\n        if TRAX:\n            options = trax.server.ServerOptions(region_format, trax.image.PATH)\n            self._trax = trax.server.Server(options)\n\n            request = self._trax.wait()\n            assert(request.type == 'initialize')\n            if request.region.type == 'polygon':\n                self._region = Polygon([Point(x[0], x[1]) for x in request.region.points])\n            else:\n                self._region = Rectangle(request.region.x, request.region.y, request.region.width, request.region.height)\n            self._image = str(request.image)\n            self._trax.status(request.region)\n        else:\n            self._files = [x.strip('\\n') for x in open('images.txt', 'r').readlines()]\n            self._frame = 0\n            self._region = convert_region(parse_region(open('region.txt', 'r').readline()), region_format)\n            self._result = []\n\n    def region(self):\n       
 \"\"\"\n        Send configuration message to the client and receive the initialization\n        region and the path of the first image\n\n        Returns:\n            initialization region\n        \"\"\"\n\n        return self._region\n\n    def report(self, region, confidence = 0):\n        \"\"\"\n        Report the tracking results to the client\n\n        Arguments:\n            region: region for the frame\n        \"\"\"\n        assert(isinstance(region, Rectangle) or isinstance(region, Polygon))\n        if TRAX:\n            if isinstance(region, Polygon):\n                tregion = trax.region.Polygon([(x.x, x.y) for x in region.points])\n            else:\n                tregion = trax.region.Rectangle(region.x, region.y, region.width, region.height)\n            self._trax.status(tregion, {\"confidence\" : confidence})\n        else:\n            self._result.append(region)\n            self._frame += 1\n\n    def frame(self):\n        \"\"\"\n        Get a frame (image path) from client\n\n        Returns:\n            absolute path of the image\n        \"\"\"\n        if TRAX:\n            if hasattr(self, \"_image\"):\n                image = str(self._image)\n                del self._image\n                return image\n\n            request = self._trax.wait()\n\n            if request.type == 'frame':\n                return str(request.image)\n            else:\n                return None\n\n        else:\n            if self._frame >= len(self._files):\n                return None\n            return self._files[self._frame]\n\n    def quit(self):\n        if TRAX:\n            self._trax.quit()\n        elif hasattr(self, '_result'):\n            with open('output.txt', 'w') as f:\n                for r in self._result:\n                    f.write(encode_region(r))\n                    f.write('\\n')\n\n    def __del__(self):\n        self.quit()\n\n"
  },
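For context, this wrapper is driven by the standard VOT tracker loop: negotiate a region format, read the initialization region and the first frame, then alternate `frame()`/`report()` until the toolkit stops supplying images. A hedged sketch follows; `MyTracker` is a placeholder for an actual tracker, and the module is assumed importable as `vot`:

```python
import cv2
from vot import VOT, Rectangle

handle = VOT("rectangle")        # negotiate region format with the toolkit
selection = handle.region()      # initialization region for the first frame
imagefile = handle.frame()       # path of the first image
tracker = MyTracker(cv2.imread(imagefile), selection)  # hypothetical tracker

while True:
    imagefile = handle.frame()
    if imagefile is None:        # no more frames: the sequence is done
        break
    # tracker.track is assumed to return an (x, y, w, h) box and a confidence
    region, confidence = tracker.track(cv2.imread(imagefile))
    handle.report(Rectangle(*region), confidence)
```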
  {
    "path": "external/AR/pytracking/evaluation/vot2020.py",
    "content": "\"\"\"\n\\file vot.py\n\n@brief Python utility functions for VOT integration\n\n@author Luka Cehovin, Alessio Dore\n\n@date 2016\n\n\"\"\"\n\nimport sys\nimport copy\nimport collections\nimport numpy as np\n\ntry:\n    import trax\nexcept ImportError:\n    raise Exception('TraX support not found. Please add trax module to Python path.')\n\n\ndef make_full_size(x, output_sz):\n    '''\n    zero-pad input x (right and down) to match output_sz\n    x: numpy array e.g., binary mask\n    output_sz: size of the output [width, height]\n    '''\n    if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]:\n        return x\n    pad_x = output_sz[0] - x.shape[1]\n    if pad_x < 0:\n        x = x[:, :x.shape[1] + pad_x]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_x = 0\n    pad_y = output_sz[1] - x.shape[0]\n    if pad_y < 0:\n        x = x[:x.shape[0] + pad_y, :]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_y = 0\n    return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0)\n\n\nRectangle = collections.namedtuple('Rectangle', ['x', 'y', 'width', 'height'])\nPoint = collections.namedtuple('Point', ['x', 'y'])\nPolygon = collections.namedtuple('Polygon', ['points'])\n\nclass VOT(object):\n    \"\"\" Base class for Python VOT integration \"\"\"\n    def __init__(self, region_format, channels=None):\n        \"\"\" Constructor\n\n        Args:\n            region_format: Region format options\n        \"\"\"\n        assert(region_format in [trax.Region.RECTANGLE, trax.Region.POLYGON, trax.Region.MASK])\n\n        if channels is None:\n            channels = ['color']\n        elif channels == 'rgbd':\n            channels = ['color', 'depth']\n        elif channels == 'rgbt':\n            channels = ['color', 'ir']\n        elif channels == 'ir':\n            channels = ['ir']\n        else:\n            raise Exception('Illegal configuration {}.'.format(channels))\n\n        self._trax = trax.Server([region_format], [trax.Image.PATH], channels, customMetadata=dict(vot=\"python\"))\n\n        request = self._trax.wait()\n        assert(request.type == 'initialize')\n        if isinstance(request.region, trax.Polygon):\n            self._region = Polygon([Point(x[0], x[1]) for x in request.region])\n        if isinstance(request.region, trax.Mask):\n            self._region = request.region.array(True)\n        else:\n            self._region = Rectangle(*request.region.bounds())\n        self._image = [x.path() for k, x in request.image.items()]\n        if len(self._image) == 1:\n            self._image = self._image[0]\n\n        self._trax.status(request.region)\n\n    def region(self):\n        \"\"\"\n        Send configuration message to the client and receive the initialization\n        region and the path of the first image\n\n        Returns:\n            initialization region\n        \"\"\"\n\n        return self._region\n\n    def report(self, region, confidence = None):\n        \"\"\"\n        Report the tracking results to the client\n\n        Arguments:\n            region: region for the frame\n        \"\"\"\n        assert(isinstance(region, (Rectangle, Polygon, np.ndarray)))\n        if isinstance(region, Polygon):\n            tregion = trax.Polygon.create([(x.x, x.y) for x in region.points])\n        if isinstance(region, np.ndarray):\n            tregion = trax.Mask.create(region)\n        else:\n            tregion = trax.Rectangle.create(region.x, region.y, 
region.width, region.height)\n        properties = {}\n        if confidence is not None:\n            properties['confidence'] = confidence\n        self._trax.status(tregion, properties)\n\n    def frame(self):\n        \"\"\"\n        Get a frame (image path) from client\n\n        Returns:\n            absolute path of the image\n        \"\"\"\n        if hasattr(self, \"_image\"):\n            image = self._image\n            del self._image\n            return image\n\n        request = self._trax.wait()\n\n        if request.type == 'frame':\n            image = [x.path() for k, x in request.image.items()]\n            if len(image) == 1:\n                return image[0]\n            return image\n        else:\n            return None\n\n\n    def quit(self):\n        if hasattr(self, '_trax'):\n            self._trax.quit()\n\n    def __del__(self):\n        self.quit()\n"
  },
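The VOT2020 variant above adds segmentation-mask support: the toolkit can hand the initial region as a binary mask, and reported masks must be full image size, which is what `make_full_size` guarantees. A hedged sketch of that protocol; `MySegmentationTracker` is a placeholder, and the module is assumed importable as `vot2020`:

```python
import cv2
from vot2020 import VOT, make_full_size

handle = VOT("mask")
selection = handle.region()          # numpy array: initial object mask
imagefile = handle.frame()
image = cv2.imread(imagefile)
mask = make_full_size(selection, (image.shape[1], image.shape[0]))
tracker = MySegmentationTracker(image, mask)   # hypothetical tracker

while True:
    imagefile = handle.frame()
    if imagefile is None:
        break
    image = cv2.imread(imagefile)
    pred_mask, confidence = tracker.track(image)
    # pad the prediction to full image size before reporting
    handle.report(make_full_size(pred_mask, (image.shape[1], image.shape[0])), confidence)
```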
  {
    "path": "external/AR/pytracking/evaluation/votdataset.py",
    "content": "import numpy as np\nfrom pytracking.evaluation.data import Sequence, BaseDataset, SequenceList\n\n\nclass VOTDataset(BaseDataset):\n    \"\"\"\n    VOT2018 dataset\n\n    Publication:\n        The sixth Visual Object Tracking VOT2018 challenge results.\n        Matej Kristan, Ales Leonardis, Jiri Matas, Michael Felsberg, Roman Pfugfelder, Luka Cehovin Zajc, Tomas Vojir,\n        Goutam Bhat, Alan Lukezic et al.\n        ECCV, 2018\n        https://prints.vicos.si/publications/365\n\n    Download the dataset from http://www.votchallenge.net/vot2018/dataset.html\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.vot_path\n        self.sequence_list = self._get_sequence_list()\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n    def _construct_sequence(self, sequence_name):\n        sequence_path = sequence_name\n        nz = 8\n        ext = 'jpg'\n        start_frame = 1\n\n        anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)\n        try:\n            ground_truth_rect = np.loadtxt(str(anno_path), dtype=np.float64)\n        except:\n            ground_truth_rect = np.loadtxt(str(anno_path), delimiter=',', dtype=np.float64)\n\n        end_frame = ground_truth_rect.shape[0]\n\n        frames = ['{base_path}/{sequence_path}/color/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,\n                  sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext)\n                  for frame_num in range(start_frame, end_frame+1)]\n\n        # Convert gt\n        if ground_truth_rect.shape[1] > 4:\n            gt_x_all = ground_truth_rect[:, [0, 2, 4, 6]]\n            gt_y_all = ground_truth_rect[:, [1, 3, 5, 7]]\n\n            x1 = np.amin(gt_x_all, 1).reshape(-1,1)\n            y1 = np.amin(gt_y_all, 1).reshape(-1,1)\n            x2 = np.amax(gt_x_all, 1).reshape(-1,1)\n            y2 = np.amax(gt_y_all, 1).reshape(-1,1)\n\n            ground_truth_rect = np.concatenate((x1, y1, x2-x1, y2-y1), 1)\n        return Sequence(sequence_name, frames, 'vot', ground_truth_rect)\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _get_sequence_list(self):\n        sequence_list= ['ants1',\n                        'ants3',\n                        'bag',\n                        'ball1',\n                        'ball2',\n                        'basketball',\n                        'birds1',\n                        'blanket',\n                        'bmx',\n                        'bolt1',\n                        'bolt2',\n                        'book',\n                        'butterfly',\n                        'car1',\n                        'conduction1',\n                        'crabs1',\n                        'crossing',\n                        'dinosaur',\n                        'drone_across',\n                        'drone_flip',\n                        'drone1',\n                        'fernando',\n                        'fish1',\n                        'fish2',\n                        'fish3',\n                        'flamingo1',\n                        'frisbee',\n                        'girl',\n                        'glove',\n                        'godfather',\n                        'graduate',\n                        'gymnastics1',\n                        'gymnastics2',\n                        'gymnastics3',\n                        'hand',\n              
          'handball1',\n                        'handball2',\n                        'helicopter',\n                        'iceskater1',\n                        'iceskater2',\n                        'leaves',\n                        'matrix',\n                        'motocross1',\n                        'motocross2',\n                        'nature',\n                        'pedestrian1',\n                        'rabbit',\n                        'racing',\n                        'road',\n                        'shaking',\n                        'sheep',\n                        'singer2',\n                        'singer3',\n                        'soccer1',\n                        'soccer2',\n                        'soldier',\n                        'tiger',\n                        'traffic',\n                        'wiper',\n                        'zebrafish1']\n\n        return sequence_list\n"
  },
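The `# Convert gt` branch in `_construct_sequence` above reduces an 8-value VOT polygon annotation to the tightest axis-aligned `[x, y, w, h]` box. A standalone restatement of that computation on one dummy polygon row:

```python
import numpy as np

gt = np.array([[10., 20., 50., 22., 48., 60., 8., 58.]])  # one x1,y1,...,x4,y4 row
xs, ys = gt[:, [0, 2, 4, 6]], gt[:, [1, 3, 5, 7]]
x1, y1 = xs.min(1, keepdims=True), ys.min(1, keepdims=True)
x2, y2 = xs.max(1, keepdims=True), ys.max(1, keepdims=True)
rect = np.concatenate((x1, y1, x2 - x1, y2 - y1), 1)
print(rect)  # [[ 8. 20. 42. 40.]] -- the enclosing axis-aligned box
```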
  {
    "path": "external/AR/pytracking/experiments/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/pytracking/experiments/myexperiments.py",
    "content": "from pytracking.evaluation import Tracker, get_dataset, trackerlist\n\n\ndef atom_nfs_uav():\n    # Run three runs of ATOM on NFS and UAV datasets\n    trackers = trackerlist('atom', 'default', range(3))\n\n    dataset = get_dataset('nfs', 'uav')\n    return trackers, dataset\n\n\ndef uav_test():\n    # Run DiMP18, ATOM and ECO on the UAV dataset\n    trackers = trackerlist('dimp', 'dimp18', range(1)) + \\\n               trackerlist('atom', 'default', range(1)) + \\\n               trackerlist('eco', 'default', range(1))\n\n    dataset = get_dataset('uav')\n    return trackers, dataset\n"
  },
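New experiments follow the same shape as the two above: a plain function returning a `(trackers, dataset)` pair. A hedged example of adding one; the tracker/parameter/dataset names must exist in your local setup:

```python
from pytracking.evaluation import get_dataset, trackerlist


def atom_uav_5runs():
    # Five runs of ATOM (default parameters) on the UAV dataset
    trackers = trackerlist('atom', 'default', range(5))
    dataset = get_dataset('uav')
    return trackers, dataset
```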
  {
    "path": "external/AR/pytracking/features/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/pytracking/features/augmentation.py",
    "content": "import numpy as np\nimport math\nimport torch\nimport torch.nn.functional as F\nimport cv2 as cv\nimport random\nfrom pytracking.features.preprocessing import numpy_to_torch, torch_to_numpy\n\n\nclass Transform:\n    \"\"\"Base data augmentation transform class.\"\"\"\n\n    def __init__(self, output_sz = None, shift = None):\n        self.output_sz = output_sz\n        self.shift = (0,0) if shift is None else shift\n\n    def __call__(self, image, is_mask=False):\n        raise NotImplementedError\n\n    def crop_to_output(self, image):\n        if isinstance(image, torch.Tensor):\n            imsz = image.shape[2:]\n            if self.output_sz is None:\n                pad_h = 0\n                pad_w = 0\n            else:\n                pad_h = (self.output_sz[0] - imsz[0]) / 2\n                pad_w = (self.output_sz[1] - imsz[1]) / 2\n\n            pad_left = math.floor(pad_w) + self.shift[1]\n            pad_right = math.ceil(pad_w) - self.shift[1]\n            pad_top = math.floor(pad_h) + self.shift[0]\n            pad_bottom = math.ceil(pad_h) - self.shift[0]\n\n            return F.pad(image, (pad_left, pad_right, pad_top, pad_bottom), 'replicate')\n        else:\n            raise NotImplementedError\n\nclass Identity(Transform):\n    \"\"\"Identity transformation.\"\"\"\n    def __call__(self, image, is_mask=False):\n        return self.crop_to_output(image)\n\nclass FlipHorizontal(Transform):\n    \"\"\"Flip along horizontal axis.\"\"\"\n    def __call__(self, image, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            return self.crop_to_output(image.flip((3,)))\n        else:\n            return np.fliplr(image)\n\nclass FlipVertical(Transform):\n    \"\"\"Flip along vertical axis.\"\"\"\n    def __call__(self, image: torch.Tensor, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            return self.crop_to_output(image.flip((2,)))\n        else:\n            return np.flipud(image)\n\nclass Translation(Transform):\n    \"\"\"Translate.\"\"\"\n    def __init__(self, translation, output_sz = None, shift = None):\n        super().__init__(output_sz, shift)\n        self.shift = (self.shift[0] + translation[0], self.shift[1] + translation[1])\n\n    def __call__(self, image, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            return self.crop_to_output(image)\n        else:\n            raise NotImplementedError\n\nclass Scale(Transform):\n    \"\"\"Scale.\"\"\"\n    def __init__(self, scale_factor, output_sz = None, shift = None):\n        super().__init__(output_sz, shift)\n        self.scale_factor = scale_factor\n\n    def __call__(self, image, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            # Calculate new size. 
Ensure that it is even so that crop/pad becomes easier\n            h_orig, w_orig = image.shape[2:]\n\n            if h_orig != w_orig:\n                raise NotImplementedError\n\n            h_new = round(h_orig /self.scale_factor)\n            h_new += (h_new - h_orig) % 2\n            w_new = round(w_orig /self.scale_factor)\n            w_new += (w_new - w_orig) % 2\n\n            image_resized = F.interpolate(image, [h_new, w_new], mode='bilinear')\n\n            return self.crop_to_output(image_resized)\n        else:\n            raise NotImplementedError\n\n\nclass Affine(Transform):\n    \"\"\"Affine transformation.\"\"\"\n    def __init__(self, transform_matrix, output_sz = None, shift = None):\n        super().__init__(output_sz, shift)\n        self.transform_matrix = transform_matrix\n\n    def __call__(self, image, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            return self.crop_to_output(numpy_to_torch(self(torch_to_numpy(image))))\n        else:\n            return cv.warpAffine(image, self.transform_matrix, image.shape[1::-1], borderMode=cv.BORDER_REPLICATE)\n\n\nclass Rotate(Transform):\n    \"\"\"Rotate with given angle.\"\"\"\n    def __init__(self, angle, output_sz = None, shift = None):\n        super().__init__(output_sz, shift)\n        self.angle = math.pi * angle/180\n\n    def __call__(self, image, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            return self.crop_to_output(numpy_to_torch(self(torch_to_numpy(image))))\n        else:\n            c = (np.expand_dims(np.array(image.shape[:2]),1)-1)/2\n            R = np.array([[math.cos(self.angle), math.sin(self.angle)],\n                          [-math.sin(self.angle), math.cos(self.angle)]])\n            H =np.concatenate([R, c - R @ c], 1)\n            return cv.warpAffine(image, H, image.shape[1::-1], borderMode=cv.BORDER_REPLICATE)\n\n\nclass Blur(Transform):\n    \"\"\"Blur with given sigma (can be axis dependent).\"\"\"\n    def __init__(self, sigma, output_sz = None, shift = None):\n        super().__init__(output_sz, shift)\n        if isinstance(sigma, (float, int)):\n            sigma = (sigma, sigma)\n        self.sigma = sigma\n        self.filter_size = [math.ceil(2*s) for s in self.sigma]\n        x_coord = [torch.arange(-sz, sz+1, dtype=torch.float32) for sz in self.filter_size]\n        self.filter = [torch.exp(-(x**2)/(2*s**2)) for x, s in zip(x_coord, self.sigma)]\n        self.filter[0] = self.filter[0].view(1,1,-1,1) / self.filter[0].sum()\n        self.filter[1] = self.filter[1].view(1,1,1,-1) / self.filter[1].sum()\n\n    def __call__(self, image, is_mask=False):\n        if isinstance(image, torch.Tensor):\n            sz = image.shape[2:]\n            im1 = F.conv2d(image.view(-1,1,sz[0],sz[1]), self.filter[0], padding=(self.filter_size[0],0))\n            return self.crop_to_output(F.conv2d(im1, self.filter[1], padding=(0,self.filter_size[1])).view(1,-1,sz[0],sz[1]))\n        else:\n            raise NotImplementedError\n\n\nclass RandomAffine(Transform):\n    \"\"\"Affine transformation.\"\"\"\n    def __init__(self, p_flip=0.0, max_rotation=0.0, max_shear=0.0, max_scale=0.0, max_ar_factor=0.0,\n                 border_mode='constant', output_sz = None, shift = None):\n        super().__init__(output_sz, shift)\n        self.p_flip = p_flip\n        self.max_rotation = max_rotation\n        self.max_shear = max_shear\n        self.max_scale = max_scale\n        self.max_ar_factor = max_ar_factor\n\n        self.pad_amount = 0\n        
if border_mode == 'constant':\n            self.border_flag = cv.BORDER_CONSTANT\n        elif border_mode == 'replicate':\n            self.border_flag = cv.BORDER_REPLICATE\n        else:\n            raise Exception\n\n        self.roll_values = self.roll()\n\n    def roll(self):\n        do_flip = random.random() < self.p_flip\n        theta = random.uniform(-self.max_rotation, self.max_rotation)\n\n        shear_x = random.uniform(-self.max_shear, self.max_shear)\n        shear_y = random.uniform(-self.max_shear, self.max_shear)\n\n        ar_factor = np.exp(random.uniform(-self.max_ar_factor, self.max_ar_factor))\n        scale_factor = np.exp(random.uniform(-self.max_scale, self.max_scale))\n\n        return do_flip, theta, (shear_x, shear_y), (scale_factor, scale_factor * ar_factor)\n\n    def _construct_t_mat(self, image_shape, do_flip, theta, shear_values, scale_factors):\n        im_h, im_w = image_shape\n        t_mat = np.identity(3)\n\n        if do_flip:\n            t_mat[0, 0] = -1.0\n            t_mat[0, 2] = im_w\n\n        t_rot = cv.getRotationMatrix2D((im_w * 0.5, im_h * 0.5), theta, 1.0)\n        t_rot = np.concatenate((t_rot, np.array([0.0, 0.0, 1.0]).reshape(1, 3)))\n\n        t_shear = np.array([[1.0, shear_values[0], -shear_values[0] * 0.5 * im_w],\n                            [shear_values[1], 1.0, -shear_values[1] * 0.5 * im_h],\n                            [0.0, 0.0, 1.0]])\n\n        t_scale = np.array([[scale_factors[0], 0.0, (1.0 - scale_factors[0]) * 0.5 * im_w],\n                            [0.0, scale_factors[1], (1.0 - scale_factors[1]) * 0.5 * im_h],\n                            [0.0, 0.0, 1.0]])\n\n        t_mat = t_scale @ t_rot @ t_shear @ t_mat\n\n        t_mat[0, 2] += self.pad_amount\n        t_mat[1, 2] += self.pad_amount\n\n        t_mat = t_mat[:2, :]\n\n        return t_mat\n\n    def __call__(self, image, is_mask=False):\n        input_tensor = torch.is_tensor(image)\n        if input_tensor:\n            image = torch_to_numpy(image)\n\n        do_flip, theta, shear_values, scale_factors = self.roll_values\n        t_mat = self._construct_t_mat(image.shape[:2], do_flip, theta, shear_values, scale_factors)\n        output_sz = (image.shape[1] + 2*self.pad_amount, image.shape[0] + 2*self.pad_amount)\n\n        if not is_mask:\n            image_t = cv.warpAffine(image, t_mat, output_sz, flags=cv.INTER_LINEAR,\n                                    borderMode=self.border_flag)\n        else:\n            image_t = cv.warpAffine(image, t_mat, output_sz, flags=cv.INTER_NEAREST,\n                                    borderMode=self.border_flag)\n            image_t = image_t.reshape(image.shape)\n\n        if input_tensor:\n            image_t = numpy_to_torch(image_t)\n\n        return self.crop_to_output(image_t)\n"
  },
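These transforms are typically instantiated as a list and applied to one image patch each, yielding a batch of augmented samples of a common `output_sz`. A usage sketch under that assumption, with dummy data and the import path as laid out in this repository:

```python
import torch
from pytracking.features.augmentation import Identity, FlipHorizontal, Blur, Rotate

output_sz = (256, 256)
transforms = [Identity(output_sz),
              FlipHorizontal(output_sz),
              Blur(sigma=2.0, output_sz=output_sz),
              Rotate(10, output_sz)]

patch = torch.rand(1, 3, 256, 256)                  # dummy image patch
augmented = torch.cat([T(patch) for T in transforms])
print(augmented.shape)                              # torch.Size([4, 3, 256, 256])
```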
  {
    "path": "external/AR/pytracking/features/color.py",
    "content": "import torch\nfrom pytracking.features.featurebase import FeatureBase\n\n\nclass RGB(FeatureBase):\n    \"\"\"RGB feature normalized to [-0.5, 0.5].\"\"\"\n    def dim(self):\n        return 3\n\n    def stride(self):\n        return self.pool_stride\n\n    def extract(self, im: torch.Tensor):\n        return im/255 - 0.5\n\n\nclass Grayscale(FeatureBase):\n    \"\"\"Grayscale feature normalized to [-0.5, 0.5].\"\"\"\n    def dim(self):\n        return 1\n\n    def stride(self):\n        return self.pool_stride\n\n    def extract(self, im: torch.Tensor):\n        return torch.mean(im/255 - 0.5, 1, keepdim=True)\n"
  },
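A quick sanity sketch of the two features above: both map a `[0, 255]` image tensor into `[-0.5, 0.5]`, and `Grayscale` additionally averages over channels:

```python
import torch
from pytracking.features.color import RGB, Grayscale

im = torch.randint(0, 256, (1, 3, 64, 64)).float()
rgb_feat = RGB().get_feature(im)         # (1, 3, 64, 64), values in [-0.5, 0.5]
gray_feat = Grayscale().get_feature(im)  # (1, 1, 64, 64)
print(rgb_feat.min() >= -0.5, rgb_feat.max() <= 0.5, gray_feat.shape)
```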
  {
    "path": "external/AR/pytracking/features/deep.py",
    "content": "from pytracking.features.featurebase import FeatureBase, MultiFeatureBase\nimport torch\nimport torchvision\nfrom pytracking import TensorList\nfrom pytracking.evaluation.environment import env_settings\nimport os\nfrom pytracking.utils.loading import load_network\nfrom ltr.models.backbone.resnet18_vggm import resnet18_vggmconv1\n\nnormalize = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                                             std=[0.229, 0.224, 0.225])\n\n\nclass ResNet18m1(MultiFeatureBase):\n    \"\"\"ResNet18 feature together with the VGG-m conv1 layer.\n    args:\n        output_layers: List of layers to output.\n        net_path: Relative or absolute net path (default should be fine).\n        use_gpu: Use GPU or CPU.\n    \"\"\"\n\n    def __init__(self, output_layers, net_path=None, use_gpu=True, *args, **kwargs):\n        super(ResNet18m1, self).__init__(*args, **kwargs)\n\n        for l in output_layers:\n            if l not in ['vggconv1', 'conv1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']:\n                raise ValueError('Unknown layer')\n\n        self.output_layers = list(output_layers)\n        self.use_gpu = use_gpu\n        self.net_path = 'resnet18_vggmconv1/resnet18_vggmconv1.pth' if net_path is None else net_path\n\n    def initialize(self):\n\n        if isinstance(self.pool_stride, int) and self.pool_stride == 1:\n            self.pool_stride = [1] * len(self.output_layers)\n\n        self.layer_stride = {'vggconv1': 2, 'conv1': 2, 'layer1': 4, 'layer2': 8, 'layer3': 16, 'layer4': 32,\n                             'fc': None}\n        self.layer_dim = {'vggconv1': 96, 'conv1': 64, 'layer1': 64, 'layer2': 128, 'layer3': 256, 'layer4': 512,\n                          'fc': None}\n\n        self.mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)\n        self.std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)\n\n        if os.path.isabs(self.net_path):\n            net_path_full = [self.net_path]\n        else:\n            root_paths = env_settings().network_path\n            if isinstance(root_paths, str):\n                root_paths = [root_paths]\n            net_path_full = [os.path.join(root, self.net_path) for root in root_paths]\n\n        self.net = None\n        for net_path in net_path_full:\n            try:\n                self.net = resnet18_vggmconv1(self.output_layers, path=net_path)\n                break\n            except:\n                pass\n        if self.net is None:\n            raise Exception('Did not find network file {}'.format(self.net_path))\n\n        if self.use_gpu:\n            self.net.cuda()\n        self.net.eval()\n\n    def dim(self):\n        return TensorList([self.layer_dim[l] for l in self.output_layers])\n\n    def stride(self):\n        return TensorList([s * self.layer_stride[l] for l, s in zip(self.output_layers, self.pool_stride)])\n\n    def extract(self, im: torch.Tensor):\n        im = im / 255\n        im -= self.mean\n        im /= self.std\n\n        if self.use_gpu:\n            im = im.cuda()\n\n        with torch.no_grad():\n            return TensorList(self.net(im).values())\n\n\nclass ATOMResNet18(MultiFeatureBase):\n    \"\"\"ResNet18 feature with the ATOM IoUNet.\n    args:\n        output_layers: List of layers to output.\n        net_path: Relative or absolute net path (default should be fine).\n        use_gpu: Use GPU or CPU.\n    \"\"\"\n\n    def __init__(self, output_layers=('layer3',), net_path='atom_iou', use_gpu=True, *args, 
**kwargs):\n        super().__init__(*args, **kwargs)\n\n        self.output_layers = list(output_layers)\n        self.use_gpu = use_gpu\n        self.net_path = net_path\n\n    def initialize(self):\n        self.net = load_network(self.net_path)\n\n        if self.use_gpu:\n            self.net.cuda()\n        self.net.eval()\n\n        self.iou_predictor = self.net.bb_regressor\n\n        self.layer_stride = {'conv1': 2, 'layer1': 4, 'layer2': 8, 'layer3': 16, 'layer4': 32, 'classification': 16,\n                             'fc': None}\n        self.layer_dim = {'conv1': 64, 'layer1': 64, 'layer2': 128, 'layer3': 256, 'layer4': 512, 'classification': 256,\n                          'fc': None}\n\n        self.iounet_feature_layers = self.net.bb_regressor_layer\n\n        if isinstance(self.pool_stride, int) and self.pool_stride == 1:\n            self.pool_stride = [1] * len(self.output_layers)\n\n        self.feature_layers = sorted(list(set(self.output_layers + self.iounet_feature_layers)))\n\n        self.mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)\n        self.std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)\n\n    def dim(self):\n        return TensorList([self.layer_dim[l] for l in self.output_layers])\n\n    def stride(self):\n        return TensorList([s * self.layer_stride[l] for l, s in zip(self.output_layers, self.pool_stride)])\n\n    def extract(self, im: torch.Tensor):\n        im = im / 255\n        im -= self.mean\n        im /= self.std\n\n        if self.use_gpu:\n            im = im.cuda()\n\n        with torch.no_grad():\n            output_features = self.net.extract_features(im, self.feature_layers)\n\n        # Store the raw resnet features which are input to iounet\n        self.iounet_backbone_features = TensorList(\n            [output_features[layer].clone() for layer in self.iounet_feature_layers])\n\n        # Store the processed features from iounet, just before pooling\n        with torch.no_grad():\n            self.iounet_features = TensorList(self.iou_predictor.get_iou_feat(self.iounet_backbone_features))\n\n        return TensorList([output_features[layer] for layer in self.output_layers])\n"
  },
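Both extractors above apply the same input normalization before the forward pass: scale pixels from `[0, 255]` to `[0, 1]`, then standardize per channel with the ImageNet mean/std. Restated standalone on a dummy tensor:

```python
import torch

mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1)
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1)

im = torch.randint(0, 256, (1, 3, 224, 224)).float()
im = im / 255          # [0, 255] -> [0, 1]
im -= mean             # subtract per-channel ImageNet mean
im /= std              # divide by per-channel ImageNet std
print(im.shape, im.mean().item())
```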
  {
    "path": "external/AR/pytracking/features/extractor.py",
    "content": "import torch\nfrom pytracking.features.preprocessing import sample_patch\nfrom pytracking import TensorList\n\nclass ExtractorBase:\n    \"\"\"Base feature extractor class.\n    args:\n        features: List of features.\n    \"\"\"\n    def __init__(self, features):\n        self.features = features\n\n    def initialize(self):\n        for f in self.features:\n            f.initialize()\n\n\nclass SingleResolutionExtractor(ExtractorBase):\n    \"\"\"Single resolution feature extractor.\n    args:\n        features: List of features.\n    \"\"\"\n    def __init__(self, features):\n        super().__init__(features)\n\n        self.feature_stride = self.features[0].stride()\n        if isinstance(self.feature_stride, (list, TensorList)):\n            self.feature_stride = self.feature_stride[0]\n\n    def stride(self):\n        return self.feature_stride\n\n    def size(self, input_sz):\n        return input_sz // self.stride()\n\n    def extract(self, im, pos, scales, image_sz):\n        if isinstance(scales, (int, float)):\n            scales = [scales]\n\n        # Get image patches\n        im_patches = torch.cat([sample_patch(im, pos, s*image_sz, image_sz) for s in scales])\n\n        # Compute features\n        feature_map = torch.cat(TensorList([f.get_feature(im_patches) for f in self.features]).unroll(), dim=1)\n\n        return feature_map\n\n\nclass MultiResolutionExtractor(ExtractorBase):\n    \"\"\"Multi-resolution feature extractor.\n    args:\n        features: List of features.\n    \"\"\"\n    def __init__(self, features, patch_mode='replicate', max_scale_change=None):\n        super().__init__(features)\n        self.patch_mode = patch_mode\n        self.max_scale_change = max_scale_change\n        self.is_color = None\n\n    def stride(self):\n        return torch.Tensor(TensorList([f.stride() for f in self.features if self._return_feature(f)]).unroll().list())\n\n    def size(self, input_sz):\n        return TensorList([f.size(input_sz) for f in self.features if self._return_feature(f)]).unroll()\n\n    def dim(self):\n        return TensorList([f.dim() for f in self.features if self._return_feature(f)]).unroll()\n\n    def get_fparams(self, name: str = None):\n        if name is None:\n            return [f.fparams for f in self.features if self._return_feature(f)]\n        return TensorList([getattr(f.fparams, name) for f in self.features if self._return_feature(f)]).unroll()\n\n    def get_attribute(self, name: str, ignore_missing: bool = False):\n        if ignore_missing:\n            return TensorList([getattr(f, name) for f in self.features if self._return_feature(f) and hasattr(f, name)])\n        else:\n            return TensorList([getattr(f, name, None) for f in self.features if self._return_feature(f)])\n\n    def get_unique_attribute(self, name: str):\n        feat = None\n        for f in self.features:\n            if self._return_feature(f) and hasattr(f, name):\n                if feat is not None:\n                    raise RuntimeError('The attribute was not unique.')\n                feat = f\n        if feat is None:\n            raise RuntimeError('The attribute did not exist')\n        return getattr(feat, name)\n\n    def _return_feature(self, f):\n        return self.is_color is None or self.is_color and f.use_for_color or not self.is_color and f.use_for_gray\n\n    def set_is_color(self, is_color: bool):\n        self.is_color = is_color\n\n    def extract(self, im, pos, scales, image_sz, return_patches=False):\n        
\"\"\"Extract features.\n        args:\n            im: Image.\n            pos: Center position for extraction.\n            scales: Image scales to extract features from.\n            image_sz: Size to resize the image samples to before extraction.\n        \"\"\"\n        if isinstance(scales, (int, float)):\n            scales = [scales]\n\n        # Get image patches\n        patch_iter, coord_iter = zip(*(sample_patch(im, pos, s*image_sz, image_sz, mode=self.patch_mode,\n                                                    max_scale_change=self.max_scale_change) for s in scales))\n        im_patches = torch.cat(list(patch_iter))\n        patch_coords = torch.cat(list(coord_iter))\n\n        # im_patches = torch.cat([sample_patch(im, pos, s*image_sz, image_sz) for s in scales])\n\n        # Compute features\n        feature_map = TensorList([f.get_feature(im_patches) for f in self.features]).unroll()\n\n        if return_patches:\n            return feature_map, patch_coords, im_patches\n        else:\n            return feature_map, patch_coords\n\n    def extract_transformed(self, im, pos, scale, image_sz, transforms):\n        \"\"\"Extract features from a set of transformed image samples.\n        args:\n            im: Image.\n            pos: Center position for extraction.\n            scale: Image scale to extract features from.\n            image_sz: Size to resize the image samples to before extraction.\n            transforms: A set of image transforms to apply.\n        \"\"\"\n\n        # Get image patche\n        im_patch, _ = sample_patch(im, pos, scale*image_sz, image_sz)\n\n        # Apply transforms\n        im_patches = torch.cat([T(im_patch) for T in transforms])\n\n        # Compute features\n        feature_map = TensorList([f.get_feature(im_patches) for f in self.features]).unroll()\n\n        return feature_map \n"
  },
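The multi-scale logic in `extract()` relies on `sample_patch` to cut a patch of size `s * image_sz` around `pos` and resize it back to `image_sz`, so the target appears at several apparent scales. A deliberately naive sketch of that idea (no border padding or `max_scale_change` handling, unlike the real `sample_patch`):

```python
import torch
import torch.nn.functional as F

def naive_multiscale_patches(im, pos, scales, image_sz):
    # im: (1, C, H, W); pos: (row, col) center; image_sz: (h, w) output size
    patches = []
    for s in scales:
        sz = [int(s * image_sz[0]), int(s * image_sz[1])]
        r0 = max(int(pos[0] - sz[0] // 2), 0)
        c0 = max(int(pos[1] - sz[1] // 2), 0)
        crop = im[:, :, r0:r0 + sz[0], c0:c0 + sz[1]]
        # resize each crop back to the common sample size
        patches.append(F.interpolate(crop, size=list(image_sz), mode='bilinear',
                                     align_corners=False))
    return torch.cat(patches)

im = torch.rand(1, 3, 480, 640)
print(naive_multiscale_patches(im, (240, 320), [0.8, 1.0, 1.25], (224, 224)).shape)
# torch.Size([3, 3, 224, 224]) -- one patch per scale
```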
  {
    "path": "external/AR/pytracking/features/featurebase.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom pytracking import TensorList\n\n\nclass FeatureBase:\n    \"\"\"Base feature class.\n    args:\n        fparams: Feature specific parameters.\n        pool_stride: Amount of average pooling to apply do downsample the feature map.\n        output_size: Alternatively, specify the output size of the feature map. Adaptive average pooling will be applied.\n        normalize_power: The power exponent for the normalization. None means no normalization (default).\n        use_for_color: Use this feature for color images.\n        use_for_gray: Use this feature for grayscale images.\n    \"\"\"\n    def __init__(self, fparams = None, pool_stride = None, output_size = None, normalize_power = None, use_for_color = True, use_for_gray = True):\n        self.fparams = fparams\n        self.pool_stride = 1 if pool_stride is None else pool_stride\n        self.output_size = output_size\n        self.normalize_power = normalize_power\n        self.use_for_color = use_for_color\n        self.use_for_gray = use_for_gray\n\n    def initialize(self):\n        pass\n\n    def dim(self):\n        raise NotImplementedError\n\n    def stride(self):\n        raise NotImplementedError\n\n    def size(self, im_sz):\n        if self.output_size is None:\n            return im_sz // self.stride()\n        if isinstance(im_sz, torch.Tensor):\n            return torch.Tensor([self.output_size[0], self.output_size[1]])\n        return self.output_size\n\n    def extract(self, im):\n        \"\"\"Performs feature extraction.\"\"\"\n        raise NotImplementedError\n\n    def get_feature(self, im: torch.Tensor):\n        \"\"\"Get the feature. Generally, call this function.\n        args:\n            im: image patch as a torch.Tensor.\n        \"\"\"\n\n        # Return empty tensor if it should not be used\n        is_color = im.shape[1] == 3\n        if is_color and not self.use_for_color or not is_color and not self.use_for_gray:\n            return torch.Tensor([])\n\n        # Extract feature\n        feat = self.extract(im)\n\n        # Pool/downsample\n        if self.output_size is not None:\n            feat = F.adaptive_avg_pool2d(feat, self.output_size)\n        elif self.pool_stride != 1:\n            feat = F.avg_pool2d(feat, self.pool_stride, self.pool_stride)\n\n        # Normalize\n        if self.normalize_power is not None:\n            feat /= (torch.sum(feat.abs().view(feat.shape[0],1,1,-1)**self.normalize_power, dim=3, keepdim=True) /\n                     (feat.shape[1]*feat.shape[2]*feat.shape[3]) + 1e-10)**(1/self.normalize_power)\n\n        return feat\n\n\nclass MultiFeatureBase(FeatureBase):\n    \"\"\"Base class for features potentially having multiple feature blocks as output (like CNNs).\n    See FeatureBase for more info.\n    \"\"\"\n    def size(self, im_sz):\n        if self.output_size is None:\n            return TensorList([im_sz // s for s in self.stride()])\n        if isinstance(im_sz, torch.Tensor):\n            return TensorList([im_sz // s if sz is None else torch.Tensor([sz[0], sz[1]]) for sz, s in zip(self.output_size, self.stride())])\n\n    def get_feature(self, im: torch.Tensor):\n        \"\"\"Get the feature. 
Generally, call this function.\n        args:\n            im: image patch as a torch.Tensor.\n        \"\"\"\n\n        # Return empty tensor if it should not be used\n        is_color = im.shape[1] == 3\n        if is_color and not self.use_for_color or not is_color and not self.use_for_gray:\n            return torch.Tensor([])\n\n        feat_list = self.extract(im)\n\n        output_sz = [None]*len(feat_list) if self.output_size is None else self.output_size\n\n        # Pool/downsample\n        for i, (sz, s) in enumerate(zip(output_sz, self.pool_stride)):\n            if sz is not None:\n                feat_list[i] = F.adaptive_avg_pool2d(feat_list[i], sz)\n            elif s != 1:\n                feat_list[i] = F.avg_pool2d(feat_list[i], s, s)\n\n        # Normalize\n        if self.normalize_power is not None:\n            for feat in feat_list:\n                feat /= (torch.sum(feat.abs().view(feat.shape[0],1,1,-1)**self.normalize_power, dim=3, keepdim=True) /\n                         (feat.shape[1]*feat.shape[2]*feat.shape[3]) + 1e-10)**(1/self.normalize_power)\n\n        return feat_list"
  },
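  {
    "path": "external/AR/pytracking/examples/featurebase_example.py",
    "content": "# Illustrative sketch, not part of the original pytracking sources: a minimal\n# FeatureBase subclass showing how get_feature() wraps extract() with pooling\n# and normalization. Grayscale is a toy feature invented for this demo.\nimport torch\n\nfrom pytracking.features.featurebase import FeatureBase\n\n\nclass Grayscale(FeatureBase):\n    # Toy feature: mean over the color channels, kept as a 1-channel map.\n    def dim(self):\n        return 1\n\n    def stride(self):\n        return self.pool_stride\n\n    def extract(self, im: torch.Tensor):\n        return im.mean(dim=1, keepdim=True)\n\n\nfeat = Grayscale(pool_stride=2, normalize_power=2)\nim = torch.rand(1, 3, 64, 64)\nout = feat.get_feature(im)    # extract -> avg_pool2d -> power normalization\nprint(out.shape)              # torch.Size([1, 1, 32, 32])\n"
  },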
  {
    "path": "external/AR/pytracking/features/net_wrappers.py",
    "content": "import torch\nfrom pytracking.utils.loading import load_network\n\n\nclass NetWrapper:\n    \"\"\"Used for wrapping networks in pytracking.\n    Network modules and functions can be accessed directly as if they were members of this class.\"\"\"\n    _rec_iter=0\n    def __init__(self, net_path, use_gpu=True, initialize=False, **kwargs):\n        self.net_path = net_path\n        self.use_gpu = use_gpu\n        self.net = None\n        self.net_kwargs = kwargs\n        if initialize:\n            self.initialize()\n\n    def __getattr__(self, name):\n        if self._rec_iter > 0:\n            self._rec_iter = 0\n            return None\n        self._rec_iter += 1\n        try:\n            ret_val = getattr(self.net, name)\n        except Exception as e:\n            self._rec_iter = 0\n            raise e\n        self._rec_iter = 0\n        return ret_val\n\n    def load_network(self):\n        self.net = load_network(self.net_path, **self.net_kwargs)\n        if self.use_gpu:\n            self.cuda()\n        self.eval()\n\n    def initialize(self):\n        self.load_network()\n\n\nclass NetWithBackbone(NetWrapper):\n    \"\"\"Wraps a network with a common backbone.\n    Assumes the network have a 'extract_backbone_features(image)' function.\"\"\"\n\n    def __init__(self, net_path, use_gpu=True, initialize=False, image_format='rgb',\n                 mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), **kwargs):\n        super().__init__(net_path, use_gpu, initialize, **kwargs)\n\n        self.image_format = image_format\n        self._mean = torch.Tensor(mean).view(1, -1, 1, 1)\n        self._std = torch.Tensor(std).view(1, -1, 1, 1)\n\n    def initialize(self, image_format='rgb', mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):\n        super().initialize()\n\n    def preprocess_image(self, im: torch.Tensor):\n        \"\"\"Normalize the image with the mean and standard deviation used by the network.\"\"\"\n\n        if self.image_format in ['rgb', 'bgr']:\n            im = im/255\n\n        if self.image_format in ['bgr', 'bgr255']:\n            im = im[:, [2, 1, 0], :, :]\n        im -= self._mean\n        im /= self._std\n\n        if self.use_gpu:\n            im = im.cuda()\n\n        return im\n\n    def extract_backbone(self, im: torch.Tensor):\n        \"\"\"Extract backbone features from the network.\n        Expects a float tensor image with pixel range [0, 255].\"\"\"\n        im = self.preprocess_image(im)\n        return self.net.extract_backbone_features(im)\n"
  },
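  {
    "path": "external/AR/pytracking/examples/net_wrapper_example.py",
    "content": "# Illustrative sketch, not part of the original pytracking sources: exercising\n# NetWithBackbone.preprocess_image without loading a checkpoint. The path\n# 'dummy_net.pth' is a placeholder and is never read because initialize=False.\nimport torch\n\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\nnet = NetWithBackbone(net_path='dummy_net.pth', use_gpu=False, initialize=False)\nim = torch.rand(1, 3, 288, 288) * 255    # float RGB image with pixel range [0, 255]\nout = net.preprocess_image(im)           # scales to [0, 1], then (im - mean) / std\nprint(out.shape)\n"
  },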
  {
    "path": "external/AR/pytracking/features/preprocessing.py",
    "content": "import torch\nimport torch.nn.functional as F\nimport numpy as np\n\n\ndef numpy_to_torch(a: np.ndarray):\n    return torch.from_numpy(a).float().permute(2, 0, 1).unsqueeze(0)\n\n\ndef torch_to_numpy(a: torch.Tensor):\n    return a.squeeze(0).permute(1,2,0).numpy()\n\n\ndef sample_patch_transformed(im, pos, scale, image_sz, transforms, is_mask=False):\n    \"\"\"Extract transformed image samples.\n    args:\n        im: Image.\n        pos: Center position for extraction.\n        scale: Image scale to extract features from.\n        image_sz: Size to resize the image samples to before extraction.\n        transforms: A set of image transforms to apply.\n    \"\"\"\n\n    # Get image patche\n    im_patch, _ = sample_patch(im, pos, scale*image_sz, image_sz, is_mask=is_mask)\n\n    # Apply transforms\n    im_patches = torch.cat([T(im_patch, is_mask=is_mask) for T in transforms])\n\n    return im_patches\n\n\ndef sample_patch_multiscale(im, pos, scales, image_sz, mode: str='replicate', max_scale_change=None):\n    \"\"\"Extract image patches at multiple scales.\n    args:\n        im: Image.\n        pos: Center position for extraction.\n        scales: Image scales to extract image patches from.\n        image_sz: Size to resize the image samples to\n        mode: how to treat image borders: 'replicate' (default), 'inside' or 'inside_major'\n        max_scale_change: maximum allowed scale change when using 'inside' and 'inside_major' mode\n    \"\"\"\n    if isinstance(scales, (int, float)):\n        scales = [scales]\n\n    # Get image patches\n    patch_iter, coord_iter = zip(*(sample_patch(im, pos, s*image_sz, image_sz, mode=mode,\n                                                max_scale_change=max_scale_change) for s in scales))\n    im_patches = torch.cat(list(patch_iter))\n    patch_coords = torch.cat(list(coord_iter))\n\n    return  im_patches, patch_coords\n\n\ndef sample_patch(im: torch.Tensor, pos: torch.Tensor, sample_sz: torch.Tensor, output_sz: torch.Tensor = None,\n                 mode: str = 'replicate', max_scale_change=None, is_mask=False):\n    \"\"\"Sample an image patch.\n\n    args:\n        im: Image\n        pos: center position of crop\n        sample_sz: size to crop\n        output_sz: size to resize to\n        mode: how to treat image borders: 'replicate' (default), 'inside' or 'inside_major'\n        max_scale_change: maximum allowed scale change when using 'inside' and 'inside_major' mode\n    \"\"\"\n\n    # if mode not in ['replicate', 'inside']:\n    #     raise ValueError('Unknown border mode \\'{}\\'.'.format(mode))\n\n    # copy and convert\n    posl = pos.long().clone()\n\n    pad_mode = mode\n\n    # Get new sample size if forced inside the image\n    if mode == 'inside' or mode == 'inside_major':\n        pad_mode = 'replicate'\n        im_sz = torch.Tensor([im.shape[2], im.shape[3]])\n        shrink_factor = (sample_sz.float() / im_sz)\n        if mode == 'inside':\n            shrink_factor = shrink_factor.max()\n        elif mode == 'inside_major':\n            shrink_factor = shrink_factor.min()\n        shrink_factor.clamp_(min=1, max=max_scale_change)\n        sample_sz = (sample_sz.float() / shrink_factor).long()\n\n    # Compute pre-downsampling factor\n    if output_sz is not None:\n        resize_factor = torch.min(sample_sz.float() / output_sz.float()).item()\n        df = int(max(int(resize_factor - 0.1), 1))\n    else:\n        df = int(1)\n\n    sz = sample_sz.float() / df     # new size\n\n    # Do downsampling\n    if 
df > 1:\n        os = posl % df              # offset\n        posl = (posl - os) / df     # new position\n        im2 = im[..., os[0].item()::df, os[1].item()::df]   # downsample\n    else:\n        im2 = im\n\n    # compute size to crop\n    szl = torch.max(sz.round(), torch.Tensor([2])).long()\n\n    # Extract top and bottom coordinates\n    tl = posl - (szl - 1)/2\n    br = posl + szl/2 + 1\n\n    # Shift the crop to inside\n    if mode == 'inside' or mode == 'inside_major':\n        im2_sz = torch.LongTensor([im2.shape[2], im2.shape[3]])\n        shift = (-tl).clamp(0) - (br - im2_sz).clamp(0)\n        tl += shift\n        br += shift\n\n        outside = ((-tl).clamp(0) + (br - im2_sz).clamp(0)) // 2\n        shift = (-tl - outside) * (outside > 0).long()\n        tl += shift\n        br += shift\n\n        # Get image patch\n        # im_patch = im2[...,tl[0].item():br[0].item(),tl[1].item():br[1].item()]\n\n    # Get image patch\n    if not is_mask:\n        im_patch = F.pad(im2, (-tl[1].item(), br[1].item() - im2.shape[3], -tl[0].item(), br[0].item() - im2.shape[2]), pad_mode)\n    else:\n        im_patch = F.pad(im2, (-tl[1].item(), br[1].item() - im2.shape[3], -tl[0].item(), br[0].item() - im2.shape[2]))\n\n    # Get image coordinates\n    patch_coord = df * torch.cat((tl, br)).view(1,4)\n\n    if output_sz is None or (im_patch.shape[-2] == output_sz[0] and im_patch.shape[-1] == output_sz[1]):\n        return im_patch.clone(), patch_coord\n\n    # Resample\n    if not is_mask:\n        im_patch = F.interpolate(im_patch, output_sz.long().tolist(), mode='bilinear')\n    else:\n        im_patch = F.interpolate(im_patch, output_sz.long().tolist(), mode='nearest')\n\n    return im_patch, patch_coord\n"
  },
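  {
    "path": "external/AR/pytracking/examples/sample_patch_example.py",
    "content": "# Illustrative sketch, not part of the original pytracking sources: cropping a\n# patch that overlaps the image border and reading the returned crop\n# coordinates. Assumes external/AR is on PYTHONPATH.\nimport torch\n\nfrom pytracking.features.preprocessing import sample_patch\n\nim = torch.rand(1, 3, 360, 480)\npos = torch.Tensor([100, 400])          # center near the right border\nsample_sz = torch.Tensor([200, 200])    # crop size in image pixels\noutput_sz = torch.Tensor([96, 96])      # the crop is resized to this size\n\npatch, coord = sample_patch(im, pos, sample_sz, output_sz, mode='replicate')\nprint(patch.shape)    # torch.Size([1, 3, 96, 96]); border pixels are replicated\nprint(coord)          # (1, 4) crop coordinates (y0, x0, y1, x1) in the image\n"
  },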
  {
    "path": "external/AR/pytracking/features/util.py",
    "content": "import torch\nfrom pytracking.features.featurebase import FeatureBase\n\n\nclass Concatenate(FeatureBase):\n    \"\"\"A feature that concatenates other features.\n    args:\n        features: List of features to concatenate.\n    \"\"\"\n    def __init__(self, features, pool_stride = None, normalize_power = None, use_for_color = True, use_for_gray = True):\n        super(Concatenate, self).__init__(pool_stride, normalize_power, use_for_color, use_for_gray)\n        self.features = features\n\n        self.input_stride = self.features[0].stride()\n\n        for feat in self.features:\n            if self.input_stride != feat.stride():\n                raise ValueError('Strides for the features must be the same for a bultiresolution feature.')\n\n    def dim(self):\n        return sum([f.dim() for f in self.features])\n\n    def stride(self):\n        return self.pool_stride * self.input_stride\n\n    def extract(self, im: torch.Tensor):\n        return torch.cat([f.get_feature(im) for f in self.features], 1)"
  },
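  {
    "path": "external/AR/pytracking/examples/concatenate_example.py",
    "content": "# Illustrative sketch, not part of the original pytracking sources:\n# concatenating two toy single-channel features along the channel dimension.\n# ChannelPick is invented for this demo.\nimport torch\n\nfrom pytracking.features.featurebase import FeatureBase\nfrom pytracking.features.util import Concatenate\n\n\nclass ChannelPick(FeatureBase):\n    # Toy feature: selects a single color channel.\n    def __init__(self, channel, **kwargs):\n        super().__init__(**kwargs)\n        self.channel = channel\n\n    def dim(self):\n        return 1\n\n    def stride(self):\n        return 1\n\n    def extract(self, im: torch.Tensor):\n        return im[:, self.channel:self.channel + 1]\n\n\nfeats = Concatenate([ChannelPick(0), ChannelPick(1)])\nout = feats.get_feature(torch.rand(1, 3, 32, 32))\nprint(out.shape)    # torch.Size([1, 2, 32, 32])\n"
  },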
  {
    "path": "external/AR/pytracking/libs/__init__.py",
    "content": "from .tensorlist import TensorList\nfrom .tensordict import TensorDict"
  },
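  {
    "path": "external/AR/pytracking/examples/tensorlist_example.py",
    "content": "# Illustrative sketch, not part of the original pytracking sources: TensorList\n# broadcasts arithmetic over its elements and flattens nested lists.\nimport torch\n\nfrom pytracking.libs import TensorList\n\ntl = TensorList([torch.ones(2), 2 * torch.ones(3)])\nprint((tl * 2 + 1)[0])         # tensor([3., 3.])\n\nnested = TensorList([tl, TensorList([torch.zeros(1)])])\nprint(len(nested.unroll()))    # 3: nested TensorLists are flattened\n"
  },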
  {
    "path": "external/AR/pytracking/libs/complex.py",
    "content": "import torch\nfrom pytracking.libs.tensorlist import tensor_operation\n\n\ndef is_complex(a: torch.Tensor) -> bool:\n    return a.dim() >= 4 and a.shape[-1] == 2\n\n\ndef is_real(a: torch.Tensor) -> bool:\n    return not is_complex(a)\n\n\n@tensor_operation\ndef mult(a: torch.Tensor, b: torch.Tensor):\n    \"\"\"Pointwise complex multiplication of complex tensors.\"\"\"\n\n    if is_real(a):\n        if a.dim() >= b.dim():\n            raise ValueError('Incorrect dimensions.')\n        # a is real\n        return mult_real_cplx(a, b)\n    if is_real(b):\n        if b.dim() >= a.dim():\n            raise ValueError('Incorrect dimensions.')\n        # b is real\n        return mult_real_cplx(b, a)\n\n    # Both complex\n    c = mult_real_cplx(a[..., 0], b)\n    c[..., 0] -= a[..., 1] * b[..., 1]\n    c[..., 1] += a[..., 1] * b[..., 0]\n    return c\n\n\n@tensor_operation\ndef mult_conj(a: torch.Tensor, b: torch.Tensor):\n    \"\"\"Pointwise complex multiplication of complex tensors, with conjugate on b: a*conj(b).\"\"\"\n\n    if is_real(a):\n        if a.dim() >= b.dim():\n            raise ValueError('Incorrect dimensions.')\n        # a is real\n        return mult_real_cplx(a, conj(b))\n    if is_real(b):\n        if b.dim() >= a.dim():\n            raise ValueError('Incorrect dimensions.')\n        # b is real\n        return mult_real_cplx(b, a)\n\n    # Both complex\n    c = mult_real_cplx(b[...,0], a)\n    c[..., 0] += a[..., 1] * b[..., 1]\n    c[..., 1] -= a[..., 0] * b[..., 1]\n    return c\n\n\n@tensor_operation\ndef mult_real_cplx(a: torch.Tensor, b: torch.Tensor):\n    \"\"\"Pointwise complex multiplication of real tensor a with complex tensor b.\"\"\"\n\n    if is_real(b):\n        raise ValueError('Last dimension must have length 2.')\n\n    return a.unsqueeze(-1) * b\n\n\n@tensor_operation\ndef div(a: torch.Tensor, b: torch.Tensor):\n    \"\"\"Pointwise complex division of complex tensors.\"\"\"\n\n    if is_real(b):\n        if b.dim() >= a.dim():\n            raise ValueError('Incorrect dimensions.')\n        # b is real\n        return div_cplx_real(a, b)\n\n    return div_cplx_real(mult_conj(a, b), abs_sqr(b))\n\n\n@tensor_operation\ndef div_cplx_real(a: torch.Tensor, b: torch.Tensor):\n    \"\"\"Pointwise complex division of complex tensor a with real tensor b.\"\"\"\n\n    if is_real(a):\n        raise ValueError('Last dimension must have length 2.')\n\n    return a / b.unsqueeze(-1)\n\n\n@tensor_operation\ndef abs_sqr(a: torch.Tensor):\n    \"\"\"Squared absolute value.\"\"\"\n\n    if is_real(a):\n        raise ValueError('Last dimension must have length 2.')\n\n    return torch.sum(a*a, -1)\n\n\n@tensor_operation\ndef abs(a: torch.Tensor):\n    \"\"\"Absolute value.\"\"\"\n\n    if is_real(a):\n        raise ValueError('Last dimension must have length 2.')\n\n    return torch.sqrt(abs_sqr(a))\n\n\n@tensor_operation\ndef conj(a: torch.Tensor):\n    \"\"\"Complex conjugate.\"\"\"\n\n    if is_real(a):\n        raise ValueError('Last dimension must have length 2.')\n\n    # return a * torch.Tensor([1, -1], device=a.device)\n    return complex(a[...,0], -a[...,1])\n\n\n@tensor_operation\ndef real(a: torch.Tensor):\n    \"\"\"Real part.\"\"\"\n\n    if is_real(a):\n        raise ValueError('Last dimension must have length 2.')\n\n    return a[..., 0]\n\n\n@tensor_operation\ndef imag(a: torch.Tensor):\n    \"\"\"Imaginary part.\"\"\"\n\n    if is_real(a):\n        raise ValueError('Last dimension must have length 2.')\n\n    return a[..., 
1]\n\n\n@tensor_operation\ndef complex(a: torch.Tensor, b: torch.Tensor = None):\n    \"\"\"Create complex tensor from real and imaginary part.\"\"\"\n\n    if b is None:\n        b = a.new_zeros(a.shape)\n    elif a is None:\n        a = b.new_zeros(b.shape)\n\n    return torch.cat((a.unsqueeze(-1), b.unsqueeze(-1)), -1)\n\n\n@tensor_operation\ndef mtimes(a: torch.Tensor, b: torch.Tensor, conj_a=False, conj_b=False):\n    \"\"\"Complex matrix multiplication of complex tensors.\n    The dimensions (-3, -2) are matrix multiplied. -1 is the complex dimension.\"\"\"\n\n    if is_real(a):\n        if a.dim() >= b.dim():\n            raise ValueError('Incorrect dimensions.')\n        return mtimes_real_complex(a, b, conj_b=conj_b)\n    if is_real(b):\n        if b.dim() >= a.dim():\n            raise ValueError('Incorrect dimensions.')\n        return mtimes_complex_real(a, b, conj_a=conj_a)\n\n    if not conj_a and not conj_b:\n        return complex(torch.matmul(a[..., 0], b[..., 0]) - torch.matmul(a[..., 1], b[..., 1]),\n                       torch.matmul(a[..., 0], b[..., 1]) + torch.matmul(a[..., 1], b[..., 0]))\n    if conj_a and not conj_b:\n        return complex(torch.matmul(a[..., 0], b[..., 0]) + torch.matmul(a[..., 1], b[..., 1]),\n                       torch.matmul(a[..., 0], b[..., 1]) - torch.matmul(a[..., 1], b[..., 0]))\n    if not conj_a and conj_b:\n        return complex(torch.matmul(a[..., 0], b[..., 0]) + torch.matmul(a[..., 1], b[..., 1]),\n                       torch.matmul(a[..., 1], b[..., 0]) - torch.matmul(a[..., 0], b[..., 1]))\n    if conj_a and conj_b:\n        return complex(torch.matmul(a[..., 0], b[..., 0]) - torch.matmul(a[..., 1], b[..., 1]),\n                       -torch.matmul(a[..., 0], b[..., 1]) - torch.matmul(a[..., 1], b[..., 0]))\n\n\n@tensor_operation\ndef mtimes_real_complex(a: torch.Tensor, b: torch.Tensor, conj_b=False):\n    if is_real(b):\n        raise ValueError('Incorrect dimensions.')\n\n    if not conj_b:\n        return complex(torch.matmul(a, b[..., 0]), torch.matmul(a, b[..., 1]))\n    if conj_b:\n        return complex(torch.matmul(a, b[..., 0]), -torch.matmul(a, b[..., 1]))\n\n\n@tensor_operation\ndef mtimes_complex_real(a: torch.Tensor, b: torch.Tensor, conj_a=False):\n    if is_real(a):\n        raise ValueError('Incorrect dimensions.')\n\n    if not conj_a:\n        return complex(torch.matmul(a[..., 0], b), torch.matmul(a[..., 1], b))\n    if conj_a:\n        return complex(torch.matmul(a[..., 0], b), -torch.matmul(a[..., 1], b))\n\n\n@tensor_operation\ndef exp_imag(a: torch.Tensor):\n    \"\"\"Complex exponential with imaginary input: e^(i*a)\"\"\"\n\n    a = a.unsqueeze(-1)\n    return torch.cat((torch.cos(a), torch.sin(a)), -1)\n\n\n\n"
  },
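  {
    "path": "external/AR/pytracking/examples/complex_ops_example.py",
    "content": "# Illustrative sketch, not part of the original pytracking sources: this\n# module stores a complex tensor as a real tensor whose last dimension holds\n# (real, imag); is_complex() additionally requires dim() >= 4.\nimport torch\n\nfrom pytracking.libs import complex as cplx\n\na = torch.tensor([1., 2.]).view(1, 1, 1, 1, 2)    # 1 + 2i\nb = torch.tensor([3., 4.]).view(1, 1, 1, 1, 2)    # 3 + 4i\n\nprint(cplx.mult(a, b).view(-1))         # tensor([-5., 10.]) == (1+2i)(3+4i)\nprint(cplx.mult_conj(a, b).view(-1))    # tensor([11., 2.])  == (1+2i)(3-4i)\nprint(cplx.abs(b).view(-1))             # tensor([5.])\n"
  },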
  {
    "path": "external/AR/pytracking/libs/dcf.py",
    "content": "import torch\nimport math\nfrom pytracking import fourier\nfrom pytracking import complex\nimport torch.nn.functional as F\n\n\ndef hann1d(sz: int, centered = True) -> torch.Tensor:\n    \"\"\"1D cosine window.\"\"\"\n    if centered:\n        return 0.5 * (1 - torch.cos((2 * math.pi / (sz + 1)) * torch.arange(1, sz + 1).float()))\n    w = 0.5 * (1 + torch.cos((2 * math.pi / (sz + 2)) * torch.arange(0, sz//2 + 1).float()))\n    return torch.cat([w, w[1:sz-sz//2].flip((0,))])\n\n\ndef hann2d(sz: torch.Tensor, centered = True) -> torch.Tensor:\n    \"\"\"2D cosine window.\"\"\"\n    return hann1d(sz[0].item(), centered).reshape(1, 1, -1, 1) * hann1d(sz[1].item(), centered).reshape(1, 1, 1, -1)\n\n\ndef hann2d_clipped(sz: torch.Tensor, effective_sz: torch.Tensor, centered = True) -> torch.Tensor:\n    \"\"\"1D clipped cosine window.\"\"\"\n\n    # Ensure that the difference is even\n    effective_sz += (effective_sz - sz) % 2\n    effective_window = hann1d(effective_sz[0].item(), True).reshape(1, 1, -1, 1) * hann1d(effective_sz[1].item(), True).reshape(1, 1, 1, -1)\n\n    pad = (sz - effective_sz) / 2\n\n    window = F.pad(effective_window, (pad[1].item(), pad[1].item(), pad[0].item(), pad[0].item()), 'replicate')\n\n    if centered:\n        return window\n    else:\n        mid = (sz / 2).int()\n        window_shift_lr = torch.cat((window[:, :, :, mid[1]:], window[:, :, :, :mid[1]]), 3)\n        return torch.cat((window_shift_lr[:, :, mid[0]:, :], window_shift_lr[:, :, :mid[0], :]), 2)\n\n\ndef gauss_fourier(sz: int, sigma: float, half: bool = False) -> torch.Tensor:\n    if half:\n        k = torch.arange(0, int(sz/2+1))\n    else:\n        k = torch.arange(-int((sz-1)/2), int(sz/2+1))\n    return (math.sqrt(2*math.pi) * sigma / sz) * torch.exp(-2 * (math.pi * sigma * k.float() / sz)**2)\n\n\ndef gauss_spatial(sz, sigma, center=0, end_pad=0):\n    k = torch.arange(-(sz-1)/2, (sz+1)/2+end_pad)\n    return torch.exp(-1.0/(2*sigma**2) * (k - center)**2)\n\n\ndef label_function(sz: torch.Tensor, sigma: torch.Tensor):\n    return gauss_fourier(sz[0].item(), sigma[0].item()).reshape(1, 1, -1, 1) * gauss_fourier(sz[1].item(), sigma[1].item(), True).reshape(1, 1, 1, -1)\n\ndef label_function_spatial(sz: torch.Tensor, sigma: torch.Tensor, center: torch.Tensor = torch.zeros(2), end_pad: torch.Tensor = torch.zeros(2)):\n    \"\"\"The origin is in the middle of the image.\"\"\"\n    return gauss_spatial(sz[0].item(), sigma[0].item(), center[0], end_pad[0].item()).reshape(1, 1, -1, 1) * \\\n           gauss_spatial(sz[1].item(), sigma[1].item(), center[1], end_pad[1].item()).reshape(1, 1, 1, -1)\n\n\ndef cubic_spline_fourier(f, a):\n    \"\"\"The continuous Fourier transform of a cubic spline kernel.\"\"\"\n\n    bf = (6*(1 - torch.cos(2 * math.pi * f)) + 3*a*(1 - torch.cos(4 * math.pi * f))\n           - (6 + 8*a)*math.pi*f*torch.sin(2 * math.pi * f) - 2*a*math.pi*f*torch.sin(4 * math.pi * f)) \\\n         / (4 * math.pi**4 * f**4)\n\n    bf[f == 0] = 1\n\n    return bf\n\n\ndef get_interp_fourier(sz: torch.Tensor, method='ideal', bicubic_param=0.5, centering=True, windowing=False, device='cpu'):\n\n    ky, kx = fourier.get_frequency_coord(sz)\n\n    if method=='ideal':\n        interp_y = torch.ones(ky.shape) / sz[0]\n        interp_x = torch.ones(kx.shape) / sz[1]\n    elif method=='bicubic':\n        interp_y = cubic_spline_fourier(ky / sz[0], bicubic_param) / sz[0]\n        interp_x = cubic_spline_fourier(kx / sz[1], bicubic_param) / sz[1]\n    else:\n        raise 
ValueError('Unknown method.')\n\n    if centering:\n        interp_y = complex.mult(interp_y, complex.exp_imag((-math.pi/sz[0]) * ky))\n        interp_x = complex.mult(interp_x, complex.exp_imag((-math.pi/sz[1]) * kx))\n\n    if windowing:\n        raise NotImplementedError\n\n    return interp_y.to(device), interp_x.to(device)\n\n\ndef interpolate_dft(a: torch.Tensor, interp_fs) -> torch.Tensor:\n\n    if isinstance(interp_fs, torch.Tensor):\n        return complex.mult(a, interp_fs)\n    if isinstance(interp_fs, (tuple, list)):\n        return complex.mult(complex.mult(a, interp_fs[0]), interp_fs[1])\n    raise ValueError('\"interp_fs\" must be tensor or tuple of tensors.')\n\n\ndef get_reg_filter(sz: torch.Tensor, target_sz: torch.Tensor, params):\n    \"\"\"Computes regularization filter in CCOT and ECO.\"\"\"\n\n    if not params.use_reg_window:\n        return params.reg_window_min * torch.ones(1,1,1,1)\n\n    if getattr(params, 'reg_window_square', False):\n        target_sz = target_sz.prod().sqrt() * torch.ones(2)\n\n    # Normalization factor\n    reg_scale = 0.5 * target_sz\n\n    # Construct grid\n    if getattr(params, 'reg_window_centered', True):\n        wrg = torch.arange(-int((sz[0]-1)/2), int(sz[0]/2+1), dtype=torch.float32).view(1,1,-1,1)\n        wcg = torch.arange(-int((sz[1]-1)/2), int(sz[1]/2+1), dtype=torch.float32).view(1,1,1,-1)\n    else:\n        wrg = torch.cat([torch.arange(0, int(sz[0]/2+1), dtype=torch.float32),\n                         torch.arange(-int((sz[0] - 1) / 2), 0, dtype=torch.float32)]).view(1,1,-1,1)\n        wcg = torch.cat([torch.arange(0, int(sz[1]/2+1), dtype=torch.float32),\n                         torch.arange(-int((sz[1] - 1) / 2), 0, dtype=torch.float32)]).view(1,1,1,-1)\n\n    # Construct regularization window\n    reg_window = (params.reg_window_edge - params.reg_window_min) * \\\n                 (torch.abs(wrg/reg_scale[0])**params.reg_window_power +\n                  torch.abs(wcg/reg_scale[1])**params.reg_window_power) + params.reg_window_min\n\n    # Compute DFT and enforce sparsity\n    reg_window_dft = torch.rfft(reg_window, 2) / sz.prod()\n    reg_window_dft_abs = complex.abs(reg_window_dft)\n    reg_window_dft[reg_window_dft_abs < params.reg_sparsity_threshold * reg_window_dft_abs.max(), :] = 0\n\n    # Do the inverse transform to correct for the window minimum\n    reg_window_sparse = torch.irfft(reg_window_dft, 2, signal_sizes=sz.long().tolist())\n    reg_window_dft[0,0,0,0,0] += params.reg_window_min - sz.prod() * reg_window_sparse.min()\n    reg_window_dft = complex.real(fourier.rfftshift2(reg_window_dft))\n\n    # Remove zeros\n    max_inds,_ = reg_window_dft.nonzero().max(dim=0)\n    mid_ind = int((reg_window_dft.shape[2]-1)/2)\n    top = max_inds[-2].item() + 1\n    bottom = 2*mid_ind - max_inds[-2].item()\n    right = max_inds[-1].item() + 1\n    reg_window_dft = reg_window_dft[..., bottom:top, :right]\n    if reg_window_dft.shape[-1] > 1:\n        reg_window_dft = torch.cat([reg_window_dft[..., 1:].flip((2, 3)), reg_window_dft], -1)\n\n    return reg_window_dft\n\n\ndef max2d(a: torch.Tensor) -> (torch.Tensor, torch.Tensor):\n    \"\"\"Computes maximum and argmax in the last two dimensions.\"\"\"\n\n    max_val_row, argmax_row = torch.max(a, dim=-2)\n    max_val, argmax_col = torch.max(max_val_row, dim=-1)\n    argmax_row = argmax_row.view(argmax_col.numel(),-1)[torch.arange(argmax_col.numel()), argmax_col.view(-1)]\n    argmax_row = argmax_row.reshape(argmax_col.shape)\n    argmax = 
torch.cat((argmax_row.unsqueeze(-1), argmax_col.unsqueeze(-1)), -1)\n    return max_val, argmax\n"
  },
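  {
    "path": "external/AR/pytracking/examples/dcf_window_example.py",
    "content": "# Illustrative sketch, not part of the original pytracking sources: build a\n# 2D Hann window and locate the peak of a response map with max2d.\nimport torch\n\nfrom pytracking.libs import dcf\n\nwindow = dcf.hann2d(torch.LongTensor([16, 16]))    # shape (1, 1, 16, 16)\n\nresponse = torch.zeros(1, 1, 16, 16)\nresponse[0, 0, 5, 9] = 1.0\nmax_val, argmax = dcf.max2d(response * window)\nprint(max_val, argmax)    # peak value and its (row, col) location: [5, 9]\n"
  },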
  {
    "path": "external/AR/pytracking/libs/fourier.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom pytracking import complex, TensorList\nfrom pytracking.libs.tensorlist import tensor_operation\n\n\n@tensor_operation\ndef rfftshift2(a: torch.Tensor):\n    h = a.shape[2] + 2\n    return torch.cat((a[:,:,(h-1)//2:,...], a[:,:,:h//2,...]), 2)\n\n\n@tensor_operation\ndef irfftshift2(a: torch.Tensor):\n    mid = int((a.shape[2]-1)/2)\n    return torch.cat((a[:,:,mid:,...], a[:,:,:mid,...]), 2)\n\n\n@tensor_operation\ndef cfft2(a):\n    \"\"\"Do FFT and center the low frequency component.\n    Always produces odd (full) output sizes.\"\"\"\n\n    return rfftshift2(torch.rfft(a, 2))\n\n\n@tensor_operation\ndef cifft2(a, signal_sizes=None):\n    \"\"\"Do inverse FFT corresponding to cfft2.\"\"\"\n\n    return torch.irfft(irfftshift2(a), 2, signal_sizes=signal_sizes)\n\n\n@tensor_operation\ndef sample_fs(a: torch.Tensor, grid_sz: torch.Tensor = None, rescale = True):\n    \"\"\"Samples the Fourier series.\"\"\"\n\n    # Size of the fourier series\n    sz = torch.Tensor([a.shape[2], 2*a.shape[3]-1]).float()\n\n    # Default grid\n    if grid_sz is None or sz[0] == grid_sz[0] and sz[1] == grid_sz[1]:\n        if rescale:\n            return sz.prod().item() * cifft2(a)\n        return cifft2(a)\n\n    if sz[0] > grid_sz[0] or sz[1] > grid_sz[1]:\n        raise ValueError(\"Only grid sizes that are smaller than the Fourier series size are supported.\")\n\n    tot_pad = (grid_sz - sz).tolist()\n    is_even = [s.item() % 2 == 0 for s in sz]\n\n    # Compute paddings\n    pad_top = int((tot_pad[0]+1)/2) if is_even[0] else int(tot_pad[0]/2)\n    pad_bottom = int(tot_pad[0] - pad_top)\n    pad_right = int((tot_pad[1]+1)/2)\n\n    if rescale:\n        return grid_sz.prod().item() * cifft2(F.pad(a, (0, 0, 0, pad_right, pad_top, pad_bottom)), signal_sizes=grid_sz.long().tolist())\n    else:\n        return cifft2(F.pad(a, (0, 0, 0, pad_right, pad_top, pad_bottom)), signal_sizes=grid_sz.long().tolist())\n\n\ndef get_frequency_coord(sz, add_complex_dim = False, device='cpu'):\n    \"\"\"Frequency coordinates.\"\"\"\n\n    ky = torch.arange(-int((sz[0]-1)/2), int(sz[0]/2+1), dtype=torch.float32, device=device).view(1,1,-1,1)\n    kx = torch.arange(0, int(sz[1]/2+1), dtype=torch.float32, device=device).view(1,1,1,-1)\n\n    if add_complex_dim:\n        ky = ky.unsqueeze(-1)\n        kx = kx.unsqueeze(-1)\n\n    return ky, kx\n\n\n@tensor_operation\ndef shift_fs(a: torch.Tensor, shift: torch.Tensor):\n    \"\"\"Shift a sample a in the Fourier domain.\n    Params:\n        a : The fourier coefficiens of the sample.\n        shift : The shift to be performed normalized to the range [-pi, pi].\"\"\"\n\n    if a.dim() != 5:\n        raise ValueError('a must be the Fourier coefficients, a 5-dimensional tensor.')\n\n    if shift[0] == 0 and shift[1] == 0:\n        return a\n\n    ky, kx = get_frequency_coord((a.shape[2], 2*a.shape[3]-1), device=a.device)\n\n    return complex.mult(complex.mult(a, complex.exp_imag(shift[0].item()*ky)), complex.exp_imag(shift[1].item()*kx))\n\n\ndef sum_fs(a: TensorList) -> torch.Tensor:\n    \"\"\"Sum a list of Fourier series expansions.\"\"\"\n\n    s = None\n    mid = None\n\n    for e in sorted(a, key=lambda elem: elem.shape[-3], reverse=True):\n        if s is None:\n            s = e.clone()\n            mid = int((s.shape[-3] - 1) / 2)\n        else:\n            # Compute coordinates\n            top = mid - int((e.shape[-3] - 1) / 2)\n            bottom = mid + int(e.shape[-3] / 2) + 1\n            right = 
e.shape[-2]\n\n            # Add the data\n            s[..., top:bottom, :right, :] += e\n\n    return s\n\n\ndef sum_fs12(a: TensorList) -> torch.Tensor:\n    \"\"\"Sum a list of Fourier series expansions.\"\"\"\n\n    s = None\n    mid = None\n\n    for e in sorted(a, key=lambda elem: elem.shape[0], reverse=True):\n        if s is None:\n            s = e.clone()\n            mid = int((s.shape[0] - 1) / 2)\n        else:\n            # Compute coordinates\n            top = mid - int((e.shape[0] - 1) / 2)\n            bottom = mid + int(e.shape[0] / 2) + 1\n            right = e.shape[1]\n\n            # Add the data\n            s[top:bottom, :right, ...] += e\n\n    return s\n\n\n@tensor_operation\ndef inner_prod_fs(a: torch.Tensor, b: torch.Tensor):\n    if complex.is_complex(a) and complex.is_complex(b):\n        return 2 * (a.reshape(-1) @ b.reshape(-1)) - a[:, :, :, 0, :].reshape(-1) @ b[:, :, :, 0, :].reshape(-1)\n    elif complex.is_real(a) and complex.is_real(b):\n        return 2 * (a.reshape(-1) @ b.reshape(-1)) - a[:, :, :, 0].reshape(-1) @ b[:, :, :, 0].reshape(-1)\n    else:\n        raise NotImplementedError('Not implemented for mixed real and complex.')"
  },
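  {
    "path": "external/AR/pytracking/examples/fourier_shift_example.py",
    "content": "# Illustrative sketch, not part of the original pytracking sources: frequency\n# coordinates and a Fourier-domain shift on dummy coefficients. Note that\n# cfft2/cifft2 use the legacy torch.rfft/torch.irfft API and thus need an\n# older PyTorch, while shift_fs below is plain tensor arithmetic.\nimport torch\n\nfrom pytracking.libs import fourier\n\nky, kx = fourier.get_frequency_coord(torch.Tensor([8, 8]))\nprint(ky.view(-1))    # rows -3..4; kx covers 0..4 (rfft half-spectrum)\n\na = torch.randn(1, 1, 8, 5, 2)    # dummy coefficients: (..., ky, kx, complex)\nshifted = fourier.shift_fs(a, shift=torch.Tensor([0.1, -0.2]))\nprint(shifted.shape)              # torch.Size([1, 1, 8, 5, 2])\n"
  },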
  {
    "path": "external/AR/pytracking/libs/operation.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom pytracking.libs.tensorlist import tensor_operation, TensorList\n\n\n@tensor_operation\ndef conv2d(input: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor = None, stride=1, padding=0, dilation=1, groups=1, mode=None):\n    \"\"\"Standard conv2d. Returns the input if weight=None.\"\"\"\n\n    if weight is None:\n        return input\n\n    ind = None\n    if mode is not None:\n        if padding != 0:\n            raise ValueError('Cannot input both padding and mode.')\n        if mode == 'same':\n            padding = (weight.shape[2]//2, weight.shape[3]//2)\n            if weight.shape[2] % 2 == 0 or weight.shape[3] % 2 == 0:\n                ind = (slice(-1) if weight.shape[2] % 2 == 0 else slice(None),\n                       slice(-1) if weight.shape[3] % 2 == 0 else slice(None))\n        elif mode == 'valid':\n            padding = (0, 0)\n        elif mode == 'full':\n            padding = (weight.shape[2]-1, weight.shape[3]-1)\n        else:\n            raise ValueError('Unknown mode for padding.')\n\n    out = F.conv2d(input, weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)\n    if ind is None:\n        return out\n    return out[:,:,ind[0],ind[1]]\n\n\n@tensor_operation\ndef conv1x1(input: torch.Tensor, weight: torch.Tensor):\n    \"\"\"Do a convolution with a 1x1 kernel weights. Implemented with matmul, which can be faster than using conv.\"\"\"\n\n    if weight is None:\n        return input\n\n    return torch.conv2d(input, weight)\n"
  },
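  {
    "path": "external/AR/pytracking/examples/conv2d_mode_example.py",
    "content": "# Illustrative sketch, not part of the original pytracking sources: the mode\n# argument of conv2d() picks the padding, so 'same' preserves the spatial\n# size while 'valid' uses none.\nimport torch\n\nfrom pytracking.libs.operation import conv2d\n\nx = torch.rand(1, 4, 32, 32)\nw = torch.rand(8, 4, 3, 3)\nprint(conv2d(x, w, mode='same').shape)     # torch.Size([1, 8, 32, 32])\nprint(conv2d(x, w, mode='valid').shape)    # torch.Size([1, 8, 30, 30])\n"
  },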
  {
    "path": "external/AR/pytracking/libs/optimization.py",
    "content": "import torch\nimport torch.autograd\nimport math\nfrom pytracking.libs import TensorList\nfrom pytracking.utils.plotting import plot_graph\nfrom ltr.models.layers.activation import softmax_reg\n\n\nclass L2Problem:\n    \"\"\"Base class for representing an L2 optimization problem.\"\"\"\n\n    def __call__(self, x: TensorList) -> TensorList:\n        \"\"\"Shall compute the residuals of the problem.\"\"\"\n        raise NotImplementedError\n\n    def ip_input(self, a, b):\n        \"\"\"Inner product of the input space.\"\"\"\n        return sum(a.view(-1) @ b.view(-1))\n\n    def ip_output(self, a, b):\n        \"\"\"Inner product of the output space.\"\"\"\n        return sum(a.view(-1) @ b.view(-1))\n\n    def M1(self, x):\n        \"\"\"M1 preconditioner.\"\"\"\n        return x\n\n    def M2(self, x):\n        \"\"\"M2 preconditioner.\"\"\"\n        return x\n\nclass MinimizationProblem:\n    \"\"\"General minimization problem.\"\"\"\n    def __call__(self, x: TensorList) -> TensorList:\n        \"\"\"Shall compute the loss.\"\"\"\n        raise NotImplementedError\n\n    def ip_input(self, a, b):\n        \"\"\"Inner product of the input space.\"\"\"\n        return sum(a.view(-1) @ b.view(-1))\n\n    def M1(self, x):\n        return x\n\n    def M2(self, x):\n        return x\n\n\nclass ConjugateGradientBase:\n    \"\"\"Conjugate Gradient optimizer base class. Implements the CG loop.\"\"\"\n\n    def __init__(self, fletcher_reeves = True, standard_alpha = True, direction_forget_factor = 0, debug = False):\n        self.fletcher_reeves = fletcher_reeves\n        self.standard_alpha = standard_alpha\n        self.direction_forget_factor = direction_forget_factor\n        self.debug = debug\n\n        # State\n        self.p = None\n        self.rho = torch.ones(1)\n        self.r_prev = None\n\n        # Right hand side\n        self.b = None\n\n    def reset_state(self):\n        self.p = None\n        self.rho = torch.ones(1)\n        self.r_prev = None\n\n\n    def run_CG(self, num_iter, x=None, eps=0.0):\n        \"\"\"Main conjugate gradient method.\n\n        args:\n            num_iter: Number of iterations.\n            x: Initial guess. 
Assumed zero if None.\n            eps: Stop if the residual norm gets smaller than this.\n        \"\"\"\n\n        # Apply forgetting factor\n        if self.direction_forget_factor == 0:\n            self.reset_state()\n        elif self.p is not None:\n            self.rho /= self.direction_forget_factor\n\n        if x is None:\n            r = self.b.clone()\n        else:\n            r = self.b - self.A(x)\n\n        # Norms of residuals etc for debugging\n        resvec = None\n        if self.debug:\n            normr = self.residual_norm(r)\n            resvec = torch.zeros(num_iter+1)\n            resvec[0] = normr\n\n        # Loop over iterations\n        for ii in range(num_iter):\n            # Preconditioners\n            y = self.M1(r)\n            z = self.M2(y)\n\n            rho1 = self.rho\n            self.rho = self.ip(r, z)\n\n            if self.check_zero(self.rho):\n                if self.debug:\n                    print('Stopped CG since rho = 0')\n                    if resvec is not None:\n                        resvec = resvec[:ii+1]\n                return x, resvec\n\n            if self.p is None:\n                self.p = z.clone()\n            else:\n                if self.fletcher_reeves:\n                    beta = self.rho / rho1\n                else:\n                    rho2 = self.ip(self.r_prev, z)\n                    beta = (self.rho - rho2) / rho1\n\n                beta = beta.clamp(0)\n                self.p = z + self.p * beta\n\n            q = self.A(self.p)\n            pq = self.ip(self.p, q)\n\n            if self.standard_alpha:\n                alpha = self.rho / pq\n            else:\n                alpha = self.ip(self.p, r) / pq\n\n            # Save old r for PR formula\n            if not self.fletcher_reeves:\n                self.r_prev = r.clone()\n\n            # Form new iterate\n            if x is None:\n                x = self.p * alpha\n            else:\n                x += self.p * alpha\n\n            if ii < num_iter - 1 or self.debug:\n                r -= q * alpha\n\n            if eps > 0.0 or self.debug:\n                normr = self.residual_norm(r)\n\n            if self.debug:\n                self.evaluate_CG_iteration(x)\n                resvec[ii+1] = normr\n\n            if eps > 0 and normr <= eps:\n                if self.debug:\n                    print('Stopped CG since norm smaller than eps')\n                break\n\n        if resvec is not None:\n            resvec = resvec[:ii+2]\n\n        return x, resvec\n\n\n    def A(self, x):\n        # Implements the left hand operation\n        raise NotImplementedError\n\n    def ip(self, a, b):\n        # Implements the inner product\n        return a.view(-1) @ b.view(-1)\n\n    def residual_norm(self, r):\n        res = self.ip(r, r).sum()\n        if isinstance(res, (TensorList, list, tuple)):\n            res = sum(res)\n        return res.sqrt()\n\n    def check_zero(self, s, eps = 0.0):\n        ss = s.abs() <= eps\n        if isinstance(ss, (TensorList, list, tuple)):\n            ss = sum(ss)\n        return ss.item() > 0\n\n    def M1(self, x):\n        # M1 preconditioner\n        return x\n\n    def M2(self, x):\n        # M2 preconditioner\n        return x\n\n    def evaluate_CG_iteration(self, x):\n        pass\n\n\n\nclass ConjugateGradient(ConjugateGradientBase):\n    \"\"\"Conjugate Gradient optimizer, performing single linearization of the residuals in the start.\"\"\"\n\n    def __init__(self, problem: L2Problem, variable: 
TensorList, cg_eps = 0.0, fletcher_reeves = True,\n                 standard_alpha = True, direction_forget_factor = 0, debug = False, plotting = False, visdom=None):\n        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, debug or plotting)\n\n        self.problem = problem\n        self.x = variable\n\n        self.plotting = plotting\n        self.fig_num = (10,11)\n        self.visdom = visdom\n\n        self.cg_eps = cg_eps\n        self.f0 = None\n        self.g = None\n        self.dfdxt_g = None\n\n        self.residuals = torch.zeros(0)\n        self.losses = torch.zeros(0)\n\n    def clear_temp(self):\n        self.f0 = None\n        self.g = None\n        self.dfdxt_g = None\n\n\n    def run(self, num_cg_iter):\n        \"\"\"Run the optimizer with the provided number of iterations.\"\"\"\n\n        if num_cg_iter == 0:\n            return\n\n        lossvec = None\n        if self.debug:\n            lossvec = torch.zeros(2)\n\n        self.x.requires_grad_(True)\n\n        # Evaluate function at current estimate\n        self.f0 = self.problem(self.x)\n\n        # Create copy with graph detached\n        self.g = self.f0.detach()\n\n        if self.debug:\n            lossvec[0] = self.problem.ip_output(self.g, self.g)\n\n        self.g.requires_grad_(True)\n\n        # Get df/dx^t @ f0\n        self.dfdxt_g = TensorList(torch.autograd.grad(self.f0, self.x, self.g, create_graph=True))\n\n        # Get the right hand side\n        self.b = - self.dfdxt_g.detach()\n\n        # Run CG\n        delta_x, res = self.run_CG(num_cg_iter, eps=self.cg_eps)\n\n        self.x.detach_()\n        self.x += delta_x\n\n        if self.debug:\n            self.f0 = self.problem(self.x)\n            lossvec[-1] = self.problem.ip_output(self.f0, self.f0)\n            self.residuals = torch.cat((self.residuals, res))\n            self.losses = torch.cat((self.losses, lossvec))\n            if self.visdom is not None:\n                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')\n                self.visdom.register(self.residuals, 'lineplot', 3, 'CG residuals')\n            elif self.plotting:\n                plot_graph(self.losses, self.fig_num[0], title='Loss')\n                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')\n\n        self.x.detach_()\n        self.clear_temp()\n\n\n    def A(self, x):\n        dfdx_x = torch.autograd.grad(self.dfdxt_g, self.g, x, retain_graph=True)\n        return TensorList(torch.autograd.grad(self.f0, self.x, dfdx_x, retain_graph=True))\n\n    def ip(self, a, b):\n        return self.problem.ip_input(a, b)\n\n    def M1(self, x):\n        return self.problem.M1(x)\n\n    def M2(self, x):\n        return self.problem.M2(x)\n\n\n\nclass GaussNewtonCG(ConjugateGradientBase):\n    \"\"\"Gauss-Newton with Conjugate Gradient optimizer.\"\"\"\n\n    def __init__(self, problem: L2Problem, variable: TensorList, cg_eps = 0.0, fletcher_reeves = True,\n                 standard_alpha = True, direction_forget_factor = 0, debug = False, analyze = False, plotting = False,\n                 visdom=None):\n        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, debug or analyze or plotting)\n\n        self.problem = problem\n        self.x = variable\n\n        self.analyze_convergence = analyze\n        self.plotting = plotting\n        self.fig_num = (10,11,12)\n        self.visdom = visdom\n\n        self.cg_eps = cg_eps\n        self.f0 = None\n        self.g = None\n        
self.dfdxt_g = None\n\n        self.residuals = torch.zeros(0)\n        self.losses = torch.zeros(0)\n        self.gradient_mags = torch.zeros(0)\n\n    def clear_temp(self):\n        self.f0 = None\n        self.g = None\n        self.dfdxt_g = None\n\n\n    def run_GN(self, *args, **kwargs):\n        return self.run(*args, **kwargs)\n\n\n    def run(self, num_cg_iter, num_gn_iter=None):\n        \"\"\"Run the optimizer.\n        args:\n            num_cg_iter: Number of CG iterations per GN iter. If list, then each entry specifies number of CG iterations\n                         and number of GN iterations is given by the length of the list.\n            num_gn_iter: Number of GN iterations. Shall only be given if num_cg_iter is an integer.\n        \"\"\"\n\n        if isinstance(num_cg_iter, int):\n            if num_gn_iter is None:\n                raise ValueError('Must specify number of GN iter if CG iter is constant')\n            num_cg_iter = [num_cg_iter]*num_gn_iter\n\n        num_gn_iter = len(num_cg_iter)\n        if num_gn_iter == 0:\n            return\n\n        if self.analyze_convergence:\n            self.evaluate_CG_iteration(0)\n\n        # Outer loop for running the GN iterations.\n        for cg_iter in num_cg_iter:\n            self.run_GN_iter(cg_iter)\n\n        if self.debug:\n            if not self.analyze_convergence:\n                self.f0 = self.problem(self.x)\n                loss = self.problem.ip_output(self.f0, self.f0)\n                self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))\n\n            if self.visdom is not None:\n                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')\n                self.visdom.register(self.residuals, 'lineplot', 3, 'CG residuals')\n\n                if self.analyze_convergence:\n                    self.visdom.register(self.gradient_mags, 'lineplot', 4, 'Gradient magnitude')\n            elif self.plotting:\n                plot_graph(self.losses, self.fig_num[0], title='Loss')\n                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')\n                if self.analyze_convergence:\n                    plot_graph(self.gradient_mags, self.fig_num[2], 'Gradient magnitude')\n\n\n        self.x.detach_()\n        self.clear_temp()\n\n        return self.losses, self.residuals\n\n\n    def run_GN_iter(self, num_cg_iter):\n        \"\"\"Runs a single GN iteration.\"\"\"\n\n        self.x.requires_grad_(True)\n\n        # Evaluate function at current estimate\n        self.f0 = self.problem(self.x)\n\n        # Create copy with graph detached\n        self.g = self.f0.detach()\n\n        if self.debug and not self.analyze_convergence:\n            loss = self.problem.ip_output(self.g, self.g)\n            self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))\n\n        self.g.requires_grad_(True)\n\n        # Get df/dx^t @ f0\n        self.dfdxt_g = TensorList(torch.autograd.grad(self.f0, self.x, self.g, create_graph=True))\n\n        # Get the right hand side\n        self.b = - self.dfdxt_g.detach()\n\n        # Run CG\n        delta_x, res = self.run_CG(num_cg_iter, eps=self.cg_eps)\n\n        self.x.detach_()\n        self.x += delta_x\n\n        if self.debug:\n            self.residuals = torch.cat((self.residuals, res))\n\n\n    def A(self, x):\n        dfdx_x = torch.autograd.grad(self.dfdxt_g, self.g, x, retain_graph=True)\n        return TensorList(torch.autograd.grad(self.f0, self.x, dfdx_x, retain_graph=True))\n\n    def ip(self, a, 
b):\n        return self.problem.ip_input(a, b)\n\n    def M1(self, x):\n        return self.problem.M1(x)\n\n    def M2(self, x):\n        return self.problem.M2(x)\n\n    def evaluate_CG_iteration(self, delta_x):\n        if self.analyze_convergence:\n            x = (self.x + delta_x).detach()\n            x.requires_grad_(True)\n\n            # compute loss and gradient\n            f = self.problem(x)\n            loss = self.problem.ip_output(f, f)\n            grad = TensorList(torch.autograd.grad(loss, x))\n\n            # store in the vectors\n            self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))\n            self.gradient_mags = torch.cat((self.gradient_mags, sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().detach().view(-1)))\n\n\nclass GradientDescentL2:\n    \"\"\"Gradient descent with momentum for L2 problems.\"\"\"\n\n    def __init__(self, problem: L2Problem, variable: TensorList, step_length: float, momentum: float = 0.0, debug = False, plotting = False, visdom=None):\n\n        self.problem = problem\n        self.x = variable\n\n        self.step_length = step_length\n        self.momentum = momentum\n\n        self.debug = debug or plotting\n        self.plotting = plotting\n        self.fig_num = (10,11)\n        self.visdom = visdom\n        self.losses = torch.zeros(0)\n        self.gradient_mags = torch.zeros(0)\n        self.residuals = None\n\n        self.clear_temp()\n\n\n    def clear_temp(self):\n        self.f0 = None\n        self.dir = None\n\n\n    def run(self, num_iter, dummy = None):\n\n        if num_iter == 0:\n            return\n\n        lossvec = None\n        if self.debug:\n            lossvec = torch.zeros(num_iter+1)\n            grad_mags = torch.zeros(num_iter+1)\n\n        for i in range(num_iter):\n            self.x.requires_grad_(True)\n\n            # Evaluate function at current estimate\n            self.f0 = self.problem(self.x)\n\n            # Compute loss\n            loss = self.problem.ip_output(self.f0, self.f0)\n\n            # Compute grad\n            grad = TensorList(torch.autograd.grad(loss, self.x))\n\n            # Update direction\n            if self.dir is None:\n                self.dir = grad\n            else:\n                self.dir = grad + self.momentum * self.dir\n\n            self.x.detach_()\n            self.x -= self.step_length * self.dir\n\n            if self.debug:\n                lossvec[i] = loss.item()\n                grad_mags[i] = sum(grad.view(-1) @ grad.view(-1)).sqrt().item()\n\n        if self.debug:\n            self.x.requires_grad_(True)\n            self.f0 = self.problem(self.x)\n            loss = self.problem.ip_output(self.f0, self.f0)\n            grad = TensorList(torch.autograd.grad(loss, self.x))\n            lossvec[-1] = self.problem.ip_output(self.f0, self.f0).item()\n            grad_mags[-1] = sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().item()\n            self.losses = torch.cat((self.losses, lossvec))\n            self.gradient_mags = torch.cat((self.gradient_mags, grad_mags))\n\n            if self.visdom is not None:\n                self.visdom.register(self.losses, 'lineplot', 3, 'Loss')\n                self.visdom.register(self.gradient_mags, 'lineplot', 4, 'Gradient magnitude')\n            elif self.plotting:\n                plot_graph(self.losses, self.fig_num[0], title='Loss')\n                plot_graph(self.gradient_mags, self.fig_num[1], title='Gradient magnitude')\n\n        self.x.detach_()\n        
self.clear_temp()\n\n\n\nclass NewtonCG(ConjugateGradientBase):\n    \"\"\"Newton with Conjugate Gradient. Handles general minimization problems.\"\"\"\n\n    def __init__(self, problem: MinimizationProblem, variable: TensorList, init_hessian_reg = 0.0, hessian_reg_factor = 1.0,\n                 cg_eps = 0.0, fletcher_reeves = True, standard_alpha = True, direction_forget_factor = 0,\n                 debug = False, analyze = False, plotting = False, fig_num=(10, 11, 12)):\n        super().__init__(fletcher_reeves, standard_alpha, direction_forget_factor, debug or analyze or plotting)\n\n        self.problem = problem\n        self.x = variable\n\n        self.analyze_convergence = analyze\n        self.plotting = plotting\n        self.fig_num = fig_num\n\n        self.hessian_reg = init_hessian_reg\n        self.hessian_reg_factor = hessian_reg_factor\n        self.cg_eps = cg_eps\n        self.f0 = None\n        self.g = None\n\n        self.residuals = torch.zeros(0)\n        self.losses = torch.zeros(0)\n        self.gradient_mags = torch.zeros(0)\n\n    def clear_temp(self):\n        self.f0 = None\n        self.g = None\n\n\n    def run(self, num_cg_iter, num_newton_iter=None):\n\n        if isinstance(num_cg_iter, int):\n            if num_cg_iter == 0:\n                return\n            if num_newton_iter is None:\n                num_newton_iter = 1\n            num_cg_iter = [num_cg_iter] * num_newton_iter\n\n        num_newton_iter = len(num_cg_iter)\n        if num_newton_iter == 0:\n            return\n\n        if self.analyze_convergence:\n            self.evaluate_CG_iteration(0)\n\n        for cg_iter in num_cg_iter:\n            self.run_newton_iter(cg_iter)\n            self.hessian_reg *= self.hessian_reg_factor\n\n        if self.debug:\n            if not self.analyze_convergence:\n                loss = self.problem(self.x)\n                self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))\n\n            if self.plotting:\n                plot_graph(self.losses, self.fig_num[0], title='Loss')\n                plot_graph(self.residuals, self.fig_num[1], title='CG residuals')\n                if self.analyze_convergence:\n                    plot_graph(self.gradient_mags, self.fig_num[2], 'Gradient magnitude')\n\n        self.x.detach_()\n        self.clear_temp()\n\n        return self.losses, self.residuals\n\n\n    def run_newton_iter(self, num_cg_iter):\n\n        self.x.requires_grad_(True)\n\n        # Evaluate function at current estimate\n        self.f0 = self.problem(self.x)\n\n        if self.debug and not self.analyze_convergence:\n            self.losses = torch.cat((self.losses, self.f0.detach().cpu().view(-1)))\n\n        # Gradient of loss\n        self.g = TensorList(torch.autograd.grad(self.f0, self.x, create_graph=True))\n\n        # Get the right hand side\n        self.b = - self.g.detach()\n\n        # Run CG\n        delta_x, res = self.run_CG(num_cg_iter, eps=self.cg_eps)\n\n        self.x.detach_()\n        self.x += delta_x\n\n        if self.debug:\n            self.residuals = torch.cat((self.residuals, res))\n\n\n    def A(self, x):\n        return TensorList(torch.autograd.grad(self.g, self.x, x, retain_graph=True)) + self.hessian_reg * x\n\n    def ip(self, a, b):\n        # Implements the inner product\n        return self.problem.ip_input(a, b)\n\n    def M1(self, x):\n        return self.problem.M1(x)\n\n    def M2(self, x):\n        return self.problem.M2(x)\n\n    def evaluate_CG_iteration(self, delta_x):\n        if self.analyze_convergence:\n            x = (self.x + delta_x).detach()\n            x.requires_grad_(True)\n\n            # compute loss and gradient\n            loss = self.problem(x)\n            grad = TensorList(torch.autograd.grad(loss, x))\n\n            # store in the vectors\n            self.losses = torch.cat((self.losses, loss.detach().cpu().view(-1)))\n            self.gradient_mags = torch.cat((self.gradient_mags, sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().detach().view(-1)))\n\n\nclass GradientDescent:\n    \"\"\"Gradient descent for general minimization problems.\"\"\"\n\n    def __init__(self, problem: MinimizationProblem, variable: TensorList, step_length: float, momentum: float = 0.0,\n                 debug = False, plotting = False, fig_num=(10,11)):\n\n        self.problem = problem\n        self.x = variable\n\n        self.step_length = step_length\n        self.momentum = momentum\n\n        self.debug = debug or plotting\n        self.plotting = plotting\n        self.fig_num = fig_num\n\n        self.losses = torch.zeros(0)\n        self.gradient_mags = torch.zeros(0)\n        self.residuals = None\n\n        self.clear_temp()\n\n\n    def clear_temp(self):\n        self.dir = None\n\n\n    def run(self, num_iter, dummy = None):\n\n        if num_iter == 0:\n            return\n\n        lossvec = None\n        if self.debug:\n            lossvec = torch.zeros(num_iter+1)\n            grad_mags = torch.zeros(num_iter+1)\n\n        for i in range(num_iter):\n            self.x.requires_grad_(True)\n\n            # Evaluate function at current estimate\n            loss = self.problem(self.x)\n\n            # Compute grad\n            grad = TensorList(torch.autograd.grad(loss, self.x))\n\n            # Update direction\n            if self.dir is None:\n                self.dir = grad\n            else:\n                self.dir = grad + self.momentum * self.dir\n\n            self.x.detach_()\n            self.x -= self.step_length * self.dir\n\n            if self.debug:\n                lossvec[i] = loss.item()\n                grad_mags[i] = sum(grad.view(-1) @ grad.view(-1)).sqrt().item()\n\n        if self.debug:\n            self.x.requires_grad_(True)\n            loss = self.problem(self.x)\n            grad = TensorList(torch.autograd.grad(loss, self.x))\n            lossvec[-1] = loss.item()\n            grad_mags[-1] = sum(grad.view(-1) @ grad.view(-1)).cpu().sqrt().item()\n            self.losses = torch.cat((self.losses, lossvec))\n            self.gradient_mags = torch.cat((self.gradient_mags, grad_mags))\n            if self.plotting:\n                plot_graph(self.losses, self.fig_num[0], title='Loss')\n                plot_graph(self.gradient_mags, self.fig_num[1], title='Gradient magnitude')\n\n        self.x.detach_()\n        self.clear_temp()"
  },
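  {
    "path": "external/AR/pytracking/examples/optimization_example.py",
    "content": "# Illustrative sketch, not part of the original pytracking sources: driving\n# GradientDescentL2 with a toy least-squares problem min ||x - c||^2.\n# ToyProblem is invented for this demo; the optimizer updates x in place.\n# Assumes the repo (external/AR) and its dependencies are importable, since\n# this module pulls in pytracking.utils.plotting and ltr at import time.\nimport torch\n\nfrom pytracking.libs import TensorList\nfrom pytracking.libs.optimization import L2Problem, GradientDescentL2\n\n\nclass ToyProblem(L2Problem):\n    def __init__(self, target):\n        self.target = target\n\n    def __call__(self, x: TensorList) -> TensorList:\n        return x - self.target    # residuals f(x); the loss is ip_output(f, f)\n\n\nx = TensorList([torch.zeros(2)])\nproblem = ToyProblem(TensorList([torch.tensor([1.0, -2.0])]))\nGradientDescentL2(problem, x, step_length=0.2).run(50)\nprint(x[0])    # close to tensor([ 1., -2.])\n"
  },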
  {
    "path": "external/AR/pytracking/libs/tensordict.py",
    "content": "from collections import OrderedDict\nimport torch\nimport copy\n\n\nclass TensorDict(OrderedDict):\n    \"\"\"Container mainly used for dicts of torch tensors. Extends OrderedDict with pytorch functionality.\"\"\"\n\n    def concat(self, other):\n        \"\"\"Concatenates two dicts without copying internal data.\"\"\"\n        return TensorDict(self, **other)\n\n    def copy(self):\n        return TensorDict(super(TensorDict, self).copy())\n\n    def __deepcopy__(self, memodict={}):\n        return TensorDict(copy.deepcopy(list(self), memodict))\n\n    def __getattr__(self, name):\n        if not hasattr(torch.Tensor, name):\n            raise AttributeError('\\'TensorDict\\' object has not attribute \\'{}\\''.format(name))\n\n        def apply_attr(*args, **kwargs):\n            return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()})\n        return apply_attr\n\n    def attribute(self, attr: str, *args):\n        return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()})\n\n    def apply(self, fn, *args, **kwargs):\n        return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()})\n\n    @staticmethod\n    def _iterable(a):\n        return isinstance(a, (TensorDict, list))\n\n"
  },
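  {
    "path": "external/AR/pytracking/examples/tensordict_example.py",
    "content": "# Illustrative sketch, not part of the original pytracking sources: TensorDict\n# forwards torch.Tensor methods to every value and offers apply/attribute\n# helpers for per-entry operations.\nimport torch\n\nfrom pytracking.libs import TensorDict\n\ntd = TensorDict({'score': torch.rand(1, 5), 'bbox': torch.rand(1, 4)})\nprint(td.attribute('shape')['bbox'])    # torch.Size([1, 4])\ndoubled = td.apply(lambda t: t * 2)     # apply a function to every value\nhalved = td.mul(0.5)                    # any torch.Tensor method broadcasts\nprint(doubled['score'].shape, halved['bbox'].shape)\n"
  },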
  {
    "path": "external/AR/pytracking/libs/tensorlist.py",
    "content": "import functools\nimport torch\nimport copy\n\n\nclass TensorList(list):\n    \"\"\"Container mainly used for lists of torch tensors. Extends lists with pytorch functionality.\"\"\"\n\n    def __init__(self, list_of_tensors = None):\n        if list_of_tensors is None:\n            list_of_tensors = list()\n        super(TensorList, self).__init__(list_of_tensors)\n\n    def __deepcopy__(self, memodict={}):\n        return TensorList(copy.deepcopy(list(self), memodict))\n\n    def __getitem__(self, item):\n        if isinstance(item, int):\n            return super(TensorList, self).__getitem__(item)\n        elif isinstance(item, (tuple, list)):\n            return TensorList([super(TensorList, self).__getitem__(i) for i in item])\n        else:\n            return TensorList(super(TensorList, self).__getitem__(item))\n\n    def __add__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 + e2 for e1, e2 in zip(self, other)])\n        return TensorList([e + other for e in self])\n\n    def __radd__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 + e1 for e1, e2 in zip(self, other)])\n        return TensorList([other + e for e in self])\n\n    def __iadd__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] += e2\n        else:\n            for i in range(len(self)):\n                self[i] += other\n        return self\n\n    def __sub__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 - e2 for e1, e2 in zip(self, other)])\n        return TensorList([e - other for e in self])\n\n    def __rsub__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 - e1 for e1, e2 in zip(self, other)])\n        return TensorList([other - e for e in self])\n\n    def __isub__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] -= e2\n        else:\n            for i in range(len(self)):\n                self[i] -= other\n        return self\n\n    def __mul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 * e2 for e1, e2 in zip(self, other)])\n        return TensorList([e * other for e in self])\n\n    def __rmul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 * e1 for e1, e2 in zip(self, other)])\n        return TensorList([other * e for e in self])\n\n    def __imul__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] *= e2\n        else:\n            for i in range(len(self)):\n                self[i] *= other\n        return self\n\n    def __truediv__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 / e2 for e1, e2 in zip(self, other)])\n        return TensorList([e / other for e in self])\n\n    def __rtruediv__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 / e1 for e1, e2 in zip(self, other)])\n        return TensorList([other / e for e in self])\n\n    def __itruediv__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] /= e2\n        else:\n            for i in range(len(self)):\n                self[i] /= other\n        return self\n\n    def __matmul__(self, other):\n       
 if TensorList._iterable(other):\n            return TensorList([e1 @ e2 for e1, e2 in zip(self, other)])\n        return TensorList([e @ other for e in self])\n\n    def __rmatmul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 @ e1 for e1, e2 in zip(self, other)])\n        return TensorList([other @ e for e in self])\n\n    def __imatmul__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] @= e2\n        else:\n            for i in range(len(self)):\n                self[i] @= other\n        return self\n\n    def __mod__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 % e2 for e1, e2 in zip(self, other)])\n        return TensorList([e % other for e in self])\n\n    def __rmod__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 % e1 for e1, e2 in zip(self, other)])\n        return TensorList([other % e for e in self])\n\n    def __pos__(self):\n        return TensorList([+e for e in self])\n\n    def __neg__(self):\n        return TensorList([-e for e in self])\n\n    def __le__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 <= e2 for e1, e2 in zip(self, other)])\n        return TensorList([e <= other for e in self])\n\n    def __ge__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 >= e2 for e1, e2 in zip(self, other)])\n        return TensorList([e >= other for e in self])\n\n    def concat(self, other):\n        return TensorList(super(TensorList, self).__add__(other))\n\n    def copy(self):\n        return TensorList(super(TensorList, self).copy())\n\n    def unroll(self):\n        if not any(isinstance(t, TensorList) for t in self):\n            return self\n\n        new_list = TensorList()\n        for t in self:\n            if isinstance(t, TensorList):\n                new_list.extend(t.unroll())\n            else:\n                new_list.append(t)\n        return new_list\n\n    def list(self):\n        return list(self)\n\n    def attribute(self, attr: str, *args):\n        return TensorList([getattr(e, attr, *args) for e in self])\n\n    def apply(self, fn):\n        return TensorList([fn(e) for e in self])\n\n    def __getattr__(self, name):\n        if not hasattr(torch.Tensor, name):\n            raise AttributeError('\\'TensorList\\' object has no attribute \\'{}\\''.format(name))\n\n        def apply_attr(*args, **kwargs):\n            return TensorList([getattr(e, name)(*args, **kwargs) for e in self])\n\n        return apply_attr\n\n    @staticmethod\n    def _iterable(a):\n        return isinstance(a, (TensorList, list))\n\n\ndef tensor_operation(op):\n    def islist(a):\n        return isinstance(a, TensorList)\n\n    @functools.wraps(op)\n    def oplist(*args, **kwargs):\n        if len(args) == 0:\n            raise ValueError('There must be at least one positional argument (i.e. the 
operand).')\n\n        if len(args) == 1:\n            if islist(args[0]):\n                return TensorList([op(a, **kwargs) for a in args[0]])\n        else:\n            # Multiple operands; assume at most two\n            if islist(args[0]) and islist(args[1]):\n                return TensorList([op(a, b, *args[2:], **kwargs) for a, b in zip(*args[:2])])\n            if islist(args[0]):\n                return TensorList([op(a, *args[1:], **kwargs) for a in args[0]])\n            if islist(args[1]):\n                return TensorList([op(args[0], b, *args[2:], **kwargs) for b in args[1]])\n\n        # None of the operands are lists\n        return op(*args, **kwargs)\n\n    return oplist\n"
  },
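  {
    "path": "external/AR/pytracking/libs/tensorlist_example.py",
    "content": "# Hypothetical usage sketch, NOT part of the original repository. It\n# illustrates how TensorList broadcasts arithmetic over a list of torch\n# tensors, how __getattr__ forwards tensor methods, and how the\n# tensor_operation decorator lifts a plain function to TensorLists. The\n# import assumes the module above lives at pytracking.libs.tensorlist,\n# as in upstream pytracking.\nimport torch\n\nfrom pytracking.libs.tensorlist import TensorList, tensor_operation\n\n\n@tensor_operation\ndef scaled_add(a: torch.Tensor, b: torch.Tensor, alpha: float = 1.0):\n    # Applied pairwise when both arguments are TensorLists.\n    return a + alpha * b\n\n\nif __name__ == '__main__':\n    x = TensorList([torch.ones(2), torch.ones(3)])\n    y = TensorList([torch.full((2,), 2.0), torch.full((3,), 3.0)])\n\n    print(x + y)        # elementwise over the list: tensors of 3s and 4s\n    print(x * 2)        # scalar broadcast to every tensor in the list\n    print(x.sum())      # method forwarding: sums each tensor separately\n    print(scaled_add(x, y, alpha=0.5))  # decorated op, applied pairwise\n"
  },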
  {
    "path": "external/AR/pytracking/parameter/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/pytracking/parameter/atom/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/pytracking/parameter/atom/atom_gmm_sampl.py",
    "content": "from pytracking.utils import TrackerParams, FeatureParams, Choice\nfrom pytracking.features.extractor import MultiResolutionExtractor\nfrom pytracking.features import deep\nimport torch\n\ndef parameters():\n    params = TrackerParams()\n\n    # These are usually set from outside\n    params.debug = 0                        # Debug level\n    params.visualization = False            # Do visualization\n\n    # Use GPU or not (IoUNet requires this to be True)\n    params.use_gpu = True\n\n    # Feature specific parameters\n    deep_params = TrackerParams()\n\n    # Patch sampling parameters\n    params.max_image_sample_size = (18*16)**2   # Maximum image sample size\n    params.min_image_sample_size = (18*16)**2   # Minimum image sample size\n    params.search_area_scale = 5                # Scale relative to target size\n    params.feature_size_odd = False             # Good to use False for even-sized kernels and vice versa\n\n    # Optimization parameters\n    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame\n    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame\n    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)\n    params.post_init_CG_iter = 0        # CG iterations to run after GN\n    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient\n    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient\n    params.CG_forgetting_rate = None\t# Forgetting rate of the last conjugate direction\n\n    # Learning parameters for each feature type\n    deep_params.learning_rate = 0.01                # Learning rate\n    deep_params.init_samples_minimum_weight = 0.25  # Minimum weight of initial samples in memory\n    deep_params.output_sigma_factor = 1/4           # Standard deviation of Gaussian label relative to target size\n\n    # Training parameters\n    params.sample_memory_size = 250     # Memory size\n    params.train_skipping = 10          # How often to run training (every n-th frame)\n\n    # Online model parameters\n    deep_params.kernel_size = (4,4)     # Kernel size of filter\n    deep_params.compressed_dim = 64     # Dimension output of projection matrix\n    deep_params.filter_reg = 1e-1       # Filter regularization factor\n    deep_params.projection_reg = 1e-4   # Projection regularization factor\n\n    # Windowing\n    params.feature_window = False       # Perform windowing of features\n    params.window_output = False        # Perform windowing of output scores\n\n    # Detection parameters\n    params.scale_factors = torch.ones(1) # What scales to use for localization (only one scale if IoUNet is used)\n    params.score_upsample_factor = 1     # How much Fourier upsampling to use\n\n    # Init data augmentation parameters\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation\n    
params.random_shift_factor = 1/3            # How much random shift to do on each augmented sample\n    deep_params.use_augmentation = True         # Whether to use augmentation for this feature\n\n    # Factorized convolution parameters\n    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation\n    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not\n    params.proj_init_method = 'randn'           # Method for initializing the projection matrix\n    params.filter_init_method = 'randn'         # Method for initializing the spatial filter\n    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')\n    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')\n\n    # Advanced localization parameters\n    params.advanced_localization = True         # Use this or not\n    params.target_not_found_threshold = 0.25    # Absolute score threshold to detect target missing\n    params.distractor_threshold = 0.8           # Relative threshold to find distractors\n    params.hard_negative_threshold = 0.5        # Relative threshold to find hard negative samples\n    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove\n    params.dispalcement_scale = 0.8             # Displacement to consider for distractors\n    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected\n    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected\n    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close\n\n    # IoUNet parameters\n    params.use_iou_net = True               # Use IoU net or not\n    params.box_refinement_space = 'relative'\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 10          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = (1e-2, 5e-2)  # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    # Setup the feature extractor (which includes the IoUNet)\n    deep_fparams = FeatureParams(feature_params=[deep_params])\n    deep_feat = deep.ATOMResNet18(net_path='atom_gmm_sampl', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)\n    params.features = MultiResolutionExtractor([deep_feat])\n\n    return params\n"
  },
  {
    "path": "external/AR/pytracking/parameter/atom/atom_prob_ml.py",
    "content": "from pytracking.utils import TrackerParams, FeatureParams, Choice\nfrom pytracking.features.extractor import MultiResolutionExtractor\nfrom pytracking.features import deep\nimport torch\n\ndef parameters():\n    params = TrackerParams()\n\n    # These are usually set from outside\n    params.debug = 0                        # Debug level\n    params.visualization = False            # Do visualization\n\n    # Use GPU or not (IoUNet requires this to be True)\n    params.use_gpu = True\n\n    # Feature specific parameters\n    deep_params = TrackerParams()\n\n    # Patch sampling parameters\n    params.max_image_sample_size = (18*16)**2   # Maximum image sample size\n    params.min_image_sample_size = (18*16)**2   # Minimum image sample size\n    params.search_area_scale = 5                # Scale relative to target size\n    params.feature_size_odd = False             # Good to use False for even-sized kernels and vice versa\n\n    # Optimization parameters\n    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame\n    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame\n    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)\n    params.post_init_CG_iter = 0        # CG iterations to run after GN\n    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient\n    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient\n    params.CG_forgetting_rate = None\t# Forgetting rate of the last conjugate direction\n\n    # Learning parameters for each feature type\n    deep_params.learning_rate = 0.01                # Learning rate\n    deep_params.init_samples_minimum_weight = 0.25  # Minimum weight of initial samples in memory\n    deep_params.output_sigma_factor = 1/4           # Standard deviation of Gaussian label relative to target size\n\n    # Training parameters\n    params.sample_memory_size = 250     # Memory size\n    params.train_skipping = 10          # How often to run training (every n-th frame)\n\n    # Online model parameters\n    deep_params.kernel_size = (4,4)     # Kernel size of filter\n    deep_params.compressed_dim = 64     # Dimension output of projection matrix\n    deep_params.filter_reg = 1e-1       # Filter regularization factor\n    deep_params.projection_reg = 1e-4   # Projection regularization factor\n\n    # Windowing\n    params.feature_window = False       # Perform windowing of features\n    params.window_output = False        # Perform windowing of output scores\n\n    # Detection parameters\n    params.scale_factors = torch.ones(1) # What scales to use for localization (only one scale if IoUNet is used)\n    params.score_upsample_factor = 1     # How much Fourier upsampling to use\n\n    # Init data augmentation parameters\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation\n    
params.random_shift_factor = 1/3            # How much random shift to do on each augmented sample\n    deep_params.use_augmentation = True         # Whether to use augmentation for this feature\n\n    # Factorized convolution parameters\n    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation\n    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not\n    params.proj_init_method = 'randn'           # Method for initializing the projection matrix\n    params.filter_init_method = 'randn'         # Method for initializing the spatial filter\n    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')\n    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')\n\n    # Advanced localization parameters\n    params.advanced_localization = True         # Use this or not\n    params.target_not_found_threshold = 0.25    # Absolute score threshold to detect target missing\n    params.distractor_threshold = 0.8           # Relative threshold to find distractors\n    params.hard_negative_threshold = 0.5        # Relative threshold to find hard negative samples\n    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove\n    params.dispalcement_scale = 0.8             # Displacement to consider for distractors\n    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected\n    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected\n    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close\n\n    # IoUNet parameters\n    params.use_iou_net = True               # Use IoU net or not\n    params.box_refinement_space = 'relative'\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 10          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = (2e-4, 10e-4)  # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    # Setup the feature extractor (which includes the IoUNet)\n    deep_fparams = FeatureParams(feature_params=[deep_params])\n    deep_feat = deep.ATOMResNet18(net_path='atom_prob_ml', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)\n    params.features = MultiResolutionExtractor([deep_feat])\n\n    return params\n"
  },
  {
    "path": "external/AR/pytracking/parameter/atom/default.py",
    "content": "from pytracking.utils import TrackerParams, FeatureParams, Choice\nfrom pytracking.features.extractor import MultiResolutionExtractor\nfrom pytracking.features import deep\nimport torch\n\ndef parameters():\n    params = TrackerParams()\n\n    # These are usually set from outside\n    params.debug = 0                        # Debug level\n    params.visualization = False            # Do visualization\n\n    # Use GPU or not (IoUNet requires this to be True)\n    params.use_gpu = True\n\n    # Feature specific parameters\n    deep_params = TrackerParams()\n\n    # Patch sampling parameters\n    params.max_image_sample_size = (18*16)**2   # Maximum image sample size\n    params.min_image_sample_size = (18*16)**2   # Minimum image sample size\n    params.search_area_scale = 5                # Scale relative to target size\n    params.feature_size_odd = False             # Good to use False for even-sized kernels and vice versa\n\n    # Optimization parameters\n    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame\n    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame\n    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)\n    params.post_init_CG_iter = 0        # CG iterations to run after GN\n    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient\n    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient\n    params.CG_forgetting_rate = None\t# Forgetting rate of the last conjugate direction\n\n    # Learning parameters for each feature type\n    deep_params.learning_rate = 0.01                # Learning rate\n    deep_params.init_samples_minimum_weight = 0.25  # Minimum weight of initial samples in memory\n    deep_params.output_sigma_factor = 1/4           # Standard deviation of Gaussian label relative to target size\n\n    # Training parameters\n    params.sample_memory_size = 250     # Memory size\n    params.train_skipping = 10          # How often to run training (every n-th frame)\n\n    # Online model parameters\n    deep_params.kernel_size = (4,4)     # Kernel size of filter\n    deep_params.compressed_dim = 64     # Dimension output of projection matrix\n    deep_params.filter_reg = 1e-1       # Filter regularization factor\n    deep_params.projection_reg = 1e-4   # Projection regularization factor\n\n    # Windowing\n    params.feature_window = False       # Perform windowing of features\n    params.window_output = False        # Perform windowing of output scores\n\n    # Detection parameters\n    params.scale_factors = torch.ones(1) # What scales to use for localization (only one scale if IoUNet is used)\n    params.score_upsample_factor = 1     # How much Fourier upsampling to use\n\n    # Init data augmentation parameters\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation\n    
params.random_shift_factor = 1/3            # How much random shift to do on each augmented sample\n    deep_params.use_augmentation = True         # Whether to use augmentation for this feature\n\n    # Factorized convolution parameters\n    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation\n    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not\n    params.proj_init_method = 'randn'           # Method for initializing the projection matrix\n    params.filter_init_method = 'randn'         # Method for initializing the spatial filter\n    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')\n    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')\n\n    # Advanced localization parameters\n    params.advanced_localization = True         # Use this or not\n    params.target_not_found_threshold = 0.25    # Absolute score threshold to detect target missing\n    params.distractor_threshold = 0.8           # Relative threshold to find distractors\n    params.hard_negative_threshold = 0.5        # Relative threshold to find hard negative samples\n    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove\n    params.dispalcement_scale = 0.8             # Displacement to consider for distractors\n    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected\n    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected\n    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close\n\n    # IoUNet parameters\n    params.use_iou_net = True               # Use IoU net or not\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 5          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = 1   # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    # Setup the feature extractor (which includes the IoUNet)\n    deep_fparams = FeatureParams(feature_params=[deep_params])\n    deep_feat = deep.ATOMResNet18(net_path='atom_default.pth', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)\n    params.features = MultiResolutionExtractor([deep_feat])\n\n    return params\n"
  },
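  {
    "path": "external/AR/pytracking/parameter/atom/usage_example.py",
    "content": "# Hypothetical sketch, NOT part of the original repository. It shows how\n# a parameter module such as pytracking/parameter/atom/default.py is\n# typically consumed: upstream pytracking resolves the module\n# 'pytracking.parameter.<tracker_name>.<parameter_file>' at runtime and\n# calls its parameters() function. The helper below mirrors that pattern.\nimport importlib\n\n\ndef load_parameters(tracker_name: str, parameter_file: str):\n    # e.g. load_parameters('atom', 'default') imports\n    # pytracking.parameter.atom.default and builds its TrackerParams.\n    module = importlib.import_module(\n        'pytracking.parameter.{}.{}'.format(tracker_name, parameter_file))\n    return module.parameters()\n\n\nif __name__ == '__main__':\n    params = load_parameters('atom', 'default')\n    print(params.search_area_scale)    # 5, as set in default.py above\n    print(params.box_refinement_iter)  # 5\n"
  },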
  {
    "path": "external/AR/pytracking/parameter/atom/default_vot.py",
    "content": "from pytracking.utils import TrackerParams, FeatureParams, Choice\nfrom pytracking.features.extractor import MultiResolutionExtractor\nfrom pytracking.features import deep\nimport torch\n\ndef parameters():\n    params = TrackerParams()\n\n    # These are usually set from outside\n    params.debug = 0                        # Debug level\n    params.visualization = False            # Do visualization\n\n    # Use GPU or not (IoUNet requires this to be True)\n    params.use_gpu = True\n\n    # Feature specific parameters\n    deep_params = TrackerParams()\n\n    # Patch sampling parameters\n    params.max_image_sample_size = (14 * 16) ** 2   # Maximum image sample size\n    params.min_image_sample_size = (14 * 16) ** 2   # Minimum image sample size\n    params.search_area_scale = 4                    # Scale relative to target size\n    params.feature_size_odd = False                 # Good to use False for even-sized kernels and vice versa\n\n    # Optimization parameters\n    params.CG_iter = 5                    # The number of Conjugate Gradient iterations in each update after the first frame\n    params.init_CG_iter = 60              # The total number of Conjugate Gradient iterations used in the first frame\n    params.init_GN_iter = 6               # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)\n    params.post_init_CG_iter = 0          # CG iterations to run after GN\n    params.fletcher_reeves = False        # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient\n    params.standard_alpha = True          # Use the standard formula for computing the step length in Conjugate Gradient\n    params.CG_forgetting_rate = None\t  # Forgetting rate of the last conjugate direction\n\n    # Learning parameters for each feature type\n    deep_params.learning_rate = 0.0075           # Learning rate\n    deep_params.output_sigma_factor = 1/4        # Standard deviation of Gaussian label relative to target size\n\n    # Training parameters\n    params.sample_memory_size = 250              # Memory size\n    params.train_skipping = 10                   # How often to run training (every n-th frame)\n\n    # Online model parameters\n    deep_params.kernel_size = (4, 4)             # Kernel size of filter\n    deep_params.compressed_dim = 64              # Dimension output of projection matrix\n    deep_params.filter_reg = 1e-1                # Filter regularization factor\n    deep_params.projection_reg = 1e-4            # Projection regularization factor\n\n    # Windowing\n    params.feature_window = False                # Perform windowing of features\n    params.window_output = True                  # Perform windowing of output scores\n\n    # Detection parameters\n    params.scale_factors = torch.ones(1)        # What scales to use for localization (only one scale if IoUNet is used)\n    params.score_upsample_factor = 1            # How much Fourier upsampling to use\n\n    # Init data augmentation parameters\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation\n    
params.random_shift_factor = 1 / 3          # How much random shift to do on each augmented sample\n    deep_params.use_augmentation = True         # Whether to use augmentation for this feature\n\n    # Factorized convolution parameters\n    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation\n    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not\n    params.proj_init_method = 'randn'           # Method for initializing the projection matrix\n    params.filter_init_method = 'randn'         # Method for initializing the spatial filter\n    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')\n    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')\n\n    # Advanced localization parameters\n    params.advanced_localization = True         # Use this or not\n    params.target_not_found_threshold = -1      # Absolute score threshold to detect target missing\n    params.distractor_threshold = 100           # Relative threshold to find distractors\n    params.hard_negative_threshold = 0.3        # Relative threshold to find hard negative samples\n    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove\n    params.dispalcement_scale = 0.7             # Displacement to consider for distractors\n    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected\n    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected\n    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close\n\n    # IoUNet parameters\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 5          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = 1   # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    # Setup the feature extractor (which includes the IoUNet)\n    deep_fparams = FeatureParams(feature_params=[deep_params])\n    deep_feat = deep.ATOMResNet18(net_path='atom_default.pth', output_layers=['layer3'], fparams=deep_fparams,\n                                  normalize_power=2)\n    params.features = MultiResolutionExtractor([deep_feat])\n\n    params.vot_anno_conversion_type = 'preserve_area'\n    return params"
  },
  {
    "path": "external/AR/pytracking/parameter/atom/multiscale_no_iounet.py",
    "content": "from pytracking.utils import TrackerParams, FeatureParams, Choice\nfrom pytracking.features.extractor import MultiResolutionExtractor\nfrom pytracking.features import deep\nimport torch\n\ndef parameters():\n    params = TrackerParams()\n\n    # These are usually set from outside\n    params.debug = 0                        # Debug level\n    params.visualization = False            # Do visualization\n\n    # Use GPU or not (IoUNet requires this to be True)\n    params.use_gpu = True\n\n    # Feature specific parameters\n    deep_params = TrackerParams()\n\n    # Patch sampling parameters\n    params.max_image_sample_size = (18*16)**2   # Maximum image sample size\n    params.min_image_sample_size = (18*16)**2   # Minimum image sample size\n    params.search_area_scale = 5                # Scale relative to target size\n    params.feature_size_odd = False             # Good to use False for even-sized kernels and vice versa\n\n    # Optimization parameters\n    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame\n    params.init_CG_iter = 60            # The total number of Conjugate Gradient iterations used in the first frame\n    params.init_GN_iter = 6             # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)\n    params.post_init_CG_iter = 0        # CG iterations to run after GN\n    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient\n    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient\n    params.CG_forgetting_rate = None\t# Forgetting rate of the last conjugate direction\n\n    # Learning parameters for each feature type\n    deep_params.learning_rate = 0.01                # Learning rate\n    deep_params.init_samples_minimum_weight = 0.25  # Minimum weight of initial samples in memory\n    deep_params.output_sigma_factor = 1/4           # Standard deviation of Gaussian label relative to target size\n\n    # Training parameters\n    params.sample_memory_size = 250     # Memory size\n    params.train_skipping = 10          # How often to run training (every n-th frame)\n\n    # Online model parameters\n    deep_params.kernel_size = (4,4)     # Kernel size of filter\n    deep_params.compressed_dim = 64     # Dimension output of projection matrix\n    deep_params.filter_reg = 1e-1       # Filter regularization factor\n    deep_params.projection_reg = 1e-4   # Projection regularization factor\n\n    # Windowing\n    params.feature_window = False       # Perform windowing of features\n    params.window_output = False        # Perform windowing of output scores\n\n    # Detection parameters\n    params.scale_factors = 1.02**torch.arange(-2, 3).float() # What scales to use for localization (only one scale if IoUNet is used)\n    params.score_upsample_factor = 1     # How much Fourier upsampling to use\n\n    # Init data augmentation parameters\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2    # How much to expand sample when doing augmentation\n    
params.random_shift_factor = 1/3            # How much random shift to do on each augmented sample\n    deep_params.use_augmentation = True         # Whether to use augmentation for this feature\n\n    # Factorized convolution parameters\n    # params.use_projection_matrix = True       # Use projection matrix, i.e. use the factorized convolution formulation\n    params.update_projection_matrix = True      # Whether the projection matrix should be optimized or not\n    params.proj_init_method = 'randn'           # Method for initializing the projection matrix\n    params.filter_init_method = 'randn'         # Method for initializing the spatial filter\n    params.projection_activation = 'none'       # Activation function after projection ('none', 'relu', 'elu' or 'mlu')\n    params.response_activation = ('mlu', 0.05)  # Activation function on the output scores ('none', 'relu', 'elu' or 'mlu')\n\n    # Advanced localization parameters\n    params.advanced_localization = True         # Use this or not\n    params.target_not_found_threshold = 0.25    # Absolute score threshold to detect target missing\n    params.distractor_threshold = 0.8           # Relative threshold to find distractors\n    params.hard_negative_threshold = 0.5        # Relative threshold to find hard negative samples\n    params.target_neighborhood_scale = 2.2      # Target neighborhood to remove\n    params.dispalcement_scale = 0.8             # Displacement to consider for distractors\n    params.hard_negative_learning_rate = 0.02   # Learning rate if hard negative detected\n    params.hard_negative_CG_iter = 5            # Number of optimization iterations to use if hard negative detected\n    params.update_scale_when_uncertain = True   # Update scale or not if distractor is close\n\n    # IoUNet parameters\n    params.use_iou_net = False               # Use IoU net or not\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 5          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = 1   # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    # Setup the feature extractor (which includes the IoUNet)\n    deep_fparams = FeatureParams(feature_params=[deep_params])\n    deep_feat = deep.ATOMResNet18(net_path='atom_default.pth', output_layers=['layer3'], fparams=deep_fparams, normalize_power=2)\n    params.features = MultiResolutionExtractor([deep_feat])\n\n    return params\n"
  },
  {
    "path": "external/AR/pytracking/parameter/dimp/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/pytracking/parameter/dimp/dimp18.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 18*16\n    params.search_area_scale = 5\n\n    # Learning parameters\n    params.sample_memory_size = 50\n    params.learning_rate = 0.01\n    params.init_samples_minimum_weight = 0.25\n    params.train_skipping = 20\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 10\n    params.net_opt_update_iter = 2\n    params.net_opt_hn_iter = 1\n\n    # Detection parameters\n    params.window_output = False\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [10, -10, 45, -45],\n                           'blur': [(3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (2, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.target_not_found_threshold = 0.25\n    params.distractor_threshold = 0.8\n    params.hard_negative_threshold = 0.5\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.8\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.iounet_augmentation = False\n    params.iounet_use_log_scale = True\n    params.iounet_k = 3\n    params.num_init_random_boxes = 9\n    params.box_jitter_pos = 0.1\n    params.box_jitter_sz = 0.5\n    params.maximal_aspect_ratio = 6\n    params.box_refinement_iter = 5\n    params.box_refinement_step_length = 1\n    params.box_refinement_step_decay = 1\n\n    params.net = NetWithBackbone(net_path='dimp18.pth',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
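  {
    "path": "external/AR/pytracking/parameter/dimp/augmentation_schema_example.py",
    "content": "# Hypothetical annotated sketch, NOT part of the original repository. It\n# records one reading of the init-time 'augmentation' dict shared by the\n# parameter files in this directory; the per-key semantics below are\n# inferred from pytracking and should be treated as assumptions rather\n# than authoritative documentation.\n#\n#   'fliplr'        bool: add a horizontally flipped copy of the patch\n#   'rotate'        list of rotation angles in degrees, one sample each\n#   'blur'          list of (sigma_y, sigma_x) Gaussian blur settings\n#   'relativeshift' list of shifts relative to the search area size\n#   'dropout'       (num_samples, p): channel-dropout copies\n\n\ndef count_augmented_samples(augmentation: dict) -> int:\n    # Rough count of the extra init samples such a dict generates\n    # (one per flip, rotation, blur, shift, and dropout copy).\n    n = 1 if augmentation.get('fliplr') else 0\n    n += len(augmentation.get('rotate', []))\n    n += len(augmentation.get('blur', []))\n    n += len(augmentation.get('relativeshift', []))\n    n += augmentation.get('dropout', (0, 0.0))[0]\n    return n\n\n\nif __name__ == '__main__':\n    # The dict from dimp18.py above: 1 + 4 + 3 + 4 + 2 = 14 extra samples.\n    aug = {'fliplr': True,\n           'rotate': [10, -10, 45, -45],\n           'blur': [(3, 1), (1, 3), (2, 2)],\n           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)],\n           'dropout': (2, 0.2)}\n    print(count_augmented_samples(aug))  # 14\n"
  },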
  {
    "path": "external/AR/pytracking/parameter/dimp/dimp18_vot.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 14 * 16\n    params.search_area_scale = 4\n    params.feature_size_odd = False\n\n    # Learning parameters\n    params.sample_memory_size = 250\n    params.learning_rate = 0.0075\n    params.init_samples_minimum_weight = 0.0\n    params.train_skipping = 10\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 25\n    params.net_opt_update_iter = 3\n    params.net_opt_hn_iter = 3\n\n    # Detection parameters\n    params.window_output = True\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.target_not_found_threshold = 0.0\n    params.distractor_threshold = 100\n    params.hard_negative_threshold = 0.45\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.7\n\n    params.perform_hn_without_windowing = True\n\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.iounet_augmentation = False\n    params.iounet_use_log_scale = True\n    params.iounet_k = 3\n    params.num_init_random_boxes = 9\n    params.box_jitter_pos = 0.1\n    params.box_jitter_sz = 0.5\n    params.maximal_aspect_ratio = 6\n    params.box_refinement_iter = 5\n    params.box_refinement_step_length = 1\n    params.box_refinement_step_decay = 1\n\n    params.net = NetWithBackbone(net_path='dimp18.pth',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "external/AR/pytracking/parameter/dimp/dimp50.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 18*16\n    params.search_area_scale = 5\n\n    # Learning parameters\n    params.sample_memory_size = 50\n    params.learning_rate = 0.01\n    params.init_samples_minimum_weight = 0.25\n    params.train_skipping = 20\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 10\n    params.net_opt_update_iter = 2\n    params.net_opt_hn_iter = 1\n\n    # Detection parameters\n    params.window_output = False\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [10, -10, 45, -45],\n                           'blur': [(3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (2, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.target_not_found_threshold = 0.25\n    params.distractor_threshold = 0.8\n    params.hard_negative_threshold = 0.5\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.8\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.iounet_augmentation = False\n    params.iounet_use_log_scale = True\n    params.iounet_k = 3\n    params.num_init_random_boxes = 9\n    params.box_jitter_pos = 0.1\n    params.box_jitter_sz = 0.5\n    params.maximal_aspect_ratio = 6\n    params.box_refinement_iter = 5\n    params.box_refinement_step_length = 1\n    params.box_refinement_step_decay = 1\n\n    params.net = NetWithBackbone(net_path='dimp50.pth',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "external/AR/pytracking/parameter/dimp/dimp50_vot.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 14 * 16\n    params.search_area_scale = 4\n\n    # Learning parameters\n    params.sample_memory_size = 250\n    params.learning_rate = 0.0075\n    params.init_samples_minimum_weight = 0.0\n    params.train_skipping = 10\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 25\n    params.net_opt_update_iter = 3\n    params.net_opt_hn_iter = 3\n\n    # Detection parameters\n    params.window_output = True\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45, -45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3, 1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6, -0.6)],\n                           'dropout': (7, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.target_not_found_threshold = 0.0\n    params.distractor_threshold = 100\n    params.hard_negative_threshold = 0.45\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.7\n\n    params.perform_hn_without_windowing = True\n\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.iounet_augmentation = False\n    params.iounet_use_log_scale = True\n    params.iounet_k = 3\n    params.num_init_random_boxes = 9\n    params.box_jitter_pos = 0.1\n    params.box_jitter_sz = 0.5\n    params.maximal_aspect_ratio = 6\n    params.box_refinement_iter = 5\n    params.box_refinement_step_length = 1\n    params.box_refinement_step_decay = 1\n\n    params.net = NetWithBackbone(net_path='dimp50.pth',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "external/AR/pytracking/parameter/dimp/dimp50_vot19.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 16 * 16\n    params.search_area_scale = 4.5\n\n    # Learning parameters\n    params.sample_memory_size = 100\n    params.learning_rate = 0.0075\n    params.init_samples_minimum_weight = 0.0\n    params.train_skipping = 10\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 15\n    params.net_opt_update_iter = 2\n    params.net_opt_hn_iter = 2\n\n    # Detection parameters\n    params.window_output = True\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [-5, 10, -30, 60],\n                           'blur': [(2, 0.2), (1, 3)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, -0.6)],\n                           'dropout': (3, 0.2)}\n\n    params.augmentation_expansion_factor = 1.4\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.target_not_found_threshold = 0.0\n    params.distractor_threshold = 100\n    params.hard_negative_threshold = 0.45\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.7\n\n    params.perform_hn_without_windowing = True\n\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.iounet_augmentation = False\n    params.iounet_use_log_scale = True\n    params.iounet_k = 3\n    params.num_init_random_boxes = 9\n    params.box_jitter_pos = 0.1\n    params.box_jitter_sz = 0.5\n    params.maximal_aspect_ratio = 6\n    params.box_refinement_iter = 3\n    params.box_refinement_step_length = 1\n    params.box_refinement_step_decay = 1\n\n    params.net = NetWithBackbone(net_path='dimp50.pth',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "external/AR/pytracking/parameter/dimp/prdimp18.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 18*16\n    params.search_area_scale = 5\n\n    # Learning parameters\n    params.sample_memory_size = 50\n    params.learning_rate = 0.01\n    params.init_samples_minimum_weight = 0.25\n    params.train_skipping = 20\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 10\n    params.net_opt_update_iter = 2\n    params.net_opt_hn_iter = 1\n\n    # Detection parameters\n    params.window_output = False\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [10, -10, 45, -45],\n                           'blur': [(3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (2, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.score_preprocess = 'softmax'\n    params.target_not_found_threshold = 0.04\n    params.distractor_threshold = 0.8\n    params.hard_negative_threshold = 0.5\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.8\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.box_refinement_space = 'relative'\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 10          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = 2.5e-3 # 1   # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    params.net = NetWithBackbone(net_path='prdimp18.pth.tar',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "external/AR/pytracking/parameter/dimp/prdimp50.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 22*16\n    params.search_area_scale = 6\n    params.border_mode = 'inside_major'\n    params.patch_max_scale_change = 1.5\n\n    # Learning parameters\n    params.sample_memory_size = 50\n    params.learning_rate = 0.01\n    params.init_samples_minimum_weight = 0.25\n    params.train_skipping = 20\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 10\n    params.net_opt_update_iter = 2\n    params.net_opt_hn_iter = 1\n\n    # Detection parameters\n    params.window_output = False\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [10, -10, 45, -45],\n                           'blur': [(3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (2, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.score_preprocess = 'softmax'\n    params.target_not_found_threshold = 0.04\n    params.distractor_threshold = 0.8\n    params.hard_negative_threshold = 0.5\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.8\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.box_refinement_space = 'relative'\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 10          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = 2.5e-3 # 1   # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    params.net = NetWithBackbone(net_path='prdimp50.pth.tar',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "external/AR/pytracking/parameter/dimp/super_dimp.py",
    "content": "from pytracking.utils import TrackerParams\nfrom pytracking.features.net_wrappers import NetWithBackbone\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    params.image_sample_size = 22*16\n    params.search_area_scale = 6\n    params.border_mode = 'inside_major'\n    params.patch_max_scale_change = 1.5\n\n    # Learning parameters\n    params.sample_memory_size = 50\n    params.learning_rate = 0.01\n    params.init_samples_minimum_weight = 0.25\n    params.train_skipping = 20\n\n    # Net optimization params\n    params.update_classifier = True\n    params.net_opt_iter = 10\n    params.net_opt_update_iter = 2\n    params.net_opt_hn_iter = 1\n\n    # Detection parameters\n    params.window_output = False\n\n    # Init augmentation parameters\n    params.use_augmentation = True\n    params.augmentation = {'fliplr': True,\n                           'rotate': [10, -10, 45, -45],\n                           'blur': [(3,1), (1, 3), (2, 2)],\n                           'relativeshift': [(0.6, 0.6), (-0.6, 0.6), (0.6, -0.6), (-0.6,-0.6)],\n                           'dropout': (2, 0.2)}\n\n    params.augmentation_expansion_factor = 2\n    params.random_shift_factor = 1/3\n\n    # Advanced localization parameters\n    params.advanced_localization = True\n    params.target_not_found_threshold = 0.25\n    params.distractor_threshold = 0.8\n    params.hard_negative_threshold = 0.5\n    params.target_neighborhood_scale = 2.2\n    params.dispalcement_scale = 0.8\n    params.hard_negative_learning_rate = 0.02\n    params.update_scale_when_uncertain = True\n\n    # IoUnet parameters\n    params.box_refinement_space = 'relative'\n    params.iounet_augmentation = False      # Use the augmented samples to compute the modulation vector\n    params.iounet_k = 3                     # Top-k average to estimate final box\n    params.num_init_random_boxes = 9        # Num extra random boxes in addition to the classifier prediction\n    params.box_jitter_pos = 0.1             # How much to jitter the translation for random boxes\n    params.box_jitter_sz = 0.5              # How much to jitter the scale for random boxes\n    params.maximal_aspect_ratio = 6         # Limit on the aspect ratio\n    params.box_refinement_iter = 10          # Number of iterations for refining the boxes\n    params.box_refinement_step_length = 2.5e-3 # 1   # Gradient step length in the bounding box refinement\n    params.box_refinement_step_decay = 1    # Multiplicative step length decay (1 means no decay)\n\n    params.net = NetWithBackbone(net_path='super_dimp.pth.tar',\n                                 use_gpu=params.use_gpu)\n\n    params.vot_anno_conversion_type = 'preserve_area'\n\n    return params\n"
  },
  {
    "path": "external/AR/pytracking/parameter/eco/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/pytracking/parameter/eco/default.py",
    "content": "from pytracking.utils import TrackerParams, FeatureParams\nfrom pytracking.features.extractor import MultiResolutionExtractor\nfrom pytracking.features import deep\nimport torch\n\ndef parameters():\n    params = TrackerParams()\n\n    params.debug = 0\n    params.visualization = False\n\n    params.use_gpu = True\n\n    # Feature specific parameters\n    shallow_params = TrackerParams()\n    deep_params = TrackerParams()\n\n    # Patch sampling parameters\n    params.max_image_sample_size = 250**2   # Maximum image sample size\n    params.min_image_sample_size = 200**2   # Minimum image sample size\n    params.search_area_scale = 4.5          # Scale relative to target size\n\n    # Conjugate Gradient parameters\n    params.CG_iter = 5                  # The number of Conjugate Gradient iterations in each update after the first frame\n    params.init_CG_iter = 100           # The total number of Conjugate Gradient iterations used in the first frame\n    params.init_GN_iter = 10            # The number of Gauss-Newton iterations used in the first frame (only if the projection matrix is updated)\n    params.post_init_CG_iter = 0        # CG iterations to run after GN\n    params.fletcher_reeves = False      # Use the Fletcher-Reeves (true) or Polak-Ribiere (false) formula in the Conjugate Gradient\n    params.standard_alpha = True        # Use the standard formula for computing the step length in Conjugate Gradient\n    params.CG_forgetting_rate = 75\t \t# Forgetting rate of the last conjugate direction\n    params.precond_data_param = 0.3\t \t# Weight of the data term in the preconditioner\n    params.precond_reg_param = 0.15\t \t# Weight of the regularization term in the preconditioner\n    params.precond_proj_param = 35\t \t# Weight of the projection matrix part in the preconditioner\n\n    # Learning parameters\n    shallow_params.learning_rate = 0.025\n    deep_params.learning_rate = 0.0075\n    shallow_params.output_sigma_factor = 1/16\n    deep_params.output_sigma_factor = 1/4\n\n    # Training parameters\n    params.sample_memory_size = 200     # Memory size\n    params.train_skipping = 10          # How often to run training (every n-th frame)\n\n    # Detection parameters\n    params.scale_factors = 1.02**torch.arange(-2, 3).float()     # What scales to use for localization\n    params.score_upsample_factor = 1                             # How much Fourier upsampling to use\n    params.score_fusion_strategy = 'weightedsum'                 # Fusion strategy\n    shallow_params.translation_weight = 0.4                      # Weight of this feature\n    deep_params.translation_weight = 1 - shallow_params.translation_weight\n\n    # Init augmentation parameters\n    params.augmentation = {'fliplr': True,\n                           'rotate': [5, -5, 10, -10, 20, -20, 30, -30, 45,-45, -60, 60],\n                           'blur': [(2, 0.2), (0.2, 2), (3,1), (1, 3), (2, 2)],\n                           'shift': [(6, 6), (-6, 6), (6, -6), (-6,-6)],\n                           'dropout': (7, 0.2)}\n\n    # Whether to use augmentation for this feature\n    deep_params.use_augmentation = True\n    shallow_params.use_augmentation = True\n\n    # Factorized convolution parameters\n    # params.use_projection_matrix = True    # Use projection matrix, i.e. 
use the factorized convolution formulation\n    params.update_projection_matrix = True   # Whether the projection matrix should be optimized or not\n    # params.proj_init_method = 'pca'        # Method for initializing the projection matrix\n    params.projection_reg = 5e-8\t \t \t # Regularization parameter of the projection matrix\n    shallow_params.compressed_dim = 16       # Output dimension of the projection matrix for shallow features\n    deep_params.compressed_dim = 64          # Output dimension of the projection matrix for deep features\n\n    # Interpolation parameters\n    params.interpolation_method = 'bicubic'    # The kind of interpolation kernel\n    params.interpolation_bicubic_a = -0.75     # The parameter for the bicubic interpolation kernel\n    params.interpolation_centering = True      # Center the kernel at the feature sample\n    params.interpolation_windowing = False     # Do additional windowing on the Fourier coefficients of the kernel\n\n    # Regularization parameters\n    shallow_params.use_reg_window = True           # Use spatial regularization or not\n    shallow_params.reg_window_min = 1e-4\t\t   # The minimum value of the regularization window\n    shallow_params.reg_window_edge = 10e-3         # The impact of the spatial regularization\n    shallow_params.reg_window_power = 2            # The degree of the polynomial to use (e.g. 2 is a quadratic window)\n    shallow_params.reg_sparsity_threshold = 0.05   # A relative threshold for which DFT coefficients should be set to zero\n\n    deep_params.use_reg_window = True           # Use spatial regularization or not\n    deep_params.reg_window_min = 10e-4\t\t\t# The minimum value of the regularization window\n    deep_params.reg_window_edge = 50e-3         # The impact of the spatial regularization\n    deep_params.reg_window_power = 2            # The degree of the polynomial to use (e.g. 2 is a quadratic window)\n    deep_params.reg_sparsity_threshold = 0.1    # A relative threshold for which DFT coefficients should be set to zero\n\n\n    fparams = FeatureParams(feature_params=[shallow_params, deep_params])\n    features = deep.ResNet18m1(output_layers=['vggconv1', 'layer3'], use_gpu=params.use_gpu, fparams=fparams,\n                               pool_stride=[2, 1], normalize_power=2)\n\n    params.features = MultiResolutionExtractor([features])\n\n    return params"
  },
  {
    "path": "external/AR/pytracking/tracker/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/pytracking/tracker/atom/__init__.py",
    "content": "from .atom import ATOM\n\ndef get_tracker_class():\n    return ATOM"
  },
  {
    "path": "external/AR/pytracking/tracker/atom/atom.py",
    "content": "from pytracking.tracker.base import BaseTracker\nimport torch\nimport torch.nn.functional as F\nimport torch.nn\nimport math\nimport time\nfrom pytracking import dcf, fourier, TensorList, operation\nfrom pytracking.features.preprocessing import numpy_to_torch\nfrom pytracking.utils.plotting import show_tensor\nfrom pytracking.libs.optimization import GaussNewtonCG, ConjugateGradient, GradientDescentL2\nfrom .optim import ConvProblem, FactorizedConvProblem\nfrom pytracking.features import augmentation\nimport ltr.data.bounding_box_utils as bbutils\n\n\nclass ATOM(BaseTracker):\n\n    multiobj_mode = 'parallel'\n\n    def initialize_features(self):\n        if not getattr(self, 'features_initialized', False):\n            self.params.features.initialize()\n        self.features_initialized = True\n\n\n    def initialize(self, image, info: dict) -> dict:\n        state = info['init_bbox']\n\n        # Initialize some stuff\n        self.frame_num = 1\n        if not self.params.has('device'):\n            self.params.device = 'cuda' if self.params.use_gpu else 'cpu'\n\n        # Initialize features\n        self.initialize_features()\n\n        # Check if image is color\n        self.params.features.set_is_color(image.shape[2] == 3)\n\n        # Get feature specific params\n        self.fparams = self.params.features.get_fparams('feature_params')\n\n        tic = time.time()\n\n        # Get position and size\n        self.pos = torch.Tensor([state[1] + (state[3] - 1)/2, state[0] + (state[2] - 1)/2])\n        self.target_sz = torch.Tensor([state[3], state[2]])\n\n        # Set search area\n        self.target_scale = 1.0\n        search_area = torch.prod(self.target_sz * self.params.search_area_scale).item()\n        if search_area > self.params.max_image_sample_size:\n            self.target_scale =  math.sqrt(search_area / self.params.max_image_sample_size)\n        elif search_area < self.params.min_image_sample_size:\n            self.target_scale =  math.sqrt(search_area / self.params.min_image_sample_size)\n\n        # Check if IoUNet is used\n        self.use_iou_net = self.params.get('use_iou_net', True)\n\n        # Target size in base scale\n        self.base_target_sz = self.target_sz / self.target_scale\n\n        # Use odd square search area and set sizes\n        feat_max_stride = max(self.params.features.stride())\n        if self.params.get('search_area_shape', 'square') == 'square':\n            self.img_sample_sz = torch.round(torch.sqrt(torch.prod(self.base_target_sz * self.params.search_area_scale))) * torch.ones(2)\n        elif self.params.search_area_shape == 'initrect':\n            self.img_sample_sz = torch.round(self.base_target_sz * self.params.search_area_scale)\n        else:\n            raise ValueError('Unknown search area shape')\n        if self.params.feature_size_odd:\n            self.img_sample_sz += feat_max_stride - self.img_sample_sz % (2 * feat_max_stride)\n        else:\n            self.img_sample_sz += feat_max_stride - (self.img_sample_sz + feat_max_stride) % (2 * feat_max_stride)\n\n        # Set sizes\n        self.img_support_sz = self.img_sample_sz\n        self.feature_sz = self.params.features.size(self.img_sample_sz)\n        self.output_sz = self.params.score_upsample_factor * self.img_support_sz  # Interpolated size of the output\n        self.kernel_size = self.fparams.attribute('kernel_size')\n\n        self.iou_img_sample_sz = self.img_sample_sz\n\n        # Optimization options\n        
self.params.precond_learning_rate = self.fparams.attribute('learning_rate')\n        if self.params.CG_forgetting_rate is None or max(self.params.precond_learning_rate) >= 1:\n            self.params.direction_forget_factor = 0\n        else:\n            self.params.direction_forget_factor = (1 - max(self.params.precond_learning_rate))**self.params.CG_forgetting_rate\n\n        self.output_window = None\n        if self.params.get('window_output', False):\n            if self.params.get('use_clipped_window', False):\n                self.output_window = dcf.hann2d_clipped(self.output_sz.long(), self.output_sz.long()*self.params.effective_search_area / self.params.search_area_scale, centered=False).to(self.params.device)\n            else:\n                self.output_window = dcf.hann2d(self.output_sz.long(), centered=False).to(self.params.device)\n\n        # Initialize some learning things\n        self.init_learning()\n\n        # Convert image\n        im = numpy_to_torch(image)\n        self.im = im    # For debugging only\n\n        # Setup scale bounds\n        self.image_sz = torch.Tensor([im.shape[2], im.shape[3]])\n        self.min_scale_factor = torch.max(10 / self.base_target_sz)\n        self.max_scale_factor = torch.min(self.image_sz / self.base_target_sz)\n\n        # Extract and transform sample\n        x = self.generate_init_samples(im)\n\n        # Initialize iounet\n        if self.use_iou_net:\n            self.init_iou_net()\n\n        # Initialize projection matrix\n        self.init_projection_matrix(x)\n\n        # Transform to get the training sample\n        train_x = self.preprocess_sample(x)\n\n        # Generate label function\n        init_y = self.init_label_function(train_x)\n\n        # Init memory\n        self.init_memory(train_x)\n\n        # Init optimizer and do initial optimization\n        self.init_optimization(train_x, init_y)\n\n        self.pos_iounet = self.pos.clone()\n\n        out = {'time': time.time() - tic}\n        return out\n\n\n    def init_optimization(self, train_x, init_y):\n        # Initialize filter\n        filter_init_method = self.params.get('filter_init_method', 'zeros')\n        self.filter = TensorList(\n            [x.new_zeros(1, cdim, sz[0], sz[1]) for x, cdim, sz in zip(train_x, self.compressed_dim, self.kernel_size)])\n        if filter_init_method == 'zeros':\n            pass\n        elif filter_init_method == 'randn':\n            for f in self.filter:\n                f.normal_(0, 1/f.numel())\n        else:\n            raise ValueError('Unknown \"filter_init_method\"')\n\n        # Get parameters\n        self.params.update_projection_matrix = self.params.get('update_projection_matrix', True) and self.params.use_projection_matrix\n        optimizer = self.params.get('optimizer', 'GaussNewtonCG')\n\n        # Setup factorized joint optimization\n        if self.params.update_projection_matrix:\n            self.joint_problem = FactorizedConvProblem(self.init_training_samples, init_y, self.filter_reg,\n                                                       self.fparams.attribute('projection_reg'), self.params, self.init_sample_weights,\n                                                       self.projection_activation, self.response_activation)\n\n            # Variable containing both filter and projection matrix\n            joint_var = self.filter.concat(self.projection_matrix)\n\n            # Initialize optimizer\n            analyze_convergence = self.params.get('analyze_convergence', False)\n            if 
optimizer == 'GaussNewtonCG':\n                self.joint_optimizer = GaussNewtonCG(self.joint_problem, joint_var, debug=(self.params.debug >= 1),\n                                                     plotting=(self.params.debug >= 3), analyze=analyze_convergence,\n                                                     visdom=self.visdom)\n            elif optimizer == 'GradientDescentL2':\n                self.joint_optimizer = GradientDescentL2(self.joint_problem, joint_var, self.params.optimizer_step_length, self.params.optimizer_momentum, plotting=(self.params.debug >= 3), debug=(self.params.debug >= 1),\n                                                         visdom=self.visdom)\n\n            # Do joint optimization\n            if isinstance(self.params.init_CG_iter, (list, tuple)):\n                self.joint_optimizer.run(self.params.init_CG_iter)\n            else:\n                self.joint_optimizer.run(self.params.init_CG_iter // self.params.init_GN_iter, self.params.init_GN_iter)\n\n            if analyze_convergence:\n                opt_name = 'CG' if self.params.get('CG_optimizer', True) else 'GD'\n                for val_name, values in zip(['loss', 'gradient'], [self.joint_optimizer.losses, self.joint_optimizer.gradient_mags]):\n                    val_str = ' '.join(['{:.8e}'.format(v.item()) for v in values])\n                    file_name = '{}_{}.txt'.format(opt_name, val_name)\n                    with open(file_name, 'a') as f:\n                        f.write(val_str + '\\n')\n                raise RuntimeError('Exiting')\n\n        # Re-project samples with the new projection matrix\n        compressed_samples = self.project_sample(self.init_training_samples, self.projection_matrix)\n        for train_samp, init_samp in zip(self.training_samples, compressed_samples):\n            train_samp[:init_samp.shape[0],...] 
= init_samp\n\n        self.hinge_mask = None\n\n        # Initialize optimizer\n        self.conv_problem = ConvProblem(self.training_samples, self.y, self.filter_reg, self.sample_weights, self.response_activation)\n\n        if optimizer == 'GaussNewtonCG':\n            self.filter_optimizer = ConjugateGradient(self.conv_problem, self.filter, fletcher_reeves=self.params.fletcher_reeves,\n                                                      direction_forget_factor=self.params.direction_forget_factor, debug=(self.params.debug>=1),\n                                                      plotting=(self.params.debug>=3), visdom=self.visdom)\n        elif optimizer == 'GradientDescentL2':\n            self.filter_optimizer = GradientDescentL2(self.conv_problem, self.filter, self.params.optimizer_step_length,\n                                                      self.params.optimizer_momentum, debug=(self.params.debug >= 1),\n                                                      plotting=(self.params.debug>=3), visdom=self.visdom)\n\n        # Transfer losses from previous optimization\n        if self.params.update_projection_matrix:\n            self.filter_optimizer.residuals = self.joint_optimizer.residuals\n            self.filter_optimizer.losses = self.joint_optimizer.losses\n\n        if not self.params.update_projection_matrix:\n            self.filter_optimizer.run(self.params.init_CG_iter)\n\n        # Post optimization\n        self.filter_optimizer.run(self.params.post_init_CG_iter)\n\n        # Free memory\n        del self.init_training_samples\n        if self.params.use_projection_matrix:\n            del self.joint_problem, self.joint_optimizer\n\n\n    def track(self, image, info: dict = None) -> dict:\n        self.debug_info = {}\n\n        self.frame_num += 1\n        self.debug_info['frame_num'] = self.frame_num\n\n        # Convert image\n        im = numpy_to_torch(image)\n        self.im = im    # For debugging only\n\n        # ------- LOCALIZATION ------- #\n\n        # Get sample\n        sample_pos = self.pos.round()\n        sample_scales = self.target_scale * self.params.scale_factors\n        test_x = self.extract_processed_sample(im, self.pos, sample_scales, self.img_sample_sz)\n\n        # Compute scores\n        scores_raw = self.apply_filter(test_x)\n        translation_vec, scale_ind, s, flag = self.localize_target(scores_raw)\n\n        # Update position and scale\n        if flag != 'not_found':\n            if self.use_iou_net:\n                update_scale_flag = self.params.get('update_scale_when_uncertain', True) or flag != 'uncertain'\n                if self.params.get('use_classifier', True):\n                    self.update_state(sample_pos + translation_vec)\n                self.refine_target_box(sample_pos, sample_scales[scale_ind], scale_ind, update_scale_flag)\n            elif self.params.get('use_classifier', True):\n                self.update_state(sample_pos + translation_vec, sample_scales[scale_ind])\n\n        score_map = s[scale_ind, ...]\n        max_score = torch.max(score_map).item()\n        self.debug_info['max_score'] = max_score\n        self.debug_info['flag'] = flag\n\n        if self.visdom is not None:\n            self.visdom.register(score_map, 'heatmap', 2, 'Score Map')\n            self.visdom.register(self.debug_info, 'info_dict', 1, 'Status')\n        elif self.params.debug >= 2:\n            show_tensor(score_map, 5, title='Max score = {:.2f}'.format(max_score))\n\n        # ------- UPDATE ------- #\n\n        # 
Check flags and set learning rate if hard negative\n        update_flag = flag not in ['not_found', 'uncertain']\n        hard_negative = (flag == 'hard_negative')\n        learning_rate = self.params.hard_negative_learning_rate if hard_negative else None\n\n        if update_flag:\n            # Get train sample\n            train_x = TensorList([x[scale_ind:scale_ind+1, ...] for x in test_x])\n\n            # Create label for sample\n            train_y = self.get_label_function(sample_pos, sample_scales[scale_ind])\n\n            # Update memory\n            self.update_memory(train_x, train_y, learning_rate)\n\n        # Train filter\n        if hard_negative:\n            self.filter_optimizer.run(self.params.hard_negative_CG_iter)\n        elif (self.frame_num-1) % self.params.train_skipping == 0:\n            self.filter_optimizer.run(self.params.CG_iter)\n\n        # Set the pos of the tracker to iounet pos\n        if self.use_iou_net and flag != 'not_found':\n            self.pos = self.pos_iounet.clone()\n\n        # Return new state\n        new_state = torch.cat((self.pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]]))\n\n        out = {'target_bbox': new_state.tolist()}\n        return out\n\n\n    def apply_filter(self, sample_x: TensorList):\n        return operation.conv2d(sample_x, self.filter, mode='same')\n\n    def localize_target(self, scores_raw):\n        # Weighted sum (if multiple features) with interpolation in the Fourier domain\n        weight = self.fparams.attribute('translation_weight', 1.0)\n        scores_raw = weight * scores_raw\n        sf_weighted = fourier.cfft2(scores_raw) / (scores_raw.size(2) * scores_raw.size(3))\n        for i, (sz, ksz) in enumerate(zip(self.feature_sz, self.kernel_size)):\n            sf_weighted[i] = fourier.shift_fs(sf_weighted[i], math.pi * (1 - torch.Tensor([ksz[0]%2, ksz[1]%2]) / sz))\n\n        scores_fs = fourier.sum_fs(sf_weighted)\n        scores = fourier.sample_fs(scores_fs, self.output_sz)\n\n        if self.output_window is not None and not self.params.get('perform_hn_without_windowing', False):\n            scores *= self.output_window\n\n        if self.params.get('advanced_localization', False):\n            return self.localize_advanced(scores)\n\n        # Get maximum\n        max_score, max_disp = dcf.max2d(scores)\n        _, scale_ind = torch.max(max_score, dim=0)\n        max_disp = max_disp.float().cpu()\n\n        # Convert to displacements in the base scale\n        disp = (max_disp + self.output_sz / 2) % self.output_sz - self.output_sz / 2\n\n        # Compute translation vector and scale change factor\n        translation_vec = disp[scale_ind, ...].view(-1) * (self.img_support_sz / self.output_sz) * self.target_scale\n        translation_vec *= self.params.scale_factors[scale_ind]\n\n        # Shift the score output for visualization purposes\n        if self.params.debug >= 2:\n            sz = scores.shape[-2:]\n            scores = torch.cat([scores[...,sz[0]//2:,:], scores[...,:sz[0]//2,:]], -2)\n            scores = torch.cat([scores[...,:,sz[1]//2:], scores[...,:,:sz[1]//2]], -1)\n\n        return translation_vec, scale_ind, scores, None\n\n    def localize_advanced(self, scores):\n        \"\"\"Does the advanced localization with hard negative detection and target-not-found flagging.\"\"\"\n\n        sz = scores.shape[-2:]\n\n        if self.output_window is not None and self.params.get('perform_hn_without_windowing', False):\n            scores_orig = scores.clone()\n\n            
scores_orig = torch.cat([scores_orig[..., (sz[0] + 1) // 2:, :], scores_orig[..., :(sz[0] + 1) // 2, :]], -2)\n            scores_orig = torch.cat([scores_orig[..., :, (sz[1] + 1) // 2:], scores_orig[..., :, :(sz[1] + 1) // 2]], -1)\n\n            scores *= self.output_window\n\n        # Shift scores back\n        scores = torch.cat([scores[...,(sz[0]+1)//2:,:], scores[...,:(sz[0]+1)//2,:]], -2)\n        scores = torch.cat([scores[...,:,(sz[1]+1)//2:], scores[...,:,:(sz[1]+1)//2]], -1)\n\n        # Find maximum\n        max_score1, max_disp1 = dcf.max2d(scores)\n        _, scale_ind = torch.max(max_score1, dim=0)\n        max_score1 = max_score1[scale_ind]\n        max_disp1 = max_disp1[scale_ind,...].float().cpu().view(-1)\n        target_disp1 = max_disp1 - self.output_sz // 2\n        translation_vec1 = target_disp1 * (self.img_support_sz / self.output_sz) * self.target_scale\n\n        if max_score1.item() < self.params.target_not_found_threshold:\n            return translation_vec1, scale_ind, scores, 'not_found'\n\n        if self.output_window is not None and self.params.get('perform_hn_without_windowing', False):\n            scores = scores_orig\n\n        # Mask out target neighborhood\n        target_neigh_sz = self.params.target_neighborhood_scale * self.target_sz / self.target_scale\n        tneigh_top = max(round(max_disp1[0].item() - target_neigh_sz[0].item() / 2), 0)\n        tneigh_bottom = min(round(max_disp1[0].item() + target_neigh_sz[0].item() / 2 + 1), sz[0])\n        tneigh_left = max(round(max_disp1[1].item() - target_neigh_sz[1].item() / 2), 0)\n        tneigh_right = min(round(max_disp1[1].item() + target_neigh_sz[1].item() / 2 + 1), sz[1])\n        scores_masked = scores[scale_ind:scale_ind+1,...].clone()\n        scores_masked[...,tneigh_top:tneigh_bottom,tneigh_left:tneigh_right] = 0\n\n        # Find new maximum\n        max_score2, max_disp2 = dcf.max2d(scores_masked)\n        max_disp2 = max_disp2.float().cpu().view(-1)\n        target_disp2 = max_disp2 - self.output_sz // 2\n        translation_vec2 = target_disp2 * (self.img_support_sz / self.output_sz) * self.target_scale\n\n        # Handle the different cases\n        if max_score2 > self.params.distractor_threshold * max_score1:\n            disp_norm1 = torch.sqrt(torch.sum(target_disp1**2))\n            disp_norm2 = torch.sqrt(torch.sum(target_disp2**2))\n            disp_threshold = self.params.dispalcement_scale * math.sqrt(sz[0] * sz[1]) / 2\n\n            if disp_norm2 > disp_threshold and disp_norm1 < disp_threshold:\n                return translation_vec1, scale_ind, scores, 'hard_negative'\n            if disp_norm2 < disp_threshold and disp_norm1 > disp_threshold:\n                return translation_vec2, scale_ind, scores, 'hard_negative'\n            if disp_norm2 > disp_threshold and disp_norm1 > disp_threshold:\n                return translation_vec1, scale_ind, scores, 'uncertain'\n\n            # If also the distractor is close, return with highest score\n            return translation_vec1, scale_ind, scores, 'uncertain'\n\n        if max_score2 > self.params.hard_negative_threshold * max_score1 and max_score2 > self.params.target_not_found_threshold:\n            return translation_vec1, scale_ind, scores, 'hard_negative'\n\n        return translation_vec1, scale_ind, scores, None\n\n\n    def extract_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor):\n        return self.params.features.extract(im, pos, scales, sz)[0]\n\n    def get_iou_features(self):\n 
       return self.params.features.get_unique_attribute('iounet_features')\n\n    def get_iou_backbone_features(self):\n        return self.params.features.get_unique_attribute('iounet_backbone_features')\n\n    def extract_processed_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor) -> (TensorList, TensorList):\n        x = self.extract_sample(im, pos, scales, sz)\n        return self.preprocess_sample(self.project_sample(x))\n\n    def preprocess_sample(self, x: TensorList) -> (TensorList, TensorList):\n        if self.params.get('_feature_window', False):\n            x = x * self.feature_window\n        return x\n\n    def project_sample(self, x: TensorList, proj_matrix = None):\n        # Apply projection matrix\n        if proj_matrix is None:\n            proj_matrix = self.projection_matrix\n        return operation.conv2d(x, proj_matrix).apply(self.projection_activation)\n\n    def init_learning(self):\n        # Get window function\n        self.feature_window = TensorList([dcf.hann2d(sz).to(self.params.device) for sz in self.feature_sz])\n\n        # Filter regularization\n        self.filter_reg = self.fparams.attribute('filter_reg')\n\n        # Activation function after the projection matrix (phi_1 in the paper)\n        projection_activation = self.params.get('projection_activation', 'none')\n        if isinstance(projection_activation, tuple):\n            projection_activation, act_param = projection_activation\n\n        if projection_activation == 'none':\n            self.projection_activation = lambda x: x\n        elif projection_activation == 'relu':\n            self.projection_activation = torch.nn.ReLU(inplace=True)\n        elif projection_activation == 'elu':\n            self.projection_activation = torch.nn.ELU(inplace=True)\n        elif projection_activation == 'mlu':\n            self.projection_activation = lambda x: F.elu(F.leaky_relu(x, 1 / act_param), act_param)\n        else:\n            raise ValueError('Unknown activation')\n\n        # Activation function after the output scores (phi_2 in the paper)\n        response_activation = self.params.get('response_activation', 'none')\n        if isinstance(response_activation, tuple):\n            response_activation, act_param = response_activation\n\n        if response_activation == 'none':\n            self.response_activation = lambda x: x\n        elif response_activation == 'relu':\n            self.response_activation = torch.nn.ReLU(inplace=True)\n        elif response_activation == 'elu':\n            self.response_activation = torch.nn.ELU(inplace=True)\n        elif response_activation == 'mlu':\n            self.response_activation = lambda x: F.elu(F.leaky_relu(x, 1 / act_param), act_param)\n        else:\n            raise ValueError('Unknown activation')\n\n\n    def generate_init_samples(self, im: torch.Tensor) -> TensorList:\n        \"\"\"Generate augmented initial samples.\"\"\"\n\n        # Compute augmentation size\n        aug_expansion_factor = self.params.get('augmentation_expansion_factor', None)\n        aug_expansion_sz = self.img_sample_sz.clone()\n        aug_output_sz = None\n        if aug_expansion_factor is not None and aug_expansion_factor != 1:\n            aug_expansion_sz = (self.img_sample_sz * aug_expansion_factor).long()\n            aug_expansion_sz += (aug_expansion_sz - self.img_sample_sz.long()) % 2\n            aug_expansion_sz = aug_expansion_sz.float()\n            aug_output_sz = self.img_sample_sz.long().tolist()\n\n        # Random 
shift operator\n        get_rand_shift = lambda: None\n        random_shift_factor = self.params.get('random_shift_factor', 0)\n        if random_shift_factor > 0:\n            get_rand_shift = lambda: ((torch.rand(2) - 0.5) * self.img_sample_sz * random_shift_factor).long().tolist()\n\n        # Create transformations\n        self.transforms = [augmentation.Identity(aug_output_sz)]\n        if 'shift' in self.params.augmentation:\n            self.transforms.extend([augmentation.Translation(shift, aug_output_sz) for shift in self.params.augmentation['shift']])\n        if 'relativeshift' in self.params.augmentation:\n            get_absolute = lambda shift: (torch.Tensor(shift) * self.img_sample_sz/2).long().tolist()\n            self.transforms.extend([augmentation.Translation(get_absolute(shift), aug_output_sz) for shift in self.params.augmentation['relativeshift']])\n        if 'fliplr' in self.params.augmentation and self.params.augmentation['fliplr']:\n            self.transforms.append(augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))\n        if 'blur' in self.params.augmentation:\n            self.transforms.extend([augmentation.Blur(sigma, aug_output_sz, get_rand_shift()) for sigma in self.params.augmentation['blur']])\n        if 'scale' in self.params.augmentation:\n            self.transforms.extend([augmentation.Scale(scale_factor, aug_output_sz, get_rand_shift()) for scale_factor in self.params.augmentation['scale']])\n        if 'rotate' in self.params.augmentation:\n            self.transforms.extend([augmentation.Rotate(angle, aug_output_sz, get_rand_shift()) for angle in self.params.augmentation['rotate']])\n\n        # Generate initial samples\n        init_samples = self.params.features.extract_transformed(im, self.pos, self.target_scale, aug_expansion_sz, self.transforms)\n\n        # Remove augmented samples for features that should not use augmentation\n        for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):\n            if not use_aug:\n                init_samples[i] = init_samples[i][0:1, ...]\n\n        # Add dropout samples\n        if 'dropout' in self.params.augmentation:\n            num, prob = self.params.augmentation['dropout']\n            self.transforms.extend(self.transforms[:1]*num)\n            for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):\n                if use_aug:\n                    init_samples[i] = torch.cat([init_samples[i], F.dropout2d(init_samples[i][0:1,...].expand(num,-1,-1,-1), p=prob, training=True)])\n\n        return init_samples\n\n\n    def init_projection_matrix(self, x):\n        # Set if using projection matrix\n        self.params.use_projection_matrix = self.params.get('use_projection_matrix', True)\n\n        if self.params.use_projection_matrix:\n            self.compressed_dim = self.fparams.attribute('compressed_dim', None)\n\n            proj_init_method = self.params.get('proj_init_method', 'pca')\n            if proj_init_method == 'pca':\n                x_mat = TensorList([e.permute(1, 0, 2, 3).reshape(e.shape[1], -1).clone() for e in x])\n                x_mat -= x_mat.mean(dim=1, keepdim=True)\n                cov_x = x_mat @ x_mat.t()\n                self.projection_matrix = TensorList(\n                    [None if cdim is None else torch.svd(C)[0][:, :cdim].t().unsqueeze(-1).unsqueeze(-1).clone() for C, cdim in\n                     zip(cov_x, self.compressed_dim)])\n            elif proj_init_method == 'randn':\n                self.projection_matrix = 
TensorList(\n                    [None if cdim is None else ex.new_zeros(cdim,ex.shape[1],1,1).normal_(0,1/math.sqrt(ex.shape[1])) for ex, cdim in\n                     zip(x, self.compressed_dim)])\n        else:\n            self.compressed_dim = x.size(1)\n            self.projection_matrix = TensorList([None]*len(x))\n\n    def init_label_function(self, train_x):\n        # Allocate label function\n        self.y = TensorList([x.new_zeros(self.params.sample_memory_size, 1, x.shape[2], x.shape[3]) for x in train_x])\n\n        # Output sigma factor\n        output_sigma_factor = self.fparams.attribute('output_sigma_factor')\n        self.sigma = (self.feature_sz / self.img_support_sz * self.base_target_sz).prod().sqrt() * output_sigma_factor * torch.ones(2)\n\n        # Center pos in normalized coords\n        target_center_norm = (self.pos - self.pos.round()) / (self.target_scale * self.img_support_sz)\n\n        # Generate label functions\n        for y, sig, sz, ksz, x in zip(self.y, self.sigma, self.feature_sz, self.kernel_size, train_x):\n            center_pos = sz * target_center_norm + 0.5 * torch.Tensor([(ksz[0] + 1) % 2, (ksz[1] + 1) % 2])\n            for i, T in enumerate(self.transforms[:x.shape[0]]):\n                sample_center = center_pos + torch.Tensor(T.shift) / self.img_support_sz * sz\n                y[i, 0, ...] = dcf.label_function_spatial(sz, sig, sample_center)\n\n        # Return only the ones to use for initial training\n        return TensorList([y[:x.shape[0], ...] for y, x in zip(self.y, train_x)])\n\n\n    def init_memory(self, train_x):\n        # Initialize first-frame training samples\n        self.num_init_samples = train_x.size(0)\n        self.init_sample_weights = TensorList([x.new_ones(1) / x.shape[0] for x in train_x])\n        self.init_training_samples = train_x\n\n        # Sample counters and weights\n        self.num_stored_samples = self.num_init_samples.copy()\n        self.previous_replace_ind = [None] * len(self.num_stored_samples)\n        self.sample_weights = TensorList([x.new_zeros(self.params.sample_memory_size) for x in train_x])\n        for sw, init_sw, num in zip(self.sample_weights, self.init_sample_weights, self.num_init_samples):\n            sw[:num] = init_sw\n\n        # Initialize memory\n        self.training_samples = TensorList(\n            [x.new_zeros(self.params.sample_memory_size, cdim, x.shape[2], x.shape[3]) for x, cdim in\n             zip(train_x, self.compressed_dim)])\n\n    def update_memory(self, sample_x: TensorList, sample_y: TensorList, learning_rate = None):\n        replace_ind = self.update_sample_weights(self.sample_weights, self.previous_replace_ind, self.num_stored_samples, self.num_init_samples, self.fparams, learning_rate)\n        self.previous_replace_ind = replace_ind\n        for train_samp, x, ind in zip(self.training_samples, sample_x, replace_ind):\n            train_samp[ind:ind+1,...] = x\n        for y_memory, y, ind in zip(self.y, sample_y, replace_ind):\n            y_memory[ind:ind+1,...] = y\n        if self.hinge_mask is not None:\n            for m, y, ind in zip(self.hinge_mask, sample_y, replace_ind):\n                m[ind:ind+1,...] 
= (y >= self.params.hinge_threshold).float()\n        self.num_stored_samples += 1\n\n\n    def update_sample_weights(self, sample_weights, previous_replace_ind, num_stored_samples, num_init_samples, fparams, learning_rate = None):\n        # Update weights and get index to replace in memory\n        replace_ind = []\n        for sw, prev_ind, num_samp, num_init, fpar in zip(sample_weights, previous_replace_ind, num_stored_samples, num_init_samples, fparams):\n            lr = learning_rate\n            if lr is None:\n                lr = fpar.learning_rate\n\n            init_samp_weight = getattr(fpar, 'init_samples_minimum_weight', None)\n            if init_samp_weight == 0:\n                init_samp_weight = None\n            s_ind = 0 if init_samp_weight is None else num_init\n\n            if num_samp == 0 or lr == 1:\n                sw[:] = 0\n                sw[0] = 1\n                r_ind = 0\n            else:\n                # Get index to replace\n                _, r_ind = torch.min(sw[s_ind:], 0)\n                r_ind = r_ind.item() + s_ind\n\n                # Update weights\n                if prev_ind is None:\n                    sw /= 1 - lr\n                    sw[r_ind] = lr\n                else:\n                    sw[r_ind] = sw[prev_ind] / (1 - lr)\n\n            sw /= sw.sum()\n            if init_samp_weight is not None and sw[:num_init].sum() < init_samp_weight:\n                sw /= init_samp_weight + sw[num_init:].sum()\n                sw[:num_init] = init_samp_weight / num_init\n\n            replace_ind.append(r_ind)\n\n        return replace_ind\n\n    def get_label_function(self, sample_pos, sample_scale):\n        # Generate label function\n        train_y = TensorList()\n        target_center_norm = (self.pos - sample_pos) / (sample_scale * self.img_support_sz)\n        for sig, sz, ksz in zip(self.sigma, self.feature_sz, self.kernel_size):\n            center = sz * target_center_norm + 0.5 * torch.Tensor([(ksz[0] + 1) % 2, (ksz[1] + 1) % 2])\n            train_y.append(dcf.label_function_spatial(sz, sig, center))\n        return train_y\n\n    def update_state(self, new_pos, new_scale = None):\n        # Update scale\n        if new_scale is not None:\n            self.target_scale = new_scale.clamp(self.min_scale_factor, self.max_scale_factor)\n            self.target_sz = self.base_target_sz * self.target_scale\n\n        # Update pos\n        inside_ratio = 0.2\n        inside_offset = (inside_ratio - 0.5) * self.target_sz\n        self.pos = torch.max(torch.min(new_pos, self.image_sz - inside_offset), inside_offset)\n\n    def get_iounet_box(self, pos, sz, sample_pos, sample_scale):\n        \"\"\"All inputs in original image coordinates\"\"\"\n        box_center = (pos - sample_pos) / sample_scale + (self.iou_img_sample_sz - 1) / 2\n        box_sz = sz / sample_scale\n        target_ul = box_center - (box_sz - 1) / 2\n        return torch.cat([target_ul.flip((0,)), box_sz.flip((0,))])\n\n    def init_iou_net(self):\n        # Setup IoU net\n        self.iou_predictor = self.params.features.get_unique_attribute('iou_predictor')\n        for p in self.iou_predictor.parameters():\n            p.requires_grad = False\n\n        # Get target boxes for the different augmentations\n        self.iou_target_box = self.get_iounet_box(self.pos, self.target_sz, self.pos.round(), self.target_scale)\n        target_boxes = TensorList()\n        if self.params.iounet_augmentation:\n            for T in self.transforms:\n                if not 
isinstance(T, (augmentation.Identity, augmentation.Translation, augmentation.FlipHorizontal, augmentation.FlipVertical, augmentation.Blur)):\n                    break\n                target_boxes.append(self.iou_target_box + torch.Tensor([T.shift[1], T.shift[0], 0, 0]))\n        else:\n            target_boxes.append(self.iou_target_box.clone())\n        target_boxes = torch.cat(target_boxes.view(1,4), 0).to(self.params.device)\n\n        # Get iou features\n        iou_backbone_features = self.get_iou_backbone_features()\n\n        # Remove other augmentations such as rotation\n        iou_backbone_features = TensorList([x[:target_boxes.shape[0],...] for x in iou_backbone_features])\n\n        # Extract target feat\n        with torch.no_grad():\n            target_feat = self.iou_predictor.get_modulation(iou_backbone_features, target_boxes)\n        self.target_feat = TensorList([x.detach().mean(0) for x in target_feat])\n\n        if self.params.get('iounet_not_use_reference', False):\n            self.target_feat = TensorList([torch.full_like(tf, tf.norm() / tf.numel()) for tf in self.target_feat])\n\n\n    def refine_target_box(self, sample_pos, sample_scale, scale_ind, update_scale = True):\n        # Initial box for refinement\n        init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos, sample_scale)\n\n        # Extract features from the relevant scale\n        iou_features = self.get_iou_features()\n        iou_features = TensorList([x[scale_ind:scale_ind+1,...] for x in iou_features])\n\n        init_boxes = init_box.view(1,4).clone()\n        if self.params.num_init_random_boxes > 0:\n            # Get random initial boxes\n            square_box_sz = init_box[2:].prod().sqrt()\n            rand_factor = square_box_sz * torch.cat([self.params.box_jitter_pos * torch.ones(2), self.params.box_jitter_sz * torch.ones(2)])\n            minimal_edge_size = init_box[2:].min()/3\n            rand_bb = (torch.rand(self.params.num_init_random_boxes, 4) - 0.5) * rand_factor\n            new_sz = (init_box[2:] + rand_bb[:,2:]).clamp(minimal_edge_size)\n            new_center = (init_box[:2] + init_box[2:]/2) + rand_bb[:,:2]\n            init_boxes = torch.cat([new_center - new_sz/2, new_sz], 1)\n            init_boxes = torch.cat([init_box.view(1,4), init_boxes])\n\n        # Refine boxes by maximizing iou\n        output_boxes, output_iou = self.optimize_boxes(iou_features, init_boxes)\n\n        # Remove weird boxes with extreme aspect ratios\n        output_boxes[:, 2:].clamp_(1)\n        aspect_ratio = output_boxes[:,2] / output_boxes[:,3]\n        keep_ind = (aspect_ratio < self.params.maximal_aspect_ratio) * (aspect_ratio > 1/self.params.maximal_aspect_ratio)\n        output_boxes = output_boxes[keep_ind,:]\n        output_iou = output_iou[keep_ind]\n\n        # If no box found\n        if output_boxes.shape[0] == 0:\n            return\n\n        # Take average of top k boxes\n        k = self.params.get('iounet_k', 5)\n        topk = min(k, output_boxes.shape[0])\n        _, inds = torch.topk(output_iou, topk)\n        predicted_box = output_boxes[inds, :].mean(0)\n        predicted_iou = output_iou.view(-1, 1)[inds, :].mean(0)\n\n        # Update position\n        new_pos = predicted_box[:2] + predicted_box[2:]/2 - (self.iou_img_sample_sz - 1) / 2\n        new_pos = new_pos.flip((0,)) * sample_scale + sample_pos\n        new_target_sz = predicted_box[2:].flip((0,)) * sample_scale\n        new_scale = torch.sqrt(new_target_sz.prod() / 
self.base_target_sz.prod())\n\n        self.pos_iounet = new_pos.clone()\n\n        if self.params.get('use_iounet_pos_for_learning', True):\n            self.pos = new_pos.clone()\n\n        self.target_sz = new_target_sz\n\n        if update_scale:\n            self.target_scale = new_scale\n\n    def optimize_boxes(self, iou_features, init_boxes):\n        # Optimize iounet boxes\n        output_boxes = init_boxes.view(1, -1, 4).to(self.params.device)\n        step_length = self.params.box_refinement_step_length\n        init_step_length = self.params.box_refinement_step_length\n        if isinstance(step_length, (tuple, list)):\n            init_step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]]).to(\n                self.params.device).view(1, 1, 4)\n        box_refinement_space = self.params.get('box_refinement_space', 'default')\n\n        step_length = init_step_length * output_boxes.new_ones(1, output_boxes.shape[1], 1)\n        outputs_prev = -99999999 * output_boxes.new_ones(1, output_boxes.shape[1])\n        step = torch.zeros_like(output_boxes)\n\n        if box_refinement_space == 'default':\n            # Optimization using bounding box space used in original IoUNet\n            for i_ in range(self.params.box_refinement_iter):\n                # forward pass\n                bb_init = output_boxes.clone().detach()\n                bb_init.requires_grad = True\n\n                outputs = self.iou_predictor.predict_iou(self.target_feat, iou_features, bb_init)\n\n                if isinstance(outputs, (list, tuple)):\n                    outputs = outputs[0]\n\n                outputs.backward(gradient=torch.ones_like(outputs))\n\n                # Update mask and step length\n                update_mask = (outputs.detach() > outputs_prev) | (self.params.box_refinement_step_decay >= 1)\n                update_mask_float = update_mask.view(1, -1, 1).float()\n                step_length[~update_mask, :] *= self.params.box_refinement_step_decay\n                outputs_prev = outputs.detach().clone()\n\n                # Update proposal\n                step = update_mask_float * step_length * bb_init.grad * bb_init[:, :, 2:].repeat(1, 1, 2) - (\n                            1.0 - update_mask_float) * step\n                output_boxes = bb_init + step\n                output_boxes.detach_()\n\n        elif box_refinement_space == 'relative':\n            # Optimization using relative bounding box space\n            sz_norm = output_boxes[:, :1, 2:].clone()\n            output_boxes_rel = bbutils.rect_to_rel(output_boxes, sz_norm)\n            for i_ in range(self.params.box_refinement_iter):\n                # forward pass\n                bb_init_rel = output_boxes_rel.clone().detach()\n                bb_init_rel.requires_grad = True\n\n                bb_init = bbutils.rel_to_rect(bb_init_rel, sz_norm)\n                outputs = self.iou_predictor.predict_iou(self.target_feat, iou_features, bb_init)\n\n                if isinstance(outputs, (list, tuple)):\n                    outputs = outputs[0]\n\n                outputs.backward(gradient=torch.ones_like(outputs))\n\n                # Update mask and step length\n                update_mask = (outputs.detach() > outputs_prev) | (self.params.box_refinement_step_decay >= 1)\n                update_mask_float = update_mask.view(1, -1, 1).float()\n                step_length[~update_mask, :] *= self.params.box_refinement_step_decay\n                outputs_prev = 
outputs.detach().clone()\n\n                # Update proposal\n                step = update_mask_float * step_length * bb_init_rel.grad - (1.0 - update_mask_float) * step\n                output_boxes_rel = bb_init_rel + step\n                output_boxes_rel.detach_()\n\n                # for s in outputs.view(-1):\n                #     print('{:.2f}  '.format(s.item()), end='')\n                # print('')\n            # print('')\n\n            output_boxes = bbutils.rel_to_rect(output_boxes_rel, sz_norm)\n\n        else:\n            raise ValueError('Unknown box_refinement_space {}'.format(box_refinement_space))\n\n        return output_boxes.view(-1, 4).cpu(), outputs.detach().view(-1).cpu()\n"
  },
  {
    "path": "external/AR/pytracking/tracker/atom/optim.py",
    "content": "import torch\nfrom pytracking import optimization, TensorList, operation\nimport math\n\n\nclass FactorizedConvProblem(optimization.L2Problem):\n    def __init__(self, training_samples: TensorList, y:TensorList, filter_reg: torch.Tensor, projection_reg, params, sample_weights: TensorList,\n                 projection_activation, response_activation):\n        self.training_samples = training_samples\n        self.y = y\n        self.filter_reg = filter_reg\n        self.sample_weights = sample_weights\n        self.params = params\n        self.projection_reg = projection_reg\n        self.projection_activation = projection_activation\n        self.response_activation = response_activation\n\n        self.diag_M = self.filter_reg.concat(projection_reg)\n\n    def __call__(self, x: TensorList):\n        \"\"\"\n        Compute residuals\n        :param x: [filters, projection_matrices]\n        :return: [data_terms, filter_regularizations, proj_mat_regularizations]\n        \"\"\"\n        filter = x[:len(x)//2]  # w2 in paper\n        P = x[len(x)//2:]       # w1 in paper\n\n        # Do first convolution\n        compressed_samples = operation.conv1x1(self.training_samples, P).apply(self.projection_activation)\n\n        # Do second convolution\n        residuals = operation.conv2d(compressed_samples, filter, mode='same').apply(self.response_activation)\n\n        # Compute data residuals\n        residuals = residuals - self.y\n\n        residuals = self.sample_weights.sqrt().view(-1, 1, 1, 1) * residuals\n\n        # Add regularization for projection matrix\n        residuals.extend(self.filter_reg.apply(math.sqrt) * filter)\n\n        # Add regularization for projection matrix\n        residuals.extend(self.projection_reg.apply(math.sqrt) * P)\n\n        return residuals\n\n\n    def ip_input(self, a: TensorList, b: TensorList):\n        num = len(a) // 2       # Number of filters\n        a_filter = a[:num]\n        b_filter = b[:num]\n        a_P = a[num:]\n        b_P = b[num:]\n\n        # Filter inner product\n        # ip_out = a_filter.reshape(-1) @ b_filter.reshape(-1)\n        ip_out = operation.conv2d(a_filter, b_filter).view(-1)\n\n        # Add projection matrix part\n        # ip_out += a_P.reshape(-1) @ b_P.reshape(-1)\n        ip_out += operation.conv2d(a_P.view(1,-1,1,1), b_P.view(1,-1,1,1)).view(-1)\n\n        # Have independent inner products for each filter\n        return ip_out.concat(ip_out.clone())\n\n    def M1(self, x: TensorList):\n        return x / self.diag_M\n\n\nclass ConvProblem(optimization.L2Problem):\n    def __init__(self, training_samples: TensorList, y:TensorList, filter_reg: torch.Tensor, sample_weights: TensorList, response_activation):\n        self.training_samples = training_samples\n        self.y = y\n        self.filter_reg = filter_reg\n        self.sample_weights = sample_weights\n        self.response_activation = response_activation\n\n    def __call__(self, x: TensorList):\n        \"\"\"\n        Compute residuals\n        :param x: [filters]\n        :return: [data_terms, filter_regularizations]\n        \"\"\"\n        # Do convolution and compute residuals\n        residuals = operation.conv2d(self.training_samples, x, mode='same').apply(self.response_activation)\n        residuals = residuals - self.y\n\n        residuals = self.sample_weights.sqrt().view(-1, 1, 1, 1) * residuals\n\n        # Add regularization for projection matrix\n        residuals.extend(self.filter_reg.apply(math.sqrt) * x)\n\n        return 
residuals\n\n    def ip_input(self, a: TensorList, b: TensorList):\n        # return a.reshape(-1) @ b.reshape(-1)\n        # return (a * b).sum()\n        return operation.conv2d(a, b).view(-1)\n"
  },
  {
    "path": "external/AR/pytracking/tracker/base/__init__.py",
    "content": "from .basetracker import BaseTracker"
  },
  {
    "path": "external/AR/pytracking/tracker/base/basetracker.py",
    "content": "from _collections import OrderedDict\n\nclass BaseTracker:\n    \"\"\"Base class for all trackers.\"\"\"\n\n    def __init__(self, params):\n        self.params = params\n        self.visdom = None\n\n\n    def initialize(self, image, info: dict) -> dict:\n        \"\"\"Overload this function in your tracker. This should initialize the model.\"\"\"\n        raise NotImplementedError\n\n\n    def track(self, image, info: dict = None) -> dict:\n        \"\"\"Overload this function in your tracker. This should track in the frame and update the model.\"\"\"\n        raise NotImplementedError\n\n\n    def visdom_draw_tracking(self, image, box, segmentation=None):\n        if isinstance(box, OrderedDict):\n            box = [v for k, v in box.items()]\n        else:\n            box = (box,)\n        if segmentation is None:\n            self.visdom.register((image, *box), 'Tracking', 1, 'Tracking')\n        else:\n            self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking')"
  },
  {
    "path": "external/AR/pytracking/tracker/dimp/__init__.py",
    "content": "from .dimp import DiMP\n\ndef get_tracker_class():\n    return DiMP"
  },
  {
    "path": "external/AR/pytracking/tracker/dimp/dimp.py",
    "content": "from pytracking.tracker.base import BaseTracker\nimport torch\nimport torch.nn.functional as F\nimport math\nimport time\nfrom pytracking import dcf, TensorList\nfrom pytracking.features.preprocessing import numpy_to_torch\nfrom pytracking.utils.plotting import show_tensor, plot_graph\nfrom pytracking.features.preprocessing import sample_patch_multiscale, sample_patch_transformed\nfrom pytracking.features import augmentation\nimport ltr.data.bounding_box_utils as bbutils\nfrom ltr.models.target_classifier.initializer import FilterInitializerZero\nfrom ltr.models.layers import activation\n\n\nclass DiMP(BaseTracker):\n\n    multiobj_mode = 'parallel'\n\n    def initialize_features(self):\n        if not getattr(self, 'features_initialized', False):\n            self.params.net.initialize()\n        self.features_initialized = True\n\n    def initialize(self, image, info: dict) -> dict:\n        # Initialize some stuff\n        self.frame_num = 1\n        if not self.params.has('device'):\n            self.params.device = 'cuda' if self.params.use_gpu else 'cpu'\n\n        # Initialize network\n        self.initialize_features()\n\n        # The DiMP network\n        self.net = self.params.net\n\n        # Time initialization\n        tic = time.time()\n\n        # Convert image\n        im = numpy_to_torch(image)\n\n        # Get target position and size\n        state = info['init_bbox']\n        self.pos = torch.Tensor([state[1] + (state[3] - 1)/2, state[0] + (state[2] - 1)/2])\n        self.target_sz = torch.Tensor([state[3], state[2]])\n\n        # Get object id\n        self.object_id = info.get('object_ids', [None])[0]\n        self.id_str = '' if self.object_id is None else ' {}'.format(self.object_id)\n\n        # Set sizes\n        self.image_sz = torch.Tensor([im.shape[2], im.shape[3]])\n        sz = self.params.image_sample_size\n        sz = torch.Tensor([sz, sz] if isinstance(sz, int) else sz)\n        if self.params.get('use_image_aspect_ratio', False):\n            sz = self.image_sz * sz.prod().sqrt() / self.image_sz.prod().sqrt()\n            stride = self.params.get('feature_stride', 32)\n            sz = torch.round(sz / stride) * stride\n        self.img_sample_sz = sz\n        self.img_support_sz = self.img_sample_sz\n\n        # Set search area\n        search_area = torch.prod(self.target_sz * self.params.search_area_scale).item()\n        self.target_scale =  math.sqrt(search_area) / self.img_sample_sz.prod().sqrt()\n\n        # Target size in base scale\n        self.base_target_sz = self.target_sz / self.target_scale\n\n        # Setup scale factors\n        if not self.params.has('scale_factors'):\n            self.params.scale_factors = torch.ones(1)\n        elif isinstance(self.params.scale_factors, (list, tuple)):\n            self.params.scale_factors = torch.Tensor(self.params.scale_factors)\n\n        # Setup scale bounds\n        self.min_scale_factor = torch.max(10 / self.base_target_sz)\n        self.max_scale_factor = torch.min(self.image_sz / self.base_target_sz)\n\n        # Extract and transform sample\n        init_backbone_feat = self.generate_init_samples(im)\n\n        # Initialize classifier\n        self.init_classifier(init_backbone_feat)\n\n        # Initialize IoUNet\n        if self.params.get('use_iou_net', True):\n            self.init_iou_net(init_backbone_feat)\n\n        out = {'time': time.time() - tic}\n        return out\n\n\n    def track(self, image, info: dict = None) -> dict:\n        self.debug_info = {}\n\n     
   self.frame_num += 1\n        self.debug_info['frame_num'] = self.frame_num\n\n        # Convert image\n        im = numpy_to_torch(image)\n\n        # ------- LOCALIZATION ------- #\n\n        # Extract backbone features\n        backbone_feat, sample_coords, im_patches = self.extract_backbone_features(im, self.get_centered_sample_pos(),\n                                                                      self.target_scale * self.params.scale_factors,\n                                                                      self.img_sample_sz)\n        # Extract classification features\n        test_x = self.get_classification_features(backbone_feat)\n\n        # Location of sample\n        sample_pos, sample_scales = self.get_sample_location(sample_coords)\n\n        # Compute classification scores\n        scores_raw = self.classify_target(test_x)\n\n        # Localize the target\n        translation_vec, scale_ind, s, flag = self.localize_target(scores_raw, sample_pos, sample_scales)\n        new_pos = sample_pos[scale_ind,:] + translation_vec\n\n        # Update position and scale\n        if flag != 'not_found':\n            if self.params.get('use_iou_net', True):\n                update_scale_flag = self.params.get('update_scale_when_uncertain', True) or flag != 'uncertain'\n                if self.params.get('use_classifier', True):\n                    self.update_state(new_pos)\n                self.refine_target_box(backbone_feat, sample_pos[scale_ind,:], sample_scales[scale_ind], scale_ind, update_scale_flag)\n            elif self.params.get('use_classifier', True):\n                self.update_state(new_pos, sample_scales[scale_ind])\n\n\n        # ------- UPDATE ------- #\n\n        update_flag = flag not in ['not_found', 'uncertain']\n        hard_negative = (flag == 'hard_negative')\n        learning_rate = self.params.get('hard_negative_learning_rate', None) if hard_negative else None\n\n        if update_flag and self.params.get('update_classifier', False):\n            # Get train sample\n            train_x = test_x[scale_ind:scale_ind+1, ...]\n\n            # Create target_box and label for spatial sample\n            target_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos[scale_ind,:], sample_scales[scale_ind])\n\n            # Update the classifier model\n            self.update_classifier(train_x, target_box, learning_rate, s[scale_ind,...])\n\n        # Set the pos of the tracker to iounet pos\n        if self.params.get('use_iou_net', True) and flag != 'not_found' and hasattr(self, 'pos_iounet'):\n            self.pos = self.pos_iounet.clone()\n\n        score_map = s[scale_ind, ...]\n        max_score = torch.max(score_map).item()\n\n        # Visualize and set debug info\n        self.search_area_box = torch.cat((sample_coords[scale_ind,[1,0]], sample_coords[scale_ind,[3,2]] - sample_coords[scale_ind,[1,0]] - 1))\n        self.debug_info['flag' + self.id_str] = flag\n        self.debug_info['max_score' + self.id_str] = max_score\n        if self.visdom is not None:\n            self.visdom.register(score_map, 'heatmap', 2, 'Score Map' + self.id_str)\n            self.visdom.register(self.debug_info, 'info_dict', 1, 'Status')\n        elif self.params.debug >= 2:\n            show_tensor(score_map, 5, title='Max score = {:.2f}'.format(max_score))\n\n        # Compute output bounding box\n        new_state = torch.cat((self.pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]]))\n\n        if self.params.get('output_not_found_box', 
False) and flag == 'not_found':\n            output_state = [-1, -1, -1, -1]\n        else:\n            output_state = new_state.tolist()\n        # 2020.4.26: additionally return the DCF center prediction\n        out = {'target_bbox': output_state,\n               'dcf_center': new_pos[[1,0]]}\n        return out\n\n\n    def get_sample_location(self, sample_coord):\n        \"\"\"Get the location of the extracted sample.\"\"\"\n        sample_coord = sample_coord.float()\n        sample_pos = 0.5*(sample_coord[:,:2] + sample_coord[:,2:] - 1)\n        sample_scales = ((sample_coord[:,2:] - sample_coord[:,:2]) / self.img_sample_sz).prod(dim=1).sqrt()\n        return sample_pos, sample_scales\n\n    def get_centered_sample_pos(self):\n        \"\"\"Get the center position for the new sample. Make sure the target is correctly centered.\"\"\"\n        return self.pos + ((self.feature_sz + self.kernel_size) % 2) * self.target_scale * \\\n               self.img_support_sz / (2*self.feature_sz)\n\n    def classify_target(self, sample_x: TensorList):\n        \"\"\"Classify target by applying the DiMP filter.\"\"\"\n        with torch.no_grad():\n            scores = self.net.classifier.classify(self.target_filter, sample_x)\n        return scores\n\n    def localize_target(self, scores, sample_pos, sample_scales):\n        \"\"\"Run the target localization.\"\"\"\n\n        scores = scores.squeeze(1)\n\n        preprocess_method = self.params.get('score_preprocess', 'none')\n        if preprocess_method == 'none':\n            pass\n        elif preprocess_method == 'exp':\n            scores = scores.exp()\n        elif preprocess_method == 'softmax':\n            reg_val = getattr(self.net.classifier.filter_optimizer, 'softmax_reg', None)\n            scores_view = scores.view(scores.shape[0], -1)\n            scores_softmax = activation.softmax_reg(scores_view, dim=-1, reg=reg_val)\n            scores = scores_softmax.view(scores.shape)\n        else:\n            raise Exception('Unknown score_preprocess in params.')\n\n        score_filter_ksz = self.params.get('score_filter_ksz', 1)\n        if score_filter_ksz > 1:\n            assert score_filter_ksz % 2 == 1\n            kernel = scores.new_ones(1,1,score_filter_ksz,score_filter_ksz)\n            scores = F.conv2d(scores.view(-1,1,*scores.shape[-2:]), kernel, padding=score_filter_ksz//2).view(scores.shape)\n\n        if self.params.get('advanced_localization', False):\n            return self.localize_advanced(scores, sample_pos, sample_scales)\n\n        # Get maximum\n        score_sz = torch.Tensor(list(scores.shape[-2:]))\n        score_center = (score_sz - 1)/2\n        max_score, max_disp = dcf.max2d(scores)\n        _, scale_ind = torch.max(max_score, dim=0)\n        max_disp = max_disp[scale_ind,...].float().cpu().view(-1)\n        target_disp = max_disp - score_center\n\n        # Compute translation vector and scale change factor\n        output_sz = score_sz - (self.kernel_size + 1) % 2\n        translation_vec = target_disp * (self.img_support_sz / output_sz) * sample_scales[scale_ind]\n\n        return translation_vec, scale_ind, scores, None\n\n\n    def localize_advanced(self, scores, sample_pos, sample_scales):\n        \"\"\"Run the target advanced localization (as in ATOM).\"\"\"\n\n        sz = scores.shape[-2:]\n        score_sz = torch.Tensor(list(sz))\n        output_sz = score_sz - (self.kernel_size + 1) % 2\n        score_center = (score_sz - 1)/2\n\n        scores_hn = scores\n        if self.output_window is not None and 
self.params.get('perform_hn_without_windowing', False):\n            scores_hn = scores.clone()\n            scores *= self.output_window\n\n        max_score1, max_disp1 = dcf.max2d(scores)\n        _, scale_ind = torch.max(max_score1, dim=0)\n        sample_scale = sample_scales[scale_ind]\n        max_score1 = max_score1[scale_ind]\n        max_disp1 = max_disp1[scale_ind,...].float().cpu().view(-1)\n        target_disp1 = max_disp1 - score_center\n        translation_vec1 = target_disp1 * (self.img_support_sz / output_sz) * sample_scale\n\n        if max_score1.item() < self.params.target_not_found_threshold:\n            return translation_vec1, scale_ind, scores_hn, 'not_found'\n        if max_score1.item() < self.params.get('uncertain_threshold', -float('inf')):\n            return translation_vec1, scale_ind, scores_hn, 'uncertain'\n        if max_score1.item() < self.params.get('hard_sample_threshold', -float('inf')):\n            return translation_vec1, scale_ind, scores_hn, 'hard_negative'\n\n        # Mask out target neighborhood\n        target_neigh_sz = self.params.target_neighborhood_scale * (self.target_sz / sample_scale) * (output_sz / self.img_support_sz)\n\n        tneigh_top = max(round(max_disp1[0].item() - target_neigh_sz[0].item() / 2), 0)\n        tneigh_bottom = min(round(max_disp1[0].item() + target_neigh_sz[0].item() / 2 + 1), sz[0])\n        tneigh_left = max(round(max_disp1[1].item() - target_neigh_sz[1].item() / 2), 0)\n        tneigh_right = min(round(max_disp1[1].item() + target_neigh_sz[1].item() / 2 + 1), sz[1])\n        scores_masked = scores_hn[scale_ind:scale_ind + 1, ...].clone()\n        scores_masked[...,tneigh_top:tneigh_bottom,tneigh_left:tneigh_right] = 0\n\n        # Find new maximum\n        max_score2, max_disp2 = dcf.max2d(scores_masked)\n        max_disp2 = max_disp2.float().cpu().view(-1)\n        target_disp2 = max_disp2 - score_center\n        translation_vec2 = target_disp2 * (self.img_support_sz / output_sz) * sample_scale\n\n        prev_target_vec = (self.pos - sample_pos[scale_ind,:]) / ((self.img_support_sz / output_sz) * sample_scale)\n\n        # Handle the different cases\n        if max_score2 > self.params.distractor_threshold * max_score1:\n            disp_norm1 = torch.sqrt(torch.sum((target_disp1-prev_target_vec)**2))\n            disp_norm2 = torch.sqrt(torch.sum((target_disp2-prev_target_vec)**2))\n            disp_threshold = self.params.dispalcement_scale * math.sqrt(sz[0] * sz[1]) / 2\n\n            if disp_norm2 > disp_threshold and disp_norm1 < disp_threshold:\n                return translation_vec1, scale_ind, scores_hn, 'hard_negative'\n            if disp_norm2 < disp_threshold and disp_norm1 > disp_threshold:\n                return translation_vec2, scale_ind, scores_hn, 'hard_negative'\n            if disp_norm2 > disp_threshold and disp_norm1 > disp_threshold:\n                return translation_vec1, scale_ind, scores_hn, 'uncertain'\n\n            # If also the distractor is close, return with highest score\n            return translation_vec1, scale_ind, scores_hn, 'uncertain'\n\n        if max_score2 > self.params.hard_negative_threshold * max_score1 and max_score2 > self.params.target_not_found_threshold:\n            return translation_vec1, scale_ind, scores_hn, 'hard_negative'\n\n        return translation_vec1, scale_ind, scores_hn, 'normal'\n\n    def extract_backbone_features(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor):\n        im_patches, patch_coords = 
sample_patch_multiscale(im, pos, scales, sz,\n                                                           mode=self.params.get('border_mode', 'replicate'),\n                                                           max_scale_change=self.params.get('patch_max_scale_change', None))\n        with torch.no_grad():\n            backbone_feat = self.net.extract_backbone(im_patches)\n        return backbone_feat, patch_coords, im_patches\n\n    def get_classification_features(self, backbone_feat):\n        with torch.no_grad():\n            return self.net.extract_classification_feat(backbone_feat)\n\n    def get_iou_backbone_features(self, backbone_feat):\n        return self.net.get_backbone_bbreg_feat(backbone_feat)\n\n    def get_iou_features(self, backbone_feat):\n        with torch.no_grad():\n            return self.net.bb_regressor.get_iou_feat(self.get_iou_backbone_features(backbone_feat))\n\n    def get_iou_modulation(self, iou_backbone_feat, target_boxes):\n        with torch.no_grad():\n            return self.net.bb_regressor.get_modulation(iou_backbone_feat, target_boxes)\n\n\n    def generate_init_samples(self, im: torch.Tensor) -> TensorList:\n        \"\"\"Perform data augmentation to generate initial training samples.\"\"\"\n\n        mode = self.params.get('border_mode', 'replicate')\n        if mode == 'inside' or mode == 'inside_major':\n            # Get new sample size if forced inside the image\n            im_sz = torch.Tensor([im.shape[2], im.shape[3]])\n            sample_sz = self.target_scale * self.img_sample_sz\n            shrink_factor = (sample_sz.float() / im_sz)\n            if mode == 'inside':\n                shrink_factor = shrink_factor.max()\n            elif mode == 'inside_major':\n                shrink_factor = shrink_factor.min()\n            shrink_factor.clamp_(min=1, max=self.params.get('patch_max_scale_change', None))\n            sample_sz = (sample_sz.float() / shrink_factor)\n            self.init_sample_scale = (sample_sz / self.img_sample_sz).prod().sqrt()\n            tl = self.pos - (sample_sz - 1) / 2\n            br = self.pos + sample_sz / 2 + 1\n            global_shift = - ((-tl).clamp(0) - (br - im_sz).clamp(0)) / self.init_sample_scale\n        else:\n            self.init_sample_scale = self.target_scale\n            global_shift = torch.zeros(2)\n\n        self.init_sample_pos = self.pos.round()\n\n        # Compute augmentation size\n        aug_expansion_factor = self.params.get('augmentation_expansion_factor', None)\n        aug_expansion_sz = self.img_sample_sz.clone()\n        aug_output_sz = None\n        if aug_expansion_factor is not None and aug_expansion_factor != 1:\n            aug_expansion_sz = (self.img_sample_sz * aug_expansion_factor).long()\n            aug_expansion_sz += (aug_expansion_sz - self.img_sample_sz.long()) % 2\n            aug_expansion_sz = aug_expansion_sz.float()\n            aug_output_sz = self.img_sample_sz.long().tolist()\n\n        # Random shift for each sample\n        get_rand_shift = lambda: None\n        random_shift_factor = self.params.get('random_shift_factor', 0)\n        if random_shift_factor > 0:\n            get_rand_shift = lambda: ((torch.rand(2) - 0.5) * self.img_sample_sz * random_shift_factor + global_shift).long().tolist()\n\n        # Always put identity transformation first, since it is the unaugmented sample that is always used\n        self.transforms = [augmentation.Identity(aug_output_sz, global_shift.long().tolist())]\n\n        augs = self.params.augmentation if 
self.params.get('use_augmentation', True) else {}\n\n        # Add all augmentations\n        if 'shift' in augs:\n            self.transforms.extend([augmentation.Translation(shift, aug_output_sz, global_shift.long().tolist()) for shift in augs['shift']])\n        if 'relativeshift' in augs:\n            get_absolute = lambda shift: (torch.Tensor(shift) * self.img_sample_sz/2).long().tolist()\n            self.transforms.extend([augmentation.Translation(get_absolute(shift), aug_output_sz, global_shift.long().tolist()) for shift in augs['relativeshift']])\n        if 'fliplr' in augs and augs['fliplr']:\n            self.transforms.append(augmentation.FlipHorizontal(aug_output_sz, get_rand_shift()))\n        if 'blur' in augs:\n            self.transforms.extend([augmentation.Blur(sigma, aug_output_sz, get_rand_shift()) for sigma in augs['blur']])\n        if 'scale' in augs:\n            self.transforms.extend([augmentation.Scale(scale_factor, aug_output_sz, get_rand_shift()) for scale_factor in augs['scale']])\n        if 'rotate' in augs:\n            self.transforms.extend([augmentation.Rotate(angle, aug_output_sz, get_rand_shift()) for angle in augs['rotate']])\n\n        # Extract augmented image patches\n        im_patches = sample_patch_transformed(im, self.init_sample_pos, self.init_sample_scale, aug_expansion_sz, self.transforms)\n\n        # Extract initial backbone features\n        with torch.no_grad():\n            init_backbone_feat = self.net.extract_backbone(im_patches)\n\n        return init_backbone_feat\n\n    def init_target_boxes(self):\n        \"\"\"Get the target bounding boxes for the initial augmented samples.\"\"\"\n        self.classifier_target_box = self.get_iounet_box(self.pos, self.target_sz, self.init_sample_pos, self.init_sample_scale)\n        init_target_boxes = TensorList()\n        for T in self.transforms:\n            init_target_boxes.append(self.classifier_target_box + torch.Tensor([T.shift[1], T.shift[0], 0, 0]))\n        init_target_boxes = torch.cat(init_target_boxes.view(1, 4), 0).to(self.params.device)\n        self.target_boxes = init_target_boxes.new_zeros(self.params.sample_memory_size, 4)\n        self.target_boxes[:init_target_boxes.shape[0],:] = init_target_boxes\n        return init_target_boxes\n\n    def init_memory(self, train_x: TensorList):\n        # Initialize first-frame spatial training samples\n        self.num_init_samples = train_x.size(0)\n        init_sample_weights = TensorList([x.new_ones(1) / x.shape[0] for x in train_x])\n\n        # Sample counters and weights for spatial\n        self.num_stored_samples = self.num_init_samples.copy()\n        self.previous_replace_ind = [None] * len(self.num_stored_samples)\n        self.sample_weights = TensorList([x.new_zeros(self.params.sample_memory_size) for x in train_x])\n        for sw, init_sw, num in zip(self.sample_weights, init_sample_weights, self.num_init_samples):\n            sw[:num] = init_sw\n\n        # Initialize memory\n        self.training_samples = TensorList(\n            [x.new_zeros(self.params.sample_memory_size, x.shape[1], x.shape[2], x.shape[3]) for x in train_x])\n\n        for ts, x in zip(self.training_samples, train_x):\n            ts[:x.shape[0],...] 
= x\n\n\n    def update_memory(self, sample_x: TensorList, target_box, learning_rate = None):\n        # Update weights and get replace ind\n        replace_ind = self.update_sample_weights(self.sample_weights, self.previous_replace_ind, self.num_stored_samples, self.num_init_samples, learning_rate)\n        self.previous_replace_ind = replace_ind\n\n        # Update sample and label memory\n        for train_samp, x, ind in zip(self.training_samples, sample_x, replace_ind):\n            train_samp[ind:ind+1,...] = x\n\n        # Update bb memory\n        self.target_boxes[replace_ind[0],:] = target_box\n\n        self.num_stored_samples += 1\n\n\n    def update_sample_weights(self, sample_weights, previous_replace_ind, num_stored_samples, num_init_samples, learning_rate = None):\n        # Update weights and get index to replace\n        replace_ind = []\n        for sw, prev_ind, num_samp, num_init in zip(sample_weights, previous_replace_ind, num_stored_samples, num_init_samples):\n            lr = learning_rate\n            if lr is None:\n                lr = self.params.learning_rate\n\n            init_samp_weight = self.params.get('init_samples_minimum_weight', None)\n            if init_samp_weight == 0:\n                init_samp_weight = None\n            s_ind = 0 if init_samp_weight is None else num_init\n\n            if num_samp == 0 or lr == 1:\n                sw[:] = 0\n                sw[0] = 1\n                r_ind = 0\n            else:\n                # Get index to replace\n                if num_samp < sw.shape[0]:\n                    r_ind = num_samp\n                else:\n                    _, r_ind = torch.min(sw[s_ind:], 0)\n                    r_ind = r_ind.item() + s_ind\n\n                # Update weights\n                if prev_ind is None:\n                    sw /= 1 - lr\n                    sw[r_ind] = lr\n                else:\n                    sw[r_ind] = sw[prev_ind] / (1 - lr)\n\n            sw /= sw.sum()\n            if init_samp_weight is not None and sw[:num_init].sum() < init_samp_weight:\n                sw /= init_samp_weight + sw[num_init:].sum()\n                sw[:num_init] = init_samp_weight / num_init\n\n            replace_ind.append(r_ind)\n\n        return replace_ind\n\n    def update_state(self, new_pos, new_scale = None):\n        # Update scale\n        if new_scale is not None:\n            self.target_scale = new_scale.clamp(self.min_scale_factor, self.max_scale_factor)\n            self.target_sz = self.base_target_sz * self.target_scale\n\n        # Update pos\n        inside_ratio = self.params.get('target_inside_ratio', 0.2)\n        inside_offset = (inside_ratio - 0.5) * self.target_sz\n        self.pos = torch.max(torch.min(new_pos, self.image_sz - inside_offset), inside_offset)\n\n\n    def get_iounet_box(self, pos, sz, sample_pos, sample_scale):\n        \"\"\"All inputs in original image coordinates.\n        Generates a box in the cropped image sample reference frame, in the format used by the IoUNet.\"\"\"\n        box_center = (pos - sample_pos) / sample_scale + (self.img_sample_sz - 1) / 2\n        box_sz = sz / sample_scale\n        target_ul = box_center - (box_sz - 1) / 2\n        return torch.cat([target_ul.flip((0,)), box_sz.flip((0,))])\n\n\n    def init_iou_net(self, backbone_feat):\n        # Setup IoU net and objective\n        for p in self.net.bb_regressor.parameters():\n            p.requires_grad = False\n\n        # Get target boxes for the different augmentations\n        
self.classifier_target_box = self.get_iounet_box(self.pos, self.target_sz, self.init_sample_pos, self.init_sample_scale)\n        target_boxes = TensorList()\n        if self.params.iounet_augmentation:\n            for T in self.transforms:\n                if not isinstance(T, (augmentation.Identity, augmentation.Translation, augmentation.FlipHorizontal, augmentation.FlipVertical, augmentation.Blur)):\n                    break\n                target_boxes.append(self.classifier_target_box + torch.Tensor([T.shift[1], T.shift[0], 0, 0]))\n        else:\n            target_boxes.append(self.classifier_target_box + torch.Tensor([self.transforms[0].shift[1], self.transforms[0].shift[0], 0, 0]))\n        target_boxes = torch.cat(target_boxes.view(1,4), 0).to(self.params.device)\n\n        # Get iou features\n        iou_backbone_feat = self.get_iou_backbone_features(backbone_feat)\n\n        # Remove other augmentations such as rotation\n        iou_backbone_feat = TensorList([x[:target_boxes.shape[0],...] for x in iou_backbone_feat])\n\n        # Get modulation vector\n        self.iou_modulation = self.get_iou_modulation(iou_backbone_feat, target_boxes)\n        if torch.is_tensor(self.iou_modulation[0]):\n            self.iou_modulation = TensorList([x.detach().mean(0) for x in self.iou_modulation])\n\n\n    def init_classifier(self, init_backbone_feat):\n        # Get classification features\n        x = self.get_classification_features(init_backbone_feat)\n\n        # Overwrite some parameters in the classifier. (These are not generally changed)\n        self._overwrite_classifier_params(feature_dim=x.shape[-3])\n\n        # Add the dropout augmentation here, since it requires extraction of the classification features\n        if 'dropout' in self.params.augmentation and self.params.get('use_augmentation', True):\n            num, prob = self.params.augmentation['dropout']\n            self.transforms.extend(self.transforms[:1]*num)\n            x = torch.cat([x, F.dropout2d(x[0:1,...].expand(num,-1,-1,-1), p=prob, training=True)])\n\n        # Set feature size and other related sizes\n        self.feature_sz = torch.Tensor(list(x.shape[-2:]))\n        ksz = self.net.classifier.filter_size\n        self.kernel_size = torch.Tensor([ksz, ksz] if isinstance(ksz, (int, float)) else ksz)\n        self.output_sz = self.feature_sz + (self.kernel_size + 1)%2\n\n        # Construct output window\n        self.output_window = None\n        if self.params.get('window_output', False):\n            if self.params.get('use_clipped_window', False):\n                self.output_window = dcf.hann2d_clipped(self.output_sz.long(), (self.output_sz*self.params.effective_search_area / self.params.search_area_scale).long(), centered=True).to(self.params.device)\n            else:\n                self.output_window = dcf.hann2d(self.output_sz.long(), centered=True).to(self.params.device)\n            self.output_window = self.output_window.squeeze(0)\n\n        # Get target boxes for the different augmentations\n        target_boxes = self.init_target_boxes()\n\n        # Set number of iterations\n        plot_loss = self.params.debug > 0\n        num_iter = self.params.get('net_opt_iter', None)\n\n        # Get target filter by running the discriminative model prediction module\n        with torch.no_grad():\n            self.target_filter, _, losses = self.net.classifier.get_filter(x, target_boxes, num_iter=num_iter,\n                                                                           
compute_losses=plot_loss)\n\n        # Init memory\n        if self.params.get('update_classifier', True):\n            self.init_memory(TensorList([x]))\n\n        if plot_loss:\n            if isinstance(losses, dict):\n                losses = losses['train']\n            self.losses = torch.cat(losses)\n            if self.visdom is not None:\n                self.visdom.register((self.losses, torch.arange(self.losses.numel())), 'lineplot', 3, 'Training Loss' + self.id_str)\n            elif self.params.debug >= 3:\n                plot_graph(self.losses, 10, title='Training Loss' + self.id_str)\n\n    def _overwrite_classifier_params(self, feature_dim):\n        # Overwrite some parameters in the classifier. (These are not generally changed)\n        pred_module = getattr(self.net.classifier.filter_optimizer, 'score_predictor', self.net.classifier.filter_optimizer)\n        if self.params.get('label_threshold', None) is not None:\n            self.net.classifier.filter_optimizer.label_threshold = self.params.label_threshold\n        if self.params.get('label_shrink', None) is not None:\n            self.net.classifier.filter_optimizer.label_shrink = self.params.label_shrink\n        if self.params.get('softmax_reg', None) is not None:\n            self.net.classifier.filter_optimizer.softmax_reg = self.params.softmax_reg\n        if self.params.get('filter_reg', None) is not None:\n            pred_module.filter_reg[0] = self.params.filter_reg\n            pred_module.min_filter_reg = self.params.filter_reg\n        if self.params.get('filter_init_zero', False):\n            self.net.classifier.filter_initializer = FilterInitializerZero(self.net.classifier.filter_size, feature_dim)\n\n\n    def update_classifier(self, train_x, target_box, learning_rate=None, scores=None):\n        # Set flags and learning rate\n        hard_negative_flag = learning_rate is not None\n        if learning_rate is None:\n            learning_rate = self.params.learning_rate\n\n        # Update the tracker memory\n        if hard_negative_flag or self.frame_num % self.params.get('train_sample_interval', 1) == 0:\n            self.update_memory(TensorList([train_x]), target_box, learning_rate)\n\n        # Decide the number of iterations to run\n        num_iter = 0\n        low_score_th = self.params.get('low_score_opt_threshold', None)\n        if hard_negative_flag:\n            num_iter = self.params.get('net_opt_hn_iter', None)\n        elif low_score_th is not None and low_score_th > scores.max().item():\n            num_iter = self.params.get('net_opt_low_iter', None)\n        elif (self.frame_num - 1) % self.params.train_skipping == 0:\n            num_iter = self.params.get('net_opt_update_iter', None)\n\n        plot_loss = self.params.debug > 0\n\n        if num_iter > 0:\n            # Get inputs for the DiMP filter optimizer module\n            samples = self.training_samples[0][:self.num_stored_samples[0],...]\n            target_boxes = self.target_boxes[:self.num_stored_samples[0],:].clone()\n            sample_weights = self.sample_weights[0][:self.num_stored_samples[0]]\n\n            # Run the filter optimizer module\n            with torch.no_grad():\n                self.target_filter, _, losses = self.net.classifier.filter_optimizer(self.target_filter,\n                                                                                     num_iter=num_iter, feat=samples,\n                                                                                     bb=target_boxes,\n               
                                                                      sample_weight=sample_weights,\n                                                                                     compute_losses=plot_loss)\n\n            if plot_loss:\n                if isinstance(losses, dict):\n                    losses = losses['train']\n                self.losses = torch.cat((self.losses, torch.cat(losses)))\n                if self.visdom is not None:\n                    self.visdom.register((self.losses, torch.arange(self.losses.numel())), 'lineplot', 3, 'Training Loss' + self.id_str)\n                elif self.params.debug >= 3:\n                    plot_graph(self.losses, 10, title='Training Loss' + self.id_str)\n\n    def refine_target_box(self, backbone_feat, sample_pos, sample_scale, scale_ind, update_scale = True):\n        \"\"\"Run the ATOM IoUNet to refine the target bounding box.\"\"\"\n\n        if hasattr(self.net.bb_regressor, 'predict_bb'):\n            return self.direct_box_regression(backbone_feat, sample_pos, sample_scale, scale_ind, update_scale)\n\n        # Initial box for refinement\n        init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos, sample_scale)\n\n        # Extract features from the relevant scale\n        iou_features = self.get_iou_features(backbone_feat)\n        iou_features = TensorList([x[scale_ind:scale_ind+1,...] for x in iou_features])\n\n        # Generate random initial boxes\n        init_boxes = init_box.view(1,4).clone()\n        if self.params.num_init_random_boxes > 0:\n            square_box_sz = init_box[2:].prod().sqrt()\n            rand_factor = square_box_sz * torch.cat([self.params.box_jitter_pos * torch.ones(2), self.params.box_jitter_sz * torch.ones(2)])\n\n            minimal_edge_size = init_box[2:].min()/3\n            rand_bb = (torch.rand(self.params.num_init_random_boxes, 4) - 0.5) * rand_factor\n            new_sz = (init_box[2:] + rand_bb[:,2:]).clamp(minimal_edge_size)\n            new_center = (init_box[:2] + init_box[2:]/2) + rand_bb[:,:2]\n            init_boxes = torch.cat([new_center - new_sz/2, new_sz], 1)\n            init_boxes = torch.cat([init_box.view(1,4), init_boxes])\n\n        # Optimize the boxes\n        output_boxes, output_iou = self.optimize_boxes(iou_features, init_boxes)\n\n        # Remove weird boxes\n        output_boxes[:, 2:].clamp_(1)\n        aspect_ratio = output_boxes[:,2] / output_boxes[:,3]\n        keep_ind = (aspect_ratio < self.params.maximal_aspect_ratio) * (aspect_ratio > 1/self.params.maximal_aspect_ratio)\n        output_boxes = output_boxes[keep_ind,:]\n        output_iou = output_iou[keep_ind]\n\n        # If no box found\n        if output_boxes.shape[0] == 0:\n            return\n\n        # Predict box\n        k = self.params.get('iounet_k', 5)\n        topk = min(k, output_boxes.shape[0])\n        _, inds = torch.topk(output_iou, topk)\n        predicted_box = output_boxes[inds, :].mean(0)\n        predicted_iou = output_iou.view(-1, 1)[inds, :].mean(0)\n\n        # Get new position and size\n        new_pos = predicted_box[:2] + predicted_box[2:] / 2\n        new_pos = (new_pos.flip((0,)) - (self.img_sample_sz - 1) / 2) * sample_scale + sample_pos\n        new_target_sz = predicted_box[2:].flip((0,)) * sample_scale\n        new_scale = torch.sqrt(new_target_sz.prod() / self.base_target_sz.prod())\n\n        self.pos_iounet = new_pos.clone()\n\n        if self.params.get('use_iounet_pos_for_learning', True):\n            self.pos = new_pos.clone()\n\n        
self.target_sz = new_target_sz\n\n        if update_scale:\n            self.target_scale = new_scale\n\n        # self.visualize_iou_pred(iou_features, predicted_box)\n\n\n    def optimize_boxes(self, iou_features, init_boxes):\n        box_refinement_space = self.params.get('box_refinement_space', 'default')\n        if box_refinement_space == 'default':\n            return self.optimize_boxes_default(iou_features, init_boxes)\n        if box_refinement_space == 'relative':\n            return self.optimize_boxes_relative(iou_features, init_boxes)\n        raise ValueError('Unknown box_refinement_space {}'.format(box_refinement_space))\n\n\n    def optimize_boxes_default(self, iou_features, init_boxes):\n        \"\"\"Optimize iounet boxes with the default parametrization\"\"\"\n        output_boxes = init_boxes.view(1, -1, 4).to(self.params.device)\n        step_length = self.params.box_refinement_step_length\n        if isinstance(step_length, (tuple, list)):\n            step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]]).to(self.params.device).view(1,1,4)\n\n        for i_ in range(self.params.box_refinement_iter):\n            # forward pass\n            bb_init = output_boxes.clone().detach()\n            bb_init.requires_grad = True\n\n            outputs = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, bb_init)\n\n            if isinstance(outputs, (list, tuple)):\n                outputs = outputs[0]\n\n            outputs.backward(gradient = torch.ones_like(outputs))\n\n            # Update proposal\n            output_boxes = bb_init + step_length * bb_init.grad * bb_init[:, :, 2:].repeat(1, 1, 2)\n            output_boxes.detach_()\n\n            step_length *= self.params.box_refinement_step_decay\n\n        return output_boxes.view(-1,4).cpu(), outputs.detach().view(-1).cpu()\n\n\n    def optimize_boxes_relative(self, iou_features, init_boxes):\n        \"\"\"Optimize iounet boxes with the relative parametrization used in PrDiMP\"\"\"\n        output_boxes = init_boxes.view(1, -1, 4).to(self.params.device)\n        step_length = self.params.box_refinement_step_length\n        if isinstance(step_length, (tuple, list)):\n            step_length = torch.Tensor([step_length[0], step_length[0], step_length[1], step_length[1]]).to(self.params.device).view(1,1,4)\n\n        sz_norm = output_boxes[:,:1,2:].clone()\n        output_boxes_rel = bbutils.rect_to_rel(output_boxes, sz_norm)\n        for i_ in range(self.params.box_refinement_iter):\n            # forward pass\n            bb_init_rel = output_boxes_rel.clone().detach()\n            bb_init_rel.requires_grad = True\n\n            bb_init = bbutils.rel_to_rect(bb_init_rel, sz_norm)\n            outputs = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, bb_init)\n\n            if isinstance(outputs, (list, tuple)):\n                outputs = outputs[0]\n\n            outputs.backward(gradient = torch.ones_like(outputs))\n\n            # Update proposal\n            output_boxes_rel = bb_init_rel + step_length * bb_init_rel.grad\n            output_boxes_rel.detach_()\n\n            step_length *= self.params.box_refinement_step_decay\n\n        #     for s in outputs.view(-1):\n        #         print('{:.2f}  '.format(s.item()), end='')\n        #     print('')\n        # print('')\n\n        output_boxes = bbutils.rel_to_rect(output_boxes_rel, sz_norm)\n\n        return output_boxes.view(-1,4).cpu(), outputs.detach().view(-1).cpu()\n\n   
 def direct_box_regression(self, backbone_feat, sample_pos, sample_scale, scale_ind, update_scale = True):\n        \"\"\"Implementation of direct bounding box regression.\"\"\"\n\n        # Initial box for refinement\n        init_box = self.get_iounet_box(self.pos, self.target_sz, sample_pos, sample_scale)\n\n        # Extract features from the relevant scale\n        iou_features = self.get_iou_features(backbone_feat)\n        iou_features = TensorList([x[scale_ind:scale_ind+1,...] for x in iou_features])\n\n        # Generate random initial boxes\n        init_boxes = init_box.view(1, 1, 4).clone().to(self.params.device)\n\n        # Optimize the boxes\n        output_boxes = self.net.bb_regressor.predict_bb(self.iou_modulation, iou_features, init_boxes).view(-1,4).cpu()\n\n        # Remove weird boxes\n        output_boxes[:, 2:].clamp_(1)\n\n        predicted_box = output_boxes[0, :]\n\n        # Get new position and size\n        new_pos = predicted_box[:2] + predicted_box[2:] / 2\n        new_pos = (new_pos.flip((0,)) - (self.img_sample_sz - 1) / 2) * sample_scale + sample_pos\n        new_target_sz = predicted_box[2:].flip((0,)) * sample_scale\n        new_scale_bbr = torch.sqrt(new_target_sz.prod() / self.base_target_sz.prod())\n        new_scale = new_scale_bbr\n\n        self.pos_iounet = new_pos.clone()\n\n        if self.params.get('use_iounet_pos_for_learning', True):\n            self.pos = new_pos.clone()\n\n        self.target_sz = new_target_sz\n\n        if update_scale:\n            self.target_scale = new_scale\n\n\n    def visualize_iou_pred(self, iou_features, center_box):\n        center_box = center_box.view(1,1,4)\n        sz_norm = center_box[...,2:].clone()\n        center_box_rel = bbutils.rect_to_rel(center_box, sz_norm)\n\n        pos_dist = 1.0\n        sz_dist = math.log(3.0)\n        pos_step = 0.01\n        sz_step = 0.01\n\n        pos_scale = torch.arange(-pos_dist, pos_dist+pos_step, step=pos_step)\n        sz_scale = torch.arange(-sz_dist, sz_dist+sz_step, step=sz_step)\n\n        bbx = torch.zeros(1, pos_scale.numel(), 4)\n        bbx[0,:,0] = pos_scale.clone()\n        bby = torch.zeros(pos_scale.numel(), 1, 4)\n        bby[:,0,1] = pos_scale.clone()\n        bbw = torch.zeros(1, sz_scale.numel(), 4)\n        bbw[0,:,2] = sz_scale.clone()\n        bbh = torch.zeros(sz_scale.numel(), 1, 4)\n        bbh[:,0,3] = sz_scale.clone()\n\n        pos_boxes = bbutils.rel_to_rect((center_box_rel + bbx) + bby, sz_norm).view(1,-1,4).to(self.params.device)\n        sz_boxes = bbutils.rel_to_rect((center_box_rel + bbw) + bbh, sz_norm).view(1,-1,4).to(self.params.device)\n\n        pos_scores = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, pos_boxes).exp()\n        sz_scores = self.net.bb_regressor.predict_iou(self.iou_modulation, iou_features, sz_boxes).exp()\n\n        show_tensor(pos_scores.view(pos_scale.numel(),-1), title='Position scores', fig_num=21)\n        show_tensor(sz_scores.view(sz_scale.numel(),-1), title='Size scores', fig_num=22)\n\n\n    def visdom_draw_tracking(self, image, box, segmentation=None):\n        if hasattr(self, 'search_area_box'):\n            self.visdom.register((image, box, self.search_area_box), 'Tracking', 1, 'Tracking')\n        else:\n            self.visdom.register((image, box), 'Tracking', 1, 'Tracking')"
  },
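A note on the DiMP code above: positions are stored as (y, x) centers and sizes as (h, w). The following standalone sketch (not part of the repo; all values are made up for illustration) replays the arithmetic of get_sample_location(), which maps the (y1, x1, y2, x2) corners of an extracted patch back to a patch center and a scale factor relative to the network input size:

import torch

img_sample_sz = torch.Tensor([352., 352.])               # network input size (h, w), assumed
sample_coord = torch.Tensor([[100., 60., 804., 764.]])   # patch corners (y1, x1, y2, x2), assumed

# Same arithmetic as DiMP.get_sample_location() above
sample_pos = 0.5 * (sample_coord[:, :2] + sample_coord[:, 2:] - 1)
sample_scales = ((sample_coord[:, 2:] - sample_coord[:, :2]) / img_sample_sz).prod(dim=1).sqrt()

print(sample_pos)     # tensor([[451.5000, 411.5000]]) -> patch center (y, x) in image coordinates
print(sample_scales)  # tensor([2.]) -> the patch covers 2x the network input size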
  {
    "path": "external/AR/pytracking/tracker/eco/__init__.py",
    "content": "from .eco import ECO\n\ndef get_tracker_class():\n    return ECO"
  },
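Each tracker package exposes a get_tracker_class() hook like the one above. A minimal sketch, assuming the pytracking package layout shown in this listing (load_tracker_class is a hypothetical helper, not a repo function), of how a tracker class can be resolved from its module name:

import importlib

def load_tracker_class(name):
    # e.g. 'eco' -> pytracking.tracker.eco -> get_tracker_class() -> ECO
    module = importlib.import_module('pytracking.tracker.{}'.format(name))
    return module.get_tracker_class()

# ECO = load_tracker_class('eco')  # would return the ECO class defined in eco.py below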
  {
    "path": "external/AR/pytracking/tracker/eco/eco.py",
    "content": "from pytracking.tracker.base import BaseTracker\nimport torch\nimport torch.nn.functional as F\nimport math\nfrom pytracking import complex, dcf, fourier, TensorList\nfrom pytracking.libs.tensorlist import tensor_operation\nfrom pytracking.features.preprocessing import numpy_to_torch\nfrom pytracking.utils.plotting import show_tensor\nfrom pytracking.libs.optimization import GaussNewtonCG\nfrom .optim import FilterOptim, FactorizedConvProblem\nfrom pytracking.features import augmentation\n\n\n\nclass ECO(BaseTracker):\n\n    multiobj_mode = 'parallel'\n\n    def initialize_features(self):\n        if not getattr(self, 'features_initialized', False):\n            self.params.features.initialize()\n        self.features_initialized = True\n\n\n    def initialize(self, image, info: dict) -> dict:\n        state = info['init_bbox']\n\n        # Initialize some stuff\n        self.frame_num = 1\n        if not self.params.has('device'):\n            self.params.device = 'cuda' if self.params.use_gpu else 'cpu'\n\n        # Initialize features\n        self.initialize_features()\n\n        # Chack if image is color\n        self.params.features.set_is_color(image.shape[2] == 3)\n\n        # Get feature specific params\n        self.fparams = self.params.features.get_fparams('feature_params')\n\n        # Get position and size\n        self.pos = torch.Tensor([state[1] + (state[3] - 1)/2, state[0] + (state[2] - 1)/2])\n        self.target_sz = torch.Tensor([state[3], state[2]])\n\n        # Set search area\n        self.target_scale = 1.0\n        search_area = torch.prod(self.target_sz * self.params.search_area_scale).item()\n        if search_area > self.params.max_image_sample_size:\n            self.target_scale =  math.sqrt(search_area / self.params.max_image_sample_size)\n        elif search_area < self.params.min_image_sample_size:\n            self.target_scale =  math.sqrt(search_area / self.params.min_image_sample_size)\n\n        # Target size in base scale\n        self.base_target_sz = self.target_sz / self.target_scale\n\n        # Use odd square search area and set sizes\n        feat_max_stride = max(self.params.features.stride())\n        self.img_sample_sz = torch.round(torch.sqrt(torch.prod(self.base_target_sz * self.params.search_area_scale))) * torch.ones(2)\n        self.img_sample_sz += feat_max_stride - self.img_sample_sz % (2 * feat_max_stride)\n\n        # Set other sizes (corresponds to ECO code)\n        self.img_support_sz = self.img_sample_sz\n        self.feature_sz = self.params.features.size(self.img_sample_sz)\n        self.filter_sz = self.feature_sz + (self.feature_sz + 1) % 2\n        self.output_sz = self.params.score_upsample_factor * self.img_support_sz    # Interpolated size of the output\n        self.compressed_dim = self.fparams.attribute('compressed_dim')\n\n        # Number of filters\n        self.num_filters = len(self.filter_sz)\n\n        # Get window function\n        self.window = TensorList([dcf.hann2d(sz).to(self.params.device) for sz in self.feature_sz])\n\n        # Get interpolation function\n        self.interp_fs = TensorList([dcf.get_interp_fourier(sz, self.params.interpolation_method,\n                                                self.params.interpolation_bicubic_a, self.params.interpolation_centering,\n                                                self.params.interpolation_windowing, self.params.device) for sz in self.filter_sz])\n\n        # Get regularization filter\n        self.reg_filter = 
TensorList([dcf.get_reg_filter(self.img_support_sz, self.base_target_sz, fparams).to(self.params.device)\n                                      for fparams in self.fparams])\n        self.reg_energy = self.reg_filter.view(-1) @ self.reg_filter.view(-1)\n\n        # Get label function\n        output_sigma_factor = self.fparams.attribute('output_sigma_factor')\n        sigma = (self.filter_sz / self.img_support_sz) * torch.sqrt(self.base_target_sz.prod()) * output_sigma_factor\n        self.yf = TensorList([dcf.label_function(sz, sig).to(self.params.device) for sz, sig in zip(self.filter_sz, sigma)])\n\n        # Optimization options\n        self.params.precond_learning_rate = self.fparams.attribute('learning_rate')\n        if self.params.CG_forgetting_rate is None or max(self.params.precond_learning_rate) >= 1:\n            self.params.direction_forget_factor = 0\n        else:\n            self.params.direction_forget_factor = (1 - max(self.params.precond_learning_rate))**self.params.CG_forgetting_rate\n\n\n        # Convert image\n        im = numpy_to_torch(image)\n\n        # Setup bounds\n        self.image_sz = torch.Tensor([im.shape[2], im.shape[3]])\n        self.min_scale_factor = torch.max(10 / self.base_target_sz)\n        self.max_scale_factor = torch.min(self.image_sz / self.base_target_sz)\n\n        # Extract and transform sample\n        x = self.generate_init_samples(im)\n\n        # Initialize projection matrix\n        x_mat = TensorList([e.permute(1,0,2,3).reshape(e.shape[1], -1).clone() for e in x])\n        x_mat -= x_mat.mean(dim=1, keepdim=True)\n        cov_x = x_mat @ x_mat.t()\n        self.projection_matrix = TensorList([torch.svd(C)[0][:,:cdim].clone() for C, cdim in zip(cov_x, self.compressed_dim)])\n\n        # Transform to get the training sample\n        train_xf = self.preprocess_sample(x)\n\n        # Shift the samples back\n        if 'shift' in self.params.augmentation:\n            for xf in train_xf:\n                if xf.shape[0] == 1:\n                    continue\n                for i, shift in enumerate(self.params.augmentation['shift']):\n                    shift_samp = 2 * math.pi * torch.Tensor(shift) / self.img_support_sz\n                    xf[1+i:2+i,...] 
= fourier.shift_fs(xf[1+i:2+i,...], shift=shift_samp)\n\n        # Shift sample\n        shift_samp = 2*math.pi * (self.pos - self.pos.round()) / (self.target_scale * self.img_support_sz)\n        train_xf = fourier.shift_fs(train_xf, shift=shift_samp)\n\n        # Initialize first-frame training samples\n        num_init_samples = train_xf.size(0)\n        self.init_sample_weights = TensorList([xf.new_ones(1) / xf.shape[0] for xf in train_xf])\n        self.init_training_samples = train_xf.permute(2, 3, 0, 1, 4)\n\n\n        # Sample counters and weights\n        self.num_stored_samples = num_init_samples\n        self.previous_replace_ind = [None]*len(self.num_stored_samples)\n        self.sample_weights = TensorList([xf.new_zeros(self.params.sample_memory_size) for xf in train_xf])\n        for sw, init_sw, num in zip(self.sample_weights, self.init_sample_weights, num_init_samples):\n            sw[:num] = init_sw\n\n        # Initialize memory\n        self.training_samples = TensorList(\n            [xf.new_zeros(xf.shape[2], xf.shape[3], self.params.sample_memory_size, cdim, 2) for xf, cdim in zip(train_xf, self.compressed_dim)])\n\n        # Initialize filter\n        self.filter = TensorList(\n            [xf.new_zeros(1, cdim, xf.shape[2], xf.shape[3], 2) for xf, cdim in zip(train_xf, self.compressed_dim)])\n\n        # Do joint optimization\n        self.joint_problem = FactorizedConvProblem(self.init_training_samples, self.yf, self.reg_filter, self.projection_matrix, self.params, self.init_sample_weights)\n        joint_var = self.filter.concat(self.projection_matrix)\n        self.joint_optimizer = GaussNewtonCG(self.joint_problem, joint_var, debug=(self.params.debug>=1), visdom=self.visdom)\n\n        if self.params.update_projection_matrix:\n            self.joint_optimizer.run(self.params.init_CG_iter // self.params.init_GN_iter, self.params.init_GN_iter)\n\n        # Re-project samples with the new projection matrix\n        compressed_samples = complex.mtimes(self.init_training_samples, self.projection_matrix)\n        for train_samp, init_samp in zip(self.training_samples, compressed_samples):\n            train_samp[:,:,:init_samp.shape[2],:,:] = init_samp\n\n        # Initialize optimizer\n        self.filter_optimizer = FilterOptim(self.params, self.reg_energy)\n        self.filter_optimizer.register(self.filter, self.training_samples, self.yf, self.sample_weights, self.reg_filter)\n        self.filter_optimizer.sample_energy = self.joint_problem.sample_energy\n        self.filter_optimizer.residuals = self.joint_optimizer.residuals.clone()\n\n        if not self.params.update_projection_matrix:\n            self.filter_optimizer.run(self.params.init_CG_iter)\n\n        # Post optimization\n        self.filter_optimizer.run(self.params.post_init_CG_iter)\n\n        self.symmetrize_filter()\n\n\n\n    def track(self, image, info: dict = None) -> dict:\n        self.debug_info = {}\n\n        self.frame_num += 1\n        self.debug_info['frame_num'] = self.frame_num\n\n        # Convert image\n        im = numpy_to_torch(image)\n\n        # ------- LOCALIZATION ------- #\n\n        # Get sample\n        sample_pos = self.pos.round()\n        sample_scales = self.target_scale * self.params.scale_factors\n        test_xf = self.extract_fourier_sample(im, self.pos, sample_scales, self.img_sample_sz)\n\n        # Compute scores\n        sf = self.apply_filter(test_xf)\n        translation_vec, scale_ind, s = self.localize_target(sf)\n        scale_change_factor = 
self.params.scale_factors[scale_ind]\n\n        # Update position and scale\n        self.update_state(sample_pos + translation_vec, self.target_scale * scale_change_factor)\n\n        score_map = s[scale_ind, ...]\n        max_score = torch.max(score_map).item()\n        self.debug_info['max_score'] = max_score\n\n        if self.visdom is not None:\n            self.visdom.register(score_map, 'heatmap', 2, 'Score Map')\n            self.visdom.register(self.debug_info, 'info_dict', 1, 'Status')\n        elif self.params.debug >= 2:\n            show_tensor(score_map, 5, title='Max score = {:.2f}'.format(max_score))\n\n        # if self.params.debug >= 3:\n        #     for i, hf in enumerate(self.filter):\n        #         show_tensor(fourier.sample_fs(hf).abs().mean(1), 6+i)\n\n\n        # ------- UPDATE ------- #\n\n        # Get train sample\n        train_xf = TensorList([xf[scale_ind:scale_ind+1, ...] for xf in test_xf])\n\n        # Shift the sample\n        shift_samp = 2*math.pi * (self.pos - sample_pos) / (sample_scales[scale_ind] * self.img_support_sz)\n        train_xf = fourier.shift_fs(train_xf, shift=shift_samp)\n\n        # Update memory\n        self.update_memory(train_xf)\n\n        # Train filter\n        if self.frame_num % self.params.train_skipping == 1:\n            self.filter_optimizer.run(self.params.CG_iter, train_xf)\n            self.symmetrize_filter()\n\n        # Return new state\n        new_state = torch.cat((self.pos[[1,0]] - (self.target_sz[[1,0]]-1)/2, self.target_sz[[1,0]]))\n\n        out = {'target_bbox': new_state.tolist()}\n        return out\n\n\n    def apply_filter(self, sample_xf: TensorList) -> torch.Tensor:\n        return complex.mult(self.filter, sample_xf).sum(1, keepdim=True)\n\n    def localize_target(self, sf: TensorList):\n        if self.params.score_fusion_strategy == 'sum':\n            scores = fourier.sample_fs(fourier.sum_fs(sf), self.output_sz)\n        elif self.params.score_fusion_strategy == 'weightedsum':\n            weight = self.fparams.attribute('translation_weight')\n            scores = fourier.sample_fs(fourier.sum_fs(weight * sf), self.output_sz)\n        elif self.params.score_fusion_strategy == 'transcale':\n            alpha = self.fparams.attribute('scale_weight')\n            beta = self.fparams.attribute('translation_weight')\n            sample_sz = torch.round(self.output_sz.view(1,-1) * self.params.scale_factors.view(-1,1))\n            scores = 0\n            for sfe, a, b in zip(sf, alpha, beta):\n                sfe = fourier.shift_fs(sfe, math.pi*torch.ones(2))\n                scores_scales = []\n                for sind, sz in enumerate(sample_sz):\n                    pd = (self.output_sz-sz)/2\n                    scores_scales.append(F.pad(fourier.sample_fs(sfe[sind:sind+1,...], sz),\n                                        (math.floor(pd[1].item()), math.ceil(pd[1].item()),\n                                         math.floor(pd[0].item()), math.ceil(pd[0].item()))))\n                scores_cat = torch.cat(scores_scales)\n                scores = scores + (b - a) * scores_cat.mean(dim=0, keepdim=True) + a * scores_cat\n        else:\n            raise ValueError('Unknown score fusion strategy.')\n\n        # Get maximum\n        max_score, max_disp = dcf.max2d(scores)\n        _, scale_ind = torch.max(max_score, dim=0)\n        max_disp = max_disp.float().cpu()\n\n        # Convert to displacements in the base scale\n        if self.params.score_fusion_strategy in ['sum', 'weightedsum']:\n       
     disp = (max_disp + self.output_sz / 2) % self.output_sz - self.output_sz / 2\n        elif self.params.score_fusion_strategy == 'transcale':\n            disp = max_disp - self.output_sz / 2\n\n        # Compute translation vector and scale change factor\n        translation_vec = disp[scale_ind, ...].view(-1) * (self.img_support_sz / self.output_sz) * self.target_scale\n        if self.params.score_fusion_strategy in ['sum', 'weightedsum']:\n            translation_vec *= self.params.scale_factors[scale_ind]\n\n        return translation_vec, scale_ind, scores\n\n\n    def extract_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor):\n        return self.params.features.extract(im, pos, scales, sz)[0]\n\n    def extract_fourier_sample(self, im: torch.Tensor, pos: torch.Tensor, scales, sz: torch.Tensor) -> TensorList:\n        x = self.extract_sample(im, pos, scales, sz)\n        return self.preprocess_sample(self.project_sample(x))\n\n    def preprocess_sample(self, x: TensorList) -> TensorList:\n        x *= self.window\n        sample_xf = fourier.cfft2(x)\n        return TensorList([dcf.interpolate_dft(xf, bf) for xf, bf in zip(sample_xf, self.interp_fs)])\n\n    def project_sample(self, x: TensorList):\n        @tensor_operation\n        def _project_sample(x: torch.Tensor, P: torch.Tensor):\n            if P is None:\n                return x\n            return torch.matmul(x.permute(2, 3, 0, 1), P).permute(2, 3, 0, 1)\n\n        return _project_sample(x, self.projection_matrix)\n\n    def generate_init_samples(self, im: torch.Tensor) -> TensorList:\n        # Do data augmentation\n        transforms = [augmentation.Identity()]\n        if 'shift' in self.params.augmentation:\n            transforms.extend([augmentation.Translation(shift) for shift in self.params.augmentation['shift']])\n        if 'fliplr' in self.params.augmentation and self.params.augmentation['fliplr']:\n            transforms.append(augmentation.FlipHorizontal())\n        if 'rotate' in self.params.augmentation:\n            transforms.extend([augmentation.Rotate(angle) for angle in self.params.augmentation['rotate']])\n        if 'blur' in self.params.augmentation:\n            transforms.extend([augmentation.Blur(sigma) for sigma in self.params.augmentation['blur']])\n\n        init_samples = self.params.features.extract_transformed(im, self.pos, self.target_scale, self.img_sample_sz, transforms)\n\n        # Remove augmented samples for features where augmentation is disabled\n        for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):\n            if not use_aug:\n                init_samples[i] = init_samples[i][0:1, ...]\n\n        if 'dropout' in self.params.augmentation:\n            num, prob = self.params.augmentation['dropout']\n            for i, use_aug in enumerate(self.fparams.attribute('use_augmentation')):\n                if use_aug:\n                    init_samples[i] = torch.cat([init_samples[i], F.dropout2d(init_samples[i][0:1,...].expand(num,-1,-1,-1), p=prob, training=True)])\n\n        return init_samples\n\n\n    def update_memory(self, sample_xf: TensorList):\n        # Update weights and get index to replace\n        replace_ind = self.update_sample_weights()\n        for train_samp, xf, ind in zip(self.training_samples, sample_xf, replace_ind):\n            train_samp[:,:,ind:ind+1,:,:] = xf.permute(2, 3, 0, 1, 4)\n\n\n    def update_sample_weights(self):\n        replace_ind = []\n        for sw, prev_ind, num_samp, fparams in 
zip(self.sample_weights, self.previous_replace_ind, self.num_stored_samples, self.fparams):\n            if num_samp == 0 or fparams.learning_rate == 1:\n                sw[:] = 0\n                sw[0] = 1\n                r_ind = 0\n            else:\n                # Get index to replace\n                _, r_ind = torch.min(sw, 0)\n                r_ind = r_ind.item()\n\n                # Update weights\n                if prev_ind is None:\n                    sw /= 1 - fparams.learning_rate\n                    sw[r_ind] = fparams.learning_rate\n                else:\n                    sw[r_ind] = sw[prev_ind] / (1 - fparams.learning_rate)\n\n            sw /= sw.sum()\n            replace_ind.append(r_ind)\n\n        self.previous_replace_ind = replace_ind.copy()\n        self.num_stored_samples += 1\n        return replace_ind\n\n    def update_state(self, new_pos, new_scale):\n        # Update scale\n        self.target_scale = new_scale.clamp(self.min_scale_factor, self.max_scale_factor)\n        self.target_sz = self.base_target_sz * self.target_scale\n\n        # Update pos\n        inside_ratio = 0.2\n        inside_offset = (inside_ratio - 0.5) * self.target_sz\n        self.pos = torch.max(torch.min(new_pos, self.image_sz - inside_offset), inside_offset)\n\n    def symmetrize_filter(self):\n        for hf in self.filter:\n            hf[:,:,:,0,:] /= 2\n            hf[:,:,:,0,:] += complex.conj(hf[:,:,:,0,:].flip((2,)))"
  },
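Both ECO above and DiMP earlier keep a memory of training samples with exponentially decaying weights. A simplified, self-contained version of the rule in update_sample_weights() (single feature block, no init-sample protection; update_weights is illustrative, not a repo function):

import torch

def update_weights(sw, prev_ind, num_stored, lr=0.0075):
    # Pick the slot to replace and renormalize the weight vector
    if num_stored == 0 or lr == 1:
        sw[:] = 0
        sw[0] = 1
        r_ind = 0
    else:
        if num_stored < sw.shape[0]:
            r_ind = num_stored                   # memory not full: append
        else:
            r_ind = torch.min(sw, 0)[1].item()   # full: replace the lowest-weight sample
        if prev_ind is None:
            sw /= 1 - lr                         # decay all old weights
            sw[r_ind] = lr                       # the new sample enters with weight ~lr
        else:
            sw[r_ind] = sw[prev_ind] / (1 - lr)  # each new sample slightly outweighs the last
    sw /= sw.sum()                               # keep the weights normalized
    return r_ind

sw = torch.zeros(5)
prev = None
for frame in range(8):
    prev = update_weights(sw, prev, min(frame, 5))
print(sw)  # recent samples carry the largest weights; the oldest get replaced first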
  {
    "path": "external/AR/pytracking/tracker/eco/optim.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom pytracking import complex, optimization, fourier, TensorList\nfrom pytracking.utils.plotting import plot_graph\nimport math\n\n\nclass FactorizedConvProblem(optimization.L2Problem):\n    def __init__(self, training_samples: TensorList, yf:TensorList, reg_filter: torch.Tensor, init_proj_mat: TensorList, params, sample_weights: torch.Tensor = None):\n        self.training_samples = training_samples\n        self.yf = complex.complex(yf).permute(2, 3, 0, 1, 4)\n        self.reg_filter = reg_filter\n        self.sample_weights_sqrt = None if sample_weights is None else sample_weights.sqrt()\n        self.params = params\n\n        # Sample energy for preconditioner\n        compressed_samples = complex.mtimes(self.training_samples, init_proj_mat)\n        self.sample_energy = complex.abs_sqr(compressed_samples).mean(dim=2, keepdim=True).permute(2, 3, 0, 1)\n        self.reg_energy = self.reg_filter.view(-1) @ self.reg_filter.view(-1)\n\n        # Projection energy for preconditioner\n        self.proj_energy = 2 * fourier.inner_prod_fs(yf, yf) / self.training_samples.size(3)\n\n        # Filter part of preconditioner\n        self.diag_M = (1 - self.params.precond_reg_param) * (self.params.precond_data_param * self.sample_energy +\n                            (1 - self.params.precond_data_param) * self.sample_energy.mean(1, keepdim=True)) + \\\n                      self.params.precond_reg_param * self.reg_energy\n        self.diag_M.unsqueeze_(-1)\n\n        # Projection matrix part of preconditioner\n        self.diag_M.extend(self.params.precond_proj_param * (self.proj_energy + self.params.projection_reg))\n\n\n    def __call__(self, x: TensorList):\n        \"\"\"\n        Compute residuals\n        :param x: [filters, projection_matrices]\n        :return: [data_terms, filter_regularizations, proj_mat_regularizations]\n        \"\"\"\n        hf = x[:len(x)//2]\n        P = x[len(x)//2:]\n\n        compressed_samples = complex.mtimes(self.training_samples, P)\n        residuals = complex.mtimes(compressed_samples, hf.permute(2, 3, 1, 0, 4))  # (h, w, num_samp, num_filt, 2)\n        residuals = residuals - self.yf\n\n        if self.sample_weights_sqrt is not None:\n            residuals = complex.mult(self.sample_weights_sqrt.view(1, 1, -1, 1), residuals)\n\n\n        # Add spatial regularization\n        for hfe, reg_filter in zip(hf, self.reg_filter):\n            reg_pad1 = min(reg_filter.shape[-2] - 1, hfe.shape[-3] - 1)\n            reg_pad2 = min(reg_filter.shape[-1] - 1, hfe.shape[-2] - 1)\n\n            # Add part needed for convolution\n            if reg_pad2 > 0:\n                hfe_left_padd = complex.conj(hfe[...,1:reg_pad2+1,:].clone().detach().flip((2,3)))\n                hfe_conv = torch.cat([hfe_left_padd, hfe], -2)\n            else:\n                hfe_conv = hfe.clone()\n\n            # Shift data to batch dimension\n            hfe_conv = hfe_conv.permute(0,1,4,2,3).reshape(-1, 1, hfe_conv.shape[-3], hfe_conv.shape[-2])\n\n            # Do first convolution\n            hfe_conv = F.conv2d(hfe_conv, reg_filter, padding=(reg_pad1, reg_pad2))\n\n            residuals.append(hfe_conv)\n\n        # Add regularization for projection matrix\n        residuals.extend(math.sqrt(self.params.projection_reg) * P)\n\n        return residuals\n\n\n    def ip_input(self, a: TensorList, b: TensorList):\n        num = len(a) // 2       # Number of filters\n        a_filter = a[:num]\n        b_filter = b[:num]\n   
     a_P = a[num:]\n        b_P = b[num:]\n\n        # Filter inner product\n        ip_out = fourier.inner_prod_fs(a_filter, b_filter)\n\n        # Add projection matrix part\n        ip_out += a_P.reshape(-1) @ b_P.reshape(-1)\n\n        # Have independent inner products for each filter\n        return ip_out.concat(ip_out.clone())\n\n\n    def ip_output(self, a: TensorList, b: TensorList):\n        num = len(a) // 3       # Number of filters\n        a_data = a[:num].permute(2,3,0,1,4)\n        b_data = b[:num].permute(2,3,0,1,4)\n        a_filt_reg = a[num:2*num]\n        b_filt_reg = b[num:2*num]\n        a_P_reg = a[2*num:]\n        b_P_reg = b[2*num:]\n\n        ip_data = sum(fourier.inner_prod_fs(a_data, b_data))\n        ip_filt_reg = ip_data.new_zeros(1)\n\n        for ar, br, res_data, reg_filter in zip(a_filt_reg, b_filt_reg, a_data, self.reg_filter):\n            reg_pad2 = min(reg_filter.shape[-1] - 1, res_data.shape[-2] - 1)\n            arp = ar.reshape(1, -1, 2, ar.shape[2], ar.shape[3]).permute(0, 1, 3, 4, 2)\n            brp = br.reshape(1, -1, 2, br.shape[2], br.shape[3]).permute(0, 1, 3, 4, 2)\n            ip_filt_reg += fourier.inner_prod_fs(arp[:,:,:,2*reg_pad2:,:], brp[:,:,:,2*reg_pad2:,:])\n\n        ip_P_reg = sum(a_P_reg.view(-1) @ b_P_reg.view(-1))\n\n        return ip_data + ip_filt_reg + ip_P_reg\n\n\n    def M1(self, x: TensorList):\n        return x / self.diag_M\n\n\nclass FilterOptim(optimization.ConjugateGradientBase):\n    def __init__(self, params, reg_energy):\n        super(FilterOptim, self).__init__(params.fletcher_reeves, params.standard_alpha, params.direction_forget_factor, (params.debug >= 3))\n\n        # Parameters\n        self.params = params\n\n        self.reg_energy = reg_energy\n        self.sample_energy = None\n\n        self.residuals = torch.zeros(0)\n\n\n    def register(self, filter, training_samples, yf, sample_weights, reg_filter):\n        self.filter = filter\n        self.training_samples = training_samples    # (h, w, num_samples, num_channels, 2)\n        self.yf = yf\n        self.sample_weights = sample_weights\n        self.reg_filter = reg_filter\n\n\n    def run(self, num_iter, new_xf: TensorList = None):\n        if num_iter == 0:\n            return\n\n        if new_xf is not None:\n            new_sample_energy = complex.abs_sqr(new_xf)\n            if self.sample_energy is None:\n                self.sample_energy = new_sample_energy\n            else:\n                self.sample_energy = (1 - self.params.precond_learning_rate) * self.sample_energy + self.params.precond_learning_rate * new_sample_energy\n\n        # Compute right hand side\n        self.b = complex.mtimes(self.sample_weights.view(1,1,1,-1), self.training_samples).permute(2,3,0,1,4)\n        self.b = complex.mult_conj(self.yf, self.b)\n\n        self.diag_M = (1 - self.params.precond_reg_param) * (self.params.precond_data_param * self.sample_energy +\n                            (1 - self.params.precond_data_param) * self.sample_energy.mean(1, keepdim=True)) + self.params.precond_reg_param * self.reg_energy\n\n        _, res = self.run_CG(num_iter, self.filter)\n\n        if self.debug:\n            self.residuals = torch.cat((self.residuals, res))\n            plot_graph(self.residuals, 9)\n\n\n\n    def A(self, hf: TensorList):\n        # Classify\n        sh = complex.mtimes(self.training_samples, hf.permute(2,3,1,0,4)) # (h, w, num_samp, num_filt, 2)\n        sh = complex.mult(self.sample_weights.view(1,1,-1,1), sh)\n\n        # Multiply with 
transpose\n        hf_out = complex.mtimes(sh.permute(0,1,3,2,4), self.training_samples, conj_b=True).permute(2,3,0,1,4)\n\n        # Add regularization\n        for hfe, hfe_out, reg_filter in zip(hf, hf_out, self.reg_filter):\n            reg_pad1 = min(reg_filter.shape[-2] - 1, hfe.shape[-3] - 1)\n            reg_pad2 = min(reg_filter.shape[-1] - 1, 2*hfe.shape[-2]- 2)\n\n            # Add part needed for convolution\n            if reg_pad2 > 0:\n                hfe_conv = torch.cat([complex.conj(hfe[...,1:reg_pad2+1,:].flip((2,3))), hfe], -2)\n            else:\n                hfe_conv = hfe.clone()\n\n            # Shift data to batch dimension\n            hfe_conv = hfe_conv.permute(0,1,4,2,3).reshape(-1, 1, hfe_conv.shape[-3], hfe_conv.shape[-2])\n\n            # Do first convolution\n            hfe_conv = F.conv2d(hfe_conv, reg_filter, padding=(reg_pad1, reg_pad2))\n\n            # Do second convolution\n            remove_size = min(reg_pad2, hfe.shape[-2]-1)\n            hfe_conv = F.conv2d(hfe_conv[...,remove_size:], reg_filter)\n\n            # Reshape back and add\n            hfe_out += hfe_conv.reshape(hfe.shape[0], hfe.shape[1], 2, hfe.shape[2], hfe.shape[3]).permute(0,1,3,4,2)\n\n        return hf_out\n\n\n    def ip(self, a: torch.Tensor, b: torch.Tensor):\n        return fourier.inner_prod_fs(a, b)\n        \n\n    def M1(self, hf):\n        return complex.div(hf, self.diag_M)\n"
  },
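The optimization module above plugs into pytracking's conjugate-gradient machinery: `FactorizedConvProblem.__call__` stacks the data residuals, the spatial-regularization residuals and the projection-matrix residuals into one `TensorList`, while `M1` divides by the diagonal preconditioner `diag_M`. As a minimal sketch (not part of the repo; toy names and a plain 1-D diagonal system assumed), the same preconditioned-CG pattern looks like this in plain PyTorch:

```
# Minimal sketch of the preconditioned conjugate-gradient pattern that
# FilterOptim inherits from optimization.ConjugateGradientBase: A() applies the
# normal-equations operator, the inner product plays the role of ip(), and the
# division by diag_M plays the role of M1().
import torch

def preconditioned_cg(A, b, diag_M, num_iter=10):
    x = torch.zeros_like(b)
    r = b - A(x)                      # residual
    z = r / diag_M                    # preconditioned residual (the role of M1)
    p = z.clone()
    rz = torch.dot(r, z)
    for _ in range(num_iter):
        Ap = A(p)
        alpha = rz / torch.dot(p, Ap)
        x = x + alpha * p
        r = r - alpha * Ap
        z = r / diag_M
        rz_new = torch.dot(r, z)
        p = z + (rz_new / rz) * p     # Fletcher-Reeves style direction update
        rz = rz_new
    return x

# Toy SPD system: A = diag(d), so diag_M = d is the ideal preconditioner.
d = torch.tensor([1.0, 10.0, 100.0])
b = torch.tensor([1.0, 2.0, 3.0])
x = preconditioned_cg(lambda v: d * v, b, d)
```

With `A = diag(d)` the preconditioner is exact and the solver converges in a single step, which is the intuition behind building `diag_M` from the sample and regularization energies above.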
  {
    "path": "external/AR/pytracking/util_scripts/__init__.py",
    "content": ""
  },
  {
    "path": "external/AR/pytracking/util_scripts/download_results.py",
    "content": "import os\nimport sys\nimport gdown\nimport re\nimport shutil\nimport argparse\nimport tempfile\n\nenv_path = os.path.join(os.path.dirname(__file__), '../..')\nif env_path not in sys.path:\n    sys.path.append(env_path)\n\nfrom pytracking.evaluation.environment import env_settings\n\nresults_link_dict = {\n    \"dimp\": {\n        \"prdimp50_003.zip\": \"1p13j3iwcOCubBi3ms0hLwqnP6-x0J8Mc\",\n        \"prdimp50_002.zip\": \"1PPKgrAepbuyM2kjfzYAozQKTL6AjcQOz\",\n        \"prdimp50_001.zip\": \"17NFBObEDeK6mW4Mk2vN5Ekk1SGbFvxRS\",\n        \"prdimp50_000.zip\": \"1r3Efq7AumML2yGQ_KV4zmf4ATKVE1bo6\",\n        \"prdimp18_004.zip\": \"1DF4ZJQAa4CwvN_OiT4te33AV0kpsO7JM\",\n        \"prdimp18_003.zip\": \"1RgwJAN4TxnzgVgsfvrHIg1OUXD1EBZkO\",\n        \"prdimp18_002.zip\": \"17lMllYhygCqgE81DoHX4BZar3xc3auzM\",\n        \"prdimp18_001.zip\": \"1Yg7DmGYOnn2k0MYtSjjKlGyzO1Uimj4G\",\n        \"prdimp18_000.zip\": \"1DuZJSBJ-23WJBQTOWSAaoPYSbGAJJN2Z\",\n        \"prdimp50_004.zip\": \"1f9bx9-dtx3B5_IvIJhjjJyp-cnXciqLO\",\n        \"dimp50_004.zip\": \"1Lj3p8mYCoIqxzdQXZkWFTw-MA8c6eeLa\",\n        \"dimp50_000.zip\": \"1LCgf5sg453Z4bY37A_W5mbXeG68U1fET\",\n        \"dimp18_000.zip\": \"17M7dJZ1oKrIY4-O5lL_mlQPEubUn034g\",\n        \"dimp18_001.zip\": \"1AsiliVgISyDTouYOQYVOXA0srj3YskhJ\",\n        \"dimp50_got_001.zip\": \"1EE5FcPXqMBkv_0ghfzytCMmbKxWxy04p\",\n        \"dimp18_002.zip\": \"1I0GrBaPnySOyPWSvItHhXH8182tFCi_Y\",\n        \"dimp50_got_002.zip\": \"1ALXzVkn58GZ1E0I22vrbXkEXwy5u0xOc\",\n        \"dimp18_got_000.zip\": \"1BxowlgGEonnuaVXwiDwiYr7VV7BRWLvr\",\n        \"dimp50_001.zip\": \"1XfPvwAcymW88J1rq7RlhyKmqsawJDK-K\",\n        \"dimp18_got_002.zip\": \"1awqXQnFRr5NwjLfI-Ngtt3zT7XmQIwzs\",\n        \"dimp18_got_001.zip\": \"1rr2J6NuuYJ5E4wDUw-PrxaNKjIsfgAyk\",\n        \"dimp50_got_000.zip\": \"1ruP8XJOu0woq-bvKdHJ9_Y9RceHDrDjm\",\n        \"dimp18_004.zip\": \"1EztF6bpROFwZ1PSJWgMB7bQ4G_Z08YIg\",\n        \"dimp18_003.zip\": \"1iuiFLv04WE7GfBjm8UkZXFq4gheG2Ru8\",\n        \"dimp50_003.zip\": \"1rLsgeQXyKpD6ryl9BjlIVdO3vd27ekwy\",\n        \"dimp50_002.zip\": \"1wj2jUwlpHgsP1hAcuxXAVriUPuEspsu4\",\n    },\n    \"atom\": {\n        \"default_004.zip\": \"1BapnQh_8iRM44DXj862eOZV4q8zQLdmT\",\n        \"default_003.zip\": \"1YpfOBLBEUQQiX0fWMPA5pnW3dm0NG3E5\",\n        \"default_got_000.zip\": \"1uJnC0PPQhavwRbAL7VQ2Zow8YdLVzeCb\",\n        \"default_got_001.zip\": \"1YzJm0H31veDW-lMxwy8MYNpMULgsYHKf\",\n        \"default_000.zip\": \"1x6fKGZk3V839mX99Gl_pw7JUaiMaTxc5\",\n        \"default_002.zip\": \"1QIlQFv3p6MBTwsYdIMYmzUDBDQGxGsUC\",\n        \"default_001.zip\": \"1-K2--GNCURDKEgUuiEF18K4DcCLvDEVt\",\n        \"default_got_002.zip\": \"1qGtArxdAy0uWSd-HqFT5zmXpR6TCm4Vc\",\n    },\n}\n\n\ndef _download_file(file_id, path):\n    link = 'https://drive.google.com/uc?id=' + file_id\n    gdown.download(link, path, quiet=True)\n\n\ndef download_results(download_path, trackers='all'):\n    \"\"\"\n    Script to automatically download tracker results for PyTracking.\n\n    args:\n        download_path - Directory where the zipped results are downloaded\n        trackers - Tracker results which are to be downloaded. If set to 'all', all available results are downloaded.\n                   If set to a name of a tracker (e.g. atom), all results for that tracker are downloaded.\n                   Otherwise, it can be set to a dict, where the keys are the names of the trackers for which results are\n                   downloaded. 
The value can either be set to 'all', in which case all available results for the\n                    tracker are downloaded, or to a list of parameter file names.\n    \"\"\"\n    print(\"Using download path '{}'\".format(download_path))\n\n    os.makedirs(download_path, exist_ok=True)\n\n    if isinstance(trackers, str):\n        if trackers == 'all':\n            trackers = {k: 'all' for k in results_link_dict.keys()}\n        elif trackers in results_link_dict:\n            trackers = {trackers: 'all'}\n        else:\n            raise Exception(\"trackers must be set to 'all', a tracker name, or a dict\")\n    elif isinstance(trackers, dict):\n        pass\n    else:\n        raise Exception(\"trackers must be set to 'all', or be a dict\")\n\n    for trk, runfiles in trackers.items():\n        trk_path = os.path.join(download_path, trk)\n        if not os.path.exists(trk_path):\n            os.makedirs(trk_path)\n\n        if runfiles == 'all':\n            for params, fileid in results_link_dict[trk].items():\n                print('Downloading: {}/{}'.format(trk, params))\n                _download_file(fileid, os.path.join(trk_path, params))\n        elif isinstance(runfiles, (list, tuple)):\n            for p in runfiles:\n                for params, fileid in results_link_dict[trk].items():\n                    if re.match(r'{}(|_(\\d\\d\\d)).zip'.format(p), params) is not None:\n                        print('Downloading: {}/{}'.format(trk, params))\n                        _download_file(fileid, os.path.join(trk_path, params))\n\n        else:\n            raise Exception(\"trackers values must either be 'all', or a list of param names\")\n\n\ndef unpack_tracking_results(download_path, output_path=None):\n    \"\"\"\n    Unpacks zipped benchmark results. The directory 'download_path' should have the following structure\n    - root\n        - tracker1\n            - param1.zip\n            - param2.zip\n            .\n            .\n        - tracker2\n            - param1.zip\n            - param2.zip\n        .\n        .\n\n    args:\n        download_path - Path to the directory where the zipped results are stored\n        output_path - Path to the directory where the results will be unpacked.
Set to env_settings().results_path\n                      by default\n    \"\"\"\n\n    if output_path is None:\n        output_path = env_settings().results_path\n\n    if not os.path.exists(output_path):\n        os.makedirs(output_path)\n\n    trackers = os.listdir(download_path)\n\n    for t in trackers:\n        runfiles = os.listdir(os.path.join(download_path, t))\n\n        for r in runfiles:\n            save_path = os.path.join(output_path, t)\n            if not os.path.exists(save_path):\n                os.makedirs(save_path)\n            shutil.unpack_archive(os.path.join(download_path, t, r), os.path.join(save_path, r[:-4]), 'zip')\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Download and unpack zipped results')\n    parser.add_argument('--tracker', type=str, default='all',\n                        help=\"Name of tracker results to download, or 'all'.\")\n    parser.add_argument('--output_path', type=str, default=None,\n                        help='Path to the directory where the results will be unpacked.')\n    parser.add_argument('--temp_download_path', type=str, default=None,\n                        help='Temporary path used for downloading the Zip files.')\n    # Note: argparse's type=bool treats any non-empty string (including 'False')\n    # as True, so the flag value is parsed explicitly.\n    parser.add_argument('--download', type=lambda s: s.lower() not in ('false', '0', 'no'), default=True,\n                        help='Whether to download results or only unpack existing downloaded files.')\n    args = parser.parse_args()\n\n    download_path = args.temp_download_path\n    if download_path is None:\n        download_path = '{}/pytracking_results/'.format(tempfile.gettempdir())\n\n    if args.download:\n        download_results(download_path, args.tracker)\n\n    unpack_tracking_results(download_path, args.output_path)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
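A hedged usage sketch for the script above (paths and run names are placeholders): `download_results` accepts `'all'`, a tracker name, or a dict whose values are `'all'` or lists of parameter-file prefixes, and `unpack_tracking_results` unpacks into `env_settings().results_path` by default.

```
from pytracking.util_scripts.download_results import download_results, unpack_tracking_results

# Download all ATOM results but only the GOT-10k DiMP-50 runs, then unpack them
# into env_settings().results_path (the default output location).
download_results('/tmp/pytracking_results', trackers={'atom': 'all', 'dimp': ['dimp50_got']})
unpack_tracking_results('/tmp/pytracking_results')
```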
  {
    "path": "external/AR/pytracking/util_scripts/pack_got10k_results.py",
    "content": "import numpy as np\nimport os\nimport shutil\nfrom pytracking.evaluation.environment import env_settings\n\n\ndef pack_got10k_results(tracker_name, param_name, output_name):\n    \"\"\" Packs got10k results into a zip folder which can be directly uploaded to the evaluation server. The packed\n    file is saved in the folder env_settings().got_packed_results_path\n\n    args:\n        tracker_name - name of the tracker\n        param_name - name of the parameter file\n        output_name - name of the packed zip file\n    \"\"\"\n    output_path = os.path.join(env_settings().got_packed_results_path, output_name)\n\n    if not os.path.exists(output_path):\n        os.makedirs(output_path)\n\n    results_path = env_settings().results_path\n    for i in range(1,181):\n        seq_name = 'GOT-10k_Test_{:06d}'.format(i)\n\n        seq_output_path = '{}/{}'.format(output_path, seq_name)\n        if not os.path.exists(seq_output_path):\n            os.makedirs(seq_output_path)\n\n        for run_id in range(3):\n            res = np.loadtxt('{}/{}/{}_{:03d}/{}.txt'.format(results_path, tracker_name, param_name, run_id, seq_name), dtype=np.float64)\n            times = np.loadtxt(\n                '{}/{}/{}_{:03d}/{}_time.txt'.format(results_path, tracker_name, param_name, run_id, seq_name),\n                dtype=np.float64)\n\n            np.savetxt('{}/{}_{:03d}.txt'.format(seq_output_path, seq_name, run_id+1), res, delimiter=',', fmt='%f')\n            np.savetxt('{}/{}_time.txt'.format(seq_output_path, seq_name), times, fmt='%f')\n\n    # Generate ZIP file\n    shutil.make_archive(output_path, 'zip', output_path)\n\n    # Remove raw text files\n    shutil.rmtree(output_path)\n"
  },
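For illustration, a hedged call to the packer above; `'dimp'` and `'dimp50'` are placeholder names. The function expects three completed runs, since it reads the result folders `{param_name}_000` through `{param_name}_002` for every test sequence.

```
from pytracking.util_scripts.pack_got10k_results import pack_got10k_results

# Produces got_packed_results_path/dimp50_got10k.zip, ready for the GOT-10k server.
pack_got10k_results(tracker_name='dimp', param_name='dimp50', output_name='dimp50_got10k')
```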
  {
    "path": "external/AR/pytracking/util_scripts/pack_trackingnet_results.py",
    "content": "import numpy as np\nimport os\nimport shutil\nfrom pytracking.evaluation.environment import env_settings\nfrom pytracking.evaluation.datasets import get_dataset\n\n\ndef pack_trackingnet_results(tracker_name, param_name, run_id=None, output_name=None):\n    \"\"\" Packs trackingnet results into a zip folder which can be directly uploaded to the evaluation server. The packed\n    file is saved in the folder env_settings().tn_packed_results_path\n\n    args:\n        tracker_name - name of the tracker\n        param_name - name of the parameter file\n        run_id - run id for the tracker\n        output_name - name of the packed zip file\n    \"\"\"\n\n    if output_name is None:\n        if run_id is None:\n            output_name = '{}_{}'.format(tracker_name, param_name)\n        else:\n            output_name = '{}_{}_{:03d}'.format(tracker_name, param_name, run_id)\n\n    output_path = os.path.join(env_settings().tn_packed_results_path, output_name)\n\n    if not os.path.exists(output_path):\n        os.makedirs(output_path)\n\n    results_path = env_settings().results_path\n\n    tn_dataset = get_dataset('trackingnet')\n\n    for seq in tn_dataset:\n        seq_name = seq.name\n\n        if run_id is None:\n            seq_results_path = '{}/{}/{}/{}.txt'.format(results_path, tracker_name, param_name, seq_name)\n        else:\n            seq_results_path = '{}/{}/{}_{:03d}/{}.txt'.format(results_path, tracker_name, param_name, run_id, seq_name)\n\n        results = np.loadtxt(seq_results_path, dtype=np.float64)\n\n        np.savetxt('{}/{}.txt'.format(output_path, seq_name), results, delimiter=',', fmt='%.2f')\n\n    # Generate ZIP file\n    shutil.make_archive(output_path, 'zip', output_path)\n\n    # Remove raw text files\n    shutil.rmtree(output_path)\n"
  },
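Similarly, a hedged sketch for the TrackingNet packer (placeholder names again): with `run_id=None` the results are read from `{results_path}/{tracker_name}/{param_name}/`, otherwise from the `_{run_id:03d}`-suffixed folder.

```
from pytracking.util_scripts.pack_trackingnet_results import pack_trackingnet_results

# Packs run 0 into tn_packed_results_path/dimp_dimp50_000.zip.
pack_trackingnet_results(tracker_name='dimp', param_name='dimp50', run_id=0)
```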
  {
    "path": "external/AR/pytracking/utils/__init__.py",
    "content": "from .params import TrackerParams, FeatureParams, Choice"
  },
  {
    "path": "external/AR/pytracking/utils/convert_vot_anno_to_rect.py",
    "content": "import numpy as np\n\n\ndef convert_vot_anno_to_rect(vot_anno, type):\n    if len(vot_anno) == 4:\n        return vot_anno\n\n    if type == 'union':\n        x1 = min(vot_anno[0::2])\n        x2 = max(vot_anno[0::2])\n        y1 = min(vot_anno[1::2])\n        y2 = max(vot_anno[1::2])\n        return [x1, y1, x2 - x1, y2 - y1]\n    elif type == 'preserve_area':\n        if len(vot_anno) != 8:\n            raise ValueError\n\n        vot_anno = np.array(vot_anno)\n        cx = np.mean(vot_anno[0::2])\n        cy = np.mean(vot_anno[1::2])\n\n        x1 = min(vot_anno[0::2])\n        x2 = max(vot_anno[0::2])\n        y1 = min(vot_anno[1::2])\n        y2 = max(vot_anno[1::2])\n\n        A1 = np.linalg.norm(vot_anno[0:2] - vot_anno[2: 4]) * np.linalg.norm(vot_anno[2: 4] - vot_anno[4:6])\n        A2 = (x2 - x1) * (y2 - y1)\n        s = np.sqrt(A1 / A2)\n        w = s * (x2 - x1) + 1\n        h = s * (y2 - y1) + 1\n\n        x = cx - 0.5*w\n        y = cy - 0.5*h\n        return [x, y, w, h]\n    else:\n        raise ValueError\n"
  },
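A small worked example of the two conversion modes (illustrative only). For an axis-aligned square given as an 8-value polygon, the two modes agree up to the `+1` size convention used by `preserve_area`:

```
from pytracking.utils.convert_vot_anno_to_rect import convert_vot_anno_to_rect

poly = [0, 0, 4, 0, 4, 4, 0, 4]                  # (x1, y1, x2, y2, x3, y3, x4, y4)
convert_vot_anno_to_rect(poly, 'union')          # [0, 0, 4, 4]
convert_vot_anno_to_rect(poly, 'preserve_area')  # [-0.5, -0.5, 5.0, 5.0]
```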
  {
    "path": "external/AR/pytracking/utils/load_text.py",
    "content": "import numpy as np\nimport pandas as pd\n\n\ndef load_text_numpy(path, delimiter, dtype):\n    if isinstance(delimiter, (tuple, list)):\n        for d in delimiter:\n            try:\n                ground_truth_rect = np.loadtxt(path, delimiter=d, dtype=dtype)\n                return ground_truth_rect\n            except:\n                pass\n\n        raise Exception('Could not read file {}'.format(path))\n    else:\n        ground_truth_rect = np.loadtxt(path, delimiter=delimiter, dtype=dtype)\n        return ground_truth_rect\n\n\ndef load_text_pandas(path, delimiter, dtype):\n    if isinstance(delimiter, (tuple, list)):\n        for d in delimiter:\n            try:\n                ground_truth_rect = pd.read_csv(path, delimiter=d, header=None, dtype=dtype, na_filter=False,\n                                                low_memory=False).values\n                return ground_truth_rect\n            except Exception as e:\n                pass\n\n        raise Exception('Could not read file {}'.format(path))\n    else:\n        ground_truth_rect = pd.read_csv(path, delimiter=delimiter, header=None, dtype=dtype, na_filter=False,\n                                        low_memory=False).values\n        return ground_truth_rect\n\n\ndef load_text(path, delimiter=' ', dtype=np.float32, backend='numpy'):\n    if backend == 'numpy':\n        return load_text_numpy(path, delimiter, dtype)\n    elif backend == 'pandas':\n        return load_text_pandas(path, delimiter, dtype)\n"
  },
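A hedged usage sketch (`groundtruth.txt` is a placeholder): passing a tuple of delimiters makes the loader try each in turn, and `None` means "any whitespace" for the numpy backend.

```
import numpy as np
from pytracking.utils.load_text import load_text

gt = load_text('groundtruth.txt', delimiter=(',', None, '\t'), dtype=np.float64, backend='numpy')
```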
  {
    "path": "external/AR/pytracking/utils/loading.py",
    "content": "import os\nimport ltr.admin.loading as ltr_loading\nfrom pytracking.evaluation.environment import env_settings\n\n\ndef load_network(net_path, **kwargs):\n    \"\"\"Load network for tracking.\n    args:\n        net_path - Path to network. If it is not an absolute path, it is relative to the network_path in the local.py.\n                   See ltr.admin.loading.load_network for further details.\n        **kwargs - Additional key-word arguments that are sent to ltr.admin.loading.load_network.\n    \"\"\"\n    kwargs['backbone_pretrained'] = False\n    if os.path.isabs(net_path):\n        path_full = net_path\n        net, _ = ltr_loading.load_network(path_full, **kwargs)\n    elif isinstance(env_settings().network_path, (list, tuple)):\n        net = None\n        for p in env_settings().network_path:\n            path_full = os.path.join(p, net_path)\n            try:\n                net, _ = ltr_loading.load_network(path_full, **kwargs)\n                break\n            except Exception as e:\n                # print(e)\n                pass\n\n        assert net is not None, 'Failed to load network'\n    else:\n        path_full = os.path.join(env_settings().network_path, net_path)\n        net, _ = ltr_loading.load_network(path_full, **kwargs)\n\n    return net\n"
  },
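A hedged sketch of the lookup behaviour (the checkpoint name is a placeholder): relative paths are resolved against `env_settings().network_path`, which may itself be a list of candidate directories, while absolute paths are used as-is.

```
from pytracking.utils.loading import load_network

net = load_network('atom_default.pth')             # resolved against network_path
# net = load_network('/abs/path/checkpoint.pth')   # absolute paths bypass the lookup
```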
  {
    "path": "external/AR/pytracking/utils/params.py",
    "content": "from pytracking import TensorList\nimport random\n\n\nclass TrackerParams:\n    \"\"\"Class for tracker parameters.\"\"\"\n    def set_default_values(self, default_vals: dict):\n        for name, val in default_vals.items():\n            if not hasattr(self, name):\n                setattr(self, name, val)\n\n    def get(self, name: str, *default):\n        \"\"\"Get a parameter value with the given name. If it does not exists, it return the default value given as a\n        second argument or returns an error if no default value is given.\"\"\"\n        if len(default) > 1:\n            raise ValueError('Can only give one default value.')\n\n        if not default:\n            return getattr(self, name)\n\n        return getattr(self, name, default[0])\n\n    def has(self, name: str):\n        \"\"\"Check if there exist a parameter with the given name.\"\"\"\n        return hasattr(self, name)\n\n\nclass FeatureParams:\n    \"\"\"Class for feature specific parameters\"\"\"\n    def __init__(self, *args, **kwargs):\n        if len(args) > 0:\n            raise ValueError\n\n        for name, val in kwargs.items():\n            if isinstance(val, list):\n                setattr(self, name, TensorList(val))\n            else:\n                setattr(self, name, val)\n\n\ndef Choice(*args):\n    \"\"\"Can be used to sample random parameter values.\"\"\"\n    return random.choice(args)\n"
  },
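Illustrative only: how `TrackerParams` behaves with and without defaults.

```
from pytracking.utils.params import TrackerParams

params = TrackerParams()
params.set_default_values({'search_area_scale': 4.0})
params.get('search_area_scale')        # 4.0
params.get('missing_param', 0.5)       # 0.5, the supplied default
params.has('missing_param')            # False
# params.get('missing_param') with no default raises AttributeError
```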
  {
    "path": "external/AR/pytracking/utils/plotting.py",
    "content": "import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport cv2\n\n\ndef draw_figure(fig):\n    fig.canvas.draw()\n    fig.canvas.flush_events()\n    plt.pause(0.001)\n\n\ndef show_tensor(a: torch.Tensor, fig_num = None, title = None, range=(None, None), ax=None):\n    \"\"\"Display a 2D tensor.\n    args:\n        fig_num: Figure number.\n        title: Title of figure.\n    \"\"\"\n    a_np = a.squeeze().cpu().clone().detach().numpy()\n    if a_np.ndim == 3:\n        a_np = np.transpose(a_np, (1, 2, 0))\n\n    if ax is None:\n        fig = plt.figure(fig_num)\n        plt.tight_layout()\n        plt.cla()\n        plt.imshow(a_np, vmin=range[0], vmax=range[1])\n        plt.axis('off')\n        plt.axis('equal')\n        if title is not None:\n            plt.title(title)\n        draw_figure(fig)\n    else:\n        ax.cla()\n        ax.imshow(a_np, vmin=range[0], vmax=range[1])\n        ax.set_axis_off()\n        ax.axis('equal')\n        if title is not None:\n            ax.set_title(title)\n        draw_figure(plt.gcf())\n\n\ndef plot_graph(a: torch.Tensor, fig_num = None, title = None):\n    \"\"\"Plot graph. Data is a 1D tensor.\n    args:\n        fig_num: Figure number.\n        title: Title of figure.\n    \"\"\"\n    a_np = a.squeeze().cpu().clone().detach().numpy()\n    if a_np.ndim > 1:\n        raise ValueError\n    fig = plt.figure(fig_num)\n    # plt.tight_layout()\n    plt.cla()\n    plt.plot(a_np)\n    if title is not None:\n        plt.title(title)\n    draw_figure(fig)\n\n\ndef show_image_with_boxes(im, boxes, iou_pred=None, disp_ids=None):\n    im_np = im.clone().cpu().squeeze().numpy()\n    im_np = np.ascontiguousarray(im_np.transpose(1, 2, 0).astype(np.uint8))\n\n    boxes = boxes.view(-1, 4).cpu().numpy().round().astype(int)\n\n    # Draw proposals\n    for i_ in range(boxes.shape[0]):\n        if disp_ids is None or disp_ids[i_]:\n            bb = boxes[i_, :]\n            disp_color = (i_*38 % 256, (255 - i_*97) % 256, (123 + i_*66) % 256)\n            cv2.rectangle(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]),\n                          disp_color, 1)\n\n            if iou_pred is not None:\n                text_pos = (bb[0], bb[1] - 5)\n                cv2.putText(im_np, 'ID={} IOU = {:3.2f}'.format(i_, iou_pred[i_]), text_pos,\n                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, bottomLeftOrigin=False)\n\n    im_tensor = torch.from_numpy(im_np.transpose(2, 0, 1)).float()\n\n    return im_tensor\n\n\n\ndef _pascal_color_map(N=256, normalized=False):\n    \"\"\"\n    Python implementation of the color map function for the PASCAL VOC data set.\n    Official Matlab version can be found in the PASCAL VOC devkit\n    http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit\n    \"\"\"\n\n    def bitget(byteval, idx):\n        return (byteval & (1 << idx)) != 0\n\n    dtype = 'float32' if normalized else 'uint8'\n    cmap = np.zeros((N, 3), dtype=dtype)\n    for i in range(N):\n        r = g = b = 0\n        c = i\n        for j in range(8):\n            r = r | (bitget(c, 0) << 7 - j)\n            g = g | (bitget(c, 1) << 7 - j)\n            b = b | (bitget(c, 2) << 7 - j)\n            c = c >> 3\n\n        cmap[i] = np.array([r, g, b])\n\n    cmap = cmap / 255 if normalized else cmap\n    return cmap\n\n\ndef overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None):\n    \"\"\" Overlay mask over image.\n    Source: 
https://github.com/albertomontesg/davis-interactive/blob/master/davisinteractive/utils/visualization.py\n    This function allows you to overlay a mask over an image with some\n    transparency.\n    # Arguments\n        im: Numpy Array. Array with the image. The shape must be (H, W, 3) and\n            the pixels must be represented as `np.uint8` data type.\n        ann: Numpy Array. Array with the mask. The shape must be (H, W) and the\n            values must be integers.\n        alpha: Float. Proportion of alpha to apply to the overlaid mask.\n        colors: Numpy Array. Optional custom colormap. It must have shape (N, 3),\n            N being the maximum number of colors to represent.\n        contour_thickness: Integer. Thickness of each object index contour drawn\n            over the overlay. This option requires the package `opencv-python`\n            to be installed.\n    # Returns\n        Numpy Array: Image of the overlay with shape (H, W, 3) and data type\n            `np.uint8`.\n    \"\"\"\n    # np.int is a deprecated alias that was removed in NumPy 1.24; use the builtin int.\n    im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=int)\n    if im.shape[:-1] != ann.shape:\n        raise ValueError('First two dimensions of `im` and `ann` must match')\n    if im.shape[-1] != 3:\n        raise ValueError('`im` must have three channels in the third dimension')\n\n    # `colors or ...` is ambiguous for numpy arrays, so test for None explicitly.\n    colors = _pascal_color_map() if colors is None else colors\n    colors = np.asarray(colors, dtype=np.uint8)\n\n    mask = colors[ann]\n    fg = im * alpha + (1 - alpha) * mask\n\n    img = im.copy()\n    img[ann > 0] = fg[ann > 0]\n\n    if contour_thickness:  # pragma: no cover\n        import cv2\n        for obj_id in np.unique(ann[ann > 0]):\n            contours = cv2.findContours((ann == obj_id).astype(\n                np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n            cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(),\n                             contour_thickness)\n    return img\n"
  },
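A hedged sketch with synthetic data showing `overlay_mask` on a two-object label mask; shapes and label values are arbitrary.

```
import numpy as np
from pytracking.utils.plotting import overlay_mask

im = np.zeros((64, 64, 3), dtype=np.uint8)   # dummy image
ann = np.zeros((64, 64), dtype=np.uint8)     # label mask: 0 = background
ann[10:30, 10:30] = 1                        # object 1
ann[40:60, 40:60] = 2                        # object 2
out = overlay_mask(im, ann, alpha=0.5, contour_thickness=1)  # (64, 64, 3) uint8
```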
  {
    "path": "external/AR/pytracking/utils/visdom.py",
    "content": "import visdom\nimport visdom.server\nfrom pytracking.features.preprocessing import numpy_to_torch\nfrom pytracking.utils.plotting import show_image_with_boxes, overlay_mask\nimport cv2\nimport torch\nimport copy\nimport numpy as np\nfrom collections import OrderedDict\n\n\nclass VisBase:\n    def __init__(self, visdom, show_data, title):\n        self.visdom = visdom\n        self.show_data = show_data\n        self.title = title\n        self.raw_data = None\n\n    def update(self, data, **kwargs):\n        self.save_data(data, **kwargs)\n\n        if self.show_data:\n            self.draw_data()\n\n    def save_data(self, data, **kwargs):\n        raise NotImplementedError\n\n    def draw_data(self):\n        raise NotImplementedError\n\n    def toggle_display(self, new_mode=None):\n        if new_mode is not None:\n            self.show_data = new_mode\n        else:\n            self.show_data = not self.show_data\n\n        if self.show_data:\n            self.draw_data()\n        else:\n            self.visdom.close(self.title)\n\n\nclass VisImage(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        data = data.float()\n        self.raw_data = data\n\n    def draw_data(self):\n        self.visdom.image(self.raw_data.clone(), opts={'title': self.title}, win=self.title)\n\n\nclass VisHeatmap(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        data = data.squeeze().flip(0)\n        self.raw_data = data\n\n    def draw_data(self):\n        self.visdom.heatmap(self.raw_data.clone(),  opts={'title': self.title}, win=self.title)\n\n\nclass VisFeaturemap(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n        self.block_list = None\n\n    def block_list_callback_handler(self, data):\n        self.block_list[data['propertyId']]['value'] = data['value']\n        self.visdom.properties(self.block_list, opts={'title': 'Featuremap UI'}, win='featuremap_ui')\n        self.draw_data()\n\n    def save_data(self, data):\n        data = data.view(-1, *data.shape[-2:])\n        data = data.flip(1)\n        if self.block_list is None:\n            self.block_list = []\n            self.draw_feat = []\n            for i in range(data.shape[0]):\n                self.block_list.append({'type': 'checkbox', 'name': 'Channel {:04d}'.format(i), 'value': False})\n\n            self.visdom.properties(self.block_list, opts={'title': 'Featuremap UI'}, win='featuremap_ui')\n            self.visdom.register_event_handler(self.block_list_callback_handler, 'featuremap_ui')\n\n        self.raw_data = data\n\n    def draw_data(self):\n        if self.block_list is not None and self.show_data:\n            for i, d in enumerate(self.block_list):\n                if d['value']:\n                    fig_title = '{} ch: {:04d}'.format(self.title, i)\n                    self.visdom.heatmap(self.raw_data[i, :, :].clone(),\n                                        opts={'title': fig_title}, win=fig_title)\n\n\nclass VisCostVolume(VisBase):\n    def __init__(self, visdom, show_data, title, flip=False):\n        super().__init__(visdom, show_data, title)\n        self.show_slice = False\n        self.slice_pos = None\n        self.flip = flip\n\n    def show_cost_volume(self):\n        data = self.raw_data.clone()\n\n        # data_perm = 
data.permute(2, 0, 3, 1).contiguous()\n        data_perm = data.permute(0, 2, 1, 3).contiguous()\n        if self.flip:\n            data_perm = data_perm.permute(2, 3, 0, 1).contiguous()\n\n        data_perm = data_perm.view(data_perm.shape[0] * data_perm.shape[1], -1)\n        self.visdom.heatmap(data_perm.flip(0), opts={'title': self.title}, win=self.title)\n\n    def set_zoom_pos(self, slice_pos):\n        self.slice_pos = slice_pos\n\n    def toggle_show_slice(self, new_mode=None):\n        if new_mode is not None:\n            self.show_slice = new_mode\n        else:\n            self.show_slice = not self.show_slice\n\n    def show_cost_volume_slice(self):\n        slice_pos = self.slice_pos\n\n        # slice_pos: [row, col]\n        cost_volume_data = self.raw_data.clone()\n\n        if self.flip:\n            cost_volume_slice = cost_volume_data[:, :, slice_pos[0], slice_pos[1]]\n        else:\n            cost_volume_slice = cost_volume_data[slice_pos[0], slice_pos[1], :, :]\n        self.visdom.heatmap(cost_volume_slice.flip(0), opts={'title': self.title}, win=self.title)\n\n    def save_data(self, data):\n        data = data.view(data.shape[-2], data.shape[-1], data.shape[-2], data.shape[-1])\n        self.raw_data = data\n\n    def draw_data(self):\n        if self.show_slice:\n            self.show_cost_volume_slice()\n        else:\n            self.show_cost_volume()\n\n\nclass VisCostVolumeUI(VisBase):\n    def cv_ui_handler(self, data):\n        zoom_toggled = False\n        if data['event_type'] == 'KeyPress':\n            if data['key'] == 'ArrowRight':\n                self.zoom_pos[1] = min(self.zoom_pos[1] + 1, self.feat_shape[1]-1)\n            elif data['key'] == 'ArrowLeft':\n                self.zoom_pos[1] = max(self.zoom_pos[1] - 1, 0)\n            elif data['key'] == 'ArrowUp':\n                self.zoom_pos[0] = max(self.zoom_pos[0] - 1, 0)\n            elif data['key'] == 'ArrowDown':\n                self.zoom_pos[0] = min(self.zoom_pos[0] + 1, self.feat_shape[0]-1)\n            elif data['key'] == 'Enter':\n                self.zoom_mode = not self.zoom_mode\n                zoom_toggled = True\n\n        # Update image\n        self.show_image()\n\n        # Update cost volumes\n        for block_title, block in self.registered_blocks.items():\n            if isinstance(block, VisCostVolume):\n                block.set_zoom_pos(self.zoom_pos)\n                block.toggle_show_slice(self.zoom_mode)\n\n                if (self.zoom_mode or zoom_toggled) and block.show_data:\n                    block.draw_data()\n\n    def __init__(self, visdom, show_data, title, feat_shape, registered_blocks):\n        super().__init__(visdom, show_data, title)\n        self.feat_shape = feat_shape\n        self.zoom_mode = False\n        self.zoom_pos = [int((feat_shape[0] - 1) / 2), int((feat_shape[1] - 1) / 2)]\n        self.registered_blocks = registered_blocks\n\n        self.visdom.register_event_handler(self.cv_ui_handler, title)\n\n    def draw_grid(self, data):\n        stride_r = int(data.shape[1] / self.feat_shape[0])\n        stride_c = int(data.shape[2] / self.feat_shape[1])\n\n        # Draw grid\n        data[:, list(range(0, data.shape[1], stride_r)), :] = 0\n        data[:, :, list(range(0, data.shape[2], stride_c))] = 0\n\n        data[0, list(range(0, data.shape[1], stride_r)), :] = 255\n        data[0, :, list(range(0, data.shape[2], stride_c))] = 255\n\n        return data\n\n    def shade_cell(self, data):\n        stride_r = int(data.shape[1] / 
self.feat_shape[0])\n        stride_c = int(data.shape[2] / self.feat_shape[1])\n\n        r1 = self.zoom_pos[0]*stride_r\n        r2 = min((self.zoom_pos[0] + 1)*stride_r, data.shape[1])\n\n        c1 = self.zoom_pos[1] * stride_c\n        c2 = min((self.zoom_pos[1] + 1) * stride_c, data.shape[2])\n\n        factor = 0.8 if self.zoom_mode else 0.5\n        data[:, r1:r2, c1:c2] = data[:, r1:r2, c1:c2] * (1 - factor) + torch.tensor([255.0, 0.0, 0.0]).view(3, 1, 1).to(data.device) * factor\n        return data\n\n    def show_image(self, data=None):\n        if data is None:\n            data = self.raw_data.clone()\n\n        data = self.draw_grid(data)\n        data = self.shade_cell(data)\n        self.visdom.image(data, opts={'title': self.title}, win=self.title)\n\n    def save_data(self, data):\n        # Ignore feat shape\n        data = data[0]\n        data = data.float()\n        self.raw_data = data\n\n    def draw_data(self):\n        self.show_image(self.raw_data.clone())\n\n\nclass VisInfoDict(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n        self.raw_data = OrderedDict()\n\n    def generate_display_text(self, data):\n        display_text = ''\n        for key, value in data.items():\n            key = key.replace('_', ' ')\n            if value is None:\n                display_text += '<b>{}</b>: {}<br>'.format(key, 'None')\n            elif isinstance(value, (str, int)):\n                display_text += '<b>{}</b>: {}<br>'.format(key, value)\n            else:\n                display_text += '<b>{}</b>: {:.2f}<br>'.format(key, value)\n\n        return display_text\n\n    def save_data(self, data):\n        for key, val in data.items():\n            self.raw_data[key] = val\n\n    def draw_data(self):\n        data = copy.deepcopy(self.raw_data)\n        display_text = self.generate_display_text(data)\n        self.visdom.text(display_text, opts={'title': self.title}, win=self.title)\n\n\nclass VisText(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        self.raw_data = data\n\n    def draw_data(self):\n        data = copy.deepcopy(self.raw_data)\n        self.visdom.text(data, opts={'title': self.title}, win=self.title)\n\n\nclass VisLinePlot(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        self.raw_data = data\n\n    def draw_data(self):\n        if isinstance(self.raw_data, (list, tuple)):\n            data_y = self.raw_data[0].clone()\n            data_x = self.raw_data[1].clone()\n        else:\n            data_y = self.raw_data.clone()\n            data_x = torch.arange(data_y.shape[0])\n\n        self.visdom.line(data_y, data_x, opts={'title': self.title}, win=self.title)\n\n\nclass VisTracking(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        image = data[0]\n        boxes_masks = data[1:]\n\n        boxes, masks = [], []\n        for bm in boxes_masks:\n            if bm is None:\n                continue\n            if isinstance(bm, list):\n                boxes.append(torch.Tensor(bm)); continue\n            if len(bm.shape) > 1:\n                # Binarize segmentation if a float tensor is provided\n                if bm.dtype != np.uint8:\n                    bm = (bm > 
0.5).astype(np.uint8)\n                masks.append(bm); continue\n            boxes.append(bm.float())\n\n        self.raw_data = [image, boxes, masks]\n\n    def draw_data(self):\n        disp_image = self.raw_data[0].copy()\n\n        resize_factor = 1\n        if max(disp_image.shape) > 480:\n            resize_factor = 480.0 / float(max(disp_image.shape))\n            disp_image = cv2.resize(disp_image, None, fx=resize_factor, fy=resize_factor)\n            for i, mask in enumerate(self.raw_data[2]):\n                self.raw_data[2][i] = cv2.resize(mask, None, fx=resize_factor, fy=resize_factor)\n\n        boxes = [resize_factor * b.clone() for b in self.raw_data[1]]\n\n        for i, disp_rect in enumerate(boxes):\n            color = ((255*((i%3)>0)), 255*((i+1)%2), (255*(i%5))//4)\n            cv2.rectangle(disp_image,\n                          (int(disp_rect[0]), int(disp_rect[1])),\n                          (int(disp_rect[0] + disp_rect[2]), int(disp_rect[1] + disp_rect[3])), color, 2)\n        for i, mask in enumerate(self.raw_data[2], 1):\n            disp_image = overlay_mask(disp_image, mask * i)\n        disp_image = numpy_to_torch(disp_image).squeeze(0)\n        disp_image = disp_image.float()\n        self.visdom.image(disp_image, opts={'title': self.title}, win=self.title)\n\n\nclass VisBBReg(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n        self.block_list = []\n\n    def block_list_callback_handler(self, data):\n        self.block_list[data['propertyId']]['value'] = data['value']\n        self.visdom.properties(self.block_list, opts={'title': 'BBReg Vis'}, win='bbreg_vis')\n        self.draw_data()\n\n    def save_data(self, data):\n        self.image = data[0].float()\n        self.init_boxes = data[1]\n        self.final_boxes = data[2]\n        self.final_ious = data[3]\n\n    def draw_data(self):\n        if len(self.block_list) == 0:\n            self.block_list.append({'type': 'checkbox', 'name': 'ID 0', 'value': True})\n            self.block_list.append({'type': 'checkbox', 'name': 'ID 1', 'value': True})\n            self.visdom.properties(self.block_list, opts={'title': 'BBReg Vis'}, win='bbreg_vis')\n            self.visdom.register_event_handler(self.block_list_callback_handler, 'bbreg_vis')\n\n        disp_image = self.image\n\n        ids = [x['value'] for x in self.block_list]\n        init_box_image = show_image_with_boxes(disp_image.clone(), self.init_boxes.clone(), disp_ids=ids)\n        final_box_image = show_image_with_boxes(disp_image.clone(), self.final_boxes.clone(), self.final_ious.clone(), disp_ids=ids)\n\n        self.visdom.image(init_box_image, opts={'title': 'Init Boxes'}, win='Init Boxes')\n        self.visdom.image(final_box_image, opts={'title': 'Final Boxes'}, win='Final Boxes')\n\n\nclass Visdom:\n    def __init__(self, debug=0, ui_info=None, visdom_info=None):\n        self.debug = debug\n        # Guard against the default visdom_info=None, which would otherwise crash on .get().\n        visdom_info = {} if visdom_info is None else visdom_info\n        self.visdom = visdom.Visdom(server=visdom_info.get('server', '127.0.0.1'), port=visdom_info.get('port', 8097))\n        self.registered_blocks = {}\n        self.blocks_list = []\n\n        self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list')\n        self.visdom.register_event_handler(self.block_list_callback_handler, 'block_list')\n\n        if ui_info is not None:\n            self.visdom.register_event_handler(ui_info['handler'], ui_info['win_id'])\n\n    def block_list_callback_handler(self, data):\n        field_name = 
self.blocks_list[data['propertyId']]['name']\n\n        self.registered_blocks[field_name].toggle_display(data['value'])\n\n        self.blocks_list[data['propertyId']]['value'] = data['value']\n\n        self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list')\n\n    def register(self, data, mode, debug_level=0, title='Data', **kwargs):\n        if title not in self.registered_blocks.keys():\n            show_data = self.debug >= debug_level\n\n            if title != 'Tracking':\n                self.blocks_list.append({'type': 'checkbox', 'name': title, 'value': show_data})\n\n            self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list')\n\n            if mode == 'image':\n                self.registered_blocks[title] = VisImage(self.visdom, show_data, title)\n            elif mode == 'heatmap':\n                self.registered_blocks[title] = VisHeatmap(self.visdom, show_data, title)\n            elif mode == 'cost_volume':\n                self.registered_blocks[title] = VisCostVolume(self.visdom, show_data, title)\n            elif mode == 'cost_volume_flip':\n                self.registered_blocks[title] = VisCostVolume(self.visdom, show_data, title, flip=True)\n            elif mode == 'cost_volume_ui':\n                self.registered_blocks[title] = VisCostVolumeUI(self.visdom, show_data, title, data[1],\n                                                                self.registered_blocks)\n            elif mode == 'info_dict':\n                self.registered_blocks[title] = VisInfoDict(self.visdom, show_data, title)\n            elif mode == 'text':\n                self.registered_blocks[title] = VisText(self.visdom, show_data, title)\n            elif mode == 'lineplot':\n                self.registered_blocks[title] = VisLinePlot(self.visdom, show_data, title)\n            elif mode == 'Tracking':\n                self.registered_blocks[title] = VisTracking(self.visdom, show_data, title)\n            elif mode == 'bbreg':\n                self.registered_blocks[title] = VisBBReg(self.visdom, show_data, title)\n            elif mode == 'featmap':\n                self.registered_blocks[title] = VisFeaturemap(self.visdom, show_data, title)\n            else:\n                raise ValueError('Visdom Error: Unknown data mode {}'.format(mode))\n        # Update\n        self.registered_blocks[title].update(data, **kwargs)\n\n"
  },
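A hedged sketch of the registration API (a visdom server must be running, e.g. `python -m visdom.server`); the titles and data are arbitrary.

```
import torch
from pytracking.utils.visdom import Visdom

vis = Visdom(debug=1, visdom_info={'server': '127.0.0.1', 'port': 8097})
vis.register(torch.rand(18, 18), 'heatmap', debug_level=1, title='Score Map')
vis.register({'max_score': 0.87, 'state': 'normal'}, 'info_dict', debug_level=1, title='Status')
```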
  {
    "path": "external/AR/pytracking/vot20_utils.py",
    "content": "import numpy as np\n\n\ndef make_full_size(x, output_sz):\n    '''\n    zero-pad input x (right and down) to match output_sz\n    x: numpy array e.g., binary mask\n    output_sz: size of the output [width, height]\n    '''\n    if x.shape[0] == output_sz[1] and x.shape[1] == output_sz[0]:\n        return x\n    pad_x = output_sz[0] - x.shape[1]\n    if pad_x < 0:\n        x = x[:, :x.shape[1] + pad_x]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_x = 0\n    pad_y = output_sz[1] - x.shape[0]\n    if pad_y < 0:\n        x = x[:x.shape[0] + pad_y, :]\n        # padding has to be set to zero, otherwise pad function fails\n        pad_y = 0\n    return np.pad(x, ((0, pad_y), (0, pad_x)), 'constant', constant_values=0)\n\n\ndef rect_from_mask(mask):\n    '''\n    create an axis-aligned rectangle from a given binary mask\n    mask in created as a minimal rectangle containing all non-zero pixels\n    '''\n    x_ = np.sum(mask, axis=0)\n    y_ = np.sum(mask, axis=1)\n    x0 = np.min(np.nonzero(x_))\n    x1 = np.max(np.nonzero(x_))\n    y0 = np.min(np.nonzero(y_))\n    y1 = np.max(np.nonzero(y_))\n    return [x0, y0, x1 - x0 + 1, y1 - y0 + 1]\n\n\ndef mask_from_rect(rect, output_sz):\n    '''\n    create a binary mask from a given rectangle\n    rect: axis-aligned rectangle [x0, y0, width, height]\n    output_sz: size of the output [width, height]\n    '''\n    mask = np.zeros((output_sz[1], output_sz[0]), dtype=np.uint8)\n    x0 = max(int(round(rect[0])), 0)\n    y0 = max(int(round(rect[1])), 0)\n    x1 = min(int(round(rect[0] + rect[2])), output_sz[0])\n    y1 = min(int(round(rect[1] + rect[3])), output_sz[1])\n    mask[y0:y1, x0:x1] = 1\n    return mask\n\n\ndef bbox_clip(x1, y1, x2, y2, boundary, min_sz=10):\n    '''boundary (H,W)'''\n    x1_new = max(0, min(x1, boundary[1] - min_sz))\n    y1_new = max(0, min(y1, boundary[0] - min_sz))\n    x2_new = max(min_sz, min(x2, boundary[1]))\n    y2_new = max(min_sz, min(y2, boundary[0]))\n    return x1_new, y1_new, x2_new, y2_new"
  },
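An illustrative round trip through the helpers above (toy mask, arbitrary sizes); note that `output_sz` is `[width, height]` while numpy shapes are `(H, W)`.

```
import numpy as np
from pytracking.vot20_utils import make_full_size, rect_from_mask, mask_from_rect

mask = np.zeros((20, 30), dtype=np.uint8)            # (H, W)
mask[5:10, 8:16] = 1
rect = rect_from_mask(mask)                          # [8, 5, 8, 5] as [x0, y0, w, h]
assert (mask_from_rect(rect, (30, 20)) == mask).all()
padded = make_full_size(mask[:15, :25], (30, 20))    # zero-padded back to (20, 30)
```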
  {
    "path": "external/PreciseRoIPooling/.gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n.vim-template*\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n"
  },
  {
    "path": "external/PreciseRoIPooling/LICENSE",
    "content": "MIT License\n\nCopyright (c) 2018 Jiayuan Mao\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "external/PreciseRoIPooling/README.md",
    "content": "# PreciseRoIPooling\nThis repo implements the **Precise RoI Pooling** (PrRoI Pooling), proposed in the paper **Acquisition of Localization Confidence for Accurate Object Detection** published at ECCV 2018 (Oral Presentation).\n\n**Acquisition of Localization Confidence for Accurate Object Detection**\n\n_Borui Jiang*, Ruixuan Luo*, Jiayuan Mao*, Tete Xiao, Yuning Jiang_ (* indicates equal contribution.)\n\nhttps://arxiv.org/abs/1807.11590\n\n## Brief\n\nIn short, Precise RoI Pooling is an integration-based (bilinear interpolation) average pooling method for RoI Pooling. It avoids any quantization and has a continuous gradient on bounding box coordinates. It is:\n\n- different from the original RoI Pooling proposed in [Fast R-CNN](https://arxiv.org/abs/1504.08083). PrRoI Pooling uses average pooling instead of max pooling for each bin and has a continuous gradient on bounding box coordinates. That is, one can take the derivatives of some loss function w.r.t the coordinates of each RoI and optimize the RoI coordinates.\n- different from the RoI Align proposed in [Mask R-CNN](https://arxiv.org/abs/1703.06870). PrRoI Pooling uses a full integration-based average pooling instead of sampling a constant number of points. This makes the gradient w.r.t. the coordinates continuous.\n\nFor a better illustration, we illustrate RoI Pooling, RoI Align and PrRoI Pooing in the following figure. More details including the gradient computation can be found in our paper.\n\n<center><img src=\"./_assets/prroi_visualization.png\" width=\"80%\"></center>\n\n## Implementation\n\nPrRoI Pooling was originally implemented by [Tete Xiao](http://tetexiao.com/) based on MegBrain, an (internal) deep learning framework built by Megvii Inc. It was later adapted into open-source deep learning frameworks. Currently, we only support PyTorch. Unfortunately, we don't have any specific plan for the adaptation into other frameworks such as TensorFlow, but any contributions (pull requests) will be more than welcome.\n\n## Usage (PyTorch 1.0)\n\nIn the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 1.0+ and only supports CUDA (CPU mode is not implemented).\nSince we use PyTorch JIT for cxx/cuda code compilation, to use the module in your code, simply do:\n\n```\nfrom prroi_pool import PrRoIPool2D\n\navg_pool = PrRoIPool2D(window_height, window_width, spatial_scale)\nroi_features = avg_pool(features, rois)\n\n# for those who want to use the \"functional\"\n\nfrom prroi_pool.functional import prroi_pool2d\nroi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale)\n```\n\n\n## Usage (PyTorch 0.4)\n\n**!!! Please first checkout to the branch pytorch0.4.**\n\nIn the directory `pytorch/`, we provide a PyTorch-based implementation of PrRoI Pooling. It requires PyTorch 0.4 and only supports CUDA (CPU mode is not implemented).\nTo use the PrRoI Pooling module, first goto `pytorch/prroi_pool` and execute `./travis.sh` to compile the essential components (you may need `nvcc` for this step). 
To use the module in your code, simply do:\n\n```\nfrom prroi_pool import PrRoIPool2D\n\navg_pool = PrRoIPool2D(window_height, window_width, spatial_scale)\nroi_features = avg_pool(features, rois)\n\n# for those who want to use the \"functional\"\n\nfrom prroi_pool.functional import prroi_pool2d\nroi_features = prroi_pool2d(features, rois, window_height, window_width, spatial_scale)\n```\n\nHere,\n\n- RoI is an `m * 5` float tensor of format `(batch_index, x0, y0, x1, y1)`, following the convention in the original Caffe implementation of RoI Pooling, although in some frameworks the batch indices are provided by an integer tensor.\n- `spatial_scale` is multiplied with the RoI coordinates. For example, if your feature maps are down-sampled by a factor of 16 (w.r.t. the input image), you should use a spatial scale of `1/16`.\n- The RoI coordinates follow the [L, R) convention. That is, `(0, 0, 4, 4)` denotes a box of size `4x4`.\n"
  },
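As a hedged, concrete instance of the usage described in the README (a CUDA device and a stride-16 backbone are assumed; all sizes are placeholders):

```
import torch
from prroi_pool import PrRoIPool2D

features = torch.rand(1, 16, 24, 24).cuda()          # feature map from a stride-16 backbone
rois = torch.tensor([[0, 0., 0., 64., 64.]]).cuda()  # (batch_index, x0, y0, x1, y1) in image coords
pool = PrRoIPool2D(7, 7, spatial_scale=1 / 16)       # 64 * 1/16 = 4, i.e. a 4x4 feature region
roi_features = pool(features, rois)                  # -> (1, 16, 7, 7)
```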
  {
    "path": "external/PreciseRoIPooling/pytorch/prroi_pool/.gitignore",
    "content": "*.o\n/_prroi_pooling\n"
  },
  {
    "path": "external/PreciseRoIPooling/pytorch/prroi_pool/__init__.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : __init__.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n# Date   : 07/13/2018\n# \n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nfrom .prroi_pool import *\n\n"
  },
  {
    "path": "external/PreciseRoIPooling/pytorch/prroi_pool/functional.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : functional.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n# Date   : 07/13/2018\n#\n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nimport torch\nimport torch.autograd as ag\n\n__all__ = ['prroi_pool2d']\n\n\n_prroi_pooling = None\n\n\ndef _import_prroi_pooling():\n    global _prroi_pooling\n\n    if _prroi_pooling is None:\n        try:\n            from os.path import join as pjoin, dirname\n            from torch.utils.cpp_extension import load as load_extension\n            root_dir = pjoin(dirname(__file__), 'src')\n\n            _prroi_pooling = load_extension(\n                '_prroi_pooling',\n                [pjoin(root_dir, 'prroi_pooling_gpu.c'), pjoin(root_dir, 'prroi_pooling_gpu_impl.cu')],\n                verbose=True\n            )\n        except ImportError:\n            raise ImportError('Can not compile Precise RoI Pooling library.')\n\n    return _prroi_pooling\n\n\nclass PrRoIPool2DFunction(ag.Function):\n    @staticmethod\n    def forward(ctx, features, rois, pooled_height, pooled_width, spatial_scale):\n        _prroi_pooling = _import_prroi_pooling()\n\n        assert 'FloatTensor' in features.type() and 'FloatTensor' in rois.type(), \\\n                'Precise RoI Pooling only takes float input, got {} for features and {} for rois.'.format(features.type(), rois.type())\n\n        pooled_height = int(pooled_height)\n        pooled_width = int(pooled_width)\n        spatial_scale = float(spatial_scale)\n\n        features = features.contiguous()\n        rois = rois.contiguous()\n        params = (pooled_height, pooled_width, spatial_scale)\n\n        if features.is_cuda:\n            output = _prroi_pooling.prroi_pooling_forward_cuda(features, rois, *params)\n            ctx.params = params\n            # everything here is contiguous.\n            ctx.save_for_backward(features, rois, output)\n        else:\n            raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.')\n\n        return output\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        _prroi_pooling = _import_prroi_pooling()\n\n        features, rois, output = ctx.saved_tensors\n        grad_input = grad_coor = None\n\n        if features.requires_grad:\n            grad_output = grad_output.contiguous()\n            grad_input = _prroi_pooling.prroi_pooling_backward_cuda(features, rois, output, grad_output, *ctx.params)\n        if rois.requires_grad:\n            grad_output = grad_output.contiguous()\n            grad_coor = _prroi_pooling.prroi_pooling_coor_backward_cuda(features, rois, output, grad_output, *ctx.params)\n\n        return grad_input, grad_coor, None, None, None\n\nprroi_pool2d = PrRoIPool2DFunction.apply"
  },
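Since the pooling is continuous in the box coordinates, gradients flow to the RoI tensor itself. A hedged sketch (CUDA is required by this implementation; sizes are arbitrary):

```
import torch
from prroi_pool.functional import prroi_pool2d

features = torch.rand(1, 8, 14, 14, device='cuda', requires_grad=True)
rois = torch.tensor([[0, 1., 1., 9., 9.]], device='cuda', requires_grad=True)
out = prroi_pool2d(features, rois, 3, 3, 1.0)
out.sum().backward()
assert rois.grad is not None                 # box coordinates receive gradients
```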
  {
    "path": "external/PreciseRoIPooling/pytorch/prroi_pool/prroi_pool.py",
    "content": "#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : prroi_pool.py\n# Author : Jiayuan Mao, Tete Xiao\n# Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n# Date   : 07/13/2018\n#\n# This file is part of PreciseRoIPooling.\n# Distributed under terms of the MIT license.\n# Copyright (c) 2017 Megvii Technology Limited.\n\nimport torch.nn as nn\n\nfrom .functional import prroi_pool2d\n\n__all__ = ['PrRoIPool2D']\n\n\nclass PrRoIPool2D(nn.Module):\n    def __init__(self, pooled_height, pooled_width, spatial_scale):\n        super().__init__()\n\n        self.pooled_height = int(pooled_height)\n        self.pooled_width = int(pooled_width)\n        self.spatial_scale = float(spatial_scale)\n\n    def forward(self, features, rois):\n        return prroi_pool2d(features, rois, self.pooled_height, self.pooled_width, self.spatial_scale)\n\n    def extra_repr(self):\n        return 'kernel_size=({pooled_height}, {pooled_width}), spatial_scale={spatial_scale}'.format(**self.__dict__)\n\n"
  },
  {
    "path": "external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.c",
    "content": "/*\n * File   : prroi_pooling_gpu.c\n * Author : Jiayuan Mao, Tete Xiao\n * Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com\n * Date   : 07/13/2018\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#include <math.h>\n#include <torch/extension.h>\n\n#include <ATen/ATen.h>\n#include <ATen/cuda/CUDAContext.h>\n\n#include \"prroi_pooling_gpu_impl.cuh\"\n\n\nat::Tensor prroi_pooling_forward_cuda(const at::Tensor &features, const at::Tensor &rois, int pooled_height, int pooled_width, float spatial_scale) {\n    int nr_rois = rois.size(0);\n    int nr_channels = features.size(1);\n    int height = features.size(2);\n    int width = features.size(3);\n    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;\n    auto output = at::zeros({nr_rois, nr_channels, pooled_height, pooled_width}, features.options());\n\n    if (output.numel() == 0) {\n        AT_CUDA_CHECK(cudaGetLastError());\n        return output;\n    }\n\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n    PrRoIPoolingForwardGpu(\n        stream, features.data<float>(), rois.data<float>(), output.data<float>(),\n        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,\n        top_count\n    );\n\n    AT_CUDA_CHECK(cudaGetLastError());\n    return output;\n}\n\nat::Tensor prroi_pooling_backward_cuda(\n    const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff,\n    int pooled_height, int pooled_width, float spatial_scale) {\n\n    auto features_diff = at::zeros_like(features);\n\n    int nr_rois = rois.size(0);\n    int batch_size = features.size(0);\n    int nr_channels = features.size(1);\n    int height = features.size(2);\n    int width = features.size(3);\n    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;\n    int bottom_count = batch_size * nr_channels * height * width;\n\n    if (output.numel() == 0) {\n        AT_CUDA_CHECK(cudaGetLastError());\n        return features_diff;\n    }\n\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n    PrRoIPoolingBackwardGpu(\n        stream,\n        features.data<float>(), rois.data<float>(), output.data<float>(), output_diff.data<float>(),\n        features_diff.data<float>(),\n        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,\n        top_count, bottom_count\n    );\n\n    AT_CUDA_CHECK(cudaGetLastError());\n    return features_diff;\n}\n\nat::Tensor prroi_pooling_coor_backward_cuda(\n    const at::Tensor &features, const at::Tensor &rois, const at::Tensor &output, const at::Tensor &output_diff,\n    int pooled_height, int pooled_width, float spatial_scale) {\n\n    auto coor_diff = at::zeros_like(rois);\n\n    int nr_rois = rois.size(0);\n    int nr_channels = features.size(1);\n    int height = features.size(2);\n    int width = features.size(3);\n    int top_count = nr_rois * nr_channels * pooled_height * pooled_width;\n    int bottom_count = nr_rois * 5;\n\n    if (output.numel() == 0) {\n        AT_CUDA_CHECK(cudaGetLastError());\n        return coor_diff;\n    }\n\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream();\n    PrRoIPoolingCoorBackwardGpu(\n        stream,\n        features.data<float>(), rois.data<float>(), output.data<float>(), output_diff.data<float>(),\n        coor_diff.data<float>(),\n        nr_channels, height, width, pooled_height, pooled_width, spatial_scale,\n        top_count, bottom_count\n    );\n\n   
 AT_CUDA_CHECK(cudaGetLastError());\n    return coor_diff;\n}\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n    m.def(\"prroi_pooling_forward_cuda\", &prroi_pooling_forward_cuda, \"PRRoIPooling_forward\");\n    m.def(\"prroi_pooling_backward_cuda\", &prroi_pooling_backward_cuda, \"PRRoIPooling_backward\");\n    m.def(\"prroi_pooling_coor_backward_cuda\", &prroi_pooling_coor_backward_cuda, \"PRRoIPooling_backward_coor\");\n}\n"
  },
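  {
    "path": "external/PreciseRoIPooling/pytorch/prroi_pool/jit_load_example.py",
    "content": "# -*- coding: utf-8 -*-\n# Hypothetical usage sketch (not part of the original PreciseRoIPooling\n# sources): one way to JIT-compile the pybind11 module defined in\n# src/prroi_pooling_gpu.c and call its raw bindings directly. The module\n# name and relative source paths are assumptions for illustration; on\n# toolchains that refuse C++ headers in a .c file, renaming the file to\n# .cpp may be necessary.\n\nimport torch\nfrom torch.utils.cpp_extension import load\n\n# The first call compiles with ninja; later calls reuse the build cache.\n_prroi = load(\n    name='_prroi_pooling',\n    sources=[\n        'src/prroi_pooling_gpu.c',\n        'src/prroi_pooling_gpu_impl.cu',\n    ],\n    verbose=True,\n)\n\nif __name__ == '__main__':\n    feats = torch.rand(1, 8, 16, 16, device='cuda')\n    # Each ROI is (batch_index, x1, y1, x2, y2): the 5-float layout the\n    # kernels walk via bottom_rois += n * 5.\n    rois = torch.tensor([[0.0, 0.0, 0.0, 8.0, 8.0]], device='cuda')\n    out = _prroi.prroi_pooling_forward_cuda(feats, rois, 7, 7, 1.0)\n    print(out.shape)  # -> torch.Size([1, 8, 7, 7])\n"
  },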
  {
    "path": "external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu.h",
    "content": "/*\n * File   : prroi_pooling_gpu.h\n * Author : Jiayuan Mao, Tete Xiao\n * Email  : maojiayuan@gmail.com, jasonhsiao97@gmail.com \n * Date   : 07/13/2018\n * \n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\nint prroi_pooling_forward_cuda(THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, int pooled_height, int pooled_width, float spatial_scale);\n\nint prroi_pooling_backward_cuda(\n    THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,\n    int pooled_height, int pooled_width, float spatial_scale\n);\n\nint prroi_pooling_coor_backward_cuda(\n    THCudaTensor *features, THCudaTensor *rois, THCudaTensor *output, THCudaTensor *output_diff, THCudaTensor *features_diff,\n    int pooled_height, int pooled_width, float spatial_scal\n);\n\n"
  },
  {
    "path": "external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cu",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cu\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#include \"prroi_pooling_gpu_impl.cuh\"\n\n#include <cstdio>\n#include <cfloat>\n\n#define CUDA_KERNEL_LOOP(i, n) \\\n    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \\\n        i < (n); \\\n        i += blockDim.x * gridDim.x)\n\n#define CUDA_POST_KERNEL_CHECK \\\n    do { \\\n        cudaError_t err = cudaGetLastError(); \\\n        if (cudaSuccess != err) { \\\n            fprintf(stderr, \"cudaCheckError() failed : %s\\n\", cudaGetErrorString(err)); \\\n            exit(-1); \\\n        } \\\n    } while(0)\n\n#define CUDA_NUM_THREADS 512\n\nnamespace {\n\nstatic int CUDA_NUM_BLOCKS(const int N) {\n  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;\n}\n\n__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    float retVal = overflow ? 0.0f : data[h * width + w];\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingGetCoeff(float dh, float dw){\n    dw = dw > 0 ? dw : -dw;\n    dh = dh > 0 ? dh : -dh;\n    return (1.0f - dh) * (1.0f - dw);\n}\n\n__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {\n    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;\n}\n\n__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){\n    float retVal = 0.0f;\n    int h1 = floorf(h);\n    int w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h);\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n    float sum_out = 0;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * 
lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;\n\n    return sum_out;\n}\n\n__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    if (!overflow)\n        atomicAdd(diff + h * width + w, top_diff * coeff);\n}\n\n__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);\n}\n\n__global__ void PrRoIPoolingForward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_OUT top_data,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n\n    bottom_rois += n * 5;\n    int roi_batch_ind = bottom_rois[0];\n\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));\n    float 
roi_height = max(roi_end_h - roi_start_h, ((float)0.0));\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    float *this_out = top_data + index;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n    if (win_size == 0) {\n        *this_out = 0;\n        return;\n    }\n\n    float sum_out = 0;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n    *this_out = sum_out / win_size;\n  }\n}\n\n__global__ void PrRoIPoolingBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n\n  }\n}\n\n__global__ void PrRoIPoolingCoorBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_data,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    const float *this_top_data = top_data + index;\n    float *this_data_grad = bottom_diff + n * 5;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    // WARNING: to be discussed\n    if (sum_out == 0)\n        return;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;\n    for (int h_iter = s_h; h_iter < e_h; ++h_iter) {\n        g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));\n\n        g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));\n    }\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter) {\n        g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));\n\n        g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));\n    }\n\n    float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);\n    float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);\n\n    partial_x1 = partial_x1 / win_size * spatial_scale;\n    partial_x2 = partial_x2 / win_size * spatial_scale;\n    partial_y1 = partial_y1 / win_size * spatial_scale;\n    partial_y2 = partial_y2 / win_size * spatial_scale;\n\n    // (b, x1, y1, x2, y2)\n\n    this_data_grad[0] = 0;\n    atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width)\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height)\n            * (*this_out_grad));\n  }\n}\n\n} /* !anonymous namespace */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count) {\n\n    
PrRoIPoolingForward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_rois, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingCoorBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\n} /* !extern \"C\" */\n\n"
  },
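  {
    "path": "external/PreciseRoIPooling/pytorch/prroi_pool/numeric_reference_example.py",
    "content": "# -*- coding: utf-8 -*-\n# Hypothetical reference sketch (not part of the original sources): Precise\n# RoI Pooling defines each output bin as the exact integral of the bilinearly\n# interpolated feature map over the bin, divided by the bin area. The CUDA\n# kernel evaluates that integral in closed form (PrRoIPoolingMatCalculation);\n# this pure-Python version approximates the same quantity by midpoint-rule\n# sampling, so the two should agree ever more closely as n grows.\n\nimport math\n\nimport torch\n\n\ndef _get(data, h, w):\n    # Zero padding outside the map, mirroring PrRoIPoolingGetData.\n    H, W = data.shape\n    return 0.0 if h < 0 or w < 0 or h >= H or w >= W else float(data[h, w])\n\n\ndef bilinear(data, y, x):\n    # Bilinear interpolation with pixel centers at integer coordinates,\n    # mirroring PrRoIPoolingInterpolation / PrRoIPoolingGetCoeff.\n    h0, w0 = math.floor(y), math.floor(x)\n    ry, rx = y - h0, x - w0\n    return ((1 - ry) * (1 - rx) * _get(data, h0, w0)\n            + (1 - ry) * rx * _get(data, h0, w0 + 1)\n            + ry * (1 - rx) * _get(data, h0 + 1, w0)\n            + ry * rx * _get(data, h0 + 1, w0 + 1))\n\n\ndef prroi_bin_numeric(data, y1, x1, y2, x2, n=64):\n    # Midpoint-rule quadrature of the bilinear surface over one bin;\n    # dividing by n*n turns the sum into integral / bin area.\n    total = 0.0\n    for i in range(n):\n        for j in range(n):\n            y = y1 + (i + 0.5) * (y2 - y1) / n\n            x = x1 + (j + 0.5) * (x2 - x1) / n\n            total += bilinear(data, y, x)\n    return total / (n * n)\n\n\nif __name__ == '__main__':\n    data = torch.rand(12, 12)\n    print(prroi_bin_numeric(data, 1.3, 2.1, 4.8, 5.6))\n"
  },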
  {
    "path": "external/PreciseRoIPooling/pytorch/prroi_pool/src/prroi_pooling_gpu_impl.cuh",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cuh\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#ifndef PRROI_POOLING_GPU_IMPL_CUH\n#define PRROI_POOLING_GPU_IMPL_CUH\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#define F_DEVPTR_IN const float *\n#define F_DEVPTR_OUT float *\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count);\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\n#ifdef __cplusplus\n} /* !extern \"C\" */\n#endif\n\n#endif /* !PRROI_POOLING_GPU_IMPL_CUH */\n\n"
  },
  {
    "path": "external/PreciseRoIPooling/pytorch/tests/test_prroi_pooling2d.py",
    "content": "# -*- coding: utf-8 -*-\n# File   : test_prroi_pooling2d.py\n# Author : Jiayuan Mao\n# Email  : maojiayuan@gmail.com\n# Date   : 18/02/2018\n#\n# This file is part of Jacinle.\n\nimport unittest\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom jactorch.utils.unittest import TorchTestCase\n\nfrom prroi_pool import PrRoIPool2D\n\n\nclass TestPrRoIPool2D(TorchTestCase):\n    def test_forward(self):\n        pool = PrRoIPool2D(7, 7, spatial_scale=0.5)\n        features = torch.rand((4, 16, 24, 32)).cuda()\n        rois = torch.tensor([\n            [0, 0, 0, 14, 14],\n            [1, 14, 14, 28, 28],\n        ]).float().cuda()\n\n        out = pool(features, rois)\n        out_gold = F.avg_pool2d(features, kernel_size=2, stride=1)\n\n        self.assertTensorClose(out, torch.stack((\n            out_gold[0, :, :7, :7],\n            out_gold[1, :, 7:14, 7:14],\n        ), dim=0))\n\n    def test_backward_shapeonly(self):\n        pool = PrRoIPool2D(2, 2, spatial_scale=0.5)\n\n        features = torch.rand((4, 2, 24, 32)).cuda()\n        rois = torch.tensor([\n            [0, 0, 0, 4, 4],\n            [1, 14, 14, 18, 18],\n        ]).float().cuda()\n        features.requires_grad = rois.requires_grad = True\n        out = pool(features, rois)\n\n        loss = out.sum()\n        loss.backward()\n\n        self.assertTupleEqual(features.size(), features.grad.size())\n        self.assertTupleEqual(rois.size(), rois.grad.size())\n\n\nif __name__ == '__main__':\n    unittest.main()\n"
  },
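  {
    "path": "external/PreciseRoIPooling/pytorch/tests/gradcheck_example.py",
    "content": "# -*- coding: utf-8 -*-\n# Hypothetical smoke test (not part of the original test suite): a loose\n# finite-difference check of the ROI-coordinate gradients produced by\n# prroi_pooling_coor_backward_cuda. The kernels are float32-only, so a\n# strict double-precision gradcheck is impossible here; the eps/atol/rtol\n# values below are deliberately loose, and this should be read as a sanity\n# check rather than a proof of correctness.\n\nimport torch\nfrom torch.autograd import gradcheck\n\nfrom prroi_pool import PrRoIPool2D\n\n\ndef main():\n    pool = PrRoIPool2D(3, 3, spatial_scale=1.0)\n    features = torch.rand(1, 2, 16, 16, device='cuda', requires_grad=True)\n    # Fractional ROI corners exercise the continuous coordinate gradient.\n    rois = torch.tensor([[0.0, 2.5, 2.5, 10.0, 10.0]],\n                        device='cuda', requires_grad=True)\n\n    ok = gradcheck(lambda f, r: pool(f, r), (features, rois),\n                   eps=1e-2, atol=1e-2, rtol=1e-2, nondet_tol=1e-3)\n    print('gradcheck passed:', ok)\n\n\nif __name__ == '__main__':\n    main()\n"
  },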
  {
    "path": "external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cu",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cu\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#include \"prroi_pooling_gpu_impl.cuh\"\n\n#include <cstdio>\n#include <cfloat>\n\n#define CUDA_KERNEL_LOOP(i, n) \\\n    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \\\n        i < (n); \\\n        i += blockDim.x * gridDim.x)\n\n#define CUDA_POST_KERNEL_CHECK \\\n    do { \\\n        cudaError_t err = cudaGetLastError(); \\\n        if (cudaSuccess != err) { \\\n            fprintf(stderr, \"cudaCheckError() failed : %s\\n\", cudaGetErrorString(err)); \\\n            exit(-1); \\\n        } \\\n    } while(0)\n\n#define CUDA_NUM_THREADS 512\n\nnamespace {\n\nstatic int CUDA_NUM_BLOCKS(const int N) {\n  return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;\n}\n\n__device__ static float PrRoIPoolingGetData(F_DEVPTR_IN data, const int h, const int w, const int height, const int width)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    float retVal = overflow ? 0.0f : data[h * width + w];\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingGetCoeff(float dh, float dw){\n    dw = dw > 0 ? dw : -dw;\n    dh = dh > 0 ? dh : -dh;\n    return (1.0f - dh) * (1.0f - dw);\n}\n\n__device__ static float PrRoIPoolingSingleCoorIntegral(float s, float t, float c1, float c2) {\n    return 0.5 * (t * t - s * s) * c2 + (t - 0.5 * t * t - s + 0.5 * s * s) * c1;\n}\n\n__device__ static float PrRoIPoolingInterpolation(F_DEVPTR_IN data, const float h, const float w, const int height, const int width){\n    float retVal = 0.0f;\n    int h1 = floorf(h);\n    int w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w);\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h);\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    h1 = floorf(h)+1;\n    w1 = floorf(w)+1;\n    retVal += PrRoIPoolingGetData(data, h1, w1, height, width) * PrRoIPoolingGetCoeff(h - float(h1), w - float(w1));\n    return retVal;\n}\n\n__device__ static float PrRoIPoolingMatCalculation(F_DEVPTR_IN this_data, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n    float sum_out = 0;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, s_h, e_w, h0, w0) * tmp;\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * 
lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, s_w, h0, w0) * tmp;\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    sum_out += PrRoIPoolingGetData(this_data, e_h, e_w, h0, w0) * tmp;\n\n    return sum_out;\n}\n\n__device__ static void PrRoIPoolingDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int h, const int w, const int height, const int width, const float coeff)\n{\n    bool overflow = (h < 0) || (w < 0) || (h >= height) || (w >= width);\n    if (!overflow)\n        atomicAdd(diff + h * width + w, top_diff * coeff);\n}\n\n__device__ static void PrRoIPoolingMatDistributeDiff(F_DEVPTR_OUT diff, const float top_diff, const int s_h, const int s_w, const int e_h, const int e_w,\n        const float y0, const float x0, const float y1, const float x1, const int h0, const int w0)\n{\n    float alpha, beta, lim_alpha, lim_beta, tmp;\n\n    alpha = x0 - float(s_w);\n    beta = y0 - float(s_h);\n    lim_alpha = x1 - float(s_w);\n    lim_beta = y1 - float(s_h);\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, s_h, e_w, h0, w0, tmp);\n\n    alpha = x0 - float(s_w);\n    beta = float(e_h) - y1;\n    lim_alpha = x1 - float(s_w);\n    lim_beta = float(e_h) - y0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, s_w, h0, w0, tmp);\n\n    alpha = float(e_w) - x1;\n    lim_alpha = float(e_w) - x0;\n    tmp = (lim_alpha - 0.5f * lim_alpha * lim_alpha - alpha + 0.5f * alpha * alpha)\n        * (lim_beta - 0.5f * lim_beta * lim_beta - beta + 0.5f * beta * beta);\n    PrRoIPoolingDistributeDiff(diff, top_diff, e_h, e_w, h0, w0, tmp);\n}\n\n__global__ void PrRoIPoolingForward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_OUT top_data,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n\n    bottom_rois += n * 5;\n    int roi_batch_ind = bottom_rois[0];\n\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, ((float)0.0));\n    float 
roi_height = max(roi_end_h - roi_start_h, ((float)0.0));\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    float *this_out = top_data + index;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n    if (win_size == 0) {\n        *this_out = 0;\n        return;\n    }\n\n    float sum_out = 0;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            sum_out += PrRoIPoolingMatCalculation(this_data, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n    *this_out = sum_out / win_size;\n  }\n}\n\n__global__ void PrRoIPoolingBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    float *this_data_grad = bottom_diff + (roi_batch_ind * channels + c) * height * width;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter)\n        for (int h_iter = s_h; h_iter < e_h; ++h_iter)\n            PrRoIPoolingMatDistributeDiff(this_data_grad, sum_out, h_iter, w_iter, h_iter + 1, w_iter + 1,\n                max(win_start_h, float(h_iter)), max(win_start_w, float(w_iter)),\n                min(win_end_h, float(h_iter) + 1.0), min(win_end_w, float(w_iter + 1.0)),\n                height, width);\n\n  }\n}\n\n__global__ void PrRoIPoolingCoorBackward(\n        const int nthreads,\n        F_DEVPTR_IN bottom_data,\n        F_DEVPTR_IN bottom_rois,\n        F_DEVPTR_IN top_data,\n        F_DEVPTR_IN top_diff,\n        F_DEVPTR_OUT bottom_diff,\n        const int channels,\n        const int height,\n        const int width,\n        const int pooled_height,\n        const int pooled_width,\n        const float spatial_scale) {\n\n  CUDA_KERNEL_LOOP(index, nthreads) {\n    // (n, c, ph, pw) is an element in the pooled output\n    int pw = index % pooled_width;\n    int ph = (index / pooled_width) % pooled_height;\n    int c = (index / pooled_width / pooled_height) % channels;\n    int n = index / pooled_width / pooled_height / channels;\n    bottom_rois += n * 5;\n\n    int roi_batch_ind = bottom_rois[0];\n    float roi_start_w = bottom_rois[1] * spatial_scale;\n    float roi_start_h = bottom_rois[2] * spatial_scale;\n    float roi_end_w = bottom_rois[3] * spatial_scale;\n    float roi_end_h = bottom_rois[4] * spatial_scale;\n\n    float roi_width = max(roi_end_w - roi_start_w, (float)0);\n    float roi_height = max(roi_end_h - roi_start_h, (float)0);\n    float bin_size_h = roi_height / static_cast<float>(pooled_height);\n    float bin_size_w = roi_width / static_cast<float>(pooled_width);\n\n    const float *this_out_grad = top_diff + index;\n    const float *this_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;\n    const float *this_top_data = top_data + index;\n    float *this_data_grad = bottom_diff + n * 5;\n\n    float win_start_w = roi_start_w + bin_size_w * pw;\n    float win_start_h = roi_start_h + bin_size_h * ph;\n    float win_end_w = win_start_w + bin_size_w;\n    float win_end_h = win_start_h + bin_size_h;\n\n    float win_size = max(float(0.0), bin_size_w * bin_size_h);\n\n    float sum_out = win_size == float(0) ? 
float(0) : *this_out_grad / win_size;\n\n    // WARNING: to be discussed\n    if (sum_out == 0)\n        return;\n\n    int s_w, s_h, e_w, e_h;\n\n    s_w = floorf(win_start_w);\n    e_w = ceilf(win_end_w);\n    s_h = floorf(win_start_h);\n    e_h = ceilf(win_end_h);\n\n    float g_x1_y = 0, g_x2_y = 0, g_x_y1 = 0, g_x_y2 = 0;\n    for (int h_iter = s_h; h_iter < e_h; ++h_iter) {\n        g_x1_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_start_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_start_w, height, width));\n\n        g_x2_y += PrRoIPoolingSingleCoorIntegral(max(win_start_h, float(h_iter)) - h_iter,\n                min(win_end_h, float(h_iter + 1)) - h_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter, win_end_w, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, h_iter + 1, win_end_w, height, width));\n    }\n\n    for (int w_iter = s_w; w_iter < e_w; ++w_iter) {\n        g_x_y1 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_start_h, w_iter + 1, height, width));\n\n        g_x_y2 += PrRoIPoolingSingleCoorIntegral(max(win_start_w, float(w_iter)) - w_iter,\n                min(win_end_w, float(w_iter + 1)) - w_iter,\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter, height, width),\n                PrRoIPoolingInterpolation(this_bottom_data, win_end_h, w_iter + 1, height, width));\n    }\n\n    float partial_x1 = -g_x1_y + (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y1 = -g_x_y1 + (win_end_w - win_start_w) * (*this_top_data);\n    float partial_x2 = g_x2_y - (win_end_h - win_start_h) * (*this_top_data);\n    float partial_y2 = g_x_y2 - (win_end_w - win_start_w) * (*this_top_data);\n\n    partial_x1 = partial_x1 / win_size * spatial_scale;\n    partial_x2 = partial_x2 / win_size * spatial_scale;\n    partial_y1 = partial_y1 / win_size * spatial_scale;\n    partial_y2 = partial_y2 / win_size * spatial_scale;\n\n    // (b, x1, y1, x2, y2)\n\n    this_data_grad[0] = 0;\n    atomicAdd(this_data_grad + 1, (partial_x1 * (1.0 - float(pw) / pooled_width) + partial_x2 * (1.0 - float(pw + 1) / pooled_width))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 2, (partial_y1 * (1.0 - float(ph) / pooled_height) + partial_y2 * (1.0 - float(ph + 1) / pooled_height))\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 3, (partial_x2 * float(pw + 1) / pooled_width + partial_x1 * float(pw) / pooled_width)\n            * (*this_out_grad));\n    atomicAdd(this_data_grad + 4, (partial_y2 * float(ph + 1) / pooled_height + partial_y1 * float(ph) / pooled_height)\n            * (*this_out_grad));\n  }\n}\n\n} /* !anonymous namespace */\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count) {\n\n    
PrRoIPoolingForward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_rois, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count) {\n\n    cudaMemsetAsync(bottom_diff, 0, sizeof(float) * bottom_count, stream);\n    PrRoIPoolingCoorBackward<<<CUDA_NUM_BLOCKS(top_count), CUDA_NUM_THREADS, 0, stream>>>(\n        top_count, bottom_data, bottom_rois, top_data, top_diff, bottom_diff,\n        channels_, height_, width_, pooled_height_, pooled_width_, spatial_scale_);\n    CUDA_POST_KERNEL_CHECK;\n}\n\n} /* !extern \"C\" */\n\n"
  },
  {
    "path": "external/PreciseRoIPooling/src/prroi_pooling_gpu_impl.cuh",
    "content": "/*\n * File   : prroi_pooling_gpu_impl.cuh\n * Author : Tete Xiao, Jiayuan Mao\n * Email  : jasonhsiao97@gmail.com\n *\n * Distributed under terms of the MIT license.\n * Copyright (c) 2017 Megvii Technology Limited.\n */\n\n#ifndef PRROI_POOLING_GPU_IMPL_CUH\n#define PRROI_POOLING_GPU_IMPL_CUH\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n#define F_DEVPTR_IN const float *\n#define F_DEVPTR_OUT float *\n\nvoid PrRoIPoolingForwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_OUT top_data,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count);\n\nvoid PrRoIPoolingBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\nvoid PrRoIPoolingCoorBackwardGpu(\n    cudaStream_t stream,\n    F_DEVPTR_IN bottom_data,\n    F_DEVPTR_IN bottom_rois,\n    F_DEVPTR_IN top_data,\n    F_DEVPTR_IN top_diff,\n    F_DEVPTR_OUT bottom_diff,\n    const int channels_, const int height_, const int width_,\n    const int pooled_height_, const int pooled_width_,\n    const float spatial_scale_,\n    const int top_count, const int bottom_count);\n\n#ifdef __cplusplus\n} /* !extern \"C\" */\n#endif\n\n#endif /* !PRROI_POOLING_GPU_IMPL_CUH */\n\n"
  },
  {
    "path": "external/vot20/cttrack/config.yaml",
    "content": "registry:\n- ./trackers.ini\nstack: vot2020\n"
  },
  {
    "path": "external/vot20/cttrack/trackers.ini",
    "content": "[cttrack_large]  # <tracker-name>\nlabel = cttrack_large\nprotocol = traxpython\n\ncommand = from cttrack_start import main;main()\n\n# Specify a path to trax python wrapper if it is not visible (separate by ; if using multiple paths)\n\n# paths = /home/lr/workspace/CTTrack:\npaths = <PATH_OF_CTTRACK>\n\n# Additional environment paths\nenv_PATH = <PATH_OF_CUDA_LIB>:<PATH_OF_PYTHON>"
  },
  {
    "path": "lib/__init__.py",
    "content": ""
  },
  {
    "path": "lib/config/__init__.py",
    "content": ""
  },
  {
    "path": "lib/config/artrack/config.py",
    "content": "from easydict import EasyDict as edict\nimport yaml\n\n\"\"\"\nAdd default config for OSTrack.\n\"\"\"\ncfg = edict()\n\n# MODEL\ncfg.MODEL = edict()\ncfg.MODEL.PRETRAIN_FILE = \"mae_pretrain_vit_base.pth\"\ncfg.MODEL.PRETRAIN_PTH = \"\"\ncfg.MODEL.EXTRA_MERGER = False\n\ncfg.MODEL.RETURN_INTER = False\ncfg.MODEL.RETURN_STAGES = [2, 5, 8, 11]\n\n# MODEL.BACKBONE\ncfg.MODEL.BACKBONE = edict()\ncfg.MODEL.BACKBONE.TYPE = \"vit_base_patch16_224\"\ncfg.MODEL.BACKBONE.STRIDE = 16\ncfg.MODEL.BACKBONE.MID_PE = False\ncfg.MODEL.BACKBONE.SEP_SEG = False\ncfg.MODEL.BACKBONE.CAT_MODE = 'direct'\ncfg.MODEL.BACKBONE.MERGE_LAYER = 0\ncfg.MODEL.BACKBONE.ADD_CLS_TOKEN = False\ncfg.MODEL.BACKBONE.CLS_TOKEN_USE_MODE = 'ignore'\n\ncfg.MODEL.BACKBONE.CE_LOC = []\ncfg.MODEL.BACKBONE.CE_KEEP_RATIO = []\ncfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE = 'ALL'  # choose between ALL, CTR_POINT, CTR_REC, GT_BOX\n\n# MODEL.HEAD\ncfg.MODEL.BINS = 400\ncfg.MODEL.RANGE = 2\ncfg.MODEL.ENCODER_LAYER = 3\ncfg.MODEL.NUM_HEADS = 12\ncfg.MODEL.MLP_RATIO = 4\ncfg.MODEL.QKV_BIAS = True\ncfg.MODEL.DROP_RATE = 0.1\ncfg.MODEL.ATTN_DROP = 0.0\ncfg.MODEL.DROP_PATH = 0.0\ncfg.MODEL.DECODER_LAYER = 6\ncfg.MODEL.HEAD = edict()\ncfg.MODEL.HEAD.TYPE = \"PIX\"\ncfg.MODEL.HEAD.NUM_CHANNELS = 1024\n\n# TRAIN\ncfg.TRAIN = edict()\ncfg.TRAIN.LR = 0.0001\ncfg.TRAIN.WEIGHT_DECAY = 0.0001\ncfg.TRAIN.EPOCH = 500\ncfg.TRAIN.LR_DROP_EPOCH = 400\ncfg.TRAIN.BATCH_SIZE = 16\ncfg.TRAIN.NUM_WORKER = 10\ncfg.TRAIN.OPTIMIZER = \"ADAMW\"\ncfg.TRAIN.BACKBONE_MULTIPLIER = 0.1\ncfg.TRAIN.GIOU_WEIGHT = 2.0\ncfg.TRAIN.L1_WEIGHT = 5.0\ncfg.TRAIN.FREEZE_LAYERS = [0, ]\ncfg.TRAIN.PRINT_INTERVAL = 50\ncfg.TRAIN.VAL_EPOCH_INTERVAL = 20\ncfg.TRAIN.GRAD_CLIP_NORM = 0.1\ncfg.TRAIN.AMP = False\n\ncfg.TRAIN.CE_START_EPOCH = 20  # candidate elimination start epoch\ncfg.TRAIN.CE_WARM_EPOCH = 80  # candidate elimination warm up epoch\ncfg.TRAIN.DROP_PATH_RATE = 0.1  # drop path rate for ViT backbone\n\n# TRAIN.SCHEDULER\ncfg.TRAIN.SCHEDULER = edict()\ncfg.TRAIN.SCHEDULER.TYPE = \"step\"\ncfg.TRAIN.SCHEDULER.DECAY_RATE = 0.1\n\n# DATA\ncfg.DATA = edict()\ncfg.DATA.SAMPLER_MODE = \"causal\"  # sampling methods\ncfg.DATA.MEAN = [0.485, 0.456, 0.406]\ncfg.DATA.STD = [0.229, 0.224, 0.225]\ncfg.DATA.MAX_SAMPLE_INTERVAL = 200\n# DATA.TRAIN\ncfg.DATA.TRAIN = edict()\ncfg.DATA.TRAIN.DATASETS_NAME = [\"LASOT\", \"GOT10K_vottrain\"]\ncfg.DATA.TRAIN.DATASETS_RATIO = [1, 1]\ncfg.DATA.TRAIN.SAMPLE_PER_EPOCH = 60000\n# DATA.VAL\ncfg.DATA.VAL = edict()\ncfg.DATA.VAL.DATASETS_NAME = [\"GOT10K_votval\"]\ncfg.DATA.VAL.DATASETS_RATIO = [1]\ncfg.DATA.VAL.SAMPLE_PER_EPOCH = 10000\n# DATA.SEARCH\ncfg.DATA.SEARCH = edict()\ncfg.DATA.SEARCH.SIZE = 256\ncfg.DATA.SEARCH.FACTOR = 5.0\ncfg.DATA.SEARCH.CENTER_JITTER = 4.5\ncfg.DATA.SEARCH.SCALE_JITTER = 0.5\ncfg.DATA.SEARCH.NUMBER = 1\n# DATA.TEMPLATE\ncfg.DATA.TEMPLATE = edict()\ncfg.DATA.TEMPLATE.NUMBER = 1\ncfg.DATA.TEMPLATE.SIZE = 128\ncfg.DATA.TEMPLATE.FACTOR = 2.0\ncfg.DATA.TEMPLATE.CENTER_JITTER = 0\ncfg.DATA.TEMPLATE.SCALE_JITTER = 0\n\n# TEST\ncfg.TEST = edict()\ncfg.TEST.TEMPLATE_FACTOR = 2.0\ncfg.TEST.TEMPLATE_SIZE = 128\ncfg.TEST.SEARCH_FACTOR = 5.0\ncfg.TEST.SEARCH_SIZE = 256\ncfg.TEST.EPOCH = 500\n\n\ndef _edict2dict(dest_dict, src_edict):\n    if isinstance(dest_dict, dict) and isinstance(src_edict, dict):\n        for k, v in src_edict.items():\n            if not isinstance(v, edict):\n                dest_dict[k] = v\n            else:\n                dest_dict[k] = {}\n                _edict2dict(dest_dict[k], v)\n    else:\n    
    return\n\n\ndef gen_config(config_file):\n    cfg_dict = {}\n    _edict2dict(cfg_dict, cfg)\n    with open(config_file, 'w') as f:\n        yaml.dump(cfg_dict, f, default_flow_style=False)\n\n\ndef _update_config(base_cfg, exp_cfg):\n    if isinstance(base_cfg, dict) and isinstance(exp_cfg, edict):\n        for k, v in exp_cfg.items():\n            if k in base_cfg:\n                if not isinstance(v, dict):\n                    base_cfg[k] = v\n                else:\n                    _update_config(base_cfg[k], v)\n            else:\n                raise ValueError(\"{} not exist in config.py\".format(k))\n    else:\n        return\n\n\ndef update_config_from_file(filename, base_cfg=None):\n    exp_config = None\n    with open(filename) as f:\n        exp_config = edict(yaml.safe_load(f))\n        if base_cfg is not None:\n            _update_config(base_cfg, exp_config)\n        else:\n            _update_config(cfg, exp_config)\n"
  },
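  {
    "path": "lib/config/artrack/config_example.py",
    "content": "# -*- coding: utf-8 -*-\n# Hypothetical usage sketch (not shipped with the repo): shows how\n# update_config_from_file() in lib/config/artrack/config.py merges a YAML\n# experiment file into the default cfg. Only keys that already exist in\n# config.py may appear in the YAML; unknown keys raise ValueError. Assumes\n# the repository root is on PYTHONPATH.\n\nimport tempfile\n\nfrom lib.config.artrack.config import cfg, update_config_from_file\n\nYAML_TEXT = (\n    'TRAIN:\\n'\n    '  LR: 0.0004\\n'\n    '  BATCH_SIZE: 32\\n'\n    'DATA:\\n'\n    '  SEARCH:\\n'\n    '    SIZE: 384\\n'\n)\n\nwith tempfile.NamedTemporaryFile('w', suffix='.yaml', delete=False) as f:\n    f.write(YAML_TEXT)\n    experiment_yaml = f.name\n\nupdate_config_from_file(experiment_yaml)\nassert cfg.TRAIN.LR == 0.0004\nassert cfg.DATA.SEARCH.SIZE == 384\nprint('merged LR:', cfg.TRAIN.LR, 'search size:', cfg.DATA.SEARCH.SIZE)\n"
  },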
  {
    "path": "lib/config/artrack_seq/config.py",
    "content": "from easydict import EasyDict as edict\r\nimport yaml\r\n\r\n\"\"\"\r\nAdd default config for OSTrack.\r\n\"\"\"\r\ncfg = edict()\r\n\r\n# MODEL\r\ncfg.MODEL = edict()\r\ncfg.MODEL.PRETRAIN_FILE = \"mae_pretrain_vit_base.pth\"\r\ncfg.MODEL.PRETRAIN_PTH = \"\"\r\ncfg.MODEL.PRENUM = 7\r\ncfg.MODEL.EXTRA_MERGER = False\r\n\r\ncfg.MODEL.RETURN_INTER = False\r\ncfg.MODEL.RETURN_STAGES = [2, 5, 8, 11]\r\n\r\n# MODEL.BACKBONE\r\ncfg.MODEL.BACKBONE = edict()\r\ncfg.MODEL.BACKBONE.TYPE = \"vit_base_patch16_224\"\r\ncfg.MODEL.BACKBONE.STRIDE = 16\r\ncfg.MODEL.BACKBONE.MID_PE = False\r\ncfg.MODEL.BACKBONE.SEP_SEG = False\r\ncfg.MODEL.BACKBONE.CAT_MODE = 'direct'\r\ncfg.MODEL.BACKBONE.MERGE_LAYER = 0\r\ncfg.MODEL.BACKBONE.ADD_CLS_TOKEN = False\r\ncfg.MODEL.BACKBONE.CLS_TOKEN_USE_MODE = 'ignore'\r\n\r\ncfg.MODEL.BACKBONE.CE_LOC = []\r\ncfg.MODEL.BACKBONE.CE_KEEP_RATIO = []\r\ncfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE = 'ALL'  # choose between ALL, CTR_POINT, CTR_REC, GT_BOX\r\n\r\n# MODEL.HEAD\r\ncfg.MODEL.BINS = 400\r\ncfg.MODEL.RANGE = 2\r\ncfg.MODEL.ENCODER_LAYER = 3\r\ncfg.MODEL.NUM_HEADS = 12\r\ncfg.MODEL.MLP_RATIO = 4\r\ncfg.MODEL.QKV_BIAS = True\r\ncfg.MODEL.DROP_RATE = 0.1\r\ncfg.MODEL.ATTN_DROP = 0.0\r\ncfg.MODEL.DROP_PATH = 0.0\r\ncfg.MODEL.DECODER_LAYER = 6\r\ncfg.MODEL.HEAD = edict()\r\ncfg.MODEL.HEAD.TYPE = \"PIX\"\r\ncfg.MODEL.HEAD.NUM_CHANNELS = 1024\r\n\r\n# TRAIN\r\ncfg.TRAIN = edict()\r\ncfg.TRAIN.LR = 0.0001\r\ncfg.TRAIN.WEIGHT_DECAY = 0.0001\r\ncfg.TRAIN.EPOCH = 500\r\ncfg.TRAIN.LR_DROP_EPOCH = 400\r\ncfg.TRAIN.BATCH_SIZE = 16\r\ncfg.TRAIN.NUM_WORKER = 10\r\ncfg.TRAIN.OPTIMIZER = \"ADAMW\"\r\ncfg.TRAIN.BACKBONE_MULTIPLIER = 0.1\r\ncfg.TRAIN.GIOU_WEIGHT = 2.0\r\ncfg.TRAIN.L1_WEIGHT = 5.0\r\ncfg.TRAIN.FREEZE_LAYERS = [0, ]\r\ncfg.TRAIN.PRINT_INTERVAL = 50\r\ncfg.TRAIN.VAL_EPOCH_INTERVAL = 20\r\ncfg.TRAIN.GRAD_CLIP_NORM = 0.1\r\ncfg.TRAIN.AMP = False\r\n\r\ncfg.TRAIN.CE_START_EPOCH = 20  # candidate elimination start epoch\r\ncfg.TRAIN.CE_WARM_EPOCH = 80  # candidate elimination warm up epoch\r\ncfg.TRAIN.DROP_PATH_RATE = 0.1  # drop path rate for ViT backbone\r\n\r\n# TRAIN.SCHEDULER\r\ncfg.TRAIN.SCHEDULER = edict()\r\ncfg.TRAIN.SCHEDULER.TYPE = \"step\"\r\ncfg.TRAIN.SCHEDULER.DECAY_RATE = 0.1\r\n\r\n# DATA\r\ncfg.DATA = edict()\r\ncfg.DATA.SAMPLER_MODE = \"causal\"  # sampling methods\r\ncfg.DATA.MEAN = [0.485, 0.456, 0.406]\r\ncfg.DATA.STD = [0.229, 0.224, 0.225]\r\ncfg.DATA.MAX_SAMPLE_INTERVAL = 200\r\ncfg.DATA.MAX_GAP = 300\r\ncfg.DATA.MAX_INTERVAL = 5\r\ncfg.DATA.INTERVAL_PROB = 0.0\r\ncfg.DATA.TEMP = 2\r\n# DATA.TRAIN\r\ncfg.DATA.TRAIN = edict()\r\ncfg.DATA.TRAIN.DATASETS_NAME = [\"LASOT\", \"GOT10K_vottrain\"]\r\ncfg.DATA.TRAIN.DATASETS_RATIO = [1, 1]\r\ncfg.DATA.TRAIN.SAMPLE_PER_EPOCH = 60000\r\n# DATA.VAL\r\ncfg.DATA.VAL = edict()\r\ncfg.DATA.VAL.DATASETS_NAME = [\"GOT10K_votval\"]\r\ncfg.DATA.VAL.DATASETS_RATIO = [1]\r\ncfg.DATA.VAL.SAMPLE_PER_EPOCH = 10000\r\n# DATA.SEARCH\r\ncfg.DATA.SEARCH = edict()\r\ncfg.DATA.SEARCH.SIZE = 256\r\ncfg.DATA.SEARCH.FACTOR = 5.0\r\ncfg.DATA.SEARCH.CENTER_JITTER = 4.5\r\ncfg.DATA.SEARCH.SCALE_JITTER = 0.5\r\ncfg.DATA.SEARCH.NUMBER = 1\r\n# DATA.TEMPLATE\r\ncfg.DATA.TEMPLATE = edict()\r\ncfg.DATA.TEMPLATE.NUMBER = 1\r\ncfg.DATA.TEMPLATE.SIZE = 128\r\ncfg.DATA.TEMPLATE.FACTOR = 2.0\r\ncfg.DATA.TEMPLATE.CENTER_JITTER = 0\r\ncfg.DATA.TEMPLATE.SCALE_JITTER = 0\r\n\r\n# TEST\r\ncfg.TEST = edict()\r\ncfg.TEST.TEMPLATE_FACTOR = 2.0\r\ncfg.TEST.TEMPLATE_SIZE = 128\r\ncfg.TEST.SEARCH_FACTOR = 5.0\r\ncfg.TEST.SEARCH_SIZE = 
256\r\ncfg.TEST.EPOCH = 500\r\n\r\n\r\ndef _edict2dict(dest_dict, src_edict):\r\n    if isinstance(dest_dict, dict) and isinstance(src_edict, dict):\r\n        for k, v in src_edict.items():\r\n            if not isinstance(v, edict):\r\n                dest_dict[k] = v\r\n            else:\r\n                dest_dict[k] = {}\r\n                _edict2dict(dest_dict[k], v)\r\n    else:\r\n        return\r\n\r\n\r\ndef gen_config(config_file):\r\n    cfg_dict = {}\r\n    _edict2dict(cfg_dict, cfg)\r\n    with open(config_file, 'w') as f:\r\n        yaml.dump(cfg_dict, f, default_flow_style=False)\r\n\r\n\r\ndef _update_config(base_cfg, exp_cfg):\r\n    if isinstance(base_cfg, dict) and isinstance(exp_cfg, edict):\r\n        for k, v in exp_cfg.items():\r\n            if k in base_cfg:\r\n                if not isinstance(v, dict):\r\n                    base_cfg[k] = v\r\n                else:\r\n                    _update_config(base_cfg[k], v)\r\n            else:\r\n                raise ValueError(\"{} not exist in config.py\".format(k))\r\n    else:\r\n        return\r\n\r\n\r\ndef update_config_from_file(filename, base_cfg=None):\r\n    exp_config = None\r\n    with open(filename) as f:\r\n        exp_config = edict(yaml.safe_load(f))\r\n        if base_cfg is not None:\r\n            _update_config(base_cfg, exp_config)\r\n        else:\r\n            _update_config(cfg, exp_config)\r\n"
  },
  {
    "path": "lib/config/artrackv2/config.py",
    "content": "from easydict import EasyDict as edict\nimport yaml\n\n\"\"\"\nAdd default config for OSTrack.\n\"\"\"\ncfg = edict()\n\n# MODEL\ncfg.MODEL = edict()\ncfg.MODEL.PRETRAIN_FILE = \"mae_pretrain_vit_base.pth\"\ncfg.MODEL.PRETRAIN_PTH = \"\"\ncfg.MODEL.EXTRA_MERGER = False\n\ncfg.MODEL.RETURN_INTER = False\ncfg.MODEL.RETURN_STAGES = [2, 5, 8, 11]\n\n# MODEL.BACKBONE\ncfg.MODEL.BACKBONE = edict()\ncfg.MODEL.BACKBONE.TYPE = \"vit_base_patch16_224\"\ncfg.MODEL.BACKBONE.STRIDE = 16\ncfg.MODEL.BACKBONE.MID_PE = False\ncfg.MODEL.BACKBONE.SEP_SEG = False\ncfg.MODEL.BACKBONE.CAT_MODE = 'direct'\ncfg.MODEL.BACKBONE.MERGE_LAYER = 0\ncfg.MODEL.BACKBONE.ADD_CLS_TOKEN = False\ncfg.MODEL.BACKBONE.CLS_TOKEN_USE_MODE = 'ignore'\n\ncfg.MODEL.BACKBONE.CE_LOC = []\ncfg.MODEL.BACKBONE.CE_KEEP_RATIO = []\ncfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE = 'ALL'  # choose between ALL, CTR_POINT, CTR_REC, GT_BOX\n\n# MODEL.HEAD\ncfg.MODEL.BINS = 400\ncfg.MODEL.RANGE = 2\ncfg.MODEL.EXTENSION = 3\ncfg.MODEL.ENCODER_LAYER = 3\ncfg.MODEL.NUM_HEADS = 12\ncfg.MODEL.MLP_RATIO = 4\ncfg.MODEL.QKV_BIAS = True\ncfg.MODEL.DROP_RATE = 0.1\ncfg.MODEL.ATTN_DROP = 0.0\ncfg.MODEL.DROP_PATH = 0.0\ncfg.MODEL.DECODER_LAYER = 6\ncfg.MODEL.HEAD = edict()\ncfg.MODEL.HEAD.TYPE = \"PIX\"\ncfg.MODEL.HEAD.NUM_CHANNELS = 1024\n\n# TRAIN\ncfg.TRAIN = edict()\ncfg.TRAIN.LR = 0.0001\ncfg.TRAIN.WEIGHT_DECAY = 0.0001\ncfg.TRAIN.EPOCH = 500\ncfg.TRAIN.LR_DROP_EPOCH = 400\ncfg.TRAIN.BATCH_SIZE = 16\ncfg.TRAIN.NUM_WORKER = 10\ncfg.TRAIN.OPTIMIZER = \"ADAMW\"\ncfg.TRAIN.BACKBONE_MULTIPLIER = 0.1\ncfg.TRAIN.GIOU_WEIGHT = 2.0\ncfg.TRAIN.L1_WEIGHT = 5.0\ncfg.TRAIN.SCORE_WEIGHT = 0.0\ncfg.TRAIN.FREEZE_LAYERS = [0, ]\ncfg.TRAIN.PRINT_INTERVAL = 50\ncfg.TRAIN.VAL_EPOCH_INTERVAL = 20\ncfg.TRAIN.GRAD_CLIP_NORM = 0.1\ncfg.TRAIN.AMP = False\n\ncfg.TRAIN.CE_START_EPOCH = 20  # candidate elimination start epoch\ncfg.TRAIN.CE_WARM_EPOCH = 80  # candidate elimination warm up epoch\ncfg.TRAIN.DROP_PATH_RATE = 0.1  # drop path rate for ViT backbone\n\n# TRAIN.SCHEDULER\ncfg.TRAIN.SCHEDULER = edict()\ncfg.TRAIN.SCHEDULER.TYPE = \"step\"\ncfg.TRAIN.SCHEDULER.DECAY_RATE = 0.1\n\n# DATA\ncfg.DATA = edict()\ncfg.DATA.SAMPLER_MODE = \"causal\"  # sampling methods\ncfg.DATA.MEAN = [0.485, 0.456, 0.406]\ncfg.DATA.STD = [0.229, 0.224, 0.225]\ncfg.DATA.MAX_SAMPLE_INTERVAL = 200\n# DATA.TRAIN\ncfg.DATA.TRAIN = edict()\ncfg.DATA.TRAIN.DATASETS_NAME = [\"LASOT\", \"GOT10K_vottrain\"]\ncfg.DATA.TRAIN.DATASETS_RATIO = [1, 1]\ncfg.DATA.TRAIN.SAMPLE_PER_EPOCH = 60000\n# DATA.VAL\ncfg.DATA.VAL = edict()\ncfg.DATA.VAL.DATASETS_NAME = [\"GOT10K_votval\"]\ncfg.DATA.VAL.DATASETS_RATIO = [1]\ncfg.DATA.VAL.SAMPLE_PER_EPOCH = 10000\n# DATA.SEARCH\ncfg.DATA.SEARCH = edict()\ncfg.DATA.SEARCH.SIZE = 256\ncfg.DATA.SEARCH.FACTOR = 5.0\ncfg.DATA.SEARCH.CENTER_JITTER = 4.5\ncfg.DATA.SEARCH.SCALE_JITTER = 0.5\ncfg.DATA.SEARCH.NUMBER = 1\n# DATA.TEMPLATE\ncfg.DATA.TEMPLATE = edict()\ncfg.DATA.TEMPLATE.NUMBER = 1\ncfg.DATA.TEMPLATE.SIZE = 128\ncfg.DATA.TEMPLATE.FACTOR = 2.0\ncfg.DATA.TEMPLATE.CENTER_JITTER = 0\ncfg.DATA.TEMPLATE.SCALE_JITTER = 0\n\n# TEST\ncfg.TEST = edict()\ncfg.TEST.TEMPLATE_FACTOR = 2.0\ncfg.TEST.TEMPLATE_SIZE = 128\ncfg.TEST.SEARCH_FACTOR = 5.0\ncfg.TEST.SEARCH_SIZE = 256\ncfg.TEST.EPOCH = 500\n\n\ndef _edict2dict(dest_dict, src_edict):\n    if isinstance(dest_dict, dict) and isinstance(src_edict, dict):\n        for k, v in src_edict.items():\n            if not isinstance(v, edict):\n                dest_dict[k] = v\n            else:\n                dest_dict[k] = {}\n      
          _edict2dict(dest_dict[k], v)\n    else:\n        return\n\n\ndef gen_config(config_file):\n    cfg_dict = {}\n    _edict2dict(cfg_dict, cfg)\n    with open(config_file, 'w') as f:\n        yaml.dump(cfg_dict, f, default_flow_style=False)\n\n\ndef _update_config(base_cfg, exp_cfg):\n    if isinstance(base_cfg, dict) and isinstance(exp_cfg, edict):\n        for k, v in exp_cfg.items():\n            if k in base_cfg:\n                if not isinstance(v, dict):\n                    base_cfg[k] = v\n                else:\n                    _update_config(base_cfg[k], v)\n            else:\n                raise ValueError(\"{} not exist in config.py\".format(k))\n    else:\n        return\n\n\ndef update_config_from_file(filename, base_cfg=None):\n    exp_config = None\n    with open(filename) as f:\n        exp_config = edict(yaml.safe_load(f))\n        if base_cfg is not None:\n            _update_config(base_cfg, exp_config)\n        else:\n            _update_config(cfg, exp_config)\n"
  },
  {
    "path": "lib/config/artrackv2_seq/config.py",
    "content": "from easydict import EasyDict as edict\nimport yaml\n\n\"\"\"\nAdd default config for OSTrack.\n\"\"\"\ncfg = edict()\n\n# MODEL\ncfg.MODEL = edict()\ncfg.MODEL.PRETRAIN_FILE = \"mae_pretrain_vit_base.pth\"\ncfg.MODEL.PRETRAIN_PTH = \"\"\ncfg.MODEL.EXTRA_MERGER = False\n\ncfg.MODEL.RETURN_INTER = False\ncfg.MODEL.RETURN_STAGES = [2, 5, 8, 11]\n\n# MODEL.DECODER\ncfg.MODEL.DECODER = edict()\ncfg.MODEL.DECODER.TYPE = \"mask\"\ncfg.MODEL.DECODER.MASK_RATIO = 0.75\ncfg.MODEL.DECODER.EMBEDDIM = 512\ncfg.MODEL.DECODER.DEPTH = 8\ncfg.MODEL.DECODER.NUMHEADS = 16\ncfg.MODEL.DECODER.MLPRATIO = 4\n\n# MODEL.BACKBONE\ncfg.MODEL.BACKBONE = edict()\ncfg.MODEL.BACKBONE.TYPE = \"vit_base_patch16_224\"\ncfg.MODEL.BACKBONE.STRIDE = 16\ncfg.MODEL.BACKBONE.PATCHSIZE = 16\ncfg.MODEL.BACKBONE.MID_PE = False\ncfg.MODEL.BACKBONE.SEP_SEG = False\ncfg.MODEL.BACKBONE.CAT_MODE = 'direct'\ncfg.MODEL.BACKBONE.MERGE_LAYER = 0\ncfg.MODEL.BACKBONE.ADD_CLS_TOKEN = False\ncfg.MODEL.BACKBONE.CLS_TOKEN_USE_MODE = 'ignore'\n\ncfg.MODEL.BACKBONE.CE_LOC = []\ncfg.MODEL.BACKBONE.CE_KEEP_RATIO = []\ncfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE = 'ALL'  # choose between ALL, CTR_POINT, CTR_REC, GT_BOX\n\n# MODEL.HEAD\ncfg.MODEL.BINS = 400\ncfg.MODEL.RANGE = 2\ncfg.MODEL.EXTENSION = 3\ncfg.MODEL.PRENUM = 7\ncfg.MODEL.ENCODER_LAYER = 3\ncfg.MODEL.NUM_HEADS = 12\ncfg.MODEL.MLP_RATIO = 4\ncfg.MODEL.QKV_BIAS = True\ncfg.MODEL.DROP_RATE = 0.1\ncfg.MODEL.ATTN_DROP = 0.0\ncfg.MODEL.DROP_PATH = 0.0\ncfg.MODEL.DECODER_LAYER = 6\ncfg.MODEL.HEAD = edict()\ncfg.MODEL.HEAD.TYPE = \"PIX\"\ncfg.MODEL.HEAD.NUM_CHANNELS = 1024\n\n# TRAIN\ncfg.TRAIN = edict()\ncfg.TRAIN.LR = 0.0001\ncfg.TRAIN.WEIGHT_DECAY = 0.0001\ncfg.TRAIN.EPOCH = 500\ncfg.TRAIN.LR_DROP_EPOCH = 400\ncfg.TRAIN.BATCH_SIZE = 16\ncfg.TRAIN.NUM_WORKER = 10\ncfg.TRAIN.OPTIMIZER = \"ADAMW\"\ncfg.TRAIN.BACKBONE_MULTIPLIER = 0.1\ncfg.TRAIN.GIOU_WEIGHT = 2.0\ncfg.TRAIN.L1_WEIGHT = 5.0\ncfg.TRAIN.SCORE_WEIGHT = 1.0\ncfg.TRAIN.FREEZE_LAYERS = [0, ]\ncfg.TRAIN.PRINT_INTERVAL = 50\ncfg.TRAIN.VAL_EPOCH_INTERVAL = 20\ncfg.TRAIN.GRAD_CLIP_NORM = 0.1\ncfg.TRAIN.AMP = False\n\ncfg.TRAIN.CE_START_EPOCH = 20  # candidate elimination start epoch\ncfg.TRAIN.CE_WARM_EPOCH = 80  # candidate elimination warm up epoch\ncfg.TRAIN.DROP_PATH_RATE = 0.1  # drop path rate for ViT backbone\n\n# TRAIN.SCHEDULER\ncfg.TRAIN.SCHEDULER = edict()\ncfg.TRAIN.SCHEDULER.TYPE = \"step\"\ncfg.TRAIN.SCHEDULER.DECAY_RATE = 0.1\n\n# DATA\ncfg.DATA = edict()\ncfg.DATA.MAX_GAP = 300\ncfg.DATA.SAMPLER_MODE = \"causal\"  # sampling methods\ncfg.DATA.MEAN = [0.485, 0.456, 0.406]\ncfg.DATA.STD = [0.229, 0.224, 0.225]\ncfg.DATA.MAX_SAMPLE_INTERVAL = 200\ncfg.DATA.MAX_GAP = 300\ncfg.DATA.MAX_INTERVAL = 5\ncfg.DATA.INTERVAL_PROB = 0.0\ncfg.DATA.TEMP = 2\n# DATA.TRAIN\ncfg.DATA.TRAIN = edict()\ncfg.DATA.TRAIN.DATASETS_NAME = [\"LASOT\", \"GOT10K_vottrain\"]\ncfg.DATA.TRAIN.DATASETS_RATIO = [1, 1]\ncfg.DATA.TRAIN.SAMPLE_PER_EPOCH = 60000\n# DATA.VAL\ncfg.DATA.VAL = edict()\ncfg.DATA.VAL.DATASETS_NAME = [\"GOT10K_votval\"]\ncfg.DATA.VAL.DATASETS_RATIO = [1]\ncfg.DATA.VAL.SAMPLE_PER_EPOCH = 10000\n# DATA.SEARCH\ncfg.DATA.SEARCH = edict()\ncfg.DATA.SEARCH.SIZE = 256\ncfg.DATA.SEARCH.FACTOR = 5.0\ncfg.DATA.SEARCH.CENTER_JITTER = 4.5\ncfg.DATA.SEARCH.SCALE_JITTER = 0.5\ncfg.DATA.SEARCH.NUMBER = 1\n# DATA.TEMPLATE\ncfg.DATA.TEMPLATE = edict()\ncfg.DATA.TEMPLATE.NUMBER = 1\ncfg.DATA.TEMPLATE.SIZE = 128\ncfg.DATA.TEMPLATE.FACTOR = 2.0\ncfg.DATA.TEMPLATE.CENTER_JITTER = 0\ncfg.DATA.TEMPLATE.SCALE_JITTER = 0\n\n# TEST\ncfg.TEST = 
 edict()\ncfg.TEST.TEMPLATE_FACTOR = 2.0\ncfg.TEST.TEMPLATE_SIZE = 128\ncfg.TEST.SEARCH_FACTOR = 5.0\ncfg.TEST.SEARCH_SIZE = 256\ncfg.TEST.EPOCH = 500\n\n\ndef _edict2dict(dest_dict, src_edict):\n    if isinstance(dest_dict, dict) and isinstance(src_edict, dict):\n        for k, v in src_edict.items():\n            if not isinstance(v, edict):\n                dest_dict[k] = v\n            else:\n                dest_dict[k] = {}\n                _edict2dict(dest_dict[k], v)\n    else:\n        return\n\n\ndef gen_config(config_file):\n    cfg_dict = {}\n    _edict2dict(cfg_dict, cfg)\n    with open(config_file, 'w') as f:\n        yaml.dump(cfg_dict, f, default_flow_style=False)\n\n\ndef _update_config(base_cfg, exp_cfg):\n    if isinstance(base_cfg, dict) and isinstance(exp_cfg, edict):\n        for k, v in exp_cfg.items():\n            if k in base_cfg:\n                if not isinstance(v, dict):\n                    base_cfg[k] = v\n                else:\n                    _update_config(base_cfg[k], v)\n            else:\n                raise ValueError(\"{} does not exist in config.py\".format(k))\n    else:\n        return\n\n\ndef update_config_from_file(filename, base_cfg=None):\n    exp_config = None\n    with open(filename) as f:\n        exp_config = edict(yaml.safe_load(f))\n        if base_cfg is not None:\n            _update_config(base_cfg, exp_config)\n        else:\n            _update_config(cfg, exp_config)\n"
  },
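  {
    "path": "examples/config_usage_sketch.py",
    "content": "# Hypothetical example file, not part of the original repo: a minimal, hedged\n# sketch of how the config helpers above are meant to be used. Assumes the\n# repo root is on PYTHONPATH; the YAML path below is a throwaway placeholder.\nimport os\nimport tempfile\n\nfrom lib.config.artrackv2_seq.config import cfg, gen_config, update_config_from_file\n\n# Dump the defaults to YAML, then merge that YAML back into the global cfg.\n# _update_config rejects any key that is absent from the defaults, so a typo\n# in an experiment YAML fails loudly instead of being silently ignored.\nyaml_path = os.path.join(tempfile.gettempdir(), 'artrack_defaults.yaml')\ngen_config(yaml_path)\nupdate_config_from_file(yaml_path)  # a no-op merge in this round-trip\nprint(cfg.TRAIN.LR, cfg.DATA.SEARCH.SIZE)\n"
  },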
  {
    "path": "lib/models/__init__.py",
    "content": "from .artrack.artrack import build_artrack\r\n"
  },
  {
    "path": "lib/models/artrack/__init__.py",
    "content": "from .artrack import build_artrack\r\n"
  },
  {
    "path": "lib/models/artrack/artrack.py",
    "content": "\"\"\"\r\nBasic OSTrack model.\r\n\"\"\"\r\nimport math\r\nimport os\r\nfrom typing import List\r\n\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.nn.modules.transformer import _get_clones\r\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\r\n\r\nfrom lib.models.layers.head import build_pix_head\r\nfrom lib.models.artrack.vit import vit_base_patch16_224, vit_large_patch16_224\r\nfrom lib.utils.box_ops import box_xyxy_to_cxcywh\r\n\r\n\r\nclass ARTrack(nn.Module):\r\n    \"\"\" This is the base class for ARTrack \"\"\"\r\n\r\n    def __init__(self, transformer, pix_head, hidden_dim):\r\n        \"\"\" Initializes the model.\r\n        Parameters:\r\n            transformer: torch module of the transformer architecture.\r\n        \"\"\"\r\n        super().__init__()\r\n        self.identity = torch.nn.Parameter(torch.zeros(1, 2, hidden_dim))\r\n        self.identity = trunc_normal_(self.identity, std=.02)        \r\n        \r\n        self.backbone = transformer\r\n        self.pix_head = pix_head\r\n\r\n    def forward(self, template: torch.Tensor,\r\n                search: torch.Tensor,\r\n                seq_input=None\r\n                ):\r\n\r\n        x = self.backbone(z=template, x=search, identity=self.identity,)\r\n\r\n        # Forward head\r\n        feat_last = x\r\n        if isinstance(x, list):\r\n            feat_last = x[-1]\r\n        \r\n        pos_z = self.backbone.pos_embed_z\r\n        pos_x = self.backbone.pos_embed_x\r\n        \r\n        out = self.forward_head(feat_last, pos_z, pos_x, self.identity, seq_input)\r\n\r\n        out['backbone_feat'] = x\r\n        return out\r\n\r\n    def forward_head(self, cat_feature, pos_z, pos_x, identity, seq_input=None,):\r\n\r\n        output_dict = self.pix_head(cat_feature, pos_z, pos_x, identity, seq_input)\r\n        return output_dict\r\n\r\n\r\ndef build_artrack(cfg, training=True):\r\n    current_dir = os.path.dirname(os.path.abspath(__file__))  # This is your Project Root\r\n    pretrained_path = os.path.join(current_dir, '../../../pretrained_models')\r\n    if cfg.MODEL.PRETRAIN_FILE and ('ARTrack' not in cfg.MODEL.PRETRAIN_FILE) and training:\r\n        pretrained = os.path.join(pretrained_path, cfg.MODEL.PRETRAIN_FILE)\r\n    else:\r\n        pretrained = ''\r\n\r\n    if cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224':\r\n        backbone = vit_base_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE)\r\n        hidden_dim = backbone.embed_dim\r\n        patch_start_index = 1\r\n\r\n    elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224':\r\n        print(\"i use vit_large\")\r\n        backbone = vit_large_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE)\r\n        hidden_dim = backbone.embed_dim\r\n        patch_start_index = 1\r\n\r\n    else:\r\n        raise NotImplementedError\r\n\r\n    backbone.finetune_track(cfg=cfg, patch_start_index=patch_start_index)\r\n\r\n    pix_head = build_pix_head(cfg, hidden_dim)\r\n\r\n    model = ARTrack(\r\n        backbone,\r\n        pix_head,\r\n        hidden_dim,\r\n    )\r\n    if cfg.MODEL.PRETRAIN_PTH != \"\":\r\n        load_from = cfg.MODEL.PRETRAIN_PTH\r\n        checkpoint = torch.load(load_from, map_location=\"cpu\")\r\n        missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"net\"], strict=False)\r\n        print('Load pretrained model from: ' + load_from)\r\n    if 'ARTrack' in cfg.MODEL.PRETRAIN_FILE and training:\r\n        checkpoint = 
torch.load(cfg.MODEL.PRETRAIN_FILE, map_location=\"cpu\")\r\n        missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"net\"], strict=False)\r\n        print('Load pretrained model from: ' + cfg.MODEL.PRETRAIN_FILE)\r\n\r\n    return model\r\n"
  },
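  {
    "path": "examples/build_artrack_sketch.py",
    "content": "# Hypothetical example file, not part of the original repo: a hedged sketch of\n# constructing ARTrack from a default config without loading any checkpoint\n# (training=False skips the pretrained-weight branches in build_artrack).\n# The config import path and the seq_input=None call are assumptions; the\n# actual pix head may require a valid seq_input at inference time.\nimport torch\n\nfrom lib.config.artrack.config import cfg  # assumed default-config module\nfrom lib.models.artrack import build_artrack\n\nmodel = build_artrack(cfg, training=False).eval()\nz = torch.randn(1, 3, cfg.DATA.TEMPLATE.SIZE, cfg.DATA.TEMPLATE.SIZE)\nx = torch.randn(1, 3, cfg.DATA.SEARCH.SIZE, cfg.DATA.SEARCH.SIZE)\nwith torch.no_grad():\n    out = model(template=z, search=x, seq_input=None)\nprint(sorted(out.keys()))  # includes 'backbone_feat' plus the head outputs\n"
  },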
  {
    "path": "lib/models/artrack/base_backbone.py",
    "content": "from functools import partial\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom timm.models.vision_transformer import resize_pos_embed\r\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\r\n\r\nfrom lib.models.layers.patch_embed import PatchEmbed\r\nfrom lib.models.artrack.utils import combine_tokens, recover_tokens\r\n\r\nclass BaseBackbone(nn.Module):\r\n    def __init__(self):\r\n        super().__init__()\r\n\r\n        # for original ViT\r\n        self.pos_embed = None\r\n        self.img_size = [224, 224]\r\n        self.patch_size = 16\r\n        self.embed_dim = 384\r\n\r\n        self.cat_mode = 'direct'\r\n\r\n        self.pos_embed_z = None\r\n        self.pos_embed_x = None\r\n\r\n        self.template_segment_pos_embed = None\r\n        self.search_segment_pos_embed = None\r\n\r\n        self.return_inter = False\r\n        self.return_stage = [2, 5, 8, 11]\r\n\r\n        self.add_cls_token = False\r\n        self.add_sep_seg = False\r\n\r\n    def finetune_track(self, cfg, patch_start_index=1):\r\n\r\n        search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)\r\n        template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)\r\n        new_patch_size = cfg.MODEL.BACKBONE.STRIDE\r\n\r\n        self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE\r\n        self.return_inter = cfg.MODEL.RETURN_INTER\r\n        self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG\r\n\r\n        # resize patch embedding\r\n        if new_patch_size != self.patch_size:\r\n            print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')\r\n            old_patch_embed = {}\r\n            for name, param in self.patch_embed.named_parameters():\r\n                if 'weight' in name:\r\n                    param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size),\r\n                                                      mode='bicubic', align_corners=False)\r\n                    param = nn.Parameter(param)\r\n                old_patch_embed[name] = param\r\n            self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, in_chans=3,\r\n                                          embed_dim=self.embed_dim)\r\n            self.patch_embed.proj.bias = old_patch_embed['proj.bias']\r\n            self.patch_embed.proj.weight = old_patch_embed['proj.weight']\r\n\r\n        # for patch embedding\r\n        patch_pos_embed = self.pos_embed[:, patch_start_index:, :]\r\n        patch_pos_embed = patch_pos_embed.transpose(1, 2)\r\n        B, E, Q = patch_pos_embed.shape\r\n        P_H, P_W = self.img_size[0] // self.patch_size, self.img_size[1] // self.patch_size\r\n        patch_pos_embed = patch_pos_embed.view(B, E, P_H, P_W)\r\n\r\n        # for search region\r\n        H, W = search_size\r\n        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size\r\n        search_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',\r\n                                                           align_corners=False)\r\n        search_patch_pos_embed = search_patch_pos_embed.flatten(2).transpose(1, 2)\r\n\r\n        # for template region\r\n        H, W = template_size\r\n        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size\r\n        template_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',\r\n                                                             align_corners=False)\r\n   
     template_patch_pos_embed = template_patch_pos_embed.flatten(2).transpose(1, 2)\r\n\r\n        self.pos_embed_z = nn.Parameter(template_patch_pos_embed)\r\n        self.pos_embed_x = nn.Parameter(search_patch_pos_embed)\r\n\r\n        # for cls token (keep it but not used)\r\n        if self.add_cls_token and patch_start_index > 0:\r\n            cls_pos_embed = self.pos_embed[:, 0:1, :]\r\n            self.cls_pos_embed = nn.Parameter(cls_pos_embed)\r\n\r\n        # separate token and segment token\r\n        if self.add_sep_seg:\r\n            self.template_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))\r\n            self.template_segment_pos_embed = trunc_normal_(self.template_segment_pos_embed, std=.02)\r\n            self.search_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))\r\n            self.search_segment_pos_embed = trunc_normal_(self.search_segment_pos_embed, std=.02)\r\n\r\n\r\n        if self.return_inter:\r\n            for i_layer in self.fpn_stage:\r\n                if i_layer != 11:\r\n                    norm_layer = partial(nn.LayerNorm, eps=1e-6)\r\n                    layer = norm_layer(self.embed_dim)\r\n                    layer_name = f'norm{i_layer}'\r\n                    self.add_module(layer_name, layer)\r\n\r\n    def forward_features(self, z, x, identity):\r\n        B, H, W = x.shape[0], x.shape[2], x.shape[3]\r\n\r\n        x = self.patch_embed(x)\r\n        z = self.patch_embed(z)\r\n\r\n        s_x = x.shape[1]\r\n        s_z = z.shape[1]\r\n\r\n        if self.add_cls_token:\r\n            cls_tokens = self.cls_token.expand(B, -1, -1)\r\n            cls_tokens = cls_tokens + self.cls_pos_embed\r\n\r\n        z += self.pos_embed_z\r\n        x += self.pos_embed_x\r\n        \r\n        z += identity[:, 0, :].repeat(B, self.pos_embed_z.shape[1], 1)\r\n        x += identity[:, 1, :].repeat(B, self.pos_embed_x.shape[1], 1)\r\n\r\n        if self.add_sep_seg:\r\n            x += self.search_segment_pos_embed\r\n            z += self.template_segment_pos_embed\r\n\r\n        x = combine_tokens(z, x, mode=self.cat_mode)\r\n        if self.add_cls_token:\r\n            x = torch.cat([cls_tokens, x], dim=1)\r\n\r\n        x = self.pos_drop(x)\r\n\r\n        for i, blk in enumerate(self.blocks):\r\n            x = blk(x)\r\n        lens_z = self.pos_embed_z.shape[1]\r\n        lens_x = self.pos_embed_x.shape[1]\r\n        #x = recover_tokens(x, lens_z, lens_x, mode=self.cat_mode)\r\n\r\n        return self.norm(x)\r\n\r\n    def forward(self, z, x, identity, **kwargs):\r\n        \"\"\"\r\n        Joint feature extraction and relation modeling for the basic ViT backbone.\r\n        Args:\r\n            z (torch.Tensor): template feature, [B, C, H_z, W_z]\r\n            x (torch.Tensor): search region feature, [B, C, H_x, W_x]\r\n\r\n        Returns:\r\n            x (torch.Tensor): merged template and search region feature, [B, L_z+L_x, C]\r\n            attn : None\r\n        \"\"\"\r\n        x = self.forward_features(z, x, identity)\r\n\r\n        return x\r\n"
  },
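  {
    "path": "examples/pos_embed_resize_sketch.py",
    "content": "# Hypothetical example file, not part of the original repo: a standalone,\n# hedged sketch of the position-embedding resizing that\n# BaseBackbone.finetune_track performs, reduced to toy sizes.\nimport torch\nimport torch.nn as nn\n\nembed_dim, patch_size = 8, 16\ngrid = 224 // patch_size  # 14x14 grid of a 224x224 pretrained ViT\npos_embed = torch.randn(1, grid * grid, embed_dim)\n\n# [1, N, C] -> [1, C, H, W], bicubic-resize to the search-region grid,\n# then flatten back to token order, mirroring finetune_track above.\nas_grid = pos_embed.transpose(1, 2).view(1, embed_dim, grid, grid)\nnew_grid = nn.functional.interpolate(as_grid, size=(256 // patch_size,) * 2,\n                                     mode='bicubic', align_corners=False)\npos_embed_x = new_grid.flatten(2).transpose(1, 2)\nprint(pos_embed_x.shape)  # torch.Size([1, 256, 8])\n"
  },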
  {
    "path": "lib/models/artrack/utils.py",
    "content": "import math\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\n\r\n\r\ndef combine_tokens(template_tokens, search_tokens, mode='direct', return_res=False):\r\n    # [B, HW, C]\r\n    len_t = template_tokens.shape[1]\r\n    len_s = search_tokens.shape[1]\r\n\r\n    if mode == 'direct':\r\n        merged_feature = torch.cat((template_tokens, search_tokens), dim=1)\r\n    elif mode == 'template_central':\r\n        central_pivot = len_s // 2\r\n        first_half = search_tokens[:, :central_pivot, :]\r\n        second_half = search_tokens[:, central_pivot:, :]\r\n        merged_feature = torch.cat((first_half, template_tokens, second_half), dim=1)\r\n    elif mode == 'partition':\r\n        feat_size_s = int(math.sqrt(len_s))\r\n        feat_size_t = int(math.sqrt(len_t))\r\n        window_size = math.ceil(feat_size_t / 2.)\r\n        # pad feature maps to multiples of window size\r\n        B, _, C = template_tokens.shape\r\n        H = W = feat_size_t\r\n        template_tokens = template_tokens.view(B, H, W, C)\r\n        pad_l = pad_b = pad_r = 0\r\n        # pad_r = (window_size - W % window_size) % window_size\r\n        pad_t = (window_size - H % window_size) % window_size\r\n        template_tokens = F.pad(template_tokens, (0, 0, pad_l, pad_r, pad_t, pad_b))\r\n        _, Hp, Wp, _ = template_tokens.shape\r\n        template_tokens = template_tokens.view(B, Hp // window_size, window_size, W, C)\r\n        template_tokens = torch.cat([template_tokens[:, 0, ...], template_tokens[:, 1, ...]], dim=2)\r\n        _, Hc, Wc, _ = template_tokens.shape\r\n        template_tokens = template_tokens.view(B, -1, C)\r\n        merged_feature = torch.cat([template_tokens, search_tokens], dim=1)\r\n\r\n        # calculate new h and w, which may be useful for SwinT or others\r\n        merged_h, merged_w = feat_size_s + Hc, feat_size_s\r\n        if return_res:\r\n            return merged_feature, merged_h, merged_w\r\n\r\n    else:\r\n        raise NotImplementedError\r\n\r\n    return merged_feature\r\n\r\n\r\ndef recover_tokens(merged_tokens, len_template_token, len_search_token, mode='direct'):\r\n    if mode == 'direct':\r\n        recovered_tokens = merged_tokens\r\n    elif mode == 'template_central':\r\n        central_pivot = len_search_token // 2\r\n        len_remain = len_search_token - central_pivot\r\n        len_half_and_t = central_pivot + len_template_token\r\n\r\n        first_half = merged_tokens[:, :central_pivot, :]\r\n        second_half = merged_tokens[:, -len_remain:, :]\r\n        template_tokens = merged_tokens[:, central_pivot:len_half_and_t, :]\r\n\r\n        recovered_tokens = torch.cat((template_tokens, first_half, second_half), dim=1)\r\n    elif mode == 'partition':\r\n        recovered_tokens = merged_tokens\r\n    else:\r\n        raise NotImplementedError\r\n\r\n    return recovered_tokens\r\n\r\n\r\ndef window_partition(x, window_size: int):\r\n    \"\"\"\r\n    Args:\r\n        x: (B, H, W, C)\r\n        window_size (int): window size\r\n\r\n    Returns:\r\n        windows: (num_windows*B, window_size, window_size, C)\r\n    \"\"\"\r\n    B, H, W, C = x.shape\r\n    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)\r\n    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\r\n    return windows\r\n\r\n\r\ndef window_reverse(windows, window_size: int, H: int, W: int):\r\n    \"\"\"\r\n    Args:\r\n        windows: (num_windows*B, window_size, window_size, C)\r\n        
window_size (int): Window size\r\n        H (int): Height of image\r\n        W (int): Width of image\r\n\r\n    Returns:\r\n        x: (B, H, W, C)\r\n    \"\"\"\r\n    B = int(windows.shape[0] / (H * W / window_size / window_size))\r\n    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)\r\n    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\r\n    return x\r\n"
  },
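  {
    "path": "examples/token_layout_sketch.py",
    "content": "# Hypothetical example file, not part of the original repo: a hedged sketch\n# showing that recover_tokens undoes the 'template_central' interleaving\n# produced by combine_tokens, restoring the [template, search] ordering.\nimport torch\n\nfrom lib.models.artrack.utils import combine_tokens, recover_tokens\n\nz = torch.arange(4.).view(1, 4, 1)        # 4 template tokens: 0..3\nx = torch.arange(6.).view(1, 6, 1) + 10.  # 6 search tokens: 10..15\n\nmerged = combine_tokens(z, x, mode='template_central')\nrecovered = recover_tokens(merged, len_template_token=4, len_search_token=6,\n                           mode='template_central')\nprint(recovered.squeeze(-1))  # tensor([[0., 1., 2., 3., 10., ..., 15.]])\n"
  },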
  {
    "path": "lib/models/artrack/vit.py",
    "content": "\"\"\" Vision Transformer (ViT) in PyTorch\r\nA PyTorch implement of Vision Transformers as described in:\r\n'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'\r\n    - https://arxiv.org/abs/2010.11929\r\n`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`\r\n    - https://arxiv.org/abs/2106.10270\r\nThe official jax code is released and available at https://github.com/google-research/vision_transformer\r\nDeiT model defs and weights from https://github.com/facebookresearch/deit,\r\npaper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877\r\nAcknowledgments:\r\n* The paper authors for releasing code and weights, thanks!\r\n* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out\r\nfor some einops/einsum fun\r\n* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT\r\n* Bert reference code checks against Huggingface Transformers and Tensorflow Bert\r\nHacked together by / Copyright 2021 Ross Wightman\r\n\r\nModified by Botao Ye\r\n\"\"\"\r\nimport math\r\nimport logging\r\nfrom functools import partial\r\nfrom collections import OrderedDict\r\nfrom copy import deepcopy\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD\r\nfrom timm.models.helpers import build_model_with_cfg, named_apply, adapt_input_conv\r\nfrom timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_\r\nfrom timm.models.registry import register_model\r\n\r\nfrom lib.models.layers.patch_embed import PatchEmbed\r\nfrom lib.models.artrack.base_backbone import BaseBackbone\r\n\r\n\r\nclass Attention(nn.Module):\r\n    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):\r\n        super().__init__()\r\n        self.num_heads = num_heads\r\n        head_dim = dim // num_heads\r\n        self.scale = head_dim ** -0.5\r\n\r\n        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\r\n        self.attn_drop = nn.Dropout(attn_drop)\r\n        self.proj = nn.Linear(dim, dim)\r\n        self.proj_drop = nn.Dropout(proj_drop)\r\n\r\n    def forward(self, x, return_attention=False):\r\n        B, N, C = x.shape\r\n        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\r\n        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)\r\n\r\n        attn = (q @ k.transpose(-2, -1)) * self.scale\r\n        attn = attn.softmax(dim=-1)\r\n        attn = self.attn_drop(attn)\r\n\r\n        x = (attn @ v).transpose(1, 2).reshape(B, N, C)\r\n        x = self.proj(x)\r\n        x = self.proj_drop(x)\r\n\r\n        if return_attention:\r\n            return x, attn\r\n        return x\r\n\r\n\r\nclass Block(nn.Module):\r\n\r\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\r\n                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\r\n        super().__init__()\r\n        self.norm1 = norm_layer(dim)\r\n        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\r\n        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\r\n        self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\r\n        self.norm2 = norm_layer(dim)\r\n        mlp_hidden_dim = int(dim * mlp_ratio)\r\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\r\n\r\n    def forward(self, x, return_attention=False):\r\n        if return_attention:\r\n            feat, attn = self.attn(self.norm1(x), True)\r\n            x = x + self.drop_path(feat)\r\n            x = x + self.drop_path(self.mlp(self.norm2(x)))\r\n            return x, attn\r\n        else:\r\n            x = x + self.drop_path(self.attn(self.norm1(x)))\r\n            x = x + self.drop_path(self.mlp(self.norm2(x)))\r\n            return x\r\n\r\n\r\nclass VisionTransformer(BaseBackbone):\r\n    \"\"\" Vision Transformer\r\n    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\r\n        - https://arxiv.org/abs/2010.11929\r\n    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`\r\n        - https://arxiv.org/abs/2012.12877\r\n    \"\"\"\r\n\r\n    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\r\n                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,\r\n                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,\r\n                 act_layer=None, weight_init=''):\r\n        \"\"\"\r\n        Args:\r\n            img_size (int, tuple): input image size\r\n            patch_size (int, tuple): patch size\r\n            in_chans (int): number of input channels\r\n            num_classes (int): number of classes for classification head\r\n            embed_dim (int): embedding dimension\r\n            depth (int): depth of transformer\r\n            num_heads (int): number of attention heads\r\n            mlp_ratio (int): ratio of mlp hidden dim to embedding dim\r\n            qkv_bias (bool): enable bias for qkv if True\r\n            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\r\n            distilled (bool): model includes a distillation token and head as in DeiT models\r\n            drop_rate (float): dropout rate\r\n            attn_drop_rate (float): attention dropout rate\r\n            drop_path_rate (float): stochastic depth rate\r\n            embed_layer (nn.Module): patch embedding layer\r\n            norm_layer: (nn.Module): normalization layer\r\n            weight_init: (str): weight init scheme\r\n        \"\"\"\r\n        super().__init__()\r\n        self.num_classes = num_classes\r\n        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models\r\n        self.num_tokens = 2 if distilled else 1\r\n        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\r\n        act_layer = act_layer or nn.GELU\r\n\r\n        self.patch_embed = embed_layer(\r\n            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\r\n        num_patches = self.patch_embed.num_patches\r\n\r\n        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\r\n        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\r\n        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\r\n        self.pos_drop = nn.Dropout(p=drop_rate)\r\n\r\n        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth 
decay rule\r\n        self.blocks = nn.Sequential(*[\r\n            Block(\r\n                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\r\n                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)\r\n            for i in range(depth)])\r\n\r\n        # self.extension = nn.Sequential(*[\r\n        #     Block(\r\n        #         dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\r\n        #         attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)\r\n        #     for i in range(6)])\r\n        self.norm = norm_layer(embed_dim)\r\n\r\n        # # Representation layer\r\n        # if representation_size and not distilled:\r\n        #     self.num_features = representation_size\r\n        #     self.pre_logits = nn.Sequential(OrderedDict([\r\n        #         ('fc', nn.Linear(embed_dim, representation_size)),\r\n        #         ('act', nn.Tanh())\r\n        #     ]))\r\n        # else:\r\n        #     self.pre_logits = nn.Identity()\r\n        #\r\n        # # Classifier head(s)\r\n        # self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\r\n        # self.head_dist = None\r\n        # if distilled:\r\n        #     self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\r\n\r\n        self.init_weights(weight_init)\r\n\r\n    def init_weights(self, mode=''):\r\n        assert mode in ('jax', 'jax_nlhb', 'nlhb', '')\r\n        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.\r\n        trunc_normal_(self.pos_embed, std=.02)\r\n        if self.dist_token is not None:\r\n            trunc_normal_(self.dist_token, std=.02)\r\n        if mode.startswith('jax'):\r\n            # leave cls token as zeros to match jax impl\r\n            named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)\r\n        else:\r\n            trunc_normal_(self.cls_token, std=.02)\r\n            self.apply(_init_vit_weights)\r\n\r\n    def _init_weights(self, m):\r\n        # this fn left here for compat with downstream users\r\n        _init_vit_weights(m)\r\n\r\n    @torch.jit.ignore()\r\n    def load_pretrained(self, checkpoint_path, prefix=''):\r\n        _load_weights(self, checkpoint_path, prefix)\r\n\r\n    @torch.jit.ignore\r\n    def no_weight_decay(self):\r\n        return {'pos_embed', 'cls_token', 'dist_token'}\r\n\r\n    def get_classifier(self):\r\n        if self.dist_token is None:\r\n            return self.head\r\n        else:\r\n            return self.head, self.head_dist\r\n\r\n    def reset_classifier(self, num_classes, global_pool=''):\r\n        self.num_classes = num_classes\r\n        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\r\n        if self.num_tokens == 2:\r\n            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\r\n\r\n\r\ndef _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):\r\n    \"\"\" ViT weight initialization\r\n    * When called without n, head_bias, jax_impl args it will behave exactly the same\r\n      as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).\r\n    * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl\r\n    \"\"\"\r\n    if 
isinstance(module, nn.Linear):\r\n        if name.startswith('head'):\r\n            nn.init.zeros_(module.weight)\r\n            nn.init.constant_(module.bias, head_bias)\r\n        elif name.startswith('pre_logits'):\r\n            lecun_normal_(module.weight)\r\n            nn.init.zeros_(module.bias)\r\n        else:\r\n            if jax_impl:\r\n                nn.init.xavier_uniform_(module.weight)\r\n                if module.bias is not None:\r\n                    if 'mlp' in name:\r\n                        nn.init.normal_(module.bias, std=1e-6)\r\n                    else:\r\n                        nn.init.zeros_(module.bias)\r\n            else:\r\n                trunc_normal_(module.weight, std=.02)\r\n                if module.bias is not None:\r\n                    nn.init.zeros_(module.bias)\r\n    elif jax_impl and isinstance(module, nn.Conv2d):\r\n        # NOTE conv was left to pytorch default in my original init\r\n        lecun_normal_(module.weight)\r\n        if module.bias is not None:\r\n            nn.init.zeros_(module.bias)\r\n    elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):\r\n        nn.init.zeros_(module.bias)\r\n        nn.init.ones_(module.weight)\r\n\r\n\r\n@torch.no_grad()\r\ndef _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):\r\n    \"\"\" Load weights from .npz checkpoints for official Google Brain Flax implementation\r\n    \"\"\"\r\n    import numpy as np\r\n\r\n    def _n2p(w, t=True):\r\n        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:\r\n            w = w.flatten()\r\n        if t:\r\n            if w.ndim == 4:\r\n                w = w.transpose([3, 2, 0, 1])\r\n            elif w.ndim == 3:\r\n                w = w.transpose([2, 0, 1])\r\n            elif w.ndim == 2:\r\n                w = w.transpose([1, 0])\r\n        return torch.from_numpy(w)\r\n\r\n    w = np.load(checkpoint_path)\r\n    if not prefix and 'opt/target/embedding/kernel' in w:\r\n        prefix = 'opt/target/'\r\n\r\n    if hasattr(model.patch_embed, 'backbone'):\r\n        # hybrid\r\n        backbone = model.patch_embed.backbone\r\n        stem_only = not hasattr(backbone, 'stem')\r\n        stem = backbone if stem_only else backbone.stem\r\n        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))\r\n        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))\r\n        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))\r\n        if not stem_only:\r\n            for i, stage in enumerate(backbone.stages):\r\n                for j, block in enumerate(stage.blocks):\r\n                    bp = f'{prefix}block{i + 1}/unit{j + 1}/'\r\n                    for r in range(3):\r\n                        getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))\r\n                        getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))\r\n                        getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))\r\n                    if block.downsample is not None:\r\n                        block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))\r\n                        block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))\r\n                        block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))\r\n        embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])\r\n    else:\r\n        embed_conv_w = 
adapt_input_conv(\r\n            model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))\r\n    model.patch_embed.proj.weight.copy_(embed_conv_w)\r\n    model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))\r\n    model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))\r\n    pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)\r\n    if pos_embed_w.shape != model.pos_embed.shape:\r\n        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights\r\n            pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)\r\n    model.pos_embed.copy_(pos_embed_w)\r\n    model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))\r\n    model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))\r\n    if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:\r\n        model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))\r\n        model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))\r\n    if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:\r\n        model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))\r\n        model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))\r\n    for i, block in enumerate(model.blocks.children()):\r\n        block_prefix = f'{prefix}Transformer/encoderblock_{i}/'\r\n        mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'\r\n        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))\r\n        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))\r\n        block.attn.qkv.weight.copy_(torch.cat([\r\n            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))\r\n        block.attn.qkv.bias.copy_(torch.cat([\r\n            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))\r\n        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))\r\n        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))\r\n        for r in range(2):\r\n            getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))\r\n            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))\r\n        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))\r\n        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))\r\n\r\n\r\ndef resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):\r\n    # Rescale the grid of position embeddings when loading from state_dict. 
Adapted from\r\n    # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224\r\n    print('Resized position embedding: %s to %s' % (posemb.shape, posemb_new.shape))\r\n    ntok_new = posemb_new.shape[1]\r\n    if num_tokens:\r\n        posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]\r\n        ntok_new -= num_tokens\r\n    else:\r\n        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]\r\n    gs_old = int(math.sqrt(len(posemb_grid)))\r\n    if not len(gs_new):  # backwards compatibility\r\n        gs_new = [int(math.sqrt(ntok_new))] * 2\r\n    assert len(gs_new) >= 2\r\n    print('Position embedding grid-size from %s to %s' % ([gs_old, gs_old], gs_new))\r\n    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)\r\n    posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bilinear')\r\n    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)\r\n    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)\r\n    return posemb\r\n\r\n\r\ndef checkpoint_filter_fn(state_dict, model):\r\n    \"\"\" convert patch embedding weight from manual patchify + linear proj to conv\"\"\"\r\n    out_dict = {}\r\n    if 'model' in state_dict:\r\n        # For deit models\r\n        state_dict = state_dict['model']\r\n    for k, v in state_dict.items():\r\n        if 'patch_embed.proj.weight' in k and len(v.shape) < 4:\r\n            # For old models that I trained prior to conv based patchification\r\n            O, I, H, W = model.patch_embed.proj.weight.shape\r\n            v = v.reshape(O, -1, H, W)\r\n        elif k == 'pos_embed' and v.shape != model.pos_embed.shape:\r\n            # To resize pos embedding when using model at different size from pretrained weights\r\n            v = resize_pos_embed(\r\n                v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)\r\n        out_dict[k] = v\r\n    return out_dict\r\n\r\n\r\ndef _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):\r\n    if kwargs.get('features_only', None):\r\n        raise RuntimeError('features_only not implemented for Vision Transformer models.')\r\n\r\n    model = VisionTransformer(**kwargs)\r\n    if pretrained:\r\n        if 'npz' in pretrained:\r\n            model.load_pretrained(pretrained, prefix='')\r\n        else:\r\n            checkpoint = torch.load(pretrained, map_location=\"cpu\")\r\n            missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"model\"], strict=False)\r\n            print('Load pretrained model from: ' + pretrained)\r\n\r\n    return model\r\n\r\n\r\ndef vit_base_patch16_224(pretrained=False, **kwargs):\r\n    \"\"\"\r\n    ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).\r\n    \"\"\"\r\n    model_kwargs = dict(\r\n        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)\r\n    model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs)\r\n    return model\r\n\r\n\r\ndef vit_large_patch16_224(pretrained=False, **kwargs):\r\n    \"\"\"\r\n    ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).\r\n    \"\"\"\r\n    model_kwargs = dict(\r\n        patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)\r\n    model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs)\r\n    return model\r\n"
  },
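  {
    "path": "examples/vit_backbone_sketch.py",
    "content": "# Hypothetical example file, not part of the original repo: a hedged sketch\n# that instantiates the plain ViT backbone defined above without pretrained\n# weights. Note that finetune_track(cfg, ...) must still be called before the\n# template/search forward pass is usable.\nfrom lib.models.artrack.vit import vit_base_patch16_224\n\nbackbone = vit_base_patch16_224(pretrained=False)\nn_params = sum(p.numel() for p in backbone.parameters())\nprint(backbone.embed_dim, len(backbone.blocks), '%.1fM params' % (n_params / 1e6))\n"
  },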
  {
    "path": "lib/models/artrack/vit_ce.py",
    "content": "import math\r\nimport logging\r\nfrom functools import partial\r\nfrom collections import OrderedDict\r\nfrom copy import deepcopy\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom timm.models.layers import to_2tuple\r\n\r\nfrom lib.models.layers.patch_embed import PatchEmbed\r\nfrom .utils import combine_tokens, recover_tokens\r\nfrom .vit import VisionTransformer\r\nfrom ..layers.attn_blocks import CEBlock\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\nclass VisionTransformerCE(VisionTransformer):\r\n    \"\"\" Vision Transformer with candidate elimination (CE) module\r\n\r\n    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\r\n        - https://arxiv.org/abs/2010.11929\r\n\r\n    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`\r\n        - https://arxiv.org/abs/2012.12877\r\n    \"\"\"\r\n\r\n    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\r\n                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,\r\n                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,\r\n                 act_layer=None, weight_init='',\r\n                 ce_loc=None, ce_keep_ratio=None):\r\n        \"\"\"\r\n        Args:\r\n            img_size (int, tuple): input image size\r\n            patch_size (int, tuple): patch size\r\n            in_chans (int): number of input channels\r\n            num_classes (int): number of classes for classification head\r\n            embed_dim (int): embedding dimension\r\n            depth (int): depth of transformer\r\n            num_heads (int): number of attention heads\r\n            mlp_ratio (int): ratio of mlp hidden dim to embedding dim\r\n            qkv_bias (bool): enable bias for qkv if True\r\n            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\r\n            distilled (bool): model includes a distillation token and head as in DeiT models\r\n            drop_rate (float): dropout rate\r\n            attn_drop_rate (float): attention dropout rate\r\n            drop_path_rate (float): stochastic depth rate\r\n            embed_layer (nn.Module): patch embedding layer\r\n            norm_layer: (nn.Module): normalization layer\r\n            weight_init: (str): weight init scheme\r\n        \"\"\"\r\n        # super().__init__()\r\n        super().__init__()\r\n        if isinstance(img_size, tuple):\r\n            self.img_size = img_size\r\n        else:\r\n            self.img_size = to_2tuple(img_size)\r\n        self.patch_size = patch_size\r\n        self.in_chans = in_chans\r\n\r\n        self.num_classes = num_classes\r\n        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models\r\n        self.num_tokens = 2 if distilled else 1\r\n        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\r\n        act_layer = act_layer or nn.GELU\r\n\r\n        self.patch_embed = embed_layer(\r\n            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\r\n        num_patches = self.patch_embed.num_patches\r\n\r\n        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\r\n        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\r\n        self.pos_embed = 
nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\r\n        self.pos_drop = nn.Dropout(p=drop_rate)\r\n\r\n        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule\r\n        blocks = []\r\n        ce_index = 0\r\n        self.ce_loc = ce_loc\r\n        for i in range(depth):\r\n            ce_keep_ratio_i = 1.0\r\n            if ce_loc is not None and i in ce_loc:\r\n                ce_keep_ratio_i = ce_keep_ratio[ce_index]\r\n                ce_index += 1\r\n\r\n            blocks.append(\r\n                CEBlock(\r\n                    dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\r\n                    attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer,\r\n                    keep_ratio_search=ce_keep_ratio_i)\r\n            )\r\n\r\n        self.blocks = nn.Sequential(*blocks)\r\n        self.norm = norm_layer(embed_dim)\r\n\r\n        self.init_weights(weight_init)\r\n\r\n    def forward_features(self, z, x, mask_z=None, mask_x=None,\r\n                         ce_template_mask=None, ce_keep_rate=None,\r\n                         return_last_attn=False\r\n                         ):\r\n        B, H, W = x.shape[0], x.shape[2], x.shape[3]\r\n\r\n        x = self.patch_embed(x)\r\n        z = self.patch_embed(z)\r\n\r\n        # attention mask handling\r\n        # B, H, W\r\n        if mask_z is not None and mask_x is not None:\r\n            mask_z = F.interpolate(mask_z[None].float(), scale_factor=1. / self.patch_size).to(torch.bool)[0]\r\n            mask_z = mask_z.flatten(1).unsqueeze(-1)\r\n\r\n            mask_x = F.interpolate(mask_x[None].float(), scale_factor=1. / self.patch_size).to(torch.bool)[0]\r\n            mask_x = mask_x.flatten(1).unsqueeze(-1)\r\n\r\n            mask_x = combine_tokens(mask_z, mask_x, mode=self.cat_mode)\r\n            mask_x = mask_x.squeeze(-1)\r\n\r\n        if self.add_cls_token:\r\n            cls_tokens = self.cls_token.expand(B, -1, -1)\r\n            cls_tokens = cls_tokens + self.cls_pos_embed\r\n\r\n        z += self.pos_embed_z\r\n        x += self.pos_embed_x\r\n\r\n        if self.add_sep_seg:\r\n            x += self.search_segment_pos_embed\r\n            z += self.template_segment_pos_embed\r\n\r\n        x = combine_tokens(z, x, mode=self.cat_mode)\r\n        if self.add_cls_token:\r\n            x = torch.cat([cls_tokens, x], dim=1)\r\n\r\n        x = self.pos_drop(x)\r\n\r\n        lens_z = self.pos_embed_z.shape[1]\r\n        lens_x = self.pos_embed_x.shape[1]\r\n\r\n        global_index_t = torch.linspace(0, lens_z - 1, lens_z).to(x.device)\r\n        global_index_t = global_index_t.repeat(B, 1)\r\n\r\n        global_index_s = torch.linspace(0, lens_x - 1, lens_x).to(x.device)\r\n        global_index_s = global_index_s.repeat(B, 1)\r\n        removed_indexes_s = []\r\n        for i, blk in enumerate(self.blocks):\r\n            x, global_index_t, global_index_s, removed_index_s, attn = \\\r\n                blk(x, global_index_t, global_index_s, mask_x, ce_template_mask, ce_keep_rate)\r\n\r\n            if self.ce_loc is not None and i in self.ce_loc:\r\n                removed_indexes_s.append(removed_index_s)\r\n\r\n        x = self.norm(x)\r\n        lens_x_new = global_index_s.shape[1]\r\n        lens_z_new = global_index_t.shape[1]\r\n\r\n        z = x[:, :lens_z_new]\r\n        x = x[:, lens_z_new:]\r\n\r\n        if removed_indexes_s and removed_indexes_s[0] is not 
None:\r\n            removed_indexes_cat = torch.cat(removed_indexes_s, dim=1)\r\n\r\n            pruned_lens_x = lens_x - lens_x_new\r\n            pad_x = torch.zeros([B, pruned_lens_x, x.shape[2]], device=x.device)\r\n            x = torch.cat([x, pad_x], dim=1)\r\n            index_all = torch.cat([global_index_s, removed_indexes_cat], dim=1)\r\n            # recover original token order\r\n            C = x.shape[-1]\r\n            # x = x.gather(1, index_all.unsqueeze(-1).expand(B, -1, C).argsort(1))\r\n            x = torch.zeros_like(x).scatter_(dim=1, index=index_all.unsqueeze(-1).expand(B, -1, C).to(torch.int64), src=x)\r\n\r\n        x = recover_tokens(x, lens_z_new, lens_x, mode=self.cat_mode)\r\n\r\n        # re-concatenate with the template, which may be further used by other modules\r\n        x = torch.cat([z, x], dim=1)\r\n\r\n        aux_dict = {\r\n            \"attn\": attn,\r\n            \"removed_indexes_s\": removed_indexes_s,  # used for visualization\r\n        }\r\n\r\n        return x, aux_dict\r\n\r\n    def forward(self, z, x, ce_template_mask=None, ce_keep_rate=None,\r\n                tnc_keep_rate=None,\r\n                return_last_attn=False):\r\n\r\n        x, aux_dict = self.forward_features(z, x, ce_template_mask=ce_template_mask, ce_keep_rate=ce_keep_rate,)\r\n\r\n        return x, aux_dict\r\n\r\n\r\ndef _create_vision_transformer(pretrained=False, **kwargs):\r\n    model = VisionTransformerCE(**kwargs)\r\n\r\n    if pretrained:\r\n        if 'npz' in pretrained:\r\n            model.load_pretrained(pretrained, prefix='')\r\n        else:\r\n            checkpoint = torch.load(pretrained, map_location=\"cpu\")\r\n            missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"model\"], strict=False)\r\n            print('Load pretrained model from: ' + pretrained)\r\n\r\n    return model\r\n\r\n\r\ndef vit_base_patch16_224_ce(pretrained=False, **kwargs):\r\n    \"\"\" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).\r\n    \"\"\"\r\n    model_kwargs = dict(\r\n        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)\r\n    model = _create_vision_transformer(pretrained=pretrained, **model_kwargs)\r\n    return model\r\n\r\n\r\ndef vit_large_patch16_224_ce(pretrained=False, **kwargs):\r\n    \"\"\" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).\r\n    \"\"\"\r\n    model_kwargs = dict(\r\n        patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)\r\n    model = _create_vision_transformer(pretrained=pretrained, **model_kwargs)\r\n    return model\r\n"
  },
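  {
    "path": "examples/ce_token_recovery_sketch.py",
    "content": "# Hypothetical example file, not part of the original repo: a toy, hedged\n# sketch of the scatter trick VisionTransformerCE uses to put the surviving\n# search tokens back in their original order after candidate elimination,\n# with zero padding in the pruned slots.\nimport torch\n\nB, C = 1, 2\nkept = torch.tensor([[2, 0]])      # global indices of tokens that survived CE\nremoved = torch.tensor([[1, 3]])   # global indices pruned by the CE blocks\nx = torch.tensor([[[20., 20.], [10., 10.]]])  # features of the kept tokens\n\npad_x = torch.zeros(B, removed.shape[1], C)\nx = torch.cat([x, pad_x], dim=1)                 # zeros for the pruned slots\nindex_all = torch.cat([kept, removed], dim=1)\nrestored = torch.zeros_like(x).scatter_(\n    dim=1, index=index_all.unsqueeze(-1).expand(B, -1, C).to(torch.int64), src=x)\nprint(restored)  # rows 0 and 2 hold the kept features; rows 1 and 3 are zero\n"
  },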
  {
    "path": "lib/models/artrack_seq/__init__.py",
    "content": "from .artrack_seq import build_artrack_seq\r\n"
  },
  {
    "path": "lib/models/artrack_seq/artrack_seq.py",
    "content": "\"\"\"\r\nBasic OSTrack model.\r\n\"\"\"\r\nimport math\r\nimport os\r\nfrom typing import List\r\n\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.nn.modules.transformer import _get_clones\r\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\r\n\r\nfrom lib.models.layers.head_seq import build_pix_head\r\nfrom lib.models.artrack_seq.vit import vit_base_patch16_224, vit_large_patch16_224\r\nfrom lib.utils.box_ops import box_xyxy_to_cxcywh\r\n\r\nimport time\r\n\r\n\r\nclass ARTrackSeq(nn.Module):\r\n    \"\"\" This is the base class for ARTrackSeq \"\"\"\r\n\r\n    def __init__(self, transformer, pix_head, hidden_dim):\r\n        \"\"\" Initializes the model.\r\n        Parameters:\r\n            transformer: torch module of the transformer architecture.\r\n            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\r\n        \"\"\"\r\n        super().__init__()\r\n        self.backbone = transformer\r\n        self.pix_head = pix_head\r\n            \r\n        self.identity = torch.nn.Parameter(torch.zeros(1, 2, hidden_dim))\r\n        self.identity = trunc_normal_(self.identity, std=.02)\r\n\r\n    def forward(self, template: torch.Tensor,\r\n                search: torch.Tensor,\r\n                seq_input=None,\r\n                head_type=None,\r\n                stage=None,\r\n                search_feature=None,\r\n                update=None\r\n                ):\r\n        x, aux_dict = self.backbone(z=template, x=search, identity=self.identity)\r\n\r\n        # Forward head\r\n        feat_last = x\r\n        if isinstance(x, list):\r\n            feat_last = x[-1]\r\n            \r\n        pos_z = self.backbone.pos_embed_z\r\n        pos_x = self.backbone.pos_embed_x\r\n        out = self.forward_head(feat_last, pos_z, pos_x, self.identity, seq_input, stage)\r\n\r\n        out.update(aux_dict)\r\n        out['backbone_feat'] = x\r\n        return out\r\n\r\n    def forward_head(self, cat_feature, pos_z, pos_x, identity, seq_input=None, stage=None):\r\n        \"\"\"\r\n        cat_feature: output embeddings of the backbone, it can be (HW1+HW2, B, C) or (HW2, B, C)\r\n        \"\"\"\r\n\r\n        output_dict = self.pix_head(cat_feature, pos_z, pos_x, identity, seq_input, stage)\r\n        return output_dict\r\n\r\n\r\n\r\ndef build_artrack_seq(cfg, training=True):\r\n    current_dir = os.path.dirname(os.path.abspath(__file__))  # This is your Project Root\r\n    pretrained_path = os.path.join(current_dir, '../../../pretrained_models')\r\n    if cfg.MODEL.PRETRAIN_FILE and ('ARTrack' not in cfg.MODEL.PRETRAIN_FILE) and training:\r\n        pretrained = os.path.join(pretrained_path, cfg.MODEL.PRETRAIN_FILE)\r\n    else:\r\n        pretrained = ''\r\n\r\n    if cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224':\r\n        backbone = vit_base_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE)\r\n        hidden_dim = backbone.embed_dim\r\n        patch_start_index = 1\r\n\r\n    elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224':\r\n        print(\"i use vit_large\")\r\n        backbone = vit_large_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE)\r\n        hidden_dim = backbone.embed_dim\r\n        patch_start_index = 1\r\n\r\n    else:\r\n        raise NotImplementedError\r\n\r\n    backbone.finetune_track(cfg=cfg, patch_start_index=patch_start_index)\r\n\r\n    pix_head = build_pix_head(cfg, hidden_dim)\r\n\r\n    model = ARTrackSeq(\r\n        backbone,\r\n        
pix_head,\r\n        hidden_dim,\r\n    )\r\n    if cfg.MODEL.PRETRAIN_PTH != \"\":  # guard the stage-1 checkpoint load, as build_artrack does\r\n        load_from = cfg.MODEL.PRETRAIN_PTH\r\n        checkpoint = torch.load(load_from, map_location=\"cpu\")\r\n        missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"net\"], strict=False)\r\n        print('Load pretrained model from: ' + load_from)\r\n    if 'sequence' in cfg.MODEL.PRETRAIN_FILE and training:\r\n        print(\"Loading sequence-level pretrained weights\")\r\n        checkpoint = torch.load(cfg.MODEL.PRETRAIN_FILE, map_location=\"cpu\")\r\n        missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"net\"], strict=False)\r\n        print('Load pretrained model from: ' + cfg.MODEL.PRETRAIN_FILE)\r\n\r\n    return model\r\n"
  },
  {
    "path": "lib/models/artrack_seq/base_backbone.py",
    "content": "from functools import partial\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom timm.models.vision_transformer import resize_pos_embed\r\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\r\n\r\nfrom lib.models.layers.patch_embed import PatchEmbed\r\nfrom lib.models.artrack_seq.utils import combine_tokens, recover_tokens\r\n\r\nclass BaseBackbone(nn.Module):\r\n    def __init__(self):\r\n        super().__init__()\r\n\r\n        # for original ViT\r\n        self.pos_embed = None\r\n        self.img_size = [224, 224]\r\n        self.patch_size = 16\r\n        self.embed_dim = 384\r\n\r\n        self.cat_mode = 'direct'\r\n\r\n        self.pos_embed_z = None\r\n        self.pos_embed_x = None\r\n\r\n        self.template_segment_pos_embed = None\r\n        self.search_segment_pos_embed = None\r\n\r\n        self.return_inter = False\r\n        self.return_stage = [2, 5, 8, 11]\r\n\r\n        self.add_cls_token = False\r\n        self.add_sep_seg = False\r\n\r\n    def finetune_track(self, cfg, patch_start_index=1):\r\n\r\n        search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)\r\n        template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)\r\n        new_patch_size = cfg.MODEL.BACKBONE.STRIDE\r\n\r\n        self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE\r\n        self.return_inter = cfg.MODEL.RETURN_INTER\r\n        self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG\r\n\r\n        # resize patch embedding\r\n        if new_patch_size != self.patch_size:\r\n            print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')\r\n            old_patch_embed = {}\r\n            for name, param in self.patch_embed.named_parameters():\r\n                if 'weight' in name:\r\n                    param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size),\r\n                                                      mode='bicubic', align_corners=False)\r\n                    param = nn.Parameter(param)\r\n                old_patch_embed[name] = param\r\n            self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, in_chans=3,\r\n                                          embed_dim=self.embed_dim)\r\n            self.patch_embed.proj.bias = old_patch_embed['proj.bias']\r\n            self.patch_embed.proj.weight = old_patch_embed['proj.weight']\r\n\r\n        # for patch embedding\r\n        patch_pos_embed = self.pos_embed[:, patch_start_index:, :]\r\n        patch_pos_embed = patch_pos_embed.transpose(1, 2)\r\n        B, E, Q = patch_pos_embed.shape\r\n        P_H, P_W = self.img_size[0] // self.patch_size, self.img_size[1] // self.patch_size\r\n        patch_pos_embed = patch_pos_embed.view(B, E, P_H, P_W)\r\n\r\n        # for search region\r\n        H, W = search_size\r\n        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size\r\n        search_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',\r\n                                                           align_corners=False)\r\n        search_patch_pos_embed = search_patch_pos_embed.flatten(2).transpose(1, 2)\r\n\r\n        # for template region\r\n        H, W = template_size\r\n        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size\r\n        template_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',\r\n                                                             
align_corners=False)\r\n        template_patch_pos_embed = template_patch_pos_embed.flatten(2).transpose(1, 2)\r\n\r\n        self.pos_embed_z = nn.Parameter(template_patch_pos_embed)\r\n        self.pos_embed_x = nn.Parameter(search_patch_pos_embed)\r\n\r\n        # for cls token (keep it but not used)\r\n        if self.add_cls_token and patch_start_index > 0:\r\n            cls_pos_embed = self.pos_embed[:, 0:1, :]\r\n            self.cls_pos_embed = nn.Parameter(cls_pos_embed)\r\n\r\n        # separate token and segment token\r\n        if self.add_sep_seg:\r\n            self.template_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))\r\n            self.template_segment_pos_embed = trunc_normal_(self.template_segment_pos_embed, std=.02)\r\n            self.search_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))\r\n            self.search_segment_pos_embed = trunc_normal_(self.search_segment_pos_embed, std=.02)\r\n\r\n        # self.cls_token = None\r\n        # self.pos_embed = None\r\n\r\n        if self.return_inter:\r\n            for i_layer in self.fpn_stage:\r\n                if i_layer != 11:\r\n                    norm_layer = partial(nn.LayerNorm, eps=1e-6)\r\n                    layer = norm_layer(self.embed_dim)\r\n                    layer_name = f'norm{i_layer}'\r\n                    self.add_module(layer_name, layer)\r\n\r\n    def forward_features(self, z, x, identity):\r\n        B, H, W = x.shape[0], x.shape[2], x.shape[3]\r\n\r\n        x = self.patch_embed(x)\r\n        z = self.patch_embed(z)\r\n\r\n        s_x = x.shape[1]\r\n        s_z = z.shape[1]\r\n\r\n        if self.add_cls_token:\r\n            cls_tokens = self.cls_token.expand(B, -1, -1)\r\n            cls_tokens = cls_tokens + self.cls_pos_embed\r\n\r\n        z += self.pos_embed_z\r\n        x += self.pos_embed_x\r\n\r\n        z += identity[:, 0, :].repeat(B, self.pos_embed_z.shape[1], 1)\r\n        x += identity[:, 1, :].repeat(B, self.pos_embed_x.shape[1], 1)\r\n\r\n        if self.add_sep_seg:\r\n            x += self.search_segment_pos_embed\r\n            z += self.template_segment_pos_embed\r\n\r\n        x = combine_tokens(z, x, mode=self.cat_mode)\r\n        if self.add_cls_token:\r\n            x = torch.cat([cls_tokens, x], dim=1)\r\n\r\n        x = self.pos_drop(x)\r\n\r\n        for i, blk in enumerate(self.blocks):\r\n            x = blk(x)\r\n        lens_z = self.pos_embed_z.shape[1]\r\n        lens_x = self.pos_embed_x.shape[1]\r\n        # x = recover_tokens(x, lens_z, lens_x, mode=self.cat_mode)\r\n\r\n        aux_dict = {\"attn\": None}\r\n        return self.norm(x), aux_dict\r\n\r\n    def forward(self, z, x, identity, **kwargs):\r\n        \"\"\"\r\n        Joint feature extraction and relation modeling for the basic ViT backbone.\r\n        Args:\r\n            z (torch.Tensor): template feature, [B, C, H_z, W_z]\r\n            x (torch.Tensor): search region feature, [B, C, H_x, W_x]\r\n\r\n        Returns:\r\n            x (torch.Tensor): merged template and search region feature, [B, L_z+L_x, C]\r\n            attn : None\r\n        \"\"\"\r\n        x, aux_dict = self.forward_features(z, x, identity)\r\n\r\n        return x, aux_dict\r\n"
  },
  {
    "path": "lib/models/artrack_seq/utils.py",
    "content": "import math\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\n\r\n\r\ndef combine_tokens(template_tokens, search_tokens, mode='direct', return_res=False):\r\n    # [B, HW, C]\r\n    len_t = template_tokens.shape[1]\r\n    len_s = search_tokens.shape[1]\r\n\r\n    if mode == 'direct':\r\n        merged_feature = torch.cat((template_tokens, search_tokens), dim=1)\r\n    elif mode == 'template_central':\r\n        central_pivot = len_s // 2\r\n        first_half = search_tokens[:, :central_pivot, :]\r\n        second_half = search_tokens[:, central_pivot:, :]\r\n        merged_feature = torch.cat((first_half, template_tokens, second_half), dim=1)\r\n    elif mode == 'partition':\r\n        feat_size_s = int(math.sqrt(len_s))\r\n        feat_size_t = int(math.sqrt(len_t))\r\n        window_size = math.ceil(feat_size_t / 2.)\r\n        # pad feature maps to multiples of window size\r\n        B, _, C = template_tokens.shape\r\n        H = W = feat_size_t\r\n        template_tokens = template_tokens.view(B, H, W, C)\r\n        pad_l = pad_b = pad_r = 0\r\n        # pad_r = (window_size - W % window_size) % window_size\r\n        pad_t = (window_size - H % window_size) % window_size\r\n        template_tokens = F.pad(template_tokens, (0, 0, pad_l, pad_r, pad_t, pad_b))\r\n        _, Hp, Wp, _ = template_tokens.shape\r\n        template_tokens = template_tokens.view(B, Hp // window_size, window_size, W, C)\r\n        template_tokens = torch.cat([template_tokens[:, 0, ...], template_tokens[:, 1, ...]], dim=2)\r\n        _, Hc, Wc, _ = template_tokens.shape\r\n        template_tokens = template_tokens.view(B, -1, C)\r\n        merged_feature = torch.cat([template_tokens, search_tokens], dim=1)\r\n\r\n        # calculate new h and w, which may be useful for SwinT or others\r\n        merged_h, merged_w = feat_size_s + Hc, feat_size_s\r\n        if return_res:\r\n            return merged_feature, merged_h, merged_w\r\n\r\n    else:\r\n        raise NotImplementedError\r\n\r\n    return merged_feature\r\n\r\n\r\ndef recover_tokens(merged_tokens, len_template_token, len_search_token, mode='direct'):\r\n    if mode == 'direct':\r\n        recovered_tokens = merged_tokens\r\n    elif mode == 'template_central':\r\n        central_pivot = len_search_token // 2\r\n        len_remain = len_search_token - central_pivot\r\n        len_half_and_t = central_pivot + len_template_token\r\n\r\n        first_half = merged_tokens[:, :central_pivot, :]\r\n        second_half = merged_tokens[:, -len_remain:, :]\r\n        template_tokens = merged_tokens[:, central_pivot:len_half_and_t, :]\r\n\r\n        recovered_tokens = torch.cat((template_tokens, first_half, second_half), dim=1)\r\n    elif mode == 'partition':\r\n        recovered_tokens = merged_tokens\r\n    else:\r\n        raise NotImplementedError\r\n\r\n    return recovered_tokens\r\n\r\n\r\ndef window_partition(x, window_size: int):\r\n    \"\"\"\r\n    Args:\r\n        x: (B, H, W, C)\r\n        window_size (int): window size\r\n\r\n    Returns:\r\n        windows: (num_windows*B, window_size, window_size, C)\r\n    \"\"\"\r\n    B, H, W, C = x.shape\r\n    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)\r\n    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\r\n    return windows\r\n\r\n\r\ndef window_reverse(windows, window_size: int, H: int, W: int):\r\n    \"\"\"\r\n    Args:\r\n        windows: (num_windows*B, window_size, window_size, C)\r\n        
window_size (int): Window size\r\n        H (int): Height of image\r\n        W (int): Width of image\r\n\r\n    Returns:\r\n        x: (B, H, W, C)\r\n    \"\"\"\r\n    B = int(windows.shape[0] / (H * W / window_size / window_size))\r\n    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)\r\n    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\r\n    return x\r\n"
  },
  {
    "path": "lib/models/artrack_seq/vit.py",
    "content": "\"\"\" Vision Transformer (ViT) in PyTorch\r\nA PyTorch implement of Vision Transformers as described in:\r\n'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'\r\n    - https://arxiv.org/abs/2010.11929\r\n`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`\r\n    - https://arxiv.org/abs/2106.10270\r\nThe official jax code is released and available at https://github.com/google-research/vision_transformer\r\nDeiT model defs and weights from https://github.com/facebookresearch/deit,\r\npaper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877\r\nAcknowledgments:\r\n* The paper authors for releasing code and weights, thanks!\r\n* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out\r\nfor some einops/einsum fun\r\n* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT\r\n* Bert reference code checks against Huggingface Transformers and Tensorflow Bert\r\nHacked together by / Copyright 2021 Ross Wightman\r\n\r\nModified by Botao Ye\r\n\"\"\"\r\nimport math\r\nimport logging\r\nfrom functools import partial\r\nfrom collections import OrderedDict\r\nfrom copy import deepcopy\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD\r\nfrom timm.models.helpers import build_model_with_cfg, named_apply, adapt_input_conv\r\nfrom timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_\r\nfrom timm.models.registry import register_model\r\n\r\nfrom lib.models.layers.patch_embed import PatchEmbed\r\nfrom lib.models.artrack_seq.base_backbone import BaseBackbone\r\n\r\n\r\nclass Attention(nn.Module):\r\n    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):\r\n        super().__init__()\r\n        self.num_heads = num_heads\r\n        head_dim = dim // num_heads\r\n        self.scale = head_dim ** -0.5\r\n\r\n        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\r\n        self.attn_drop = nn.Dropout(attn_drop)\r\n        self.proj = nn.Linear(dim, dim)\r\n        self.proj_drop = nn.Dropout(proj_drop)\r\n\r\n    def forward(self, x, return_attention=False):\r\n        B, N, C = x.shape\r\n        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\r\n        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)\r\n\r\n        attn = (q @ k.transpose(-2, -1)) * self.scale\r\n        attn = attn.softmax(dim=-1)\r\n        attn = self.attn_drop(attn)\r\n\r\n        x = (attn @ v).transpose(1, 2).reshape(B, N, C)\r\n        x = self.proj(x)\r\n        x = self.proj_drop(x)\r\n\r\n        if return_attention:\r\n            return x, attn\r\n        return x\r\n\r\n\r\nclass Block(nn.Module):\r\n\r\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\r\n                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\r\n        super().__init__()\r\n        self.norm1 = norm_layer(dim)\r\n        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\r\n        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\r\n        self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\r\n        self.norm2 = norm_layer(dim)\r\n        mlp_hidden_dim = int(dim * mlp_ratio)\r\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\r\n\r\n    def forward(self, x, return_attention=False):\r\n        if return_attention:\r\n            feat, attn = self.attn(self.norm1(x), True)\r\n            x = x + self.drop_path(feat)\r\n            x = x + self.drop_path(self.mlp(self.norm2(x)))\r\n            return x, attn\r\n        else:\r\n            x = x + self.drop_path(self.attn(self.norm1(x)))\r\n            x = x + self.drop_path(self.mlp(self.norm2(x)))\r\n            return x\r\n\r\n\r\nclass VisionTransformer(BaseBackbone):\r\n    \"\"\" Vision Transformer\r\n    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\r\n        - https://arxiv.org/abs/2010.11929\r\n    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`\r\n        - https://arxiv.org/abs/2012.12877\r\n    \"\"\"\r\n\r\n    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\r\n                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,\r\n                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,\r\n                 act_layer=None, weight_init=''):\r\n        \"\"\"\r\n        Args:\r\n            img_size (int, tuple): input image size\r\n            patch_size (int, tuple): patch size\r\n            in_chans (int): number of input channels\r\n            num_classes (int): number of classes for classification head\r\n            embed_dim (int): embedding dimension\r\n            depth (int): depth of transformer\r\n            num_heads (int): number of attention heads\r\n            mlp_ratio (int): ratio of mlp hidden dim to embedding dim\r\n            qkv_bias (bool): enable bias for qkv if True\r\n            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\r\n            distilled (bool): model includes a distillation token and head as in DeiT models\r\n            drop_rate (float): dropout rate\r\n            attn_drop_rate (float): attention dropout rate\r\n            drop_path_rate (float): stochastic depth rate\r\n            embed_layer (nn.Module): patch embedding layer\r\n            norm_layer: (nn.Module): normalization layer\r\n            weight_init: (str): weight init scheme\r\n        \"\"\"\r\n        super().__init__()\r\n        self.num_classes = num_classes\r\n        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models\r\n        self.num_tokens = 2 if distilled else 1\r\n        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\r\n        act_layer = act_layer or nn.GELU\r\n\r\n        self.patch_embed = embed_layer(\r\n            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\r\n        num_patches = self.patch_embed.num_patches\r\n\r\n        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\r\n        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\r\n        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\r\n        self.pos_drop = nn.Dropout(p=drop_rate)\r\n\r\n        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth 
decay rule\r\n        self.blocks = nn.Sequential(*[\r\n            Block(\r\n                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\r\n                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)\r\n            for i in range(depth)])\r\n\r\n        self.norm = norm_layer(embed_dim)\r\n\r\n        # # Representation layer\r\n        # if representation_size and not distilled:\r\n        #     self.num_features = representation_size\r\n        #     self.pre_logits = nn.Sequential(OrderedDict([\r\n        #         ('fc', nn.Linear(embed_dim, representation_size)),\r\n        #         ('act', nn.Tanh())\r\n        #     ]))\r\n        # else:\r\n        #     self.pre_logits = nn.Identity()\r\n        #\r\n        # # Classifier head(s)\r\n        # self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()\r\n        # self.head_dist = None\r\n        # if distilled:\r\n        #     self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\r\n\r\n        self.init_weights(weight_init)\r\n\r\n    def init_weights(self, mode=''):\r\n        assert mode in ('jax', 'jax_nlhb', 'nlhb', '')\r\n        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.\r\n        trunc_normal_(self.pos_embed, std=.02)\r\n        if self.dist_token is not None:\r\n            trunc_normal_(self.dist_token, std=.02)\r\n        if mode.startswith('jax'):\r\n            # leave cls token as zeros to match jax impl\r\n            named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)\r\n        else:\r\n            trunc_normal_(self.cls_token, std=.02)\r\n            self.apply(_init_vit_weights)\r\n\r\n    def _init_weights(self, m):\r\n        # this fn left here for compat with downstream users\r\n        _init_vit_weights(m)\r\n\r\n    @torch.jit.ignore()\r\n    def load_pretrained(self, checkpoint_path, prefix=''):\r\n        _load_weights(self, checkpoint_path, prefix)\r\n\r\n    @torch.jit.ignore\r\n    def no_weight_decay(self):\r\n        return {'pos_embed', 'cls_token', 'dist_token'}\r\n\r\n    def get_classifier(self):\r\n        if self.dist_token is None:\r\n            return self.head\r\n        else:\r\n            return self.head, self.head_dist\r\n\r\n    def reset_classifier(self, num_classes, global_pool=''):\r\n        self.num_classes = num_classes\r\n        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\r\n        if self.num_tokens == 2:\r\n            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\r\n\r\n\r\ndef _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):\r\n    \"\"\" ViT weight initialization\r\n    * When called without n, head_bias, jax_impl args it will behave exactly the same\r\n      as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).\r\n    * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl\r\n    \"\"\"\r\n    if isinstance(module, nn.Linear):\r\n        if name.startswith('head'):\r\n            nn.init.zeros_(module.weight)\r\n            nn.init.constant_(module.bias, head_bias)\r\n        elif name.startswith('pre_logits'):\r\n            lecun_normal_(module.weight)\r\n            nn.init.zeros_(module.bias)\r\n        else:\r\n            if 
jax_impl:\r\n                nn.init.xavier_uniform_(module.weight)\r\n                if module.bias is not None:\r\n                    if 'mlp' in name:\r\n                        nn.init.normal_(module.bias, std=1e-6)\r\n                    else:\r\n                        nn.init.zeros_(module.bias)\r\n            else:\r\n                trunc_normal_(module.weight, std=.02)\r\n                if module.bias is not None:\r\n                    nn.init.zeros_(module.bias)\r\n    elif jax_impl and isinstance(module, nn.Conv2d):\r\n        # NOTE conv was left to pytorch default in my original init\r\n        lecun_normal_(module.weight)\r\n        if module.bias is not None:\r\n            nn.init.zeros_(module.bias)\r\n    elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):\r\n        nn.init.zeros_(module.bias)\r\n        nn.init.ones_(module.weight)\r\n\r\n\r\n@torch.no_grad()\r\ndef _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):\r\n    \"\"\" Load weights from .npz checkpoints for official Google Brain Flax implementation\r\n    \"\"\"\r\n    import numpy as np\r\n\r\n    def _n2p(w, t=True):\r\n        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:\r\n            w = w.flatten()\r\n        if t:\r\n            if w.ndim == 4:\r\n                w = w.transpose([3, 2, 0, 1])\r\n            elif w.ndim == 3:\r\n                w = w.transpose([2, 0, 1])\r\n            elif w.ndim == 2:\r\n                w = w.transpose([1, 0])\r\n        return torch.from_numpy(w)\r\n\r\n    w = np.load(checkpoint_path)\r\n    if not prefix and 'opt/target/embedding/kernel' in w:\r\n        prefix = 'opt/target/'\r\n\r\n    if hasattr(model.patch_embed, 'backbone'):\r\n        # hybrid\r\n        backbone = model.patch_embed.backbone\r\n        stem_only = not hasattr(backbone, 'stem')\r\n        stem = backbone if stem_only else backbone.stem\r\n        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))\r\n        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))\r\n        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))\r\n        if not stem_only:\r\n            for i, stage in enumerate(backbone.stages):\r\n                for j, block in enumerate(stage.blocks):\r\n                    bp = f'{prefix}block{i + 1}/unit{j + 1}/'\r\n                    for r in range(3):\r\n                        getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))\r\n                        getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))\r\n                        getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))\r\n                    if block.downsample is not None:\r\n                        block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))\r\n                        block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))\r\n                        block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))\r\n        embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])\r\n    else:\r\n        embed_conv_w = adapt_input_conv(\r\n            model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))\r\n    model.patch_embed.proj.weight.copy_(embed_conv_w)\r\n    model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))\r\n    model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))\r\n    pos_embed_w = 
_n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)\r\n    if pos_embed_w.shape != model.pos_embed.shape:\r\n        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights\r\n            pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)\r\n    model.pos_embed.copy_(pos_embed_w)\r\n    model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))\r\n    model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))\r\n    if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:\r\n        model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))\r\n        model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))\r\n    if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:\r\n        model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))\r\n        model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))\r\n    for i, block in enumerate(model.blocks.children()):\r\n        block_prefix = f'{prefix}Transformer/encoderblock_{i}/'\r\n        mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'\r\n        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))\r\n        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))\r\n        block.attn.qkv.weight.copy_(torch.cat([\r\n            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))\r\n        block.attn.qkv.bias.copy_(torch.cat([\r\n            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))\r\n        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))\r\n        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))\r\n        for r in range(2):\r\n            getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))\r\n            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))\r\n        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))\r\n        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))\r\n\r\n\r\ndef resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):\r\n    # Rescale the grid of position embeddings when loading from state_dict. 
Adapted from\r\n    # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224\r\n    print('Resized position embedding: %s to %s' % (posemb.shape, posemb_new.shape))\r\n    ntok_new = posemb_new.shape[1]\r\n    if num_tokens:\r\n        posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]\r\n        ntok_new -= num_tokens\r\n    else:\r\n        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]\r\n    gs_old = int(math.sqrt(len(posemb_grid)))\r\n    if not len(gs_new):  # backwards compatibility\r\n        gs_new = [int(math.sqrt(ntok_new))] * 2\r\n    assert len(gs_new) >= 2\r\n    print('Position embedding grid-size from %s to %s' % ([gs_old, gs_old], gs_new))\r\n    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)\r\n    posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bilinear')\r\n    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)\r\n    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)\r\n    return posemb\r\n\r\n\r\ndef checkpoint_filter_fn(state_dict, model):\r\n    \"\"\" convert patch embedding weight from manual patchify + linear proj to conv\"\"\"\r\n    out_dict = {}\r\n    if 'model' in state_dict:\r\n        # For deit models\r\n        state_dict = state_dict['model']\r\n    for k, v in state_dict.items():\r\n        if 'patch_embed.proj.weight' in k and len(v.shape) < 4:\r\n            # For old models that I trained prior to conv based patchification\r\n            O, I, H, W = model.patch_embed.proj.weight.shape\r\n            v = v.reshape(O, -1, H, W)\r\n        elif k == 'pos_embed' and v.shape != model.pos_embed.shape:\r\n            # To resize pos embedding when using model at different size from pretrained weights\r\n            v = resize_pos_embed(\r\n                v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)\r\n        out_dict[k] = v\r\n    return out_dict\r\n\r\n\r\ndef _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):\r\n    if kwargs.get('features_only', None):\r\n        raise RuntimeError('features_only not implemented for Vision Transformer models.')\r\n\r\n    model = VisionTransformer(**kwargs)\r\n    if pretrained:\r\n        if 'npz' in pretrained:\r\n            model.load_pretrained(pretrained, prefix='')\r\n        else:\r\n            checkpoint = torch.load(pretrained, map_location=\"cpu\")\r\n            missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"model\"], strict=False)\r\n            print('Load pretrained model from: ' + pretrained)\r\n\r\n    return model\r\n\r\n\r\ndef vit_base_patch16_224(pretrained=False, **kwargs):\r\n    \"\"\"\r\n    ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).\r\n    \"\"\"\r\n    model_kwargs = dict(\r\n        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)\r\n    model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs)\r\n    return model\r\n\r\ndef vit_large_patch16_224(pretrained=False, **kwargs):\r\n    model_kwargs = dict(\r\n        patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)\r\n    model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs)\r\n    return model\r\n"
  },
  {
    "path": "lib/models/artrack_seq/vit_ce.py",
    "content": "import math\r\nimport logging\r\nfrom functools import partial\r\nfrom collections import OrderedDict\r\nfrom copy import deepcopy\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom timm.models.layers import to_2tuple\r\n\r\nfrom lib.models.layers.patch_embed import PatchEmbed\r\nfrom .utils import combine_tokens, recover_tokens\r\nfrom .vit import VisionTransformer\r\nfrom ..layers.attn_blocks import CEBlock\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\nclass VisionTransformerCE(VisionTransformer):\r\n    \"\"\" Vision Transformer with candidate elimination (CE) module\r\n\r\n    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\r\n        - https://arxiv.org/abs/2010.11929\r\n\r\n    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`\r\n        - https://arxiv.org/abs/2012.12877\r\n    \"\"\"\r\n\r\n    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\r\n                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,\r\n                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,\r\n                 act_layer=None, weight_init='',\r\n                 ce_loc=None, ce_keep_ratio=None):\r\n        \"\"\"\r\n        Args:\r\n            img_size (int, tuple): input image size\r\n            patch_size (int, tuple): patch size\r\n            in_chans (int): number of input channels\r\n            num_classes (int): number of classes for classification head\r\n            embed_dim (int): embedding dimension\r\n            depth (int): depth of transformer\r\n            num_heads (int): number of attention heads\r\n            mlp_ratio (int): ratio of mlp hidden dim to embedding dim\r\n            qkv_bias (bool): enable bias for qkv if True\r\n            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\r\n            distilled (bool): model includes a distillation token and head as in DeiT models\r\n            drop_rate (float): dropout rate\r\n            attn_drop_rate (float): attention dropout rate\r\n            drop_path_rate (float): stochastic depth rate\r\n            embed_layer (nn.Module): patch embedding layer\r\n            norm_layer: (nn.Module): normalization layer\r\n            weight_init: (str): weight init scheme\r\n        \"\"\"\r\n        # super().__init__()\r\n        super().__init__()\r\n        if isinstance(img_size, tuple):\r\n            self.img_size = img_size\r\n        else:\r\n            self.img_size = to_2tuple(img_size)\r\n        self.patch_size = patch_size\r\n        self.in_chans = in_chans\r\n\r\n        self.num_classes = num_classes\r\n        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models\r\n        self.num_tokens = 2 if distilled else 1\r\n        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\r\n        act_layer = act_layer or nn.GELU\r\n\r\n        self.patch_embed = embed_layer(\r\n            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\r\n        num_patches = self.patch_embed.num_patches\r\n\r\n        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\r\n        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\r\n        self.pos_embed = 
nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\r\n        self.pos_drop = nn.Dropout(p=drop_rate)\r\n\r\n        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule\r\n        blocks = []\r\n        ce_index = 0\r\n        self.ce_loc = ce_loc\r\n        for i in range(depth):\r\n            ce_keep_ratio_i = 1.0\r\n            if ce_loc is not None and i in ce_loc:\r\n                ce_keep_ratio_i = ce_keep_ratio[ce_index]\r\n                ce_index += 1\r\n\r\n            blocks.append(\r\n                CEBlock(\r\n                    dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\r\n                    attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer,\r\n                    keep_ratio_search=ce_keep_ratio_i)\r\n            )\r\n\r\n        self.blocks = nn.Sequential(*blocks)\r\n        self.norm = norm_layer(embed_dim)\r\n\r\n        self.init_weights(weight_init)\r\n\r\n    def forward_features(self, z, x, mask_z=None, mask_x=None,\r\n                         ce_template_mask=None, ce_keep_rate=None,\r\n                         return_last_attn=False\r\n                         ):\r\n        B, H, W = x.shape[0], x.shape[2], x.shape[3]\r\n\r\n        x = self.patch_embed(x)\r\n        z = self.patch_embed(z)\r\n\r\n        # attention mask handling\r\n        # B, H, W\r\n        if mask_z is not None and mask_x is not None:\r\n            mask_z = F.interpolate(mask_z[None].float(), scale_factor=1. / self.patch_size).to(torch.bool)[0]\r\n            mask_z = mask_z.flatten(1).unsqueeze(-1)\r\n\r\n            mask_x = F.interpolate(mask_x[None].float(), scale_factor=1. / self.patch_size).to(torch.bool)[0]\r\n            mask_x = mask_x.flatten(1).unsqueeze(-1)\r\n\r\n            mask_x = combine_tokens(mask_z, mask_x, mode=self.cat_mode)\r\n            mask_x = mask_x.squeeze(-1)\r\n\r\n        if self.add_cls_token:\r\n            cls_tokens = self.cls_token.expand(B, -1, -1)\r\n            cls_tokens = cls_tokens + self.cls_pos_embed\r\n\r\n        z += self.pos_embed_z\r\n        x += self.pos_embed_x\r\n\r\n        if self.add_sep_seg:\r\n            x += self.search_segment_pos_embed\r\n            z += self.template_segment_pos_embed\r\n\r\n        x = combine_tokens(z, x, mode=self.cat_mode)\r\n        if self.add_cls_token:\r\n            x = torch.cat([cls_tokens, x], dim=1)\r\n\r\n        x = self.pos_drop(x)\r\n\r\n        lens_z = self.pos_embed_z.shape[1]\r\n        lens_x = self.pos_embed_x.shape[1]\r\n\r\n        global_index_t = torch.linspace(0, lens_z - 1, lens_z).to(x.device)\r\n        global_index_t = global_index_t.repeat(B, 1)\r\n\r\n        global_index_s = torch.linspace(0, lens_x - 1, lens_x).to(x.device)\r\n        global_index_s = global_index_s.repeat(B, 1)\r\n        removed_indexes_s = []\r\n        for i, blk in enumerate(self.blocks):\r\n            x, global_index_t, global_index_s, removed_index_s, attn = \\\r\n                blk(x, global_index_t, global_index_s, mask_x, ce_template_mask, ce_keep_rate)\r\n\r\n            if self.ce_loc is not None and i in self.ce_loc:\r\n                removed_indexes_s.append(removed_index_s)\r\n\r\n        x = self.norm(x)\r\n        lens_x_new = global_index_s.shape[1]\r\n        lens_z_new = global_index_t.shape[1]\r\n\r\n        z = x[:, :lens_z_new]\r\n        x = x[:, lens_z_new:]\r\n\r\n        if removed_indexes_s and removed_indexes_s[0] is not 
None:\r\n            removed_indexes_cat = torch.cat(removed_indexes_s, dim=1)\r\n\r\n            pruned_lens_x = lens_x - lens_x_new\r\n            pad_x = torch.zeros([B, pruned_lens_x, x.shape[2]], device=x.device)\r\n            x = torch.cat([x, pad_x], dim=1)\r\n            index_all = torch.cat([global_index_s, removed_indexes_cat], dim=1)\r\n            # recover original token order\r\n            C = x.shape[-1]\r\n            # x = x.gather(1, index_all.unsqueeze(-1).expand(B, -1, C).argsort(1))\r\n            x = torch.zeros_like(x).scatter_(dim=1, index=index_all.unsqueeze(-1).expand(B, -1, C).to(torch.int64), src=x)\r\n\r\n        x = recover_tokens(x, lens_z_new, lens_x, mode=self.cat_mode)\r\n\r\n        # re-concatenate with the template, which may be further used by other modules\r\n        x = torch.cat([z, x], dim=1)\r\n\r\n        aux_dict = {\r\n            \"attn\": attn,\r\n            \"removed_indexes_s\": removed_indexes_s,  # used for visualization\r\n        }\r\n\r\n        return x, aux_dict\r\n\r\n    def forward(self, z, x, ce_template_mask=None, ce_keep_rate=None,\r\n                tnc_keep_rate=None,\r\n                return_last_attn=False):\r\n\r\n        x, aux_dict = self.forward_features(z, x, ce_template_mask=ce_template_mask, ce_keep_rate=ce_keep_rate,)\r\n\r\n        return x, aux_dict\r\n\r\n\r\ndef _create_vision_transformer(pretrained=False, **kwargs):\r\n    model = VisionTransformerCE(**kwargs)\r\n\r\n    if pretrained:\r\n        if 'npz' in pretrained:\r\n            model.load_pretrained(pretrained, prefix='')\r\n        else:\r\n            checkpoint = torch.load(pretrained, map_location=\"cpu\")\r\n            missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"model\"], strict=False)\r\n            print('Load pretrained model from: ' + pretrained)\r\n\r\n    return model\r\n\r\n\r\ndef vit_base_patch16_224_ce(pretrained=False, **kwargs):\r\n    \"\"\" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).\r\n    \"\"\"\r\n    model_kwargs = dict(\r\n        patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)\r\n    model = _create_vision_transformer(pretrained=pretrained, **model_kwargs)\r\n    return model\r\n\r\n\r\ndef vit_large_patch16_224_ce(pretrained=False, **kwargs):\r\n    \"\"\" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).\r\n    \"\"\"\r\n    model_kwargs = dict(\r\n        patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)\r\n    model = _create_vision_transformer(pretrained=pretrained, **model_kwargs)\r\n    return model\r\n"
  },
  {
    "path": "lib/models/artrackv2/__init__.py",
    "content": "from .artrackv2 import build_artrackv2"
  },
  {
    "path": "lib/models/artrackv2/artrackv2.py",
    "content": "from copy import deepcopy\nimport math\nimport os\nfrom typing import List\n\nimport torch\nfrom torch import nn\nfrom torch.nn.modules.transformer import _get_clones\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\n\nfrom lib.models.artrackv2.vit import vit_base_patch16_224, vit_large_patch16_224\nfrom lib.utils.box_ops import box_xyxy_to_cxcywh\n\n\nclass ARTrackV2(nn.Module):\n\n    def __init__(self,\n                 transformer,\n                 score_mlp,\n                 hidden_dim,\n                 ):\n\n        super().__init__()\n        self.identity = torch.nn.Parameter(torch.zeros(1, 3, hidden_dim))\n        self.identity = trunc_normal_(self.identity, std=.02)        \n        \n        self.backbone = transformer\n        self.score_mlp = score_mlp\n\n    def forward(self, template: torch.Tensor,\n                search: torch.Tensor,\n                ce_template_mask=None,\n                ce_keep_rate=None,\n                return_last_attn=False,\n                seq_input=None,\n                target_in_search_img=None,\n                gt_bboxes=None,\n                ):\n        template_0 = template[0]\n        template_1 = template[1]\n        out, z_0_feat, z_1_feat, x_feat = self.backbone(z_0=template_0, z_1=template_1, x=search, identity=self.identity, seqs_input=seq_input,\n                                    ce_template_mask=ce_template_mask,\n                                    ce_keep_rate=ce_keep_rate,\n                                    return_last_attn=return_last_attn,)\n        score_feat = out[\"score_feat\"]\n        score = self.score_mlp(score_feat)\n        out[\"score\"] = score\n\n        return out\n\nclass MlpScoreDecoder(nn.Module):\n    def __init__(self, in_dim, hidden_dim, num_layers, bn=False):\n        super().__init__()\n        self.num_layers = num_layers\n        h = [hidden_dim] * (num_layers - 1)\n        out_dim = 1 # score\n        if bn:\n            self.layers = nn.Sequential(*[nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k), nn.ReLU())\n                                          if i < num_layers - 1\n                                          else nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k))\n                                          for i, (n, k) in enumerate(zip([in_dim] + h, h + [out_dim]))])\n        else:\n            self.layers = nn.Sequential(*[nn.Sequential(nn.Linear(n, k), nn.ReLU())\n                                          if i < num_layers - 1\n                                          else nn.Linear(n, k)\n                                          for i, (n, k) in enumerate(zip([in_dim] + h, h + [out_dim]))])\n\n    def forward(self, reg_tokens):\n        \"\"\"\n        reg tokens shape: (b, 4, embed_dim)\n        \"\"\"\n        x = self.layers(reg_tokens) # (b, 4, 1)\n        x = x.mean(dim=1)   # (b, 1)\n        return x\n\ndef build_score_decoder(cfg, hidden_dim):\n    return MlpScoreDecoder(\n        in_dim=hidden_dim,\n        hidden_dim=hidden_dim,\n        num_layers=2,\n        bn=False\n    )\n\n\ndef build_artrackv2(cfg, training=True):\n    current_dir = os.path.dirname(os.path.abspath(__file__))  # This is your Project Root\n    pretrained_path = os.path.join(current_dir, '../../../pretrained_models')\n    if cfg.MODEL.PRETRAIN_FILE and ('ARTrack' not in cfg.MODEL.PRETRAIN_FILE) and training:\n        pretrained = os.path.join(pretrained_path, cfg.MODEL.PRETRAIN_FILE)\n    else:\n        pretrained = ''\n\n    if cfg.MODEL.BACKBONE.TYPE == 
'vit_base_patch16_224':\n        backbone = vit_base_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE, bins=cfg.MODEL.BINS, range=cfg.MODEL.RANGE, extension=cfg.MODEL.EXTENSION)\n        hidden_dim = backbone.embed_dim\n        patch_start_index = 1\n    elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224':\n        print(\"Using the vit_large_patch16_224 backbone\")\n        backbone = vit_large_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE, bins=cfg.MODEL.BINS, range=cfg.MODEL.RANGE, extension=cfg.MODEL.EXTENSION)\n        hidden_dim = backbone.embed_dim\n        patch_start_index = 1\n\n    else:\n        raise NotImplementedError\n\n    backbone.finetune_track(cfg=cfg, patch_start_index=patch_start_index)\n    score_decoder = build_score_decoder(cfg, hidden_dim)\n\n    model = ARTrackV2(\n        backbone,\n        score_decoder,\n        hidden_dim,\n    )\n    return model\n"
  },
  {
    "path": "lib/models/artrackv2/base_backbone.py",
    "content": "from functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom timm.models.vision_transformer import resize_pos_embed\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\n\nfrom lib.models.layers.patch_embed import PatchEmbed\nfrom lib.models.artrackv2.utils import combine_tokens, recover_tokens\n\ndef generate_square_subsequent_mask(sz, sx, ss):\n    r\"\"\"Build the attention mask for the [template | search | sequence] token layout.\n        Positions that are True in the returned mask are blocked (filled with float('-inf')\n        inside Attention); False positions may attend.\n    \"\"\"\n    total = sz + sx + ss\n    mask = torch.zeros(total, total, dtype=torch.bool)\n    mask[:int(sz/2), :int(sz/2)] = 1  # static template attends to itself\n    mask[int(sz/2):sz, int(sz/2):sz] = 1  # dynamic template attends to itself\n    mask[int(sz/2):sz, sz:sz+sx] = 1  # dynamic template attends to the search region\n    mask[int(sz/2):sz, -1] = 1  # dynamic template attends to the score token\n    mask[sz:sz+sx, :sz+sx] = 1  # search region attends to templates + search\n    mask[sz+sx:, :] = 1  # sequence tokens attend to everything\n    return ~mask\n\nclass BaseBackbone(nn.Module):\n    def __init__(self):\n        super().__init__()\n\n        # for original ViT\n        self.pos_embed = None\n        self.img_size = [224, 224]\n        self.patch_size = 16\n        self.embed_dim = 384\n\n        self.cat_mode = 'direct'\n\n        self.pos_embed_z0 = None\n        self.pos_embed_z1 = None\n        self.pos_embed_x = None\n\n        self.template_segment_pos_embed = None\n        self.search_segment_pos_embed = None\n\n        self.return_inter = False\n        self.return_stage = [2, 5, 8, 11]\n\n        self.add_cls_token = False\n        self.add_sep_seg = False\n\n    def finetune_track(self, cfg, patch_start_index=1):\n\n        search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)\n        template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)\n        new_patch_size = cfg.MODEL.BACKBONE.STRIDE\n\n        self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE\n        self.return_inter = cfg.MODEL.RETURN_INTER\n        self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG\n\n        # resize patch embedding\n        if new_patch_size != self.patch_size:\n            print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')\n            old_patch_embed = {}\n            for name, param in self.patch_embed.named_parameters():\n                if 'weight' in name:\n                    param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size),\n                                                      mode='bicubic', align_corners=False)\n                    param = nn.Parameter(param)\n                old_patch_embed[name] = param\n            self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, in_chans=3,\n                                          embed_dim=self.embed_dim)\n            self.patch_embed.proj.bias = old_patch_embed['proj.bias']\n            self.patch_embed.proj.weight = old_patch_embed['proj.weight']\n\n        # for patch embedding\n        patch_pos_embed = self.pos_embed[:, patch_start_index:, :]\n        patch_pos_embed = patch_pos_embed.transpose(1, 2)\n        B, E, Q = patch_pos_embed.shape\n        P_H, P_W = self.img_size[0] // self.patch_size, self.img_size[1] // self.patch_size\n        patch_pos_embed = patch_pos_embed.view(B, E, P_H, P_W)\n\n        # for search region\n        H, W = search_size\n        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size\n
        search_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',\n                                                           align_corners=False)\n        search_patch_pos_embed = search_patch_pos_embed.flatten(2).transpose(1, 2)\n\n        # for template region\n        H, W = template_size\n        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size\n        template_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',\n                                                             align_corners=False)\n        template_patch_pos_embed = template_patch_pos_embed.flatten(2).transpose(1, 2)\n\n        self.pos_embed_z0 = nn.Parameter(template_patch_pos_embed)\n        self.pos_embed_z1 = nn.Parameter(template_patch_pos_embed)\n        self.pos_embed_x = nn.Parameter(search_patch_pos_embed)\n\n        # for cls token (keep it but not used)\n        if self.add_cls_token and patch_start_index > 0:\n            cls_pos_embed = self.pos_embed[:, 0:1, :]\n            self.cls_pos_embed = nn.Parameter(cls_pos_embed)\n\n        # separate token and segment token\n        if self.add_sep_seg:\n            self.template_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))\n            self.template_segment_pos_embed = trunc_normal_(self.template_segment_pos_embed, std=.02)\n            self.search_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))\n            self.search_segment_pos_embed = trunc_normal_(self.search_segment_pos_embed, std=.02)\n\n        if self.return_inter:\n            for i_layer in self.fpn_stage:\n                if i_layer != 11:\n                    norm_layer = partial(nn.LayerNorm, eps=1e-6)\n                    layer = norm_layer(self.embed_dim)\n                    layer_name = f'norm{i_layer}'\n                    self.add_module(layer_name, layer)\n\n    def forward_features(self, z_0, z_1, x, identity, seqs_input):\n        share_weight = self.word_embeddings.weight.T\n\n        B, H, W = x.shape[0], x.shape[2], x.shape[3]\n\n        seqs_input = seqs_input.to(torch.int64).to(x.device)\n        tgt = self.word_embeddings(seqs_input).permute(1, 0, 2)\n        query_embed = self.position_embeddings.weight.unsqueeze(1)\n        query_embed = query_embed.repeat(1, B, 1)\n\n        tgt = tgt.transpose(0, 1)\n        query_embed = query_embed.transpose(0, 1)\n\n        x = self.patch_embed(x)\n        z_0 = self.patch_embed(z_0)\n        z_1 = self.patch_embed(z_1)\n\n        len_x = x.shape[1]\n        len_z = z_0.shape[1] + z_1.shape[1]\n        len_seq = seqs_input.shape[1]\n\n        mask = generate_square_subsequent_mask(len_z, len_x, len_seq).to(tgt.device)\n\n        if self.add_cls_token:\n            cls_tokens = self.cls_token.expand(B, -1, -1)\n            cls_tokens = cls_tokens + self.cls_pos_embed\n\n        z_0 += self.pos_embed_z0\n        z_1 += self.pos_embed_z1\n        x += self.pos_embed_x\n        tgt += query_embed\n\n        z_0 += identity[:, 0, :].repeat(B, self.pos_embed_z0.shape[1], 1)\n        z_1 += identity[:, 1, :].repeat(B, self.pos_embed_z1.shape[1], 1)\n\n        x += identity[:, 2, :].repeat(B, self.pos_embed_x.shape[1], 1)\n\n        # concatenate the two templates before the optional segment embeddings are added\n        z = torch.cat((z_0, z_1), dim=1)\n\n        if self.add_sep_seg:\n            x += self.search_segment_pos_embed\n            z += self.template_segment_pos_embed\n\n        x = combine_tokens(z, x, mode=self.cat_mode)\n        x = torch.cat((x, tgt), dim=1)\n        if self.add_cls_token:\n            x = torch.cat([cls_tokens, x], dim=1)\n\n
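        # The token sequence is now [z_0 | z_1 | x | seq]; the mask built earlier keeps z_0\n        # self-contained, lets z_1 attend to itself, the search region, and the score token,\n        # and lets the appended sequence tokens attend to the whole stream.\n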
        x = self.pos_drop(x)\n\n        for i, blk in enumerate(self.blocks):\n            x = blk(x, padding_mask=mask)\n        for j, blk in enumerate(self.extension):\n            x = blk(x, padding_mask=mask)\n        x_out = self.norm(x[:, -5:-1])\n        score_feat = x[:, -1]\n\n        lens_z = self.pos_embed_z0.shape[1]\n        lens_x = self.pos_embed_x.shape[1]\n\n        z_0_feat = x[:, :lens_z]\n        z_1_feat = x[:, lens_z:lens_z*2]\n        x_feat = x[:, lens_z*2:lens_z*2+lens_x]\n\n        #x = recover_tokens(x, lens_z, lens_x, mode=self.cat_mode)\n        at = torch.matmul(x_out, share_weight)\n        at = at + self.output_bias\n        at = at[:, -4:]\n        at = at.transpose(0, 1)\n        output = {'feat': at, 'score_feat': score_feat, \"state\": \"train\"}\n\n        return output, z_0_feat, z_1_feat, x_feat\n\n    def forward_track(self, z_0, z_1, x, identity):\n        share_weight = self.word_embeddings.weight.T\n        out_list = []\n\n        x0 = self.bins * self.range\n        y0 = self.bins * self.range + 1\n        x1 = self.bins * self.range + 2\n        y1 = self.bins * self.range + 3\n        score = self.bins * self.range + 5\n\n        B, H, W = x.shape[0], x.shape[2], x.shape[3]\n\n        seq = torch.cat([torch.ones((B, 1)).to(x) * x0, torch.ones((B, 1)).to(x) * y0,\n                       torch.ones((B, 1)).to(x) * x1,\n                       torch.ones((B, 1)).to(x) * y1,\n                       torch.ones((B, 1)).to(x) * score], dim=1)\n\n        seq_all = seq\n\n        seqs_input = seq_all.to(torch.int64).to(x.device)\n        output_x_feat = x.clone()\n        tgt = self.word_embeddings(seqs_input).permute(1, 0, 2)\n\n        x = self.patch_embed(x)\n        z_0 = self.patch_embed(z_0)\n        z_1 = self.patch_embed(z_1)\n\n        len_x = x.shape[1]\n        len_z = z_0.shape[1] + z_1.shape[1]\n        len_seq = seqs_input.shape[1]\n\n        z_0 += identity[:, 0, :].repeat(B, self.pos_embed_z0.shape[1], 1)\n        z_1 += identity[:, 1, :].repeat(B, self.pos_embed_z1.shape[1], 1)\n\n        x += identity[:, 2, :].repeat(B, self.pos_embed_x.shape[1], 1)\n\n        query_pos_embed = self.position_embeddings.weight.unsqueeze(1)\n        query_pos_embed = query_pos_embed.repeat(1, B, 1)\n\n        tgt = tgt.transpose(0, 1)\n        query_pos_embed = query_pos_embed.transpose(0, 1)\n\n        z_0 += self.pos_embed_z0\n        z_1 += self.pos_embed_z1\n        x += self.pos_embed_x\n\n        mask = generate_square_subsequent_mask(len_z, len_x, len_seq).to(tgt.device)\n\n        tgt += query_pos_embed[:, :tgt.shape[1]]\n\n        z = torch.cat((z_0, z_1), dim=1)\n\n        zx = combine_tokens(z, x, mode=self.cat_mode)\n        zxs = torch.cat((zx, tgt), dim=1)\n\n        zxs = self.pos_drop(zxs)\n\n        for j, blk in enumerate(self.blocks):\n            zxs = blk(zxs, padding_mask=mask)\n\n        for j, blk in enumerate(self.extension):\n            zxs = blk(zxs, padding_mask=mask)\n\n        lens_z_single = self.pos_embed_z0.shape[1]\n        lens_x = self.pos_embed_x.shape[1]\n\n        z_0_feat = zxs[:, :lens_z_single]\n        z_1_feat = zxs[:, lens_z_single:lens_z_single * 2]\n        x_feat = zxs[:, lens_z_single * 2:lens_z_single * 2 + lens_x]\n\n        x_out = self.norm(zxs[:, -5:-1])\n        score_feat = zxs[:, -1]  # the score token is the last position of the transformer output\n\n        possibility = torch.matmul(x_out, share_weight)\n        out = possibility + self.output_bias\n        temp = out.transpose(0, 1)\n\n        out_list.append(out.unsqueeze(0))\n
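        # Greedy decoding: softmax over the shared token vocabulary, then take the top-1\n        # bin for each of the four coordinate tokens; 'values' keeps their confidences.\n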
        out = out.softmax(-1)\n\n        for i in range(4):\n            value, extra_seq = out[:, i, :].topk(dim=-1, k=1)\n            if i == 0:\n                seqs_output = extra_seq\n                values = value\n            else:\n                seqs_output = torch.cat([seqs_output, extra_seq], dim=-1)\n                values = torch.cat([values, value], dim=-1)\n\n        output = {'seqs': seqs_output, 'class': values, 'feat': temp, \"state\": \"val/test\",\n                  \"x_feat\": output_x_feat.detach(), \"score_feat\": score_feat}\n\n        return output, None, None, None\n\n    def forward(self, z_0, z_1, x, identity, seqs_input, **kwargs):\n        \"\"\"\n        Joint feature extraction, relation modeling, and box-sequence decoding for the ViT backbone.\n        Args:\n            z_0, z_1 (torch.Tensor): template images, [B, C, H_z, W_z]\n            x (torch.Tensor): search region image, [B, C, H_x, W_x]\n            seqs_input (torch.Tensor or None): coordinate sequence for teacher-forced\n                training; when None, the box is decoded greedily for inference\n\n        Returns:\n            output (dict): sequence logits and score features (train), or the decoded\n                box bins and their confidences (val/test)\n            z_0_feat, z_1_feat, x_feat (torch.Tensor or None): template and search\n                region features (None at inference time)\n        \"\"\"\n        if seqs_input is None:\n            output = self.forward_track(z_0, z_1, x, identity)\n        else:\n            output = self.forward_features(z_0, z_1, x, identity, seqs_input)\n\n        return output\n"
  },
  {
    "path": "lib/models/artrackv2/utils.py",
    "content": "import math\n\nimport torch\nimport torch.nn.functional as F\n\n\ndef combine_tokens(template_tokens, search_tokens, mode='direct', return_res=False):\n    # [B, HW, C]\n    len_t = template_tokens.shape[1]\n    len_s = search_tokens.shape[1]\n\n    if mode == 'direct':\n        merged_feature = torch.cat((template_tokens, search_tokens), dim=1)\n    elif mode == 'template_central':\n        central_pivot = len_s // 2\n        first_half = search_tokens[:, :central_pivot, :]\n        second_half = search_tokens[:, central_pivot:, :]\n        merged_feature = torch.cat((first_half, template_tokens, second_half), dim=1)\n    elif mode == 'partition':\n        feat_size_s = int(math.sqrt(len_s))\n        feat_size_t = int(math.sqrt(len_t))\n        window_size = math.ceil(feat_size_t / 2.)\n        # pad feature maps to multiples of window size\n        B, _, C = template_tokens.shape\n        H = W = feat_size_t\n        template_tokens = template_tokens.view(B, H, W, C)\n        pad_l = pad_b = pad_r = 0\n        # pad_r = (window_size - W % window_size) % window_size\n        pad_t = (window_size - H % window_size) % window_size\n        template_tokens = F.pad(template_tokens, (0, 0, pad_l, pad_r, pad_t, pad_b))\n        _, Hp, Wp, _ = template_tokens.shape\n        template_tokens = template_tokens.view(B, Hp // window_size, window_size, W, C)\n        template_tokens = torch.cat([template_tokens[:, 0, ...], template_tokens[:, 1, ...]], dim=2)\n        _, Hc, Wc, _ = template_tokens.shape\n        template_tokens = template_tokens.view(B, -1, C)\n        merged_feature = torch.cat([template_tokens, search_tokens], dim=1)\n\n        # calculate new h and w, which may be useful for SwinT or others\n        merged_h, merged_w = feat_size_s + Hc, feat_size_s\n        if return_res:\n            return merged_feature, merged_h, merged_w\n\n    else:\n        raise NotImplementedError\n\n    return merged_feature\n\n\ndef recover_tokens(merged_tokens, len_template_token, len_search_token, mode='direct'):\n    if mode == 'direct':\n        recovered_tokens = merged_tokens\n    elif mode == 'template_central':\n        central_pivot = len_search_token // 2\n        len_remain = len_search_token - central_pivot\n        len_half_and_t = central_pivot + len_template_token\n\n        first_half = merged_tokens[:, :central_pivot, :]\n        second_half = merged_tokens[:, -len_remain:, :]\n        template_tokens = merged_tokens[:, central_pivot:len_half_and_t, :]\n\n        recovered_tokens = torch.cat((template_tokens, first_half, second_half), dim=1)\n    elif mode == 'partition':\n        recovered_tokens = merged_tokens\n    else:\n        raise NotImplementedError\n\n    return recovered_tokens\n\n\ndef window_partition(x, window_size: int):\n    \"\"\"\n    Args:\n        x: (B, H, W, C)\n        window_size (int): window size\n\n    Returns:\n        windows: (num_windows*B, window_size, window_size, C)\n    \"\"\"\n    B, H, W, C = x.shape\n    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)\n    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n    return windows\n\n\ndef window_reverse(windows, window_size: int, H: int, W: int):\n    \"\"\"\n    Args:\n        windows: (num_windows*B, window_size, window_size, C)\n        window_size (int): Window size\n        H (int): Height of image\n        W (int): Width of image\n\n    Returns:\n        x: (B, H, W, C)\n    \"\"\"\n    B = int(windows.shape[0] / 
(H * W / window_size / window_size))\n    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)\n    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n    return x\n"
  },
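  {
    "path": "examples/demo_combine_tokens.py",
    "content": "\"\"\"Illustrative usage sketch (not part of the original repo): exercises the\ntoken helpers from lib/models/artrackv2/utils.py on random tensors. Shapes\nfollow the [B, HW, C] convention used by combine_tokens; the sizes below are\narbitrary assumptions chosen for the demo.\n\"\"\"\nimport torch\n\nfrom lib.models.artrackv2.utils import (combine_tokens, recover_tokens,\n                                        window_partition, window_reverse)\n\nB, C = 2, 768\ntemplate = torch.randn(B, 64, C)   # 8x8 grid of template tokens\nsearch = torch.randn(B, 256, C)    # 16x16 grid of search tokens\n\n# 'direct' mode simply concatenates template tokens before search tokens.\nmerged = combine_tokens(template, search, mode='direct')\nassert merged.shape == (B, 64 + 256, C)\n\n# 'template_central' splices the template into the middle of the search\n# tokens; recover_tokens restores the [template, search] ordering.\ncentral = combine_tokens(template, search, mode='template_central')\nrecovered = recover_tokens(central, 64, 256, mode='template_central')\nassert torch.equal(recovered, merged)\n\n# window_partition and window_reverse are exact inverses on a [B, H, W, C]\n# feature map whose sides are divisible by the window size.\nfmap = torch.randn(B, 16, 16, C)\nwindows = window_partition(fmap, window_size=8)  # (num_windows*B, 8, 8, C)\nassert torch.equal(window_reverse(windows, window_size=8, H=16, W=16), fmap)\n"
  },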
  {
    "path": "lib/models/artrackv2/vit.py",
    "content": "\"\"\" Vision Transformer (ViT) in PyTorch\nA PyTorch implement of Vision Transformers as described in:\n'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'\n    - https://arxiv.org/abs/2010.11929\n`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`\n    - https://arxiv.org/abs/2106.10270\nThe official jax code is released and available at https://github.com/google-research/vision_transformer\nDeiT model defs and weights from https://github.com/facebookresearch/deit,\npaper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877\nAcknowledgments:\n* The paper authors for releasing code and weights, thanks!\n* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out\nfor some einops/einsum fun\n* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT\n* Bert reference code checks against Huggingface Transformers and Tensorflow Bert\nHacked together by / Copyright 2021 Ross Wightman\n\nModified by Botao Ye\n\"\"\"\nimport math\nimport logging\nfrom functools import partial\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD\nfrom timm.models.helpers import build_model_with_cfg, named_apply, adapt_input_conv\nfrom timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_\nfrom timm.models.registry import register_model\n\nfrom lib.models.layers.patch_embed import PatchEmbed\nfrom lib.models.artrackv2.base_backbone import BaseBackbone\n\n\nclass Attention(nn.Module):\n    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):\n        super().__init__()\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = head_dim ** -0.5\n\n        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n        self.attn_drop = nn.Dropout(attn_drop)\n        self.proj = nn.Linear(dim, dim)\n        self.proj_drop = nn.Dropout(proj_drop)\n\n    def forward(self, x, return_attention=False, padding_mask=None):\n        B, N, C = x.shape\n        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)\n\n        attn = (q @ k.transpose(-2, -1)) * self.scale\n\n        if padding_mask != None:\n            attn = attn.masked_fill(padding_mask, float(\"-inf\"))\n\n        attn = attn.softmax(dim=-1)\n        attn = self.attn_drop(attn)\n\n        x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n        x = self.proj(x)\n        x = self.proj_drop(x)\n\n        if return_attention:\n            return x, attn\n        return x\n\n\nclass Block(nn.Module):\n\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\n                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n        super().__init__()\n        self.norm1 = norm_layer(dim)\n        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\n        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n        self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n        self.norm2 = norm_layer(dim)\n        mlp_hidden_dim = int(dim * mlp_ratio)\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n    def forward(self, x, return_attention=False, padding_mask=None):\n        if return_attention:\n            feat, attn = self.attn(self.norm1(x), True, padding_mask)\n            x = x + self.drop_path(feat)\n            x = x + self.drop_path(self.mlp(self.norm2(x)))\n            return x, attn\n        else:\n            x = x + self.drop_path(self.attn(self.norm1(x), padding_mask=padding_mask))\n            x = x + self.drop_path(self.mlp(self.norm2(x)))\n            return x\n\n\nclass VisionTransformer(BaseBackbone):\n    \"\"\" Vision Transformer\n    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\n        - https://arxiv.org/abs/2010.11929\n    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`\n        - https://arxiv.org/abs/2012.12877\n    \"\"\"\n\n    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\n                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,\n                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,\n                 act_layer=None, weight_init='', bins=400, range_time=2, extension=3):\n        \"\"\"\n        Args:\n            img_size (int, tuple): input image size\n            patch_size (int, tuple): patch size\n            in_chans (int): number of input channels\n            num_classes (int): number of classes for classification head\n            embed_dim (int): embedding dimension\n            depth (int): depth of transformer\n            num_heads (int): number of attention heads\n            mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n            qkv_bias (bool): enable bias for qkv if True\n            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n            distilled (bool): model includes a distillation token and head as in DeiT models\n            drop_rate (float): dropout rate\n            attn_drop_rate (float): attention dropout rate\n            drop_path_rate (float): stochastic depth rate\n            embed_layer (nn.Module): patch embedding layer\n            norm_layer: (nn.Module): normalization layer\n            weight_init: (str): weight init scheme\n        \"\"\"\n        super().__init__()\n        self.num_classes = num_classes\n        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models\n        self.num_tokens = 2 if distilled else 1\n        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n        act_layer = act_layer or nn.GELU\n\n        self.bins = bins\n        in_channel = embed_dim\n        self.range = range_time\n        self.word_embeddings = nn.Embedding(self.bins * self.range + 6, in_channel, padding_idx=self.bins * self.range + 4, max_norm=None, norm_type=2.0)\n\n        nn.init.kaiming_normal_(self.word_embeddings.weight.data)\n        self.position_embeddings = nn.Embedding(\n            5, in_channel)\n        self.output_bias = torch.nn.Parameter(torch.zeros(self.bins * self.range + 6))\n\n        self.patch_embed = embed_layer(\n            img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)\n        
num_patches = self.patch_embed.num_patches\n\n        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\n        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\n        self.pos_drop = nn.Dropout(p=drop_rate)\n\n        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule\n        self.blocks = nn.Sequential(*[\n            Block(\n                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\n                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)\n            for i in range(depth)])\n\n        self.extension = nn.Sequential(*[\n            Block(\n                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\n                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)\n            for i in range(extension)])\n        self.norm = norm_layer(embed_dim)\n        self.apply(self.ext_init_weights)\n        self.init_weights(weight_init)\n\n    def ext_init_weights(self, m):\n        if isinstance(m, nn.Linear):\n            # we use xavier_uniform following official JAX ViT:\n            torch.nn.init.xavier_uniform_(m.weight)\n            if isinstance(m, nn.Linear) and m.bias is not None:\n                nn.init.constant_(m.bias, 0)\n        elif isinstance(m, nn.LayerNorm):\n            nn.init.constant_(m.bias, 0)\n            nn.init.constant_(m.weight, 1.0)\n\n    def init_weights(self, mode=''):\n        assert mode in ('jax', 'jax_nlhb', 'nlhb', '')\n        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.\n        trunc_normal_(self.pos_embed, std=.02)\n        if self.dist_token is not None:\n            trunc_normal_(self.dist_token, std=.02)\n        if mode.startswith('jax'):\n            # leave cls token as zeros to match jax impl\n            named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)\n        else:\n            trunc_normal_(self.cls_token, std=.02)\n            self.apply(_init_vit_weights)\n\n    def _init_weights(self, m):\n        # this fn left here for compat with downstream users\n        _init_vit_weights(m)\n\n    @torch.jit.ignore()\n    def load_pretrained(self, checkpoint_path, prefix=''):\n        _load_weights(self, checkpoint_path, prefix)\n\n    @torch.jit.ignore\n    def no_weight_decay(self):\n        return {'pos_embed', 'cls_token', 'dist_token'}\n\n    def get_classifier(self):\n        if self.dist_token is None:\n            return self.head\n        else:\n            return self.head, self.head_dist\n\n    def reset_classifier(self, num_classes, global_pool=''):\n        self.num_classes = num_classes\n        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n        if self.num_tokens == 2:\n            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\n\n\ndef _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):\n    \"\"\" ViT weight initialization\n    * When called without n, head_bias, jax_impl args it will behave exactly the same\n      as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).\n    * When called w/ valid n (module name) and jax_impl=True, 
will (hopefully) match JAX impl\n    \"\"\"\n    if isinstance(module, nn.Linear):\n        if name.startswith('head'):\n            nn.init.zeros_(module.weight)\n            nn.init.constant_(module.bias, head_bias)\n        elif name.startswith('pre_logits'):\n            lecun_normal_(module.weight)\n            nn.init.zeros_(module.bias)\n        else:\n            if jax_impl:\n                nn.init.xavier_uniform_(module.weight)\n                if module.bias is not None:\n                    if 'mlp' in name:\n                        nn.init.normal_(module.bias, std=1e-6)\n                    else:\n                        nn.init.zeros_(module.bias)\n            else:\n                trunc_normal_(module.weight, std=.02)\n                if module.bias is not None:\n                    nn.init.zeros_(module.bias)\n    elif jax_impl and isinstance(module, nn.Conv2d):\n        # NOTE conv was left to pytorch default in my original init\n        lecun_normal_(module.weight)\n        if module.bias is not None:\n            nn.init.zeros_(module.bias)\n    elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):\n        nn.init.zeros_(module.bias)\n        nn.init.ones_(module.weight)\n\n\n@torch.no_grad()\ndef _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):\n    \"\"\" Load weights from .npz checkpoints for official Google Brain Flax implementation\n    \"\"\"\n    import numpy as np\n\n    def _n2p(w, t=True):\n        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:\n            w = w.flatten()\n        if t:\n            if w.ndim == 4:\n                w = w.transpose([3, 2, 0, 1])\n            elif w.ndim == 3:\n                w = w.transpose([2, 0, 1])\n            elif w.ndim == 2:\n                w = w.transpose([1, 0])\n        return torch.from_numpy(w)\n\n    w = np.load(checkpoint_path)\n    if not prefix and 'opt/target/embedding/kernel' in w:\n        prefix = 'opt/target/'\n\n    if hasattr(model.patch_embed, 'backbone'):\n        # hybrid\n        backbone = model.patch_embed.backbone\n        stem_only = not hasattr(backbone, 'stem')\n        stem = backbone if stem_only else backbone.stem\n        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))\n        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))\n        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))\n        if not stem_only:\n            for i, stage in enumerate(backbone.stages):\n                for j, block in enumerate(stage.blocks):\n                    bp = f'{prefix}block{i + 1}/unit{j + 1}/'\n                    for r in range(3):\n                        getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))\n                        getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))\n                        getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))\n                    if block.downsample is not None:\n                        block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))\n                        block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))\n                        block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))\n        embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])\n    else:\n        embed_conv_w = adapt_input_conv(\n            model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))\n    
model.patch_embed.proj.weight.copy_(embed_conv_w)\n    model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))\n    model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))\n    pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)\n    if pos_embed_w.shape != model.pos_embed.shape:\n        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights\n            pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)\n    model.pos_embed.copy_(pos_embed_w)\n    model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))\n    model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))\n    if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:\n        model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))\n        model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))\n    if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:\n        model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))\n        model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))\n    for i, block in enumerate(model.blocks.children()):\n        block_prefix = f'{prefix}Transformer/encoderblock_{i}/'\n        mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'\n        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))\n        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))\n        block.attn.qkv.weight.copy_(torch.cat([\n            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))\n        block.attn.qkv.bias.copy_(torch.cat([\n            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))\n        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))\n        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))\n        for r in range(2):\n            getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))\n            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))\n        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))\n        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))\n\n\ndef resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):\n    # Rescale the grid of position embeddings when loading from state_dict. 
Adapted from\n    # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224\n    print('Resized position embedding: %s to %s' % (posemb.shape, posemb_new.shape))\n    ntok_new = posemb_new.shape[1]\n    if num_tokens:\n        posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]\n        ntok_new -= num_tokens\n    else:\n        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]\n    gs_old = int(math.sqrt(len(posemb_grid)))\n    if not len(gs_new):  # backwards compatibility\n        gs_new = [int(math.sqrt(ntok_new))] * 2\n    assert len(gs_new) >= 2\n    print('Position embedding grid-size from %s to %s' % ([gs_old, gs_old], gs_new))\n    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)\n    posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bilinear')\n    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)\n    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)\n    return posemb\n\n\ndef checkpoint_filter_fn(state_dict, model):\n    \"\"\" convert patch embedding weight from manual patchify + linear proj to conv\"\"\"\n    out_dict = {}\n    if 'model' in state_dict:\n        # For deit models\n        state_dict = state_dict['model']\n    for k, v in state_dict.items():\n        if 'patch_embed.proj.weight' in k and len(v.shape) < 4:\n            # For old models that I trained prior to conv based patchification\n            O, I, H, W = model.patch_embed.proj.weight.shape\n            v = v.reshape(O, -1, H, W)\n        elif k == 'pos_embed' and v.shape != model.pos_embed.shape:\n            # To resize pos embedding when using model at different size from pretrained weights\n            v = resize_pos_embed(\n                v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)\n        out_dict[k] = v\n    return out_dict\n\n\ndef _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):\n    if kwargs.get('features_only', None):\n        raise RuntimeError('features_only not implemented for Vision Transformer models.')\n\n    model = VisionTransformer(**kwargs)\n    if pretrained:\n        if 'npz' in pretrained:\n            model.load_pretrained(pretrained, prefix='')\n        else:\n            checkpoint = torch.load(pretrained, map_location=\"cpu\")\n            missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"model\"], strict=False)\n            print('Load pretrained model from: ' + pretrained)\n\n    return model\n\n\ndef vit_base_patch16_224(pretrained=False, bins=400, range=2, extension=3, **kwargs):\n    \"\"\"\n    ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).\n    \"\"\"\n    model_kwargs = dict(\n        patch_size=16, embed_dim=768, depth=12, num_heads=12, bins=bins, range_time=range, extension=extension, **kwargs)\n    model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs)\n    return model\n\n\ndef vit_large_patch16_224(pretrained=False, bins=400, range=2, extension=3, **kwargs):\n    model_kwargs = dict(\n        patch_size=16, embed_dim=1024, depth=24, num_heads=16, bins=bins, range_time=range, extension=extension, **kwargs)\n    model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs)\n    return model\n"
  },
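  {
    "path": "examples/demo_attention_padding_mask.py",
    "content": "\"\"\"Illustrative sketch (not part of the original repo): shows how the\nAttention module in lib/models/artrackv2/vit.py consumes a boolean padding\nmask. True entries are filled with -inf before the softmax, so those key\npositions receive zero attention weight. All sizes are arbitrary assumptions.\n\"\"\"\nimport torch\n\nfrom lib.models.artrackv2.vit import Attention\n\nB, N, dim, heads = 1, 6, 64, 4\nattn = Attention(dim, num_heads=heads, qkv_bias=True)\nx = torch.randn(B, N, dim)\n\n# A (N, N) mask broadcasts over the (B, heads, N, N) attention map;\n# True == blocked. Here no token may attend to the last two positions.\nmask = torch.zeros(N, N, dtype=torch.bool)\nmask[:, -2:] = True\n\nout, weights = attn(x, return_attention=True, padding_mask=mask)\nassert out.shape == (B, N, dim)\n# The attention weights on the masked key positions are exactly zero.\nassert weights[..., -2:].abs().max() == 0\n"
  },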
  {
    "path": "lib/models/artrackv2_seq/__init__.py",
    "content": "from .artrackv2_seq import build_artrackv2_seq\n"
  },
  {
    "path": "lib/models/artrackv2_seq/artrackv2_seq.py",
    "content": "\"\"\"\nBasic OSTrack model.\n\"\"\"\nfrom copy import deepcopy\nimport math\nimport os\nfrom typing import List\n\nimport torch\nfrom torch import nn\nfrom torch.nn.modules.transformer import _get_clones\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\n\nfrom lib.models.artrackv2_seq.vit import vit_base_patch16_224, vit_large_patch16_224\nfrom lib.utils.box_ops import box_xyxy_to_cxcywh\nfrom lib.models.layers.mask_decoder import build_maskdecoder\n\nfrom lib.models.layers.head import build_decoder, MLP, DropPathAllocator\n\nimport time\n\n\nclass ARTrackV2Seq(nn.Module):\n    \"\"\" This is the base class for OSTrack \"\"\"\n\n    def __init__(self, transformer,\n                 cross_2_decoder,\n                 score_mlp,\n                 hidden_dim,\n                 ):\n        \"\"\" Initializes the model.\n        Parameters:\n            transformer: torch module of the transformer architecture.\n            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\n        \"\"\"\n        super().__init__()\n        self.backbone = transformer\n        self.score_mlp = score_mlp\n\n        self.identity = torch.nn.Parameter(torch.zeros(1, 3, hidden_dim))\n        self.identity = trunc_normal_(self.identity, std=.02)\n\n        self.cross_2_decoder = cross_2_decoder\n\n    def forward(self, template: torch.Tensor,\n                dz_feat: torch.Tensor,\n                search: torch.Tensor,\n                ce_template_mask=None,\n                ce_keep_rate=None,\n                return_last_attn=False,\n                seq_input=None,\n                head_type=None,\n                stage=None,\n                search_feature=None,\n                target_in_search_img=None,\n                gt_bboxes=None,\n                ):\n        template_0 = template[:, 0]\n        out, z_0_feat, z_1_feat, x_feat, score_feat = self.backbone(z_0=template_0, z_1_feat=dz_feat, x=search, identity=self.identity, seqs_input=seq_input,\n                                    ce_template_mask=ce_template_mask,\n                                    ce_keep_rate=ce_keep_rate,\n                                    return_last_attn=return_last_attn,)\n\n        seq_feat = out['seq_feat'].permute(1, 0 ,2)\n\n        score = self.score_mlp(score_feat)\n        out['score'] = score\n\n        loss = torch.tensor(0.0, dtype=torch.float32).to(search.device)\n        if target_in_search_img != None:\n            target_in_search_gt = self.backbone.patch_embed(target_in_search_img)\n            z_1_feat = z_1_feat.reshape(z_1_feat.shape[0], int(z_1_feat.shape[1] ** 0.5), int(z_1_feat.shape[1] ** 0.5),\n                                        z_1_feat.shape[2]).permute(0, 3, 1, 2)\n            target_in_search_gt = self.cross_2_decoder.unpatchify(target_in_search_gt)\n\n            update_img, loss_temp = self.cross_2_decoder(z_1_feat, target_in_search_gt)\n            update_feat = self.cross_2_decoder.patchify(update_img)\n            out['dz_feat'] = update_feat\n            loss += loss_temp\n\n            out['renew_loss'] = loss\n\n        else:\n           z_1_feat = z_1_feat.reshape(z_1_feat.shape[0], int(z_1_feat.shape[1] ** 0.5), int(z_1_feat.shape[1] ** 0.5),\n                                        z_1_feat.shape[2]).permute(0, 3, 1, 2)\n           update_feat = self.cross_2_decoder(z_1_feat, eval=True)\n           update_feat = self.cross_2_decoder.patchify(update_feat)\n           out['dz_feat'] = update_feat\n\n        return 
out\n\n\nclass MlpScoreDecoder(nn.Module):\n    def __init__(self, in_dim, hidden_dim, num_layers, bn=False):\n        super().__init__()\n        self.num_layers = num_layers\n        h = [hidden_dim] * (num_layers - 1)\n        out_dim = 1  # score\n        if bn:\n            self.layers = nn.Sequential(*[nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k), nn.ReLU())\n                                          if i < num_layers - 1\n                                          else nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k))\n                                          for i, (n, k) in enumerate(zip([in_dim] + h, h + [out_dim]))])\n        else:\n            self.layers = nn.Sequential(*[nn.Sequential(nn.Linear(n, k), nn.ReLU())\n                                          if i < num_layers - 1\n                                          else nn.Linear(n, k)\n                                          for i, (n, k) in enumerate(zip([in_dim] + h, h + [out_dim]))])\n\n    def forward(self, reg_tokens):\n        \"\"\"\n        reg tokens shape: (b, 4, embed_dim)\n        \"\"\"\n        x = self.layers(reg_tokens)  # (b, 4, 1)\n        x = x.mean(dim=1)   # (b, 1)\n        return x\n\n\ndef build_score_decoder(cfg, hidden_dim):\n    return MlpScoreDecoder(\n        in_dim=hidden_dim,\n        hidden_dim=hidden_dim,\n        num_layers=2,\n        bn=False\n    )\n\n\ndef build_artrackv2_seq(cfg, training=True):\n    current_dir = os.path.dirname(os.path.abspath(__file__))  # directory of this file\n    pretrained_path = os.path.join(current_dir, '../../../pretrained_models')\n    if cfg.MODEL.PRETRAIN_FILE and ('OSTrack' not in cfg.MODEL.PRETRAIN_FILE) and training:\n        pretrained = os.path.join(pretrained_path, cfg.MODEL.PRETRAIN_FILE)\n    else:\n        pretrained = ''\n\n    if cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224':\n        backbone = vit_base_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE, bins=cfg.MODEL.BINS, range=cfg.MODEL.RANGE, extension=cfg.MODEL.EXTENSION, prenum=cfg.MODEL.PRENUM)\n        hidden_dim = backbone.embed_dim\n        patch_start_index = 1\n    elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224':\n        print(\"Using vit_large_patch16_224 backbone\")\n        backbone = vit_large_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE, bins=cfg.MODEL.BINS, range=cfg.MODEL.RANGE, extension=cfg.MODEL.EXTENSION, prenum=cfg.MODEL.PRENUM)\n        hidden_dim = backbone.embed_dim\n        patch_start_index = 1\n    else:\n        raise NotImplementedError\n\n    backbone.finetune_track(cfg=cfg, patch_start_index=patch_start_index)\n\n    cross_2_decoder = build_maskdecoder(cfg, hidden_dim)\n\n    drop_path = cfg.MODEL.DROP_PATH\n    drop_path_allocator = DropPathAllocator(drop_path)\n    num_heads = cfg.MODEL.NUM_HEADS\n    mlp_ratio = cfg.MODEL.MLP_RATIO\n    qkv_bias = cfg.MODEL.QKV_BIAS\n    drop_rate = cfg.MODEL.DROP_RATE\n    attn_drop = cfg.MODEL.ATTN_DROP\n    score_mlp = build_score_decoder(cfg, hidden_dim)\n\n    model = ARTrackV2Seq(\n        backbone,\n        cross_2_decoder,\n        score_mlp,\n        hidden_dim,\n    )\n    load_from = cfg.MODEL.PRETRAIN_PTH\n    checkpoint = torch.load(load_from, map_location=\"cpu\")\n    missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"net\"], strict=False)\n    print('Load pretrained model from: ' + load_from)\n    if 'sequence' in cfg.MODEL.PRETRAIN_FILE and training:\n        print(\"Overriding with sequence-stage pretrained weights\")\n        checkpoint = torch.load(cfg.MODEL.PRETRAIN_FILE, 
map_location=\"cpu\")\n        missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"net\"], strict=False)\n        print('Load pretrained model from: ' + cfg.MODEL.PRETRAIN_FILE)\n\n    return model\n"
  },
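  {
    "path": "examples/demo_score_decoder.py",
    "content": "\"\"\"Illustrative sketch (not part of the original repo): exercises\nMlpScoreDecoder from lib/models/artrackv2_seq/artrackv2_seq.py, which maps\nthe (b, 4, embed_dim) register tokens to one confidence score per sample by\naveraging the per-token scores. Sizes are arbitrary assumptions.\n\"\"\"\nimport torch\n\nfrom lib.models.artrackv2_seq.artrackv2_seq import MlpScoreDecoder\n\nembed_dim = 768\n# Matches build_score_decoder: a two-layer MLP without batch norm.\ndecoder = MlpScoreDecoder(in_dim=embed_dim, hidden_dim=embed_dim,\n                          num_layers=2, bn=False)\n\nreg_tokens = torch.randn(8, 4, embed_dim)  # (b, 4, embed_dim)\nscore = decoder(reg_tokens)\nassert score.shape == (8, 1)  # one scalar confidence per sample\n"
  },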
  {
    "path": "lib/models/artrackv2_seq/base_backbone.py",
    "content": "from functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom timm.models.vision_transformer import resize_pos_embed\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\n\nfrom lib.models.layers.patch_embed import PatchEmbed\nfrom lib.models.artrackv2_seq.utils import combine_tokens, recover_tokens\n\n\nimport time\n\ndef generate_square_subsequent_mask(sz, sx, ss):\n    r\"\"\"Generate a square mask for the sequence. The masked positions are filled with float('-inf').\n        Unmasked positions are filled with float(0.0).\n    \"\"\"\n    # 0 means mask, 1 means visible\n    sum = sz + sx + ss\n    mask = (torch.triu(torch.ones(sum, sum)) == 1).transpose(0, 1)\n    mask[:, :] = 0\n    mask[:int(sz/2), :int(sz/2)] = 1 #template self\n    mask[int(sz/2):sz, int(sz/2):sz] = 1 # dt self\n    mask[int(sz/2):sz, sz:sz+sx] = 1 # dt search\n    mask[int(sz / 2):sz, -1] = 1  # dt search\n    mask[sz:sz+sx, :sz+sx] = 1 # sr dt-t-sr\n    mask[sz+sx:, :] = 1 # co dt-t-sr-co\n    return ~mask\n\nclass BaseBackbone(nn.Module):\n    def __init__(self):\n        super().__init__()\n\n        # for original ViT\n        self.pos_embed = None\n        self.img_size = [224, 224]\n        self.patch_size = 16\n        self.embed_dim = 384\n\n        self.cat_mode = 'direct'\n\n        self.pos_embed_z = None\n        self.pos_embed_x = None\n\n        self.template_segment_pos_embed = None\n        self.search_segment_pos_embed = None\n\n        self.return_inter = False\n        self.return_stage = [2, 5, 8, 11]\n\n        self.add_cls_token = False\n        self.add_sep_seg = False\n\n    def finetune_track(self, cfg, patch_start_index=1):\n\n        search_size = to_2tuple(cfg.DATA.SEARCH.SIZE)\n        template_size = to_2tuple(cfg.DATA.TEMPLATE.SIZE)\n        new_patch_size = cfg.MODEL.BACKBONE.STRIDE\n\n        self.cat_mode = cfg.MODEL.BACKBONE.CAT_MODE\n        self.return_inter = cfg.MODEL.RETURN_INTER\n        self.add_sep_seg = cfg.MODEL.BACKBONE.SEP_SEG\n\n        # resize patch embedding\n        if new_patch_size != self.patch_size:\n            print('Inconsistent Patch Size With The Pretrained Weights, Interpolate The Weight!')\n            old_patch_embed = {}\n            for name, param in self.patch_embed.named_parameters():\n                if 'weight' in name:\n                    param = nn.functional.interpolate(param, size=(new_patch_size, new_patch_size),\n                                                      mode='bicubic', align_corners=False)\n                    param = nn.Parameter(param)\n                old_patch_embed[name] = param\n            self.patch_embed = PatchEmbed(img_size=self.img_size, patch_size=new_patch_size, in_chans=3,\n                                          embed_dim=self.embed_dim)\n            self.patch_embed.proj.bias = old_patch_embed['proj.bias']\n            self.patch_embed.proj.weight = old_patch_embed['proj.weight']\n\n        # for patch embedding\n        patch_pos_embed = self.pos_embed[:, patch_start_index:, :]\n        patch_pos_embed = patch_pos_embed.transpose(1, 2)\n        B, E, Q = patch_pos_embed.shape\n        P_H, P_W = self.img_size[0] // self.patch_size, self.img_size[1] // self.patch_size\n        patch_pos_embed = patch_pos_embed.view(B, E, P_H, P_W)\n\n        # for search region\n        H, W = search_size\n        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size\n        search_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, 
size=(new_P_H, new_P_W), mode='bicubic',\n                                                           align_corners=False)\n        search_patch_pos_embed = search_patch_pos_embed.flatten(2).transpose(1, 2)\n\n        # for template region\n        H, W = template_size\n        new_P_H, new_P_W = H // new_patch_size, W // new_patch_size\n        template_patch_pos_embed = nn.functional.interpolate(patch_pos_embed, size=(new_P_H, new_P_W), mode='bicubic',\n                                                             align_corners=False)\n        template_patch_pos_embed = template_patch_pos_embed.flatten(2).transpose(1, 2)\n\n        self.pos_embed_z = nn.Parameter(template_patch_pos_embed)\n        self.pos_embed_z0 = nn.Parameter(template_patch_pos_embed)\n        self.pos_embed_z1 = nn.Parameter(template_patch_pos_embed)\n        self.pos_embed_x = nn.Parameter(search_patch_pos_embed)\n\n        # for cls token (keep it but not used)\n        if self.add_cls_token and patch_start_index > 0:\n            cls_pos_embed = self.pos_embed[:, 0:1, :]\n            self.cls_pos_embed = nn.Parameter(cls_pos_embed)\n\n        # separate token and segment token\n        if self.add_sep_seg:\n            self.template_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))\n            self.template_segment_pos_embed = trunc_normal_(self.template_segment_pos_embed, std=.02)\n            self.search_segment_pos_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))\n            self.search_segment_pos_embed = trunc_normal_(self.search_segment_pos_embed, std=.02)\n\n        # self.cls_token = None\n        # self.pos_embed = None\n\n        if self.return_inter:\n            for i_layer in self.fpn_stage:\n                if i_layer != 11:\n                    norm_layer = partial(nn.LayerNorm, eps=1e-6)\n                    layer = norm_layer(self.embed_dim)\n                    layer_name = f'norm{i_layer}'\n                    self.add_module(layer_name, layer)\n\n    def forward_features(self, z_0, z_1_feat, x, identity, seqs_input):\n        share_weight = self.word_embeddings.weight.T\n        out_list = []\n\n        x0 = self.bins * self.range\n        y0 = self.bins * self.range + 1\n        x1 = self.bins * self.range + 2\n        y1 = self.bins * self.range + 3\n        score = self.bins * self.range + 5\n\n        B, H, W = x.shape[0], x.shape[2], x.shape[3]\n\n        command = torch.cat([torch.ones((B, 1)).to(x) * x0, torch.ones((B, 1)).to(x) * y0,\n                       torch.ones((B, 1)).to(x) * x1,\n                       torch.ones((B, 1)).to(x) * y1,\n                       torch.ones((B, 1)).to(x) * score], dim=1)\n        trajectory = seqs_input\n        command = command.to(trajectory)\n        seqs_input_ = torch.cat([trajectory, command], dim=1)\n        \n        seqs_input_ = seqs_input_.to(torch.int64).to(x.device)\n        output_x_feat = x.clone()\n        tgt = self.word_embeddings(seqs_input_).permute(1, 0, 2)\n        \n        x = self.patch_embed(x)\n        z_0 = self.patch_embed(z_0)\n        z_1 = z_1_feat\n\n        len_x = x.shape[1]\n        len_z = z_0.shape[1] + z_1.shape[1]\n        len_seq = seqs_input_.shape[1]\n\n        z_0 += identity[:, 0, :].repeat(B, self.pos_embed_z.shape[1], 1)\n        z_1 += identity[:, 1, :].repeat(B, self.pos_embed_z.shape[1], 1)\n\n        x += identity[:, 2, :].repeat(B, self.pos_embed_x.shape[1], 1)\n\n        query_command_embed_ = self.position_embeddings.weight.unsqueeze(1)\n        prev_embed_ = 
self.prev_position_embeddings.weight.unsqueeze(1)\n        query_seq_embed = torch.cat([prev_embed_, query_command_embed_], dim=0)\n\n        query_seq_embed = query_seq_embed.repeat(1, B, 1)\n\n        tgt = tgt.transpose(0, 1)\n        query_seq_embed = query_seq_embed.transpose(0, 1)\n\n        z_0 += self.pos_embed_z0\n        z_1 += self.pos_embed_z1\n        x += self.pos_embed_x\n\n        mask = generate_square_subsequent_mask(len_z, len_x, len_seq).to(tgt.device)\n\n        tgt += query_seq_embed[:, :tgt.shape[1]]\n\n        z = torch.cat((z_0, z_1), dim=1)\n\n        zx = combine_tokens(z, x, mode=self.cat_mode)\n        zxs = torch.cat((zx, tgt), dim=1)\n\n        zxs = self.pos_drop(zxs)\n\n        for j, blk in enumerate(self.blocks):\n            zxs = blk(zxs, padding_mask=mask)\n        for j, blk in enumerate(self.extension):\n            zxs = blk(zxs, padding_mask=mask)\n\n        lens_z_single = self.pos_embed_z.shape[1]\n\n        z_0_feat = zxs[:, :lens_z_single]\n        z_1_feat = zxs[:, lens_z_single:lens_z_single*2]\n        x_feat = zxs[:, lens_z_single*2:lens_z_single*2+len_x]\n\n        x_out = self.norm(zxs[:, -5:-1])\n        score_feat = zxs[:, -1]\n        seq_feat = x_out\n\n        possibility = torch.matmul(x_out, share_weight)\n        out = possibility + self.output_bias\n        temp = out.transpose(0, 1)\n\n        out_list.append(out.unsqueeze(0))\n        out = out.softmax(-1)\n\n        # greedily decode the four coordinate tokens one position at a time\n        for i in range(4):\n            value, extra_seq = out[:, i, :].topk(dim=-1, k=1)[0], out[:, i, :].topk(dim=-1, k=1)[1]\n            if i == 0:\n                seqs_output = extra_seq\n                values = value\n            else:\n                seqs_output = torch.cat([seqs_output, extra_seq], dim=-1)\n                values = torch.cat([values, value], dim=-1)\n\n        output = {'seqs': seqs_output, 'class': values, 'feat': temp, \"state\": \"val/test\", \"x_feat\": output_x_feat.detach(), \"seq_feat\": seq_feat}\n\n        return output, z_0_feat, z_1_feat, x_feat, score_feat\n\n    def forward(self, z_0, z_1_feat, x, identity, seqs_input, **kwargs):\n        \"\"\"\n        Joint feature extraction and relation modeling for the basic ViT backbone.\n        Args:\n            z_0 (torch.Tensor): static template image, [B, 3, H_z, W_z]\n            z_1_feat (torch.Tensor): dynamic template feature, [B, L_z, C]\n            x (torch.Tensor): search region image, [B, 3, H_x, W_x]\n            identity (torch.Tensor): learned embeddings tagging template/search tokens\n            seqs_input (torch.Tensor): previous-trajectory coordinate tokens\n\n        Returns:\n            the (output dict, z_0_feat, z_1_feat, x_feat, score_feat) tuple\n            produced by forward_features\n        \"\"\"\n        output = self.forward_features(z_0, z_1_feat, x, identity, seqs_input)\n\n        return output\n"
  },
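  {
    "path": "examples/demo_attention_layout_mask.py",
    "content": "\"\"\"Illustrative sketch (not part of the original repo): inspects the custom\nattention mask built by generate_square_subsequent_mask in\nlib/models/artrackv2_seq/base_backbone.py. In the returned boolean tensor\nTrue means blocked: the static template only sees itself, the dynamic\ntemplate additionally sees the search region and the score token, the\nsearch region sees both templates and itself, and the coordinate/score\ntokens see everything. The tiny sizes are assumptions for readability.\n\"\"\"\nimport torch\n\nfrom lib.models.artrackv2_seq.base_backbone import generate_square_subsequent_mask\n\nsz, sx, ss = 4, 3, 2  # template tokens (two halves), search tokens, sequence tokens\nmask = generate_square_subsequent_mask(sz, sx, ss)\n\nassert mask.shape == (sz + sx + ss, sz + sx + ss)\n# Coordinate/score tokens (last ss rows) may attend everywhere.\nassert not mask[sz + sx:, :].any()\n# The static template (first sz/2 rows) is blocked from the search region.\nassert mask[:sz // 2, sz:sz + sx].all()\nprint(mask.int())  # 1 == blocked, 0 == visible\n"
  },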
  {
    "path": "lib/models/artrackv2_seq/utils.py",
    "content": "import math\n\nimport torch\nimport torch.nn.functional as F\n\n\ndef combine_tokens(template_tokens, search_tokens, mode='direct', return_res=False):\n    # [B, HW, C]\n    len_t = template_tokens.shape[1]\n    len_s = search_tokens.shape[1]\n\n    if mode == 'direct':\n        merged_feature = torch.cat((template_tokens, search_tokens), dim=1)\n    elif mode == 'template_central':\n        central_pivot = len_s // 2\n        first_half = search_tokens[:, :central_pivot, :]\n        second_half = search_tokens[:, central_pivot:, :]\n        merged_feature = torch.cat((first_half, template_tokens, second_half), dim=1)\n    elif mode == 'partition':\n        feat_size_s = int(math.sqrt(len_s))\n        feat_size_t = int(math.sqrt(len_t))\n        window_size = math.ceil(feat_size_t / 2.)\n        # pad feature maps to multiples of window size\n        B, _, C = template_tokens.shape\n        H = W = feat_size_t\n        template_tokens = template_tokens.view(B, H, W, C)\n        pad_l = pad_b = pad_r = 0\n        # pad_r = (window_size - W % window_size) % window_size\n        pad_t = (window_size - H % window_size) % window_size\n        template_tokens = F.pad(template_tokens, (0, 0, pad_l, pad_r, pad_t, pad_b))\n        _, Hp, Wp, _ = template_tokens.shape\n        template_tokens = template_tokens.view(B, Hp // window_size, window_size, W, C)\n        template_tokens = torch.cat([template_tokens[:, 0, ...], template_tokens[:, 1, ...]], dim=2)\n        _, Hc, Wc, _ = template_tokens.shape\n        template_tokens = template_tokens.view(B, -1, C)\n        merged_feature = torch.cat([template_tokens, search_tokens], dim=1)\n\n        # calculate new h and w, which may be useful for SwinT or others\n        merged_h, merged_w = feat_size_s + Hc, feat_size_s\n        if return_res:\n            return merged_feature, merged_h, merged_w\n\n    else:\n        raise NotImplementedError\n\n    return merged_feature\n\n\ndef recover_tokens(merged_tokens, len_template_token, len_search_token, mode='direct'):\n    if mode == 'direct':\n        recovered_tokens = merged_tokens\n    elif mode == 'template_central':\n        central_pivot = len_search_token // 2\n        len_remain = len_search_token - central_pivot\n        len_half_and_t = central_pivot + len_template_token\n\n        first_half = merged_tokens[:, :central_pivot, :]\n        second_half = merged_tokens[:, -len_remain:, :]\n        template_tokens = merged_tokens[:, central_pivot:len_half_and_t, :]\n\n        recovered_tokens = torch.cat((template_tokens, first_half, second_half), dim=1)\n    elif mode == 'partition':\n        recovered_tokens = merged_tokens\n    else:\n        raise NotImplementedError\n\n    return recovered_tokens\n\n\ndef window_partition(x, window_size: int):\n    \"\"\"\n    Args:\n        x: (B, H, W, C)\n        window_size (int): window size\n\n    Returns:\n        windows: (num_windows*B, window_size, window_size, C)\n    \"\"\"\n    B, H, W, C = x.shape\n    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)\n    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n    return windows\n\n\ndef window_reverse(windows, window_size: int, H: int, W: int):\n    \"\"\"\n    Args:\n        windows: (num_windows*B, window_size, window_size, C)\n        window_size (int): Window size\n        H (int): Height of image\n        W (int): Width of image\n\n    Returns:\n        x: (B, H, W, C)\n    \"\"\"\n    B = int(windows.shape[0] / 
(H * W / window_size / window_size))\n    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)\n    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n    return x\n"
  },
  {
    "path": "lib/models/artrackv2_seq/vit.py",
    "content": "\"\"\" Vision Transformer (ViT) in PyTorch\nA PyTorch implement of Vision Transformers as described in:\n'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'\n    - https://arxiv.org/abs/2010.11929\n`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`\n    - https://arxiv.org/abs/2106.10270\nThe official jax code is released and available at https://github.com/google-research/vision_transformer\nDeiT model defs and weights from https://github.com/facebookresearch/deit,\npaper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877\nAcknowledgments:\n* The paper authors for releasing code and weights, thanks!\n* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out\nfor some einops/einsum fun\n* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT\n* Bert reference code checks against Huggingface Transformers and Tensorflow Bert\nHacked together by / Copyright 2021 Ross Wightman\n\nModified by Botao Ye\n\"\"\"\nimport math\nimport logging\nfrom functools import partial\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD\nfrom timm.models.helpers import build_model_with_cfg, named_apply, adapt_input_conv\nfrom timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_\nfrom timm.models.registry import register_model\n\nfrom lib.models.layers.patch_embed import PatchEmbed\nfrom lib.models.artrackv2_seq.base_backbone import BaseBackbone\n\n\nclass Attention(nn.Module):\n    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):\n        super().__init__()\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = head_dim ** -0.5\n\n        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n        self.attn_drop = nn.Dropout(attn_drop)\n        self.proj = nn.Linear(dim, dim)\n        self.proj_drop = nn.Dropout(proj_drop)\n\n    def forward(self, x, return_attention=False, padding_mask=None):\n        B, N, C = x.shape\n        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)\n\n        attn = (q @ k.transpose(-2, -1)) * self.scale\n\n        if padding_mask != None:\n            attn = attn.masked_fill(padding_mask, float(\"-inf\"))\n\n        attn = attn.softmax(dim=-1)\n        attn = self.attn_drop(attn)\n\n        x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n        x = self.proj(x)\n        x = self.proj_drop(x)\n\n        if return_attention:\n            return x, attn\n        return x\n\n\nclass Block(nn.Module):\n\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\n                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n        super().__init__()\n        self.norm1 = norm_layer(dim)\n        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\n        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n        self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n        self.norm2 = norm_layer(dim)\n        mlp_hidden_dim = int(dim * mlp_ratio)\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n    def forward(self, x, return_attention=False, padding_mask=None):\n        if return_attention:\n            feat, attn = self.attn(self.norm1(x), True, padding_mask)\n            x = x + self.drop_path(feat)\n            x = x + self.drop_path(self.mlp(self.norm2(x)))\n            return x, attn\n        else:\n        \n            x = x + self.drop_path(self.attn(self.norm1(x), padding_mask=padding_mask))\n            x = x + self.drop_path(self.mlp(self.norm2(x)))\n            return x\n\n\nclass VisionTransformer(BaseBackbone):\n    \"\"\" Vision Transformer\n    A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`\n        - https://arxiv.org/abs/2010.11929\n    Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`\n        - https://arxiv.org/abs/2012.12877\n    \"\"\"\n\n    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,\n                 num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,\n                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,\n                 act_layer=None, weight_init='', bins=400, range_time=2, prenum=7, extension=3):\n        \"\"\"\n        Args:\n            img_size (int, tuple): input image size\n            patch_size (int, tuple): patch size\n            in_chans (int): number of input channels\n            num_classes (int): number of classes for classification head\n            embed_dim (int): embedding dimension\n            depth (int): depth of transformer\n            num_heads (int): number of attention heads\n            mlp_ratio (int): ratio of mlp hidden dim to embedding dim\n            qkv_bias (bool): enable bias for qkv if True\n            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set\n            distilled (bool): model includes a distillation token and head as in DeiT models\n            drop_rate (float): dropout rate\n            attn_drop_rate (float): attention dropout rate\n            drop_path_rate (float): stochastic depth rate\n            embed_layer (nn.Module): patch embedding layer\n            norm_layer: (nn.Module): normalization layer\n            weight_init: (str): weight init scheme\n        \"\"\"\n        super().__init__()\n        self.num_classes = num_classes\n        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models\n        self.num_tokens = 2 if distilled else 1\n        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)\n        act_layer = act_layer or nn.GELU\n\n        self.bins = bins\n        in_channel = embed_dim\n        self.range = range_time\n        self.word_embeddings = nn.Embedding(self.bins * self.range + 6, in_channel, padding_idx=self.bins * self.range+4, max_norm=1, norm_type=2.0)\n\n        self.position_embeddings = nn.Embedding(\n            5, in_channel)\n        self.output_bias = torch.nn.Parameter(torch.zeros(self.bins * self.range + 6))\n        self.prev_position_embeddings = nn.Embedding(prenum * 4, in_channel)\n\n        self.patch_embed = embed_layer(\n            img_size=img_size, patch_size=patch_size, in_chans=in_chans, 
embed_dim=embed_dim)\n        num_patches = self.patch_embed.num_patches\n\n        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))\n        self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None\n        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))\n        self.pos_drop = nn.Dropout(p=drop_rate)\n\n        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule\n        self.blocks = nn.Sequential(*[\n            Block(\n                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\n                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)\n            for i in range(depth)])\n        self.extension = nn.Sequential(*[\n            Block(\n                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,\n                attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)\n            for i in range(extension)])\n        self.norm = norm_layer(embed_dim)\n\n        self.init_weights(weight_init)\n\n    def init_weights(self, mode=''):\n        assert mode in ('jax', 'jax_nlhb', 'nlhb', '')\n        head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.\n        trunc_normal_(self.pos_embed, std=.02)\n        if self.dist_token is not None:\n            trunc_normal_(self.dist_token, std=.02)\n        if mode.startswith('jax'):\n            # leave cls token as zeros to match jax impl\n            named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)\n        else:\n            trunc_normal_(self.cls_token, std=.02)\n            self.apply(_init_vit_weights)\n\n    def _init_weights(self, m):\n        # this fn left here for compat with downstream users\n        _init_vit_weights(m)\n\n    @torch.jit.ignore()\n    def load_pretrained(self, checkpoint_path, prefix=''):\n        _load_weights(self, checkpoint_path, prefix)\n\n    @torch.jit.ignore\n    def no_weight_decay(self):\n        return {'pos_embed', 'cls_token', 'dist_token'}\n\n    def get_classifier(self):\n        if self.dist_token is None:\n            return self.head\n        else:\n            return self.head, self.head_dist\n\n    def reset_classifier(self, num_classes, global_pool=''):\n        self.num_classes = num_classes\n        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()\n        if self.num_tokens == 2:\n            self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()\n\n\ndef _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):\n    \"\"\" ViT weight initialization\n    * When called without n, head_bias, jax_impl args it will behave exactly the same\n      as my original init for compatibility with prev hparam / downstream use cases (ie DeiT).\n    * When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl\n    \"\"\"\n    if isinstance(module, nn.Linear):\n        if name.startswith('head'):\n            nn.init.zeros_(module.weight)\n            nn.init.constant_(module.bias, head_bias)\n        elif name.startswith('pre_logits'):\n            lecun_normal_(module.weight)\n            nn.init.zeros_(module.bias)\n        else:\n            if jax_impl:\n                nn.init.xavier_uniform_(module.weight)\n        
        if module.bias is not None:\n                    if 'mlp' in name:\n                        nn.init.normal_(module.bias, std=1e-6)\n                    else:\n                        nn.init.zeros_(module.bias)\n            else:\n                trunc_normal_(module.weight, std=.02)\n                if module.bias is not None:\n                    nn.init.zeros_(module.bias)\n    elif jax_impl and isinstance(module, nn.Conv2d):\n        # NOTE conv was left to pytorch default in my original init\n        lecun_normal_(module.weight)\n        if module.bias is not None:\n            nn.init.zeros_(module.bias)\n    elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):\n        nn.init.zeros_(module.bias)\n        nn.init.ones_(module.weight)\n\n\n@torch.no_grad()\ndef _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):\n    \"\"\" Load weights from .npz checkpoints for official Google Brain Flax implementation\n    \"\"\"\n    import numpy as np\n\n    def _n2p(w, t=True):\n        if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:\n            w = w.flatten()\n        if t:\n            if w.ndim == 4:\n                w = w.transpose([3, 2, 0, 1])\n            elif w.ndim == 3:\n                w = w.transpose([2, 0, 1])\n            elif w.ndim == 2:\n                w = w.transpose([1, 0])\n        return torch.from_numpy(w)\n\n    w = np.load(checkpoint_path)\n    if not prefix and 'opt/target/embedding/kernel' in w:\n        prefix = 'opt/target/'\n\n    if hasattr(model.patch_embed, 'backbone'):\n        # hybrid\n        backbone = model.patch_embed.backbone\n        stem_only = not hasattr(backbone, 'stem')\n        stem = backbone if stem_only else backbone.stem\n        stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))\n        stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))\n        stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))\n        if not stem_only:\n            for i, stage in enumerate(backbone.stages):\n                for j, block in enumerate(stage.blocks):\n                    bp = f'{prefix}block{i + 1}/unit{j + 1}/'\n                    for r in range(3):\n                        getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))\n                        getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))\n                        getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))\n                    if block.downsample is not None:\n                        block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))\n                        block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))\n                        block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))\n        embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])\n    else:\n        embed_conv_w = adapt_input_conv(\n            model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))\n    model.patch_embed.proj.weight.copy_(embed_conv_w)\n    model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))\n    model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))\n    pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)\n    if pos_embed_w.shape != model.pos_embed.shape:\n        pos_embed_w = resize_pos_embed(  # resize pos embedding when different size from pretrained weights\n            
pos_embed_w, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)\n    model.pos_embed.copy_(pos_embed_w)\n    model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))\n    model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))\n    if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:\n        model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))\n        model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))\n    if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:\n        model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))\n        model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))\n    for i, block in enumerate(model.blocks.children()):\n        block_prefix = f'{prefix}Transformer/encoderblock_{i}/'\n        mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'\n        block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))\n        block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))\n        block.attn.qkv.weight.copy_(torch.cat([\n            _n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))\n        block.attn.qkv.bias.copy_(torch.cat([\n            _n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))\n        block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))\n        block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))\n        for r in range(2):\n            getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))\n            getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))\n        block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))\n        block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))\n\n\ndef resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()):\n    # Rescale the grid of position embeddings when loading from state_dict. 
Adapted from\n    # https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224\n    print('Resized position embedding: %s to %s' % (posemb.shape, posemb_new.shape))\n    ntok_new = posemb_new.shape[1]\n    if num_tokens:\n        posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, num_tokens:]\n        ntok_new -= num_tokens\n    else:\n        posemb_tok, posemb_grid = posemb[:, :0], posemb[0]\n    gs_old = int(math.sqrt(len(posemb_grid)))\n    if not len(gs_new):  # backwards compatibility\n        gs_new = [int(math.sqrt(ntok_new))] * 2\n    assert len(gs_new) >= 2\n    print('Position embedding grid-size from %s to %s' % ([gs_old, gs_old], gs_new))\n    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)\n    posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bilinear')\n    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)\n    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)\n    return posemb\n\n\ndef checkpoint_filter_fn(state_dict, model):\n    \"\"\" convert patch embedding weight from manual patchify + linear proj to conv\"\"\"\n    out_dict = {}\n    if 'model' in state_dict:\n        # For deit models\n        state_dict = state_dict['model']\n    for k, v in state_dict.items():\n        if 'patch_embed.proj.weight' in k and len(v.shape) < 4:\n            # For old models that I trained prior to conv based patchification\n            O, I, H, W = model.patch_embed.proj.weight.shape\n            v = v.reshape(O, -1, H, W)\n        elif k == 'pos_embed' and v.shape != model.pos_embed.shape:\n            # To resize pos embedding when using model at different size from pretrained weights\n            v = resize_pos_embed(\n                v, model.pos_embed, getattr(model, 'num_tokens', 1), model.patch_embed.grid_size)\n        out_dict[k] = v\n    return out_dict\n\n\ndef _create_vision_transformer(variant, pretrained=False, default_cfg=None, **kwargs):\n    if kwargs.get('features_only', None):\n        raise RuntimeError('features_only not implemented for Vision Transformer models.')\n\n    model = VisionTransformer(**kwargs)\n    if pretrained:\n        if 'npz' in pretrained:\n            model.load_pretrained(pretrained, prefix='')\n        else:\n            checkpoint = torch.load(pretrained, map_location=\"cpu\")\n            missing_keys, unexpected_keys = model.load_state_dict(checkpoint[\"model\"], strict=False)\n            print('Load pretrained model from: ' + pretrained)\n\n    return model\n\n\ndef vit_base_patch16_224(pretrained=False, bins=400, range=2, extension=3, prenum=7, **kwargs):\n    \"\"\"\n    ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).\n    \"\"\"\n    model_kwargs = dict(\n        patch_size=16, embed_dim=768, depth=12, num_heads=12, bins=bins, range_time=range, extension=extension, prenum=prenum, **kwargs)\n\n    model = _create_vision_transformer('vit_base_patch16_224_in21k', pretrained=pretrained, **model_kwargs)\n    return model\n\n\ndef vit_large_patch16_224(pretrained=False, bins=400, range=2, extension=3, prenum=7, **kwargs):\n    model_kwargs = dict(\n        patch_size=16, embed_dim=1024, depth=24, num_heads=16, bins=bins, range_time=range, extension=extension, prenum=prenum, **kwargs)\n    model = _create_vision_transformer('vit_large_patch16_224_in21k', pretrained=pretrained, **model_kwargs)\n    return model\n"
  },
  {
    "path": "lib/models/layers/__init__.py",
    "content": ""
  },
  {
    "path": "lib/models/layers/attn.py",
    "content": "import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom timm.models.layers import trunc_normal_\r\n\r\nfrom lib.models.layers.rpe import generate_2d_concatenated_self_attention_relative_positional_encoding_index\r\n\r\n\r\nclass Attention(nn.Module):\r\n    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.,\r\n                 rpe=False, z_size=7, x_size=14):\r\n        super().__init__()\r\n        self.num_heads = num_heads\r\n        head_dim = dim // num_heads\r\n        self.scale = head_dim ** -0.5\r\n\r\n        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\r\n        self.attn_drop = nn.Dropout(attn_drop)\r\n        self.proj = nn.Linear(dim, dim)\r\n        self.proj_drop = nn.Dropout(proj_drop)\r\n\r\n        self.rpe =rpe\r\n        if self.rpe:\r\n            relative_position_index = \\\r\n                generate_2d_concatenated_self_attention_relative_positional_encoding_index([z_size, z_size],\r\n                                                                                           [x_size, x_size])\r\n            self.register_buffer(\"relative_position_index\", relative_position_index)\r\n            # define a parameter table of relative position bias\r\n            self.relative_position_bias_table = nn.Parameter(torch.empty((num_heads,\r\n                                                                          relative_position_index.max() + 1)))\r\n            trunc_normal_(self.relative_position_bias_table, std=0.02)\r\n\r\n    def forward(self, x, mask=None, return_attention=False):\r\n        # x: B, N, C\r\n        # mask: [B, N, ] torch.bool\r\n        B, N, C = x.shape\r\n        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\r\n        q, k, v = qkv.unbind(0)   # make torchscript happy (cannot use tensor as tuple)\r\n\r\n        attn = (q @ k.transpose(-2, -1)) * self.scale\r\n\r\n        if self.rpe:\r\n            relative_position_bias = self.relative_position_bias_table[:, self.relative_position_index].unsqueeze(0)\r\n            attn += relative_position_bias\r\n\r\n        if mask is not None:\r\n            attn = attn.masked_fill(mask.unsqueeze(1).unsqueeze(2), float('-inf'),)\r\n\r\n        split_attn = False\r\n        len_t = 49\r\n        if split_attn:\r\n            attn_t = attn[..., :len_t].softmax(dim=-1)\r\n            attn_s = attn[..., len_t:].softmax(dim=-1)\r\n            attn = torch.cat([attn_t, attn_s], dim=-1)\r\n        else:\r\n            attn = attn.softmax(dim=-1)\r\n        attn = self.attn_drop(attn)\r\n\r\n        x = (attn @ v).transpose(1, 2).reshape(B, N, C)\r\n        x = self.proj(x)\r\n        x = self.proj_drop(x)\r\n\r\n        if return_attention:\r\n            return x, attn\r\n        else:\r\n            return x\r\n\r\n\r\nclass Attention_talking_head(nn.Module):\r\n    # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py\r\n    # with slight modifications to add Talking Heads Attention (https://arxiv.org/pdf/2003.02436v1.pdf)\r\n    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,\r\n                 rpe=True, z_size=7, x_size=14):\r\n        super().__init__()\r\n\r\n        self.num_heads = num_heads\r\n\r\n        head_dim = dim // num_heads\r\n\r\n        self.scale = qk_scale or head_dim ** -0.5\r\n\r\n        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\r\n        
self.attn_drop = nn.Dropout(attn_drop)\r\n\r\n        self.proj = nn.Linear(dim, dim)\r\n\r\n        self.proj_l = nn.Linear(num_heads, num_heads)\r\n        self.proj_w = nn.Linear(num_heads, num_heads)\r\n\r\n        self.proj_drop = nn.Dropout(proj_drop)\r\n\r\n        self.rpe = rpe\r\n        if self.rpe:\r\n            relative_position_index = \\\r\n                generate_2d_concatenated_self_attention_relative_positional_encoding_index([z_size, z_size],\r\n                                                                                           [x_size, x_size])\r\n            self.register_buffer(\"relative_position_index\", relative_position_index)\r\n            # define a parameter table of relative position bias\r\n            self.relative_position_bias_table = nn.Parameter(torch.empty((num_heads,\r\n                                                                          relative_position_index.max() + 1)))\r\n            trunc_normal_(self.relative_position_bias_table, std=0.02)\r\n\r\n    def forward(self, x, mask=None):\r\n        B, N, C = x.shape\r\n        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\r\n        q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]\r\n\r\n        attn = (q @ k.transpose(-2, -1))\r\n\r\n        if self.rpe:\r\n            relative_position_bias = self.relative_position_bias_table[:, self.relative_position_index].unsqueeze(0)\r\n            attn += relative_position_bias\r\n\r\n        if mask is not None:\r\n            attn = attn.masked_fill(mask.unsqueeze(1).unsqueeze(2),\r\n                                    float('-inf'),)\r\n\r\n        attn = self.proj_l(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\r\n\r\n        attn = attn.softmax(dim=-1)\r\n\r\n        attn = self.proj_w(attn.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)\r\n        attn = self.attn_drop(attn)\r\n\r\n        x = (attn @ v).transpose(1, 2).reshape(B, N, C)\r\n        x = self.proj(x)\r\n        x = self.proj_drop(x)\r\n        return x"
  },
  {
    "path": "lib/models/layers/attn_blocks.py",
    "content": "import math\r\nimport torch\r\nimport torch.nn as nn\r\nfrom timm.models.layers import Mlp, DropPath, trunc_normal_, lecun_normal_\r\n\r\nfrom lib.models.layers.attn import Attention\r\n\r\n\r\ndef candidate_elimination(attn: torch.Tensor, tokens: torch.Tensor, lens_t: int, keep_ratio: float, global_index: torch.Tensor, box_mask_z: torch.Tensor):\r\n    \"\"\"\r\n    Eliminate potential background candidates for computation reduction and noise cancellation.\r\n    Args:\r\n        attn (torch.Tensor): [B, num_heads, L_t + L_s, L_t + L_s], attention weights\r\n        tokens (torch.Tensor):  [B, L_t + L_s, C], template and search region tokens\r\n        lens_t (int): length of template\r\n        keep_ratio (float): keep ratio of search region tokens (candidates)\r\n        global_index (torch.Tensor): global index of search region tokens\r\n        box_mask_z (torch.Tensor): template mask used to accumulate attention weights\r\n\r\n    Returns:\r\n        tokens_new (torch.Tensor): tokens after candidate elimination\r\n        keep_index (torch.Tensor): indices of kept search region tokens\r\n        removed_index (torch.Tensor): indices of removed search region tokens\r\n    \"\"\"\r\n    lens_s = attn.shape[-1] - lens_t\r\n    bs, hn, _, _ = attn.shape\r\n\r\n    lens_keep = math.ceil(keep_ratio * lens_s)\r\n    if lens_keep == lens_s:\r\n        return tokens, global_index, None\r\n\r\n    attn_t = attn[:, :, :lens_t, lens_t:]\r\n\r\n    if box_mask_z is not None:\r\n        box_mask_z = box_mask_z.unsqueeze(1).unsqueeze(-1).expand(-1, attn_t.shape[1], -1, attn_t.shape[-1])\r\n        # attn_t = attn_t[:, :, box_mask_z, :]\r\n        attn_t = attn_t[box_mask_z]\r\n        attn_t = attn_t.view(bs, hn, -1, lens_s)\r\n        attn_t = attn_t.mean(dim=2).mean(dim=1)  # B, H, L-T, L_s --> B, L_s\r\n\r\n        # attn_t = [attn_t[i, :, box_mask_z[i, :], :] for i in range(attn_t.size(0))]\r\n        # attn_t = [attn_t[i].mean(dim=1).mean(dim=0) for i in range(len(attn_t))]\r\n        # attn_t = torch.stack(attn_t, dim=0)\r\n    else:\r\n        attn_t = attn_t.mean(dim=2).mean(dim=1)  # B, H, L-T, L_s --> B, L_s\r\n\r\n    # use sort instead of topk, due to the speed issue\r\n    # https://github.com/pytorch/pytorch/issues/22812\r\n    sorted_attn, indices = torch.sort(attn_t, dim=1, descending=True)\r\n\r\n    topk_attn, topk_idx = sorted_attn[:, :lens_keep], indices[:, :lens_keep]\r\n    non_topk_attn, non_topk_idx = sorted_attn[:, lens_keep:], indices[:, lens_keep:]\r\n\r\n    keep_index = global_index.gather(dim=1, index=topk_idx)\r\n    removed_index = global_index.gather(dim=1, index=non_topk_idx)\r\n\r\n    # separate template and search tokens\r\n    tokens_t = tokens[:, :lens_t]\r\n    tokens_s = tokens[:, lens_t:]\r\n\r\n    # obtain the attentive and inattentive tokens\r\n    B, L, C = tokens_s.shape\r\n    # topk_idx_ = topk_idx.unsqueeze(-1).expand(B, lens_keep, C)\r\n    attentive_tokens = tokens_s.gather(dim=1, index=topk_idx.unsqueeze(-1).expand(B, -1, C))\r\n    # inattentive_tokens = tokens_s.gather(dim=1, index=non_topk_idx.unsqueeze(-1).expand(B, -1, C))\r\n\r\n    # compute the weighted combination of inattentive tokens\r\n    # fused_token = non_topk_attn @ inattentive_tokens\r\n\r\n    # concatenate these tokens\r\n    # tokens_new = torch.cat([tokens_t, attentive_tokens, fused_token], dim=0)\r\n    tokens_new = torch.cat([tokens_t, attentive_tokens], dim=1)\r\n\r\n    return tokens_new, keep_index, removed_index\r\n\r\n\r\nclass 
CEBlock(nn.Module):\r\n\r\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\r\n                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, keep_ratio_search=1.0,):\r\n        super().__init__()\r\n        self.norm1 = norm_layer(dim)\r\n        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\r\n        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\r\n        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\r\n        self.norm2 = norm_layer(dim)\r\n        mlp_hidden_dim = int(dim * mlp_ratio)\r\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\r\n\r\n        self.keep_ratio_search = keep_ratio_search\r\n\r\n    def forward(self, x, global_index_template, global_index_search, mask=None, ce_template_mask=None, keep_ratio_search=None):\r\n        x_attn, attn = self.attn(self.norm1(x), mask, True)\r\n        x = x + self.drop_path(x_attn)\r\n        lens_t = global_index_template.shape[1]\r\n\r\n        removed_index_search = None\r\n        if self.keep_ratio_search < 1 and (keep_ratio_search is None or keep_ratio_search < 1):\r\n            keep_ratio_search = self.keep_ratio_search if keep_ratio_search is None else keep_ratio_search\r\n            x, global_index_search, removed_index_search = candidate_elimination(attn, x, lens_t, keep_ratio_search, global_index_search, ce_template_mask)\r\n\r\n        x = x + self.drop_path(self.mlp(self.norm2(x)))\r\n        return x, global_index_template, global_index_search, removed_index_search, attn\r\n\r\n\r\nclass Block(nn.Module):\r\n\r\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\r\n                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):\r\n        super().__init__()\r\n        self.norm1 = norm_layer(dim)\r\n        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)\r\n        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\r\n        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\r\n        self.norm2 = norm_layer(dim)\r\n        mlp_hidden_dim = int(dim * mlp_ratio)\r\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\r\n\r\n    def forward(self, x, mask=None):\r\n        x = x + self.drop_path(self.attn(self.norm1(x), mask))\r\n        x = x + self.drop_path(self.mlp(self.norm2(x)))\r\n        return x\r\n"
  },
  {
    "path": "lib/models/layers/frozen_bn.py",
    "content": "import torch\r\n\r\n\r\nclass FrozenBatchNorm2d(torch.nn.Module):\r\n    \"\"\"\r\n    BatchNorm2d where the batch statistics and the affine parameters are fixed.\r\n\r\n    Copy-paste from torchvision.misc.ops with added eps before rqsrt,\r\n    without which any other models than torchvision.models.resnet[18,34,50,101]\r\n    produce nans.\r\n    \"\"\"\r\n\r\n    def __init__(self, n):\r\n        super(FrozenBatchNorm2d, self).__init__()\r\n        self.register_buffer(\"weight\", torch.ones(n))\r\n        self.register_buffer(\"bias\", torch.zeros(n))\r\n        self.register_buffer(\"running_mean\", torch.zeros(n))\r\n        self.register_buffer(\"running_var\", torch.ones(n))\r\n\r\n    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\r\n                              missing_keys, unexpected_keys, error_msgs):\r\n        num_batches_tracked_key = prefix + 'num_batches_tracked'\r\n        if num_batches_tracked_key in state_dict:\r\n            del state_dict[num_batches_tracked_key]\r\n\r\n        super(FrozenBatchNorm2d, self)._load_from_state_dict(\r\n            state_dict, prefix, local_metadata, strict,\r\n            missing_keys, unexpected_keys, error_msgs)\r\n\r\n    def forward(self, x):\r\n        # move reshapes to the beginning\r\n        # to make it fuser-friendly\r\n        w = self.weight.reshape(1, -1, 1, 1)\r\n        b = self.bias.reshape(1, -1, 1, 1)\r\n        rv = self.running_var.reshape(1, -1, 1, 1)\r\n        rm = self.running_mean.reshape(1, -1, 1, 1)\r\n        eps = 1e-5\r\n        scale = w * (rv + eps).rsqrt()  # rsqrt(x): 1/sqrt(x), r: reciprocal\r\n        bias = b - rm * scale\r\n        return x * scale + bias\r\n"
  },
  {
    "path": "lib/models/layers/head.py",
    "content": "import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom typing import Optional\nfrom torch import Tensor\nfrom torch.nn import Identity\nfrom timm.models.layers import trunc_normal_\nfrom timm.models.layers import DropPath\nfrom lib.models.layers.frozen_bn import FrozenBatchNorm2d\nimport copy\n\ndef top_k_top_p_filtering_batch(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):\n    \"\"\" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering\n        Args:\n            logits: logits distribution shape (vocabulary size)\n            top_k > 0: keep only top k tokens with highest probability (top-k filtering).\n            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).\n                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)\n        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317\n    \"\"\"\n    top_k = min(top_k, logits.size(-1))  # Safety check\n    if top_k > 0:\n\n        for i in range(logits.shape[0]):\n            indices_to_remove = logits[i] < torch.topk(logits[i], top_k)[0][..., -1, None]\n            logits[i][indices_to_remove] = filter_value\n\n    if top_p > 0.0:\n        for i in range(logits.shape[0]):\n            sorted_logits, sorted_indices = torch.sort(logits[i], descending=True)\n            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n            # Remove tokens with cumulative probability above the threshold\n            sorted_indices_to_remove = cumulative_probs > top_p\n            # Shift the indices to the right to keep also the first token above the threshold\n            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n            sorted_indices_to_remove[..., 0] = 0\n\n            indices_to_remove = sorted_indices[sorted_indices_to_remove]\n            logits[i][indices_to_remove] = filter_value\n    return logits\n    \ndef conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1,\n         freeze_bn=False):\n    if freeze_bn:\n        return nn.Sequential(\n            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n                      padding=padding, dilation=dilation, bias=True),\n            FrozenBatchNorm2d(out_planes),\n            nn.ReLU(inplace=True))\n    else:\n        return nn.Sequential(\n            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\n                      padding=padding, dilation=dilation, bias=True),\n            nn.BatchNorm2d(out_planes),\n            nn.ReLU(inplace=True))\n\n\nclass Corner_Predictor(nn.Module):\n    \"\"\" Corner Predictor module\"\"\"\n\n    def __init__(self, inplanes=64, channel=256, feat_sz=20, stride=16, freeze_bn=False):\n        super(Corner_Predictor, self).__init__()\n        self.feat_sz = feat_sz\n        self.stride = stride\n        self.img_sz = self.feat_sz * self.stride\n        '''top-left corner'''\n        self.conv1_tl = conv(inplanes, channel, freeze_bn=freeze_bn)\n        self.conv2_tl = conv(channel, channel // 2, freeze_bn=freeze_bn)\n        self.conv3_tl = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\n        self.conv4_tl = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\n        self.conv5_tl = nn.Conv2d(channel // 8, 1, kernel_size=1)\n\n        '''bottom-right corner'''\n        self.conv1_br = conv(inplanes, channel, freeze_bn=freeze_bn)\n        
self.conv2_br = conv(channel, channel // 2, freeze_bn=freeze_bn)\n        self.conv3_br = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\n        self.conv4_br = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\n        self.conv5_br = nn.Conv2d(channel // 8, 1, kernel_size=1)\n\n        '''about coordinates and indices'''\n        with torch.no_grad():\n            self.indice = torch.arange(0, self.feat_sz).view(-1, 1) * self.stride\n            # generate mesh-grid\n            self.coord_x = self.indice.repeat((self.feat_sz, 1)) \\\n                .view((self.feat_sz * self.feat_sz,)).float().cuda()\n            self.coord_y = self.indice.repeat((1, self.feat_sz)) \\\n                .view((self.feat_sz * self.feat_sz,)).float().cuda()\n\n    def forward(self, x, return_dist=False, softmax=True):\n        \"\"\" Forward pass with input x. \"\"\"\n        score_map_tl, score_map_br = self.get_score_map(x)\n        if return_dist:\n            coorx_tl, coory_tl, prob_vec_tl = self.soft_argmax(score_map_tl, return_dist=True, softmax=softmax)\n            coorx_br, coory_br, prob_vec_br = self.soft_argmax(score_map_br, return_dist=True, softmax=softmax)\n            return torch.stack((coorx_tl, coory_tl, coorx_br, coory_br), dim=1) / self.img_sz, prob_vec_tl, prob_vec_br\n        else:\n            coorx_tl, coory_tl = self.soft_argmax(score_map_tl)\n            coorx_br, coory_br = self.soft_argmax(score_map_br)\n            return torch.stack((coorx_tl, coory_tl, coorx_br, coory_br), dim=1) / self.img_sz\n\n    def get_score_map(self, x):\n        # top-left branch\n        x_tl1 = self.conv1_tl(x)\n        x_tl2 = self.conv2_tl(x_tl1)\n        x_tl3 = self.conv3_tl(x_tl2)\n        x_tl4 = self.conv4_tl(x_tl3)\n        score_map_tl = self.conv5_tl(x_tl4)\n\n        # bottom-right branch\n        x_br1 = self.conv1_br(x)\n        x_br2 = self.conv2_br(x_br1)\n        x_br3 = self.conv3_br(x_br2)\n        x_br4 = self.conv4_br(x_br3)\n        score_map_br = self.conv5_br(x_br4)\n        return score_map_tl, score_map_br\n\n    def soft_argmax(self, score_map, return_dist=False, softmax=True):\n        \"\"\" get soft-argmax coordinate for a given heatmap \"\"\"\n        score_vec = score_map.view((-1, self.feat_sz * self.feat_sz))  # (batch, feat_sz * feat_sz)\n        prob_vec = nn.functional.softmax(score_vec, dim=1)\n        exp_x = torch.sum((self.coord_x * prob_vec), dim=1)\n        exp_y = torch.sum((self.coord_y * prob_vec), dim=1)\n        if return_dist:\n            if softmax:\n                return exp_x, exp_y, prob_vec\n            else:\n                return exp_x, exp_y, score_vec\n        else:\n            return exp_x, exp_y\n\n\nclass CenterPredictor(nn.Module):\n    def __init__(self, inplanes=64, channel=256, feat_sz=20, stride=16, freeze_bn=False):\n        super(CenterPredictor, self).__init__()\n        self.feat_sz = feat_sz\n        self.stride = stride\n        self.img_sz = self.feat_sz * self.stride\n\n        # center predict\n        self.conv1_ctr = conv(inplanes, channel, freeze_bn=freeze_bn)\n        self.conv2_ctr = conv(channel, channel // 2, freeze_bn=freeze_bn)\n        self.conv3_ctr = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\n        self.conv4_ctr = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\n        self.conv5_ctr = nn.Conv2d(channel // 8, 1, kernel_size=1)\n\n        # offset regress\n        self.conv1_offset = conv(inplanes, channel, freeze_bn=freeze_bn)\n        self.conv2_offset = conv(channel, channel 
// 2, freeze_bn=freeze_bn)\n        self.conv3_offset = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\n        self.conv4_offset = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\n        self.conv5_offset = nn.Conv2d(channel // 8, 2, kernel_size=1)\n\n        # size regress\n        self.conv1_size = conv(inplanes, channel, freeze_bn=freeze_bn)\n        self.conv2_size = conv(channel, channel // 2, freeze_bn=freeze_bn)\n        self.conv3_size = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\n        self.conv4_size = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\n        self.conv5_size = nn.Conv2d(channel // 8, 2, kernel_size=1)\n\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n\n    def forward(self, x, gt_score_map=None):\n        \"\"\" Forward pass with input x. \"\"\"\n        score_map_ctr, size_map, offset_map = self.get_score_map(x)\n\n        # assert gt_score_map is None\n        if gt_score_map is None:\n            bbox = self.cal_bbox(score_map_ctr, size_map, offset_map)\n        else:\n            bbox = self.cal_bbox(gt_score_map.unsqueeze(1), size_map, offset_map)\n\n        return score_map_ctr, bbox, size_map, offset_map\n\n    def cal_bbox(self, score_map_ctr, size_map, offset_map, return_score=False):\n        max_score, idx = torch.max(score_map_ctr.flatten(1), dim=1, keepdim=True)\n        idx_y = idx // self.feat_sz\n        idx_x = idx % self.feat_sz\n\n        idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)\n        size = size_map.flatten(2).gather(dim=2, index=idx)\n        offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)\n\n        # bbox = torch.cat([idx_x - size[:, 0] / 2, idx_y - size[:, 1] / 2,\n        #                   idx_x + size[:, 0] / 2, idx_y + size[:, 1] / 2], dim=1) / self.feat_sz\n        # cx, cy, w, h\n        bbox = torch.cat([(idx_x.to(torch.float) + offset[:, :1]) / self.feat_sz,\n                          (idx_y.to(torch.float) + offset[:, 1:]) / self.feat_sz,\n                          size.squeeze(-1)], dim=1)\n\n        if return_score:\n            return bbox, max_score\n        return bbox\n\n    def get_pred(self, score_map_ctr, size_map, offset_map):\n        max_score, idx = torch.max(score_map_ctr.flatten(1), dim=1, keepdim=True)\n        idx_y = idx // self.feat_sz\n        idx_x = idx % self.feat_sz\n\n        idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)\n        size = size_map.flatten(2).gather(dim=2, index=idx)\n        offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)\n\n        # bbox = torch.cat([idx_x - size[:, 0] / 2, idx_y - size[:, 1] / 2,\n        #                   idx_x + size[:, 0] / 2, idx_y + size[:, 1] / 2], dim=1) / self.feat_sz\n        return size * self.feat_sz, offset\n\n    def get_score_map(self, x):\n\n        def _sigmoid(x):\n            y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)\n            return y\n\n        # ctr branch\n        x_ctr1 = self.conv1_ctr(x)\n        x_ctr2 = self.conv2_ctr(x_ctr1)\n        x_ctr3 = self.conv3_ctr(x_ctr2)\n        x_ctr4 = self.conv4_ctr(x_ctr3)\n        score_map_ctr = self.conv5_ctr(x_ctr4)\n\n        # offset branch\n        x_offset1 = self.conv1_offset(x)\n        x_offset2 = self.conv2_offset(x_offset1)\n        x_offset3 = self.conv3_offset(x_offset2)\n        x_offset4 = self.conv4_offset(x_offset3)\n        score_map_offset = self.conv5_offset(x_offset4)\n\n        # size branch\n        x_size1 = 
self.conv1_size(x)\n        x_size2 = self.conv2_size(x_size1)\n        x_size3 = self.conv3_size(x_size2)\n        x_size4 = self.conv4_size(x_size3)\n        score_map_size = self.conv5_size(x_size4)\n        return _sigmoid(score_map_ctr), _sigmoid(score_map_size), score_map_offset\n\n\nclass MLP(nn.Module):\n    \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, BN=False):\n        super().__init__()\n        self.num_layers = num_layers\n        h = [hidden_dim] * (num_layers - 1)\n        if BN:\n            self.layers = nn.ModuleList(nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k))\n                                        for n, k in zip([input_dim] + h, h + [output_dim]))\n        else:\n            self.layers = nn.ModuleList(nn.Linear(n, k)\n                                        for n, k in zip([input_dim] + h, h + [output_dim]))\n\n    def forward(self, x):\n        for i, layer in enumerate(self.layers):\n            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n        return x\n\nclass SelfAttention(nn.Module):\n    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,\n                 attn_pos_encoding_only=False):\n        super(SelfAttention, self).__init__()\n        assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\n\n        self.dim = dim\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = qk_scale or head_dim ** -0.5\n\n        if attn_pos_encoding_only:\n            self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)\n        else:\n            self.q = nn.Linear(dim, dim, bias=qkv_bias)\n            self.k = nn.Linear(dim, dim, bias=qkv_bias)\n            self.v = nn.Linear(dim, dim, bias=qkv_bias)\n        self.attn_drop = nn.Dropout(attn_drop)\n        self.proj = nn.Linear(dim, dim)\n        self.proj_drop = nn.Dropout(proj_drop)\n\n        self.attn_pos_encoding_only = attn_pos_encoding_only\n\n    def forward(self, x, q_ape, k_ape, attn_pos):\n        '''\n            Args:\n                x (torch.Tensor): (B, L, C)\n                q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q\n                k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k\n                attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding\n            Returns:\n                torch.Tensor: (B, L, C)\n        '''\n        B, N, C = x.shape\n\n        if self.attn_pos_encoding_only:\n            assert q_ape is None and k_ape is None\n            qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n            q, k, v = qkv[0], qkv[1], qkv[2]\n        else:\n            q = x + q_ape if q_ape is not None else x\n            q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n\n            k = x + k_ape if k_ape is not None else x\n            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n            v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n\n        attn = q @ k.transpose(-2, -1)\n        attn = attn * self.scale\n        if attn_pos is not None:\n            attn = attn + attn_pos\n        attn = attn.softmax(dim=-1)\n        attn = self.attn_drop(attn)\n\n        x = attn @ v\n        x = 
x.transpose(1, 2).reshape(B, N, C)\n        x = self.proj(x)\n        x = self.proj_drop(x)\n\n        return x\n\nclass CrossAttention(nn.Module):\n    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,\n                 attn_pos_encoding_only=False):\n        super(CrossAttention, self).__init__()\n        assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\n\n        self.dim = dim\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = qk_scale or head_dim ** -0.5\n\n        if attn_pos_encoding_only:\n            self.q = nn.Linear(dim, dim, bias=qkv_bias)\n            self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)\n        else:\n            self.q = nn.Linear(dim, dim, bias=qkv_bias)\n            self.k = nn.Linear(dim, dim, bias=qkv_bias)\n            self.v = nn.Linear(dim, dim, bias=qkv_bias)\n        self.attn_drop = nn.Dropout(attn_drop)\n        self.proj = nn.Linear(dim, dim)\n        self.proj_drop = nn.Dropout(proj_drop)\n\n        self.attn_pos_encoding_only = attn_pos_encoding_only\n\n    def forward(self, q, kv, q_ape, k_ape, attn_pos):\n        '''\n            Args:\n                q (torch.Tensor): (B, L_q, C)\n                kv (torch.Tensor): (B, L_kv, C)\n                q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q\n                k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k\n                attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding\n            Returns:\n                torch.Tensor: (B, L_q, C)\n        '''\n        B, q_N, C = q.shape\n        kv_N = kv.shape[1]\n\n        if self.attn_pos_encoding_only:\n            assert q_ape is None and k_ape is None\n            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n            kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n            k, v = kv[0], kv[1]\n        else:\n            q = q + q_ape if q_ape is not None else q\n            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n            k = kv + k_ape if k_ape is not None else kv\n            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n            v = self.v(kv).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\n\n        attn = q @ k.transpose(-2, -1)\n        attn = attn * self.scale\n        if attn_pos is not None:\n            attn = attn + attn_pos\n        attn = attn.softmax(dim=-1)\n        attn = self.attn_drop(attn)\n        x = attn @ v\n        x = x.transpose(1, 2).reshape(B, q_N, C)\n        x = self.proj(x)\n        x = self.proj_drop(x)\n\n        return x\n\nclass Mlp(nn.Module):\n    \"\"\" Multilayer perceptron.\"\"\"\n\n    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        self.fc1 = nn.Linear(in_features, hidden_features)\n        self.act = act_layer()\n        self.fc2 = nn.Linear(hidden_features, out_features)\n        self.drop = nn.Dropout(drop)\n\n    def forward(self, x):\n        '''\n            Args:\n                x (torch.Tensor): (B, L, C), input tensor\n            Returns:\n           
     torch.Tensor: (B, L, C), output tensor\n        '''\n        x = self.fc1(x)\n        x = self.act(x)\n        x = self.drop(x)\n        x = self.fc2(x)\n        x = self.drop(x)\n        return x\n\nclass FeatureFusion(nn.Module):\n    def __init__(self,\n                 dim, num_heads, mlp_ratio=2., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n                 drop_path=nn.Identity(), act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=False):\n        super(FeatureFusion, self).__init__()\n        self.z_norm1 = norm_layer(dim)\n        self.x_norm1 = norm_layer(dim)\n        self.z_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)\n        self.x_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)\n\n        self.z_norm2_1 = norm_layer(dim)\n        self.z_norm2_2 = norm_layer(dim)\n        self.x_norm2_1 = norm_layer(dim)\n        self.x_norm2_2 = norm_layer(dim)\n\n        self.z_x_cross_attention = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)\n        self.x_z_cross_attention = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)\n\n        mlp_hidden_dim = int(dim * mlp_ratio)\n        self.z_norm3 = norm_layer(dim)\n        self.x_norm3 = norm_layer(dim)\n        print(mlp_ratio)\n        self.z_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n        self.x_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n        self.drop_path = drop_path\n\n    def forward(self, z, x, z_self_attn_pos, x_self_attn_pos, z_x_cross_attn_pos, x_z_cross_attn_pos):\n        z = z + self.drop_path(self.z_self_attn(self.z_norm1(z), None, None, z_self_attn_pos))\n        x = x + self.drop_path(self.x_self_attn(self.x_norm1(x), None, None, x_self_attn_pos))\n\n        z = z + self.drop_path(self.z_x_cross_attention(self.z_norm2_1(z), self.x_norm2_1(x), None, None, z_x_cross_attn_pos))\n        x = x + self.drop_path(self.x_z_cross_attention(self.x_norm2_2(x), self.z_norm2_2(z), None, None, x_z_cross_attn_pos))\n\n        z = z + self.drop_path(self.z_mlp(self.z_norm3(z)))\n        x = x + self.drop_path(self.x_mlp(self.x_norm3(x)))\n        return z, x\n\n\nclass FeatureFusionEncoder(nn.Module):\n    def __init__(self, feature_fusion_layers, z_pos_enc, x_pos_enc,\n                 z_rel_pos_index, x_rel_pos_index, z_x_rel_pos_index, x_z_rel_pos_index,\n                 z_rel_pos_bias_table, x_rel_pos_bias_table, z_x_rel_pos_bias_table, x_z_rel_pos_bias_table):\n        super(FeatureFusionEncoder, self).__init__()\n        self.layers = nn.ModuleList(feature_fusion_layers)\n        self.z_pos_enc = z_pos_enc\n        self.x_pos_enc = x_pos_enc\n        self.register_buffer('z_rel_pos_index', z_rel_pos_index, False)\n        self.register_buffer('x_rel_pos_index', x_rel_pos_index, False)\n        self.register_buffer('z_x_rel_pos_index', z_x_rel_pos_index, False)\n        self.register_buffer('x_z_rel_pos_index', x_z_rel_pos_index, False)\n        self.z_rel_pos_bias_table = z_rel_pos_bias_table\n        self.x_rel_pos_bias_table = x_rel_pos_bias_table\n        self.z_x_rel_pos_bias_table = z_x_rel_pos_bias_table\n        self.x_z_rel_pos_bias_table = x_z_rel_pos_bias_table\n\n    def forward(self, z, x, z_pos, x_pos):\n        '''\n            Args:\n                z (torch.Tensor): (B, L_z, C), 
template image feature tokens\n                x (torch.Tensor): (B, L_x, C), search image feature tokens\n                z_pos (torch.Tensor | None): (1 or B, L_z, C), optional positional encoding for z\n                x_pos (torch.Tensor | None): (1 or B, L_x, C), optional positional encoding for x\n            Returns:\n                Tuple[torch.Tensor, torch.Tensor]:\n                    (B, L_z, C): template image feature tokens\n                    (B, L_x, C): search image feature tokens\n        '''\n        # Support untied positional encoding only for simplicity\n        assert z_pos is None and x_pos is None\n\n        # untied positional encoding\n        z_q_pos, z_k_pos = self.z_pos_enc()\n        x_q_pos, x_k_pos = self.x_pos_enc()\n        z_self_attn_pos = (z_q_pos @ z_k_pos.transpose(-2, -1)).unsqueeze(0)\n        x_self_attn_pos = (x_q_pos @ x_k_pos.transpose(-2, -1)).unsqueeze(0)\n\n        z_x_cross_attn_pos = (z_q_pos @ x_k_pos.transpose(-2, -1)).unsqueeze(0)\n        x_z_cross_attn_pos = (x_q_pos @ z_k_pos.transpose(-2, -1)).unsqueeze(0)\n\n        # relative positional encoding\n        z_self_attn_pos = z_self_attn_pos + self.z_rel_pos_bias_table(self.z_rel_pos_index)\n        x_self_attn_pos = x_self_attn_pos + self.x_rel_pos_bias_table(self.x_rel_pos_index)\n        z_x_cross_attn_pos = z_x_cross_attn_pos + self.z_x_rel_pos_bias_table(self.z_x_rel_pos_index)\n        x_z_cross_attn_pos = x_z_cross_attn_pos + self.x_z_rel_pos_bias_table(self.x_z_rel_pos_index)\n\n        for layer in self.layers:\n            z, x = layer(z, x, z_self_attn_pos, x_self_attn_pos, z_x_cross_attn_pos, x_z_cross_attn_pos)\n\n        return z, x\n\nclass Learned2DPositionalEncoder(nn.Module):\n    def __init__(self, dim, w, h):\n        super(Learned2DPositionalEncoder, self).__init__()\n        self.w_pos = nn.Parameter(torch.empty(w, dim))\n        self.h_pos = nn.Parameter(torch.empty(h, dim))\n        trunc_normal_(self.w_pos, std=0.02)\n        trunc_normal_(self.h_pos, std=0.02)\n\n    def forward(self):\n        w = self.w_pos.shape[0]\n        h = self.h_pos.shape[0]\n        return (self.w_pos[None, :, :] + self.h_pos[:, None, :]).view(h * w, -1)\n\nclass Untied2DPositionalEncoder(nn.Module):\n    def __init__(self, dim, num_heads, w, h, scale=None, with_q=True, with_k=True):\n        super(Untied2DPositionalEncoder, self).__init__()\n        assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\n        self.pos = Learned2DPositionalEncoder(dim, w, h)\n        self.norm = nn.LayerNorm(dim)\n        self.pos_q_linear = None\n        self.pos_k_linear = None\n        if with_q:\n            self.pos_q_linear = nn.Linear(dim, dim)\n        if with_k:\n            self.pos_k_linear = nn.Linear(dim, dim)\n\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = scale or head_dim ** -0.5\n\n    def forward(self):\n        pos = self.norm(self.pos())\n        seq_len = pos.shape[0]\n        if self.pos_q_linear is not None and self.pos_k_linear is not None:\n            pos_q = self.pos_q_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1) * self.scale\n            pos_k = self.pos_k_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1)\n            return pos_q, pos_k\n        elif self.pos_q_linear is not None:\n            pos_q = self.pos_q_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1) * self.scale\n            return pos_q\n        elif self.pos_k_linear is not None:\n  
          pos_k = self.pos_k_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1)\n            return pos_k\n        else:\n            raise RuntimeError\n\ndef generate_2d_relative_positional_encoding_index(z_shape, x_shape):\n    '''\n        z_shape: (z_h, z_w)\n        x_shape: (x_h, x_w)\n    '''\n    z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1]))\n    x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1]))\n\n    z_2d_index_h = z_2d_index_h.flatten(0)\n    z_2d_index_w = z_2d_index_w.flatten(0)\n    x_2d_index_h = x_2d_index_h.flatten(0)\n    x_2d_index_w = x_2d_index_w.flatten(0)\n\n    diff_h = z_2d_index_h[:, None] - x_2d_index_h[None, :]\n    diff_w = z_2d_index_w[:, None] - x_2d_index_w[None, :]\n\n    diff = torch.stack((diff_h, diff_w), dim=-1)\n    _, indices = torch.unique(diff.view(-1, 2), return_inverse=True, dim=0)\n    return indices.view(z_shape[0] * z_shape[1], x_shape[0] * x_shape[1])\n\nclass RelativePosition2DEncoder(nn.Module):\n    def __init__(self, num_heads, embed_size):\n        super(RelativePosition2DEncoder, self).__init__()\n        self.relative_position_bias_table = nn.Parameter(torch.empty((num_heads, embed_size)))\n        trunc_normal_(self.relative_position_bias_table, std=0.02)\n\n    def forward(self, attn_rpe_index):\n        '''\n            Args:\n                attn_rpe_index (torch.Tensor): (*), any shape containing indices, max(attn_rpe_index) < embed_size\n            Returns:\n                torch.Tensor: (1, num_heads, *)\n        '''\n        return self.relative_position_bias_table[:, attn_rpe_index].unsqueeze(0)\n\nclass DropPathAllocator:\n    def __init__(self, max_drop_path_rate, stochastic_depth_decay = True):\n        self.max_drop_path_rate = max_drop_path_rate\n        self.stochastic_depth_decay = stochastic_depth_decay\n        self.allocated = []\n        self.allocating = []\n\n    def __enter__(self):\n        self.allocating = []\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        if len(self.allocating) != 0:\n            self.allocated.append(self.allocating)\n        self.allocating = None\n        if not self.stochastic_depth_decay:\n            for depth_module in self.allocated:\n                for module in depth_module:\n                    if isinstance(module, DropPath):\n                        module.drop_prob = self.max_drop_path_rate\n        else:\n            depth = self.get_depth()\n            dpr = [x.item() for x in torch.linspace(0, self.max_drop_path_rate, depth)]\n            assert len(dpr) == len(self.allocated)\n            for drop_path_rate, depth_modules in zip(dpr, self.allocated):\n                for module in depth_modules:\n                    if isinstance(module, DropPath):\n                        module.drop_prob = drop_path_rate\n\n    def __len__(self):\n        length = 0\n\n        for depth_modules in self.allocated:\n            length += len(depth_modules)\n\n        return length\n\n    def increase_depth(self):\n        self.allocated.append(self.allocating)\n        self.allocating = []\n\n    def get_depth(self):\n        return len(self.allocated)\n\n    def allocate(self):\n        if self.max_drop_path_rate == 0 or (self.stochastic_depth_decay and self.get_depth() == 0):\n            drop_path_module = Identity()\n        else:\n            drop_path_module = DropPath()\n        self.allocating.append(drop_path_module)\n        return drop_path_module\n\n    
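# Note: allocate() hands out a DropPath (or Identity) module and records it, while\r\n    # increase_depth() closes the current depth level. When the allocator is used as a\r\n    # context manager, __exit__ assigns every module at a given depth a linearly spaced\r\n    # drop rate up to max_drop_path_rate (stochastic depth decay), or the constant\r\n    # maximum rate when the decay is disabled.\r\n\r\n    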
def get_all_allocated(self):\n        allocated = []\n        for depth_module in self.allocated:\n            for module in depth_module:\n                allocated.append(module)\n        return allocated\n\ndef build_encoder(encoder_layer, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop, dim, z_size, x_size, drop_path):\n    z_shape = [z_size, z_size]\n    x_shape = [x_size, x_size]\n    encoder_layers = []\n    for i in range(encoder_layer):\n        encoder_layers.append(\n            FeatureFusion(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate, attn_drop=attn_drop,\n                          drop_path=drop_path.allocate(),\n                          attn_pos_encoding_only=True)\n        )\n    z_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, z_shape[0], z_shape[1])\n    x_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, x_shape[0], x_shape[1])\n\n    z_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, z_shape)\n    x_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, x_shape)\n\n    z_x_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, x_shape)\n    x_z_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, z_shape)\n\n    z_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, z_self_attn_rel_pos_index.max() + 1)\n    x_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, x_self_attn_rel_pos_index.max() + 1)\n    z_x_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, z_x_cross_attn_rel_pos_index.max() + 1)\n    x_z_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, x_z_cross_attn_rel_pos_index.max() + 1)\n\n    return FeatureFusionEncoder(encoder_layers, z_abs_encoder, x_abs_encoder, z_self_attn_rel_pos_index,\n                                x_self_attn_rel_pos_index,\n                                z_x_cross_attn_rel_pos_index, x_z_cross_attn_rel_pos_index,\n                                z_self_attn_rel_pos_bias_table,\n                                x_self_attn_rel_pos_bias_table, z_x_cross_attn_rel_pos_bias_table,\n                                x_z_cross_attn_rel_pos_bias_table)\n\nclass TargetQueryDecoderLayer(nn.Module):\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\n                 drop_path=nn.Identity(), act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n        super(TargetQueryDecoderLayer, self).__init__()\n        self.norm_1 = norm_layer(dim)\n\n        self.self_attn1 = nn.MultiheadAttention(dim, num_heads, dropout=drop)\n        self.norm_2_query = norm_layer(dim)\n        self.norm_2_memory = norm_layer(dim)\n\n        self.multihead_attn = nn.MultiheadAttention(dim, num_heads, dropout=drop)\n        self.norm_3 = norm_layer(dim)\n        mlp_hidden_dim = int(dim * mlp_ratio)\n        self.mlpz = Mlp(dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n        self.drop_path = drop_path\n\n    def forward(self, query, memoryz, memoryx, query_pos, pos_z, pos_x, identity, tgt_mask: Optional[Tensor] = None,\n                memory_mask: Optional[Tensor] = None,\n                tgt_key_padding_mask: Optional[Tensor] = None,\n                memory_key_padding_mask: Optional[Tensor] = None,\n                ):\n        '''\n            Args:\n                query (torch.Tensor): (B, num_queries, C)\n                memory (torch.Tensor): (B, L, C)\n                query_pos 
(torch.Tensor): (1 or B, num_queries, C)\n                memory_pos (torch.Tensor): (1 or B, L, C)\n            Returns:\n                torch.Tensor: (B, num_queries, C)\n        '''\n        tgt = query\n        q = k = self.norm_1(query) + query_pos\n        query = query + self.drop_path(self.self_attn1(q, k, value=tgt, attn_mask=tgt_mask,\n                                                       key_padding_mask=tgt_key_padding_mask)[0])\n        q2 = self.norm_2_query(query) + query_pos\n        memory = torch.cat((memoryz, memoryx), dim=1)\n\n        pos = torch.cat((pos_z, pos_x), dim=1)\n        ide = torch.cat((identity[:, 0, :].repeat(1, pos_z.shape[1], 1), identity[:, 1, :].repeat(1, pos_x.shape[1], 1)), dim=1)\n\n        k2 = (self.norm_2_memory(memory) + pos + ide).permute(1, 0, 2)\n        memory_in = memory.permute(1, 0, 2)\n        query = query + self.drop_path(\n            self.multihead_attn(query=q2, key=k2, value=memory_in, attn_mask=memory_mask,\n                                key_padding_mask=memory_key_padding_mask)[0])\n        query = query + self.drop_path(self.mlpz(self.norm_3(query)))\n\n        return query\n\ndef _get_clones(module, N):\n    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\nclass TargetQueryDecoderBlock(nn.Module):\n    def __init__(self, dim, decoder_layers, num_layer):\n        super(TargetQueryDecoderBlock, self).__init__()\n        self.layers = nn.ModuleList(decoder_layers)\n        self.num_layers = num_layer\n        self.norm = nn.LayerNorm(dim)\n\n    def forward(self, tgt, z, x, pos_z, pos_x, identity, query_pos: Optional[Tensor] = None,\n                tgt_mask: Optional[Tensor] = None,\n                memory_mask: Optional[Tensor] = None,\n                tgt_key_padding_mask: Optional[Tensor] = None,\n                memory_key_padding_mask: Optional[Tensor] = None):\n        '''\n            Args:\n                z (torch.Tensor): (B, L_z, C)\n                x (torch.Tensor): (B, L_x, C)\n            Returns:\n                torch.Tensor: (B, num_queries, C)\n        '''\n        output = tgt\n        for layer in self.layers:\n            output = layer(output, z, x, query_pos, pos_z, pos_x, identity,\n                           tgt_mask=tgt_mask,\n                           memory_mask=memory_mask,\n                           tgt_key_padding_mask=tgt_key_padding_mask,\n                           memory_key_padding_mask=memory_key_padding_mask)\n        output = self.norm(output)\n\n        return output\n\ndef build_decoder(decoder_layer, drop_path, dim, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate, z_size, x_size):\n    z_shape = [z_size, z_size]\n    x_shape = [x_size, x_size]\n    num_layers = decoder_layer\n    decoder_layers = []\n    for _ in range(num_layers):\n        decoder_layers.append(\n            TargetQueryDecoderLayer(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate,\n                                    drop_path=drop_path.allocate()))\n        drop_path.increase_depth()\n\n    decoder = TargetQueryDecoderBlock(dim, decoder_layers, num_layers)\n    return decoder\n\ndef generate_square_subsequent_mask(sz):\n    r\"\"\"Generate a square mask for the sequence. 
The masked positions are filled with float('-inf').\n        Unmasked positions are filled with float(0.0).\n    \"\"\"\n    mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n    mask = mask.float().masked_fill(mask == 0, float(\n        '-inf')).masked_fill(mask == 1, float(0.0))\n    return mask\n\nclass Pix2Track(nn.Module):\n    def __init__(self, in_channel=64, feat_sz=20, feat_tz=10, range=2, stride=16, encoder_layer=3, decoder_layer=3,\n                 bins=400, num_heads=12, mlp_ratio=2, qkv_bias=True, drop_rate=0.0, attn_drop=0.0, drop_path=nn.Identity):\n        super(Pix2Track, self).__init__()\n        self.bins = bins\n        self.range = range\n        self.word_embeddings = nn.Embedding(self.bins * self.range + 2, in_channel, padding_idx=self.bins * self.range, max_norm=1, norm_type=2.0)\n        print(self.bins)\n        self.position_embeddings = nn.Embedding(\n            5, in_channel)\n        self.output_bias = torch.nn.Parameter(torch.zeros(self.bins * self.range + 2))\n\n        self.encoder_layer = encoder_layer\n        self.drop_path = drop_path\n        self.tz = feat_tz * feat_tz\n        self.sz = feat_sz * feat_sz\n        trunc_normal_(self.word_embeddings.weight, std=.02)\n        if self.encoder_layer > 0:\n            self.encoder = build_encoder(encoder_layer, num_heads, mlp_ratio, qkv_bias,\n                        drop_rate, attn_drop, in_channel, feat_tz, feat_sz, self.drop_path)\n        else:\n            self.encoder = None\n        self.decoder = build_decoder(decoder_layer, self.drop_path, in_channel, num_heads,\n                                     mlp_ratio, qkv_bias, drop_rate, attn_drop, feat_tz, feat_sz)\n\n    def forward(self, zx_feat, pos_z, pos_x, identity, seqs_input=None):\n        share_weight = self.word_embeddings.weight.T\n\n        z_feat = zx_feat[:, :self.tz]\n        x_feat = zx_feat[:, self.tz:]\n\n        bs = zx_feat.shape[0]\n        if self.encoder is not None:\n            z_feat, x_feat = self.encoder(z_feat, x_feat, None, None)\n        if seqs_input is not None:\n            seqs_input = seqs_input.to(torch.int64).to(zx_feat.device)\n            tgt = self.word_embeddings(seqs_input).permute(1, 0, 2)\n            query_embed = self.position_embeddings.weight.unsqueeze(1)\n            query_embed = query_embed.repeat(1, bs, 1)\n            decoder_feat = self.decoder(tgt, z_feat, x_feat, pos_z, pos_x, identity, query_embed,\n                                        tgt_mask=generate_square_subsequent_mask(len(tgt)).to(tgt.device))\n\n            at = torch.matmul(decoder_feat, share_weight)\n            at = at + self.output_bias\n            output = {'feat': at, \"state\": \"train\"}\n        else:\n            origin_seq = torch.ones(bs, 1) * self.bins * self.range\n            seqs_input = origin_seq.to(zx_feat.device).to(torch.int64)\n            for i in range(4):\n                tgt = self.word_embeddings(seqs_input).permute(1, 0, 2)\n                query_embed = self.position_embeddings.weight.unsqueeze(1)\n                query_embed = query_embed.repeat(1, bs, 1)\n                decoder_feat_cls = self.decoder(tgt, z_feat, x_feat, pos_z, pos_x, identity, query_embed[:len(tgt)],\n                                                tgt_mask=generate_square_subsequent_mask(len(tgt)).to(tgt.device))\n\n                out = torch.matmul(decoder_feat_cls.transpose(0, 1)[:, -1, :], share_weight) + self.output_bias\n\n                out = out.softmax(-1)\n                value, extra_seq = out.topk(dim=-1, k=1)\n                seqs_input = torch.cat([seqs_input, extra_seq], dim=-1)\n                if i == 0:\n                    seqs_output = extra_seq\n                    values = value\n                else:\n                    seqs_output = torch.cat([seqs_output, extra_seq], dim=-1)\n                    values = torch.cat([values, value], dim=-1)\n            output = {'seqs': seqs_output, 'class': values, \"state\": \"val/test\"}\n\n        return output\n\n\ndef build_pix_head(cfg, hidden_dim):\n    stride = cfg.MODEL.BACKBONE.STRIDE\n\n    if cfg.MODEL.HEAD.TYPE == \"MLP\":\n        mlp_head = MLP(hidden_dim, hidden_dim, 4, 3)  # dim_in, dim_hidden, dim_out, 3 layers\n        return mlp_head\n    elif \"CORNER\" in cfg.MODEL.HEAD.TYPE:\n        feat_sz = int(cfg.DATA.SEARCH.SIZE / stride)\n        channel = getattr(cfg.MODEL, \"NUM_CHANNELS\", 256)\n        print(\"head channel: %d\" % channel)\n        if cfg.MODEL.HEAD.TYPE == \"CORNER\":\n            corner_head = Corner_Predictor(inplanes=cfg.MODEL.HIDDEN_DIM, channel=channel,\n                                           feat_sz=feat_sz, stride=stride)\n        else:\n            raise ValueError()\n        return corner_head\n    elif cfg.MODEL.HEAD.TYPE == \"CENTER\":\n        in_channel = hidden_dim\n        out_channel = cfg.MODEL.HEAD.NUM_CHANNELS\n        feat_sz = int(cfg.DATA.SEARCH.SIZE / stride)\n        center_head = CenterPredictor(inplanes=in_channel, channel=out_channel,\n                                      feat_sz=feat_sz, stride=stride)\n        return center_head\n    elif cfg.MODEL.HEAD.TYPE == \"PIX\":\n        in_channel = hidden_dim\n        feat_sz = int(cfg.DATA.SEARCH.SIZE / stride)\n        feat_tz = int(cfg.DATA.TEMPLATE.SIZE / stride)\n        decoder_layer = cfg.MODEL.DECODER_LAYER\n        encoder_layer = cfg.MODEL.ENCODER_LAYER\n        bins = cfg.MODEL.BINS\n        num_heads = cfg.MODEL.NUM_HEADS\n        mlp_ratio = cfg.MODEL.MLP_RATIO\n        qkv_bias = cfg.MODEL.QKV_BIAS\n        drop_rate = cfg.MODEL.DROP_RATE\n        attn_drop = cfg.MODEL.ATTN_DROP\n        drop_path = cfg.MODEL.DROP_PATH\n        drop_path_allocator = DropPathAllocator(drop_path)\n        range = cfg.MODEL.RANGE\n        pix_head = Pix2Track(in_channel=in_channel, feat_sz=feat_sz, feat_tz=feat_tz, range=range,\n                             stride=stride, encoder_layer=encoder_layer, decoder_layer=decoder_layer, bins=bins,\n                             num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_rate=drop_rate,\n                             attn_drop=attn_drop, drop_path=drop_path_allocator)\n        return pix_head\n    else:\n        raise ValueError(\"HEAD TYPE %s is not supported.\" % cfg.MODEL.HEAD.TYPE)\n"
  },
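  {
    "path": "docs/sketches/causal_mask_demo.py",
    "content": "# Editor's illustrative sketch -- NOT part of the original repository.\n# It re-creates the generate_square_subsequent_mask() helper from the pixel\n# head file above (the same helper also appears in lib/models/layers/head_seq.py)\n# to show the additive causal mask used during autoregressive decoding:\n# token i may attend to tokens 0..i, while future positions are suppressed\n# with -inf before the softmax.\nimport torch\n\n\ndef generate_square_subsequent_mask(sz):\n    # Lower-triangular True pattern -> 0.0 where attention is allowed, -inf elsewhere.\n    mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n    return mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n\n\nif __name__ == '__main__':\n    print(generate_square_subsequent_mask(4))\n    # tensor([[0., -inf, -inf, -inf],\n    #         [0., 0., -inf, -inf],\n    #         [0., 0., 0., -inf],\n    #         [0., 0., 0., 0.]])\n"
  },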
  {
    "path": "lib/models/layers/head_seq.py",
    "content": "import torch.nn as nn\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom typing import Optional\r\nfrom torch import Tensor\r\nfrom torch.nn import Identity\r\nfrom timm.models.layers import trunc_normal_\r\nfrom timm.models.layers import DropPath\r\nfrom lib.models.layers.frozen_bn import FrozenBatchNorm2d\r\nimport copy\r\n\r\n\r\ndef top_k_top_p_filtering_batch(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):\r\n    \"\"\" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering\r\n        Args:\r\n            logits: logits distribution shape (vocabulary size)\r\n            top_k > 0: keep only top k tokens with highest probability (top-k filtering).\r\n            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).\r\n                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)\r\n        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317\r\n    \"\"\"\r\n    top_k = min(top_k, logits.size(-1))  # Safety check\r\n    if top_k > 0:\r\n        # Remove all tokens with a probability less than the last token of the top-k\r\n        # torch.topk()返回最后一维最大的top_k个元素，返回值为二维(values,indices)\r\n        # ...表示其他维度由计算机自行推断\r\n        for i in range(logits.shape[0]):\r\n            indices_to_remove = logits[i] < torch.topk(logits[i], top_k)[0][..., -1, None]\r\n            logits[i][indices_to_remove] = filter_value  # 对于topk之外的其他元素的logits值设为负无穷\r\n\r\n    if top_p > 0.0:\r\n        for i in range(logits.shape[0]):\r\n            sorted_logits, sorted_indices = torch.sort(logits[i], descending=True)  # 对logits进行递减排序\r\n            cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\r\n\r\n            # Remove tokens with cumulative probability above the threshold\r\n            sorted_indices_to_remove = cumulative_probs > top_p\r\n            # Shift the indices to the right to keep also the first token above the threshold\r\n            sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\r\n            sorted_indices_to_remove[..., 0] = 0\r\n\r\n            indices_to_remove = sorted_indices[sorted_indices_to_remove]\r\n            logits[i][indices_to_remove] = filter_value\r\n    return logits\r\n\r\n\r\ndef conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1,\r\n         freeze_bn=False):\r\n    if freeze_bn:\r\n        return nn.Sequential(\r\n            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\r\n                      padding=padding, dilation=dilation, bias=True),\r\n            FrozenBatchNorm2d(out_planes),\r\n            nn.ReLU(inplace=True))\r\n    else:\r\n        return nn.Sequential(\r\n            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,\r\n                      padding=padding, dilation=dilation, bias=True),\r\n            nn.BatchNorm2d(out_planes),\r\n            nn.ReLU(inplace=True))\r\n\r\n\r\nclass Corner_Predictor(nn.Module):\r\n    \"\"\" Corner Predictor module\"\"\"\r\n\r\n    def __init__(self, inplanes=64, channel=256, feat_sz=20, stride=16, freeze_bn=False):\r\n        super(Corner_Predictor, self).__init__()\r\n        self.feat_sz = feat_sz\r\n        self.stride = stride\r\n        self.img_sz = self.feat_sz * self.stride\r\n        '''top-left corner'''\r\n        self.conv1_tl = conv(inplanes, channel, freeze_bn=freeze_bn)\r\n        self.conv2_tl = conv(channel, channel // 2, 
freeze_bn=freeze_bn)\r\n        self.conv3_tl = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\r\n        self.conv4_tl = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\r\n        self.conv5_tl = nn.Conv2d(channel // 8, 1, kernel_size=1)\r\n\r\n        '''bottom-right corner'''\r\n        self.conv1_br = conv(inplanes, channel, freeze_bn=freeze_bn)\r\n        self.conv2_br = conv(channel, channel // 2, freeze_bn=freeze_bn)\r\n        self.conv3_br = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\r\n        self.conv4_br = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\r\n        self.conv5_br = nn.Conv2d(channel // 8, 1, kernel_size=1)\r\n\r\n        '''about coordinates and indices'''\r\n        with torch.no_grad():\r\n            self.indice = torch.arange(0, self.feat_sz).view(-1, 1) * self.stride\r\n            # generate mesh-grid\r\n            self.coord_x = self.indice.repeat((self.feat_sz, 1)) \\\r\n                .view((self.feat_sz * self.feat_sz,)).float().cuda()\r\n            self.coord_y = self.indice.repeat((1, self.feat_sz)) \\\r\n                .view((self.feat_sz * self.feat_sz,)).float().cuda()\r\n\r\n    def forward(self, x, return_dist=False, softmax=True):\r\n        \"\"\" Forward pass with input x. \"\"\"\r\n        score_map_tl, score_map_br = self.get_score_map(x)\r\n        if return_dist:\r\n            coorx_tl, coory_tl, prob_vec_tl = self.soft_argmax(score_map_tl, return_dist=True, softmax=softmax)\r\n            coorx_br, coory_br, prob_vec_br = self.soft_argmax(score_map_br, return_dist=True, softmax=softmax)\r\n            return torch.stack((coorx_tl, coory_tl, coorx_br, coory_br), dim=1) / self.img_sz, prob_vec_tl, prob_vec_br\r\n        else:\r\n            coorx_tl, coory_tl = self.soft_argmax(score_map_tl)\r\n            coorx_br, coory_br = self.soft_argmax(score_map_br)\r\n            return torch.stack((coorx_tl, coory_tl, coorx_br, coory_br), dim=1) / self.img_sz\r\n\r\n    def get_score_map(self, x):\r\n        # top-left branch\r\n        x_tl1 = self.conv1_tl(x)\r\n        x_tl2 = self.conv2_tl(x_tl1)\r\n        x_tl3 = self.conv3_tl(x_tl2)\r\n        x_tl4 = self.conv4_tl(x_tl3)\r\n        score_map_tl = self.conv5_tl(x_tl4)\r\n\r\n        # bottom-right branch\r\n        x_br1 = self.conv1_br(x)\r\n        x_br2 = self.conv2_br(x_br1)\r\n        x_br3 = self.conv3_br(x_br2)\r\n        x_br4 = self.conv4_br(x_br3)\r\n        score_map_br = self.conv5_br(x_br4)\r\n        return score_map_tl, score_map_br\r\n\r\n    def soft_argmax(self, score_map, return_dist=False, softmax=True):\r\n        \"\"\" get soft-argmax coordinate for a given heatmap \"\"\"\r\n        score_vec = score_map.view((-1, self.feat_sz * self.feat_sz))  # (batch, feat_sz * feat_sz)\r\n        prob_vec = nn.functional.softmax(score_vec, dim=1)\r\n        exp_x = torch.sum((self.coord_x * prob_vec), dim=1)\r\n        exp_y = torch.sum((self.coord_y * prob_vec), dim=1)\r\n        if return_dist:\r\n            if softmax:\r\n                return exp_x, exp_y, prob_vec\r\n            else:\r\n                return exp_x, exp_y, score_vec\r\n        else:\r\n            return exp_x, exp_y\r\n\r\n\r\nclass CenterPredictor(nn.Module):\r\n    def __init__(self, inplanes=64, channel=256, feat_sz=20, stride=16, freeze_bn=False):\r\n        super(CenterPredictor, self).__init__()\r\n        self.feat_sz = feat_sz\r\n        self.stride = stride\r\n        self.img_sz = self.feat_sz * self.stride\r\n\r\n        # center predict\r\n        
self.conv1_ctr = conv(inplanes, channel, freeze_bn=freeze_bn)\r\n        self.conv2_ctr = conv(channel, channel // 2, freeze_bn=freeze_bn)\r\n        self.conv3_ctr = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\r\n        self.conv4_ctr = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\r\n        self.conv5_ctr = nn.Conv2d(channel // 8, 1, kernel_size=1)\r\n\r\n        # offset regress\r\n        self.conv1_offset = conv(inplanes, channel, freeze_bn=freeze_bn)\r\n        self.conv2_offset = conv(channel, channel // 2, freeze_bn=freeze_bn)\r\n        self.conv3_offset = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\r\n        self.conv4_offset = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\r\n        self.conv5_offset = nn.Conv2d(channel // 8, 2, kernel_size=1)\r\n\r\n        # size regress\r\n        self.conv1_size = conv(inplanes, channel, freeze_bn=freeze_bn)\r\n        self.conv2_size = conv(channel, channel // 2, freeze_bn=freeze_bn)\r\n        self.conv3_size = conv(channel // 2, channel // 4, freeze_bn=freeze_bn)\r\n        self.conv4_size = conv(channel // 4, channel // 8, freeze_bn=freeze_bn)\r\n        self.conv5_size = nn.Conv2d(channel // 8, 2, kernel_size=1)\r\n\r\n        for p in self.parameters():\r\n            if p.dim() > 1:\r\n                nn.init.xavier_uniform_(p)\r\n\r\n    def forward(self, x, gt_score_map=None):\r\n        \"\"\" Forward pass with input x. \"\"\"\r\n        score_map_ctr, size_map, offset_map = self.get_score_map(x)\r\n\r\n        # assert gt_score_map is None\r\n        if gt_score_map is None:\r\n            bbox = self.cal_bbox(score_map_ctr, size_map, offset_map)\r\n        else:\r\n            bbox = self.cal_bbox(gt_score_map.unsqueeze(1), size_map, offset_map)\r\n\r\n        return score_map_ctr, bbox, size_map, offset_map\r\n\r\n    def cal_bbox(self, score_map_ctr, size_map, offset_map, return_score=False):\r\n        max_score, idx = torch.max(score_map_ctr.flatten(1), dim=1, keepdim=True)\r\n        idx_y = idx // self.feat_sz\r\n        idx_x = idx % self.feat_sz\r\n\r\n        idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)\r\n        size = size_map.flatten(2).gather(dim=2, index=idx)\r\n        offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)\r\n\r\n        # bbox = torch.cat([idx_x - size[:, 0] / 2, idx_y - size[:, 1] / 2,\r\n        #                   idx_x + size[:, 0] / 2, idx_y + size[:, 1] / 2], dim=1) / self.feat_sz\r\n        # cx, cy, w, h\r\n        bbox = torch.cat([(idx_x.to(torch.float) + offset[:, :1]) / self.feat_sz,\r\n                          (idx_y.to(torch.float) + offset[:, 1:]) / self.feat_sz,\r\n                          size.squeeze(-1)], dim=1)\r\n\r\n        if return_score:\r\n            return bbox, max_score\r\n        return bbox\r\n\r\n    def get_pred(self, score_map_ctr, size_map, offset_map):\r\n        max_score, idx = torch.max(score_map_ctr.flatten(1), dim=1, keepdim=True)\r\n        idx_y = idx // self.feat_sz\r\n        idx_x = idx % self.feat_sz\r\n\r\n        idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)\r\n        size = size_map.flatten(2).gather(dim=2, index=idx)\r\n        offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)\r\n\r\n        # bbox = torch.cat([idx_x - size[:, 0] / 2, idx_y - size[:, 1] / 2,\r\n        #                   idx_x + size[:, 0] / 2, idx_y + size[:, 1] / 2], dim=1) / self.feat_sz\r\n        return size * self.feat_sz, offset\r\n\r\n    def get_score_map(self, x):\r\n\r\n        def 
_sigmoid(x):\r\n            y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)\r\n            return y\r\n\r\n        # ctr branch\r\n        x_ctr1 = self.conv1_ctr(x)\r\n        x_ctr2 = self.conv2_ctr(x_ctr1)\r\n        x_ctr3 = self.conv3_ctr(x_ctr2)\r\n        x_ctr4 = self.conv4_ctr(x_ctr3)\r\n        score_map_ctr = self.conv5_ctr(x_ctr4)\r\n\r\n        # offset branch\r\n        x_offset1 = self.conv1_offset(x)\r\n        x_offset2 = self.conv2_offset(x_offset1)\r\n        x_offset3 = self.conv3_offset(x_offset2)\r\n        x_offset4 = self.conv4_offset(x_offset3)\r\n        score_map_offset = self.conv5_offset(x_offset4)\r\n\r\n        # size branch\r\n        x_size1 = self.conv1_size(x)\r\n        x_size2 = self.conv2_size(x_size1)\r\n        x_size3 = self.conv3_size(x_size2)\r\n        x_size4 = self.conv4_size(x_size3)\r\n        score_map_size = self.conv5_size(x_size4)\r\n        return _sigmoid(score_map_ctr), _sigmoid(score_map_size), score_map_offset\r\n\r\n\r\nclass MLP(nn.Module):\r\n    \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\r\n\r\n    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, BN=False):\r\n        super().__init__()\r\n        self.num_layers = num_layers\r\n        h = [hidden_dim] * (num_layers - 1)\r\n        if BN:\r\n            self.layers = nn.ModuleList(nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k))\r\n                                        for n, k in zip([input_dim] + h, h + [output_dim]))\r\n        else:\r\n            self.layers = nn.ModuleList(nn.Linear(n, k)\r\n                                        for n, k in zip([input_dim] + h, h + [output_dim]))\r\n\r\n    def forward(self, x):\r\n        for i, layer in enumerate(self.layers):\r\n            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\r\n        return x\r\n\r\n\r\nclass SelfAttention(nn.Module):\r\n    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,\r\n                 attn_pos_encoding_only=False):\r\n        super(SelfAttention, self).__init__()\r\n        assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\r\n\r\n        self.dim = dim\r\n        self.num_heads = num_heads\r\n        head_dim = dim // num_heads\r\n        self.scale = qk_scale or head_dim ** -0.5\r\n\r\n        if attn_pos_encoding_only:\r\n            self.qkv = nn.Linear(dim, 3 * dim, bias=qkv_bias)\r\n        else:\r\n            self.q = nn.Linear(dim, dim, bias=qkv_bias)\r\n            self.k = nn.Linear(dim, dim, bias=qkv_bias)\r\n            self.v = nn.Linear(dim, dim, bias=qkv_bias)\r\n        self.attn_drop = nn.Dropout(attn_drop)\r\n        self.proj = nn.Linear(dim, dim)\r\n        self.proj_drop = nn.Dropout(proj_drop)\r\n\r\n        self.attn_pos_encoding_only = attn_pos_encoding_only\r\n\r\n    def forward(self, x, q_ape, k_ape, attn_pos):\r\n        '''\r\n            Args:\r\n                x (torch.Tensor): (B, L, C)\r\n                q_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for q\r\n                k_ape (torch.Tensor | None): (1 or B, L, C), absolute positional encoding for k\r\n                attn_pos (torch.Tensor | None): (1 or B, num_heads, L, L), untied positional encoding\r\n            Returns:\r\n                torch.Tensor: (B, L, C)\r\n        '''\r\n        B, N, C = x.shape\r\n\r\n        if self.attn_pos_encoding_only:\r\n            assert q_ape is None and k_ape is None\r\n            qkv = 
self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\r\n            q, k, v = qkv[0], qkv[1], qkv[2]\r\n        else:\r\n            q = x + q_ape if q_ape is not None else x\r\n            q = self.q(q).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\r\n\r\n            k = x + k_ape if k_ape is not None else x\r\n            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\r\n            v = self.v(x).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\r\n\r\n        attn = q @ k.transpose(-2, -1)\r\n        attn = attn * self.scale\r\n        if attn_pos is not None:\r\n            attn = attn + attn_pos\r\n        attn = attn.softmax(dim=-1)\r\n        attn = self.attn_drop(attn)\r\n\r\n        x = attn @ v\r\n        x = x.transpose(1, 2).reshape(B, N, C)\r\n        x = self.proj(x)\r\n        x = self.proj_drop(x)\r\n\r\n        return x\r\n\r\n\r\nclass CrossAttention(nn.Module):\r\n    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,\r\n                 attn_pos_encoding_only=False):\r\n        super(CrossAttention, self).__init__()\r\n        assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\r\n\r\n        self.dim = dim\r\n        self.num_heads = num_heads\r\n        head_dim = dim // num_heads\r\n        self.scale = qk_scale or head_dim ** -0.5\r\n\r\n        if attn_pos_encoding_only:\r\n            self.q = nn.Linear(dim, dim, bias=qkv_bias)\r\n            self.kv = nn.Linear(dim, 2 * dim, bias=qkv_bias)\r\n        else:\r\n            self.q = nn.Linear(dim, dim, bias=qkv_bias)\r\n            self.k = nn.Linear(dim, dim, bias=qkv_bias)\r\n            self.v = nn.Linear(dim, dim, bias=qkv_bias)\r\n        self.attn_drop = nn.Dropout(attn_drop)\r\n        self.proj = nn.Linear(dim, dim)\r\n        self.proj_drop = nn.Dropout(proj_drop)\r\n\r\n        self.attn_pos_encoding_only = attn_pos_encoding_only\r\n\r\n    def forward(self, q, kv, q_ape, k_ape, attn_pos):\r\n        '''\r\n            Args:\r\n                q (torch.Tensor): (B, L_q, C)\r\n                kv (torch.Tensor): (B, L_kv, C)\r\n                q_ape (torch.Tensor | None): (1 or B, L_q, C), absolute positional encoding for q\r\n                k_ape (torch.Tensor | None): (1 or B, L_kv, C), absolute positional encoding for k\r\n                attn_pos (torch.Tensor | None): (1 or B, num_heads, L_q, L_kv), untied positional encoding\r\n            Returns:\r\n                torch.Tensor: (B, L_q, C)\r\n        '''\r\n        B, q_N, C = q.shape\r\n        kv_N = kv.shape[1]\r\n\r\n        if self.attn_pos_encoding_only:\r\n            assert q_ape is None and k_ape is None\r\n            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\r\n            kv = self.kv(kv).reshape(B, kv_N, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\r\n            k, v = kv[0], kv[1]\r\n        else:\r\n            q = q + q_ape if q_ape is not None else q\r\n            q = self.q(q).reshape(B, q_N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\r\n            k = kv + k_ape if k_ape is not None else kv\r\n            k = self.k(k).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\r\n            v = self.v(kv).reshape(B, -1, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)\r\n\r\n        attn = q @ k.transpose(-2, -1)\r\n        attn = 
attn * self.scale\r\n        if attn_pos is not None:\r\n            attn = attn + attn_pos\r\n        attn = attn.softmax(dim=-1)\r\n        attn = self.attn_drop(attn)\r\n        x = attn @ v\r\n        x = x.transpose(1, 2).reshape(B, q_N, C)\r\n        x = self.proj(x)\r\n        x = self.proj_drop(x)\r\n\r\n        return x\r\n\r\n\r\nclass Mlp(nn.Module):\r\n    \"\"\" Multilayer perceptron.\"\"\"\r\n\r\n    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\r\n        super().__init__()\r\n        out_features = out_features or in_features\r\n        hidden_features = hidden_features or in_features\r\n        self.fc1 = nn.Linear(in_features, hidden_features)\r\n        self.act = act_layer()\r\n        self.fc2 = nn.Linear(hidden_features, out_features)\r\n        self.drop = nn.Dropout(drop)\r\n\r\n    def forward(self, x):\r\n        '''\r\n            Args:\r\n                x (torch.Tensor): (B, L, C), input tensor\r\n            Returns:\r\n                torch.Tensor: (B, L, C), output tensor\r\n        '''\r\n        x = self.fc1(x)\r\n        x = self.act(x)\r\n        x = self.drop(x)\r\n        x = self.fc2(x)\r\n        x = self.drop(x)\r\n        return x\r\n\r\n\r\nclass FeatureFusion(nn.Module):\r\n    def __init__(self,\r\n                 dim, num_heads, mlp_ratio=2., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\r\n                 drop_path=nn.Identity(), act_layer=nn.GELU, norm_layer=nn.LayerNorm, attn_pos_encoding_only=False):\r\n        super(FeatureFusion, self).__init__()\r\n        self.z_norm1 = norm_layer(dim)\r\n        self.x_norm1 = norm_layer(dim)\r\n        self.z_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)\r\n        self.x_self_attn = SelfAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop, attn_pos_encoding_only)\r\n\r\n        self.z_norm2_1 = norm_layer(dim)\r\n        self.z_norm2_2 = norm_layer(dim)\r\n        self.x_norm2_1 = norm_layer(dim)\r\n        self.x_norm2_2 = norm_layer(dim)\r\n\r\n        self.z_x_cross_attention = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop,\r\n                                                  attn_pos_encoding_only)\r\n        self.x_z_cross_attention = CrossAttention(dim, num_heads, qkv_bias, qk_scale, attn_drop, drop,\r\n                                                  attn_pos_encoding_only)\r\n\r\n        mlp_hidden_dim = int(dim * mlp_ratio)\r\n        self.z_norm3 = norm_layer(dim)\r\n        self.x_norm3 = norm_layer(dim)\r\n        print(mlp_ratio)\r\n        self.z_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\r\n        self.x_mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\r\n\r\n        self.drop_path = drop_path\r\n\r\n    def forward(self, z, x, z_self_attn_pos, x_self_attn_pos, z_x_cross_attn_pos, x_z_cross_attn_pos):\r\n        z = z + self.drop_path(self.z_self_attn(self.z_norm1(z), None, None, z_self_attn_pos))\r\n        x = x + self.drop_path(self.x_self_attn(self.x_norm1(x), None, None, x_self_attn_pos))\r\n\r\n        z = z + self.drop_path(\r\n            self.z_x_cross_attention(self.z_norm2_1(z), self.x_norm2_1(x), None, None, z_x_cross_attn_pos))\r\n        x = x + self.drop_path(\r\n            self.x_z_cross_attention(self.x_norm2_2(x), self.z_norm2_2(z), None, None, x_z_cross_attn_pos))\r\n\r\n        z = z + 
self.drop_path(self.z_mlp(self.z_norm3(z)))\r\n        x = x + self.drop_path(self.x_mlp(self.x_norm3(x)))\r\n        return z, x\r\n\r\n\r\nclass FeatureFusionEncoder(nn.Module):\r\n    def __init__(self, feature_fusion_layers, z_pos_enc, x_pos_enc,\r\n                 z_rel_pos_index, x_rel_pos_index, z_x_rel_pos_index, x_z_rel_pos_index,\r\n                 z_rel_pos_bias_table, x_rel_pos_bias_table, z_x_rel_pos_bias_table, x_z_rel_pos_bias_table):\r\n        super(FeatureFusionEncoder, self).__init__()\r\n        self.layers = nn.ModuleList(feature_fusion_layers)\r\n        self.z_pos_enc = z_pos_enc\r\n        self.x_pos_enc = x_pos_enc\r\n        self.register_buffer('z_rel_pos_index', z_rel_pos_index, False)\r\n        self.register_buffer('x_rel_pos_index', x_rel_pos_index, False)\r\n        self.register_buffer('z_x_rel_pos_index', z_x_rel_pos_index, False)\r\n        self.register_buffer('x_z_rel_pos_index', x_z_rel_pos_index, False)\r\n        self.z_rel_pos_bias_table = z_rel_pos_bias_table\r\n        self.x_rel_pos_bias_table = x_rel_pos_bias_table\r\n        self.z_x_rel_pos_bias_table = z_x_rel_pos_bias_table\r\n        self.x_z_rel_pos_bias_table = x_z_rel_pos_bias_table\r\n\r\n    def forward(self, z, x, z_pos, x_pos):\r\n        '''\r\n            Args:\r\n                z (torch.Tensor): (B, L_z, C), template image feature tokens\r\n                x (torch.Tensor): (B, L_x, C), search image feature tokens\r\n                z_pos (torch.Tensor | None): (1 or B, L_z, C), optional positional encoding for z\r\n                x_pos (torch.Tensor | None): (1 or B, L_x, C), optional positional encoding for x\r\n            Returns:\r\n                Tuple[torch.Tensor, torch.Tensor]:\r\n                    (B, L_z, C): template image feature tokens\r\n                    (B, L_x, C): search image feature tokens\r\n        '''\r\n        # Support untied positional encoding only for simplicity\r\n        assert z_pos is None and x_pos is None\r\n\r\n        # untied positional encoding\r\n        z_q_pos, z_k_pos = self.z_pos_enc()\r\n        x_q_pos, x_k_pos = self.x_pos_enc()\r\n        z_self_attn_pos = (z_q_pos @ z_k_pos.transpose(-2, -1)).unsqueeze(0)\r\n        x_self_attn_pos = (x_q_pos @ x_k_pos.transpose(-2, -1)).unsqueeze(0)\r\n\r\n        z_x_cross_attn_pos = (z_q_pos @ x_k_pos.transpose(-2, -1)).unsqueeze(0)\r\n        x_z_cross_attn_pos = (x_q_pos @ z_k_pos.transpose(-2, -1)).unsqueeze(0)\r\n\r\n        # relative positional encoding\r\n        z_self_attn_pos = z_self_attn_pos + self.z_rel_pos_bias_table(self.z_rel_pos_index)\r\n        x_self_attn_pos = x_self_attn_pos + self.x_rel_pos_bias_table(self.x_rel_pos_index)\r\n        z_x_cross_attn_pos = z_x_cross_attn_pos + self.z_x_rel_pos_bias_table(self.z_x_rel_pos_index)\r\n        x_z_cross_attn_pos = x_z_cross_attn_pos + self.x_z_rel_pos_bias_table(self.x_z_rel_pos_index)\r\n\r\n        for layer in self.layers:\r\n            z, x = layer(z, x, z_self_attn_pos, x_self_attn_pos, z_x_cross_attn_pos, x_z_cross_attn_pos)\r\n\r\n        return z, x\r\n\r\n\r\nclass Learned2DPositionalEncoder(nn.Module):\r\n    def __init__(self, dim, w, h):\r\n        super(Learned2DPositionalEncoder, self).__init__()\r\n        self.w_pos = nn.Parameter(torch.empty(w, dim))\r\n        self.h_pos = nn.Parameter(torch.empty(h, dim))\r\n        trunc_normal_(self.w_pos, std=0.02)\r\n        trunc_normal_(self.h_pos, std=0.02)\r\n\r\n    def forward(self):\r\n        w = self.w_pos.shape[0]\r\n        h = self.h_pos.shape[0]\r\n   
     return (self.w_pos[None, :, :] + self.h_pos[:, None, :]).view(h * w, -1)\r\n\r\n\r\nclass Untied2DPositionalEncoder(nn.Module):\r\n    def __init__(self, dim, num_heads, w, h, scale=None, with_q=True, with_k=True):\r\n        super(Untied2DPositionalEncoder, self).__init__()\r\n        assert dim % num_heads == 0, f\"dim {dim} should be divided by num_heads {num_heads}.\"\r\n        self.pos = Learned2DPositionalEncoder(dim, w, h)\r\n        self.norm = nn.LayerNorm(dim)\r\n        self.pos_q_linear = None\r\n        self.pos_k_linear = None\r\n        if with_q:\r\n            self.pos_q_linear = nn.Linear(dim, dim)\r\n        if with_k:\r\n            self.pos_k_linear = nn.Linear(dim, dim)\r\n\r\n        self.num_heads = num_heads\r\n        head_dim = dim // num_heads\r\n        self.scale = scale or head_dim ** -0.5\r\n\r\n    def forward(self):\r\n        pos = self.norm(self.pos())\r\n        seq_len = pos.shape[0]\r\n        if self.pos_q_linear is not None and self.pos_k_linear is not None:\r\n            pos_q = self.pos_q_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1) * self.scale\r\n            pos_k = self.pos_k_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1)\r\n            return pos_q, pos_k\r\n        elif self.pos_q_linear is not None:\r\n            pos_q = self.pos_q_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1) * self.scale\r\n            return pos_q\r\n        elif self.pos_k_linear is not None:\r\n            pos_k = self.pos_k_linear(pos).view(seq_len, self.num_heads, -1).transpose(0, 1)\r\n            return pos_k\r\n        else:\r\n            raise RuntimeError\r\n\r\n\r\ndef generate_2d_relative_positional_encoding_index(z_shape, x_shape):\r\n    '''\r\n        z_shape: (z_h, z_w)\r\n        x_shape: (x_h, x_w)\r\n    '''\r\n    z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1]))\r\n    x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1]))\r\n\r\n    z_2d_index_h = z_2d_index_h.flatten(0)\r\n    z_2d_index_w = z_2d_index_w.flatten(0)\r\n    x_2d_index_h = x_2d_index_h.flatten(0)\r\n    x_2d_index_w = x_2d_index_w.flatten(0)\r\n\r\n    diff_h = z_2d_index_h[:, None] - x_2d_index_h[None, :]\r\n    diff_w = z_2d_index_w[:, None] - x_2d_index_w[None, :]\r\n\r\n    diff = torch.stack((diff_h, diff_w), dim=-1)\r\n    _, indices = torch.unique(diff.view(-1, 2), return_inverse=True, dim=0)\r\n    return indices.view(z_shape[0] * z_shape[1], x_shape[0] * x_shape[1])\r\n\r\n\r\nclass RelativePosition2DEncoder(nn.Module):\r\n    def __init__(self, num_heads, embed_size):\r\n        super(RelativePosition2DEncoder, self).__init__()\r\n        self.relative_position_bias_table = nn.Parameter(torch.empty((num_heads, embed_size)))\r\n        trunc_normal_(self.relative_position_bias_table, std=0.02)\r\n\r\n    def forward(self, attn_rpe_index):\r\n        '''\r\n            Args:\r\n                attn_rpe_index (torch.Tensor): (*), any shape containing indices, max(attn_rpe_index) < embed_size\r\n            Returns:\r\n                torch.Tensor: (1, num_heads, *)\r\n        '''\r\n        return self.relative_position_bias_table[:, attn_rpe_index].unsqueeze(0)\r\n\r\n\r\nclass DropPathAllocator:\r\n    def __init__(self, max_drop_path_rate, stochastic_depth_decay=True):\r\n        self.max_drop_path_rate = max_drop_path_rate\r\n        self.stochastic_depth_decay = stochastic_depth_decay\r\n        self.allocated = []\r\n        
self.allocating = []\r\n\r\n    def __enter__(self):\r\n        self.allocating = []\r\n\r\n    def __exit__(self, exc_type, exc_val, exc_tb):\r\n        if len(self.allocating) != 0:\r\n            self.allocated.append(self.allocating)\r\n        self.allocating = None\r\n        if not self.stochastic_depth_decay:\r\n            for depth_module in self.allocated:\r\n                for module in depth_module:\r\n                    if isinstance(module, DropPath):\r\n                        module.drop_prob = self.max_drop_path_rate\r\n        else:\r\n            depth = self.get_depth()\r\n            dpr = [x.item() for x in torch.linspace(0, self.max_drop_path_rate, depth)]\r\n            assert len(dpr) == len(self.allocated)\r\n            for drop_path_rate, depth_modules in zip(dpr, self.allocated):\r\n                for module in depth_modules:\r\n                    if isinstance(module, DropPath):\r\n                        module.drop_prob = drop_path_rate\r\n\r\n    def __len__(self):\r\n        length = 0\r\n\r\n        for depth_modules in self.allocated:\r\n            length += len(depth_modules)\r\n\r\n        return length\r\n\r\n    def increase_depth(self):\r\n        self.allocated.append(self.allocating)\r\n        self.allocating = []\r\n\r\n    def get_depth(self):\r\n        return len(self.allocated)\r\n\r\n    def allocate(self):\r\n        if self.max_drop_path_rate == 0 or (self.stochastic_depth_decay and self.get_depth() == 0):\r\n            drop_path_module = Identity()\r\n        else:\r\n            drop_path_module = DropPath()\r\n        self.allocating.append(drop_path_module)\r\n        return drop_path_module\r\n\r\n    def get_all_allocated(self):\r\n        allocated = []\r\n        for depth_module in self.allocated:\r\n            for module in depth_module:\r\n                allocated.append(module)\r\n        return allocated\r\n\r\n\r\ndef build_encoder(encoder_layer, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop, dim, z_size, x_size, drop_path):\r\n    z_shape = [z_size, z_size]\r\n    x_shape = [x_size, x_size]\r\n    encoder_layers = []\r\n    for i in range(encoder_layer):\r\n        encoder_layers.append(\r\n            FeatureFusion(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate, attn_drop=attn_drop,\r\n                          drop_path=drop_path.allocate(),\r\n                          attn_pos_encoding_only=True)\r\n        )\r\n    z_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, z_shape[0], z_shape[1])\r\n    x_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, x_shape[0], x_shape[1])\r\n\r\n    z_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, z_shape)\r\n    x_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, x_shape)\r\n\r\n    z_x_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, x_shape)\r\n    x_z_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, z_shape)\r\n\r\n    z_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, z_self_attn_rel_pos_index.max() + 1)\r\n    x_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, x_self_attn_rel_pos_index.max() + 1)\r\n    z_x_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, z_x_cross_attn_rel_pos_index.max() + 1)\r\n    x_z_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, x_z_cross_attn_rel_pos_index.max() + 1)\r\n\r\n    return 
FeatureFusionEncoder(encoder_layers, z_abs_encoder, x_abs_encoder, z_self_attn_rel_pos_index,\r\n                                x_self_attn_rel_pos_index,\r\n                                z_x_cross_attn_rel_pos_index, x_z_cross_attn_rel_pos_index,\r\n                                z_self_attn_rel_pos_bias_table,\r\n                                x_self_attn_rel_pos_bias_table, z_x_cross_attn_rel_pos_bias_table,\r\n                                x_z_cross_attn_rel_pos_bias_table)\r\n\r\n\r\nclass TargetQueryDecoderLayer(nn.Module):\r\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,\r\n                 drop_path=nn.Identity(), act_layer=nn.GELU, norm_layer=nn.LayerNorm):\r\n        super(TargetQueryDecoderLayer, self).__init__()\r\n        self.norm_1 = norm_layer(dim)\r\n        self.self_attn1 = nn.MultiheadAttention(dim, num_heads, dropout=drop)\r\n        self.norm_2_query = norm_layer(dim)\r\n        self.norm_2_memory = norm_layer(dim)\r\n        self.multihead_attn = nn.MultiheadAttention(dim, num_heads, dropout=drop)\r\n        self.norm_3 = norm_layer(dim)\r\n        mlp_hidden_dim = int(dim * mlp_ratio)\r\n        self.mlpz = Mlp(dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\r\n\r\n        self.drop_path = drop_path\r\n\r\n    def forward(self, query, memoryz, memoryx, query_pos, pos_z, pos_x, identity, identity_search,\r\n                tgt_mask: Optional[Tensor] = None,\r\n                memory_mask: Optional[Tensor] = None,\r\n                tgt_key_padding_mask: Optional[Tensor] = None,\r\n                memory_key_padding_mask: Optional[Tensor] = None,\r\n                ):\r\n        '''\r\n            Args:\r\n                query (torch.Tensor): (B, num_queries, C)\r\n                memory (torch.Tensor): (B, L, C)\r\n                query_pos (torch.Tensor): (1 or B, num_queries, C)\r\n                memory_pos (torch.Tensor): (1 or B, L, C)\r\n            Returns:\r\n                torch.Tensor: (B, num_queries, C)\r\n        '''\r\n\r\n        tgt = query\r\n        q = k = self.norm_1(query) + query_pos\r\n        query = query + self.drop_path(self.self_attn1(q, k, value=tgt, attn_mask=tgt_mask,\r\n                                                       key_padding_mask=tgt_key_padding_mask)[0])\r\n        q2 = self.norm_2_query(query) + query_pos\r\n        memory = torch.cat((memoryz, memoryx), dim=1)\r\n\r\n        pos = torch.cat((pos_z, pos_x), dim=1)\r\n\r\n        ide = torch.cat(\r\n            (identity[:, 0, :].repeat(1, pos_z.shape[1], 1), identity[:, 1, :].repeat(1, pos_x.shape[1], 1)), dim=1)\r\n\r\n        k2 = (self.norm_2_memory(memory) + pos + ide).permute(1, 0, 2)\r\n        memory_in = memory.permute(1, 0, 2)\r\n        query = query + self.drop_path(\r\n            self.multihead_attn(query=q2, key=k2, value=memory_in, attn_mask=memory_mask,\r\n                                key_padding_mask=memory_key_padding_mask)[0])\r\n        query = query + self.drop_path(self.mlpz(self.norm_3(query)))\r\n\r\n        return query\r\n\r\n\r\ndef _get_clones(module, N):\r\n    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\r\n\r\n\r\nclass TargetQueryDecoderBlock(nn.Module):\r\n    def __init__(self, dim, decoder_layers, num_layer):\r\n        super(TargetQueryDecoderBlock, self).__init__()\r\n        self.layers = nn.ModuleList(decoder_layers)\r\n        self.num_layers = num_layer\r\n        self.norm = nn.LayerNorm(dim)\r\n\r\n    def forward(self, 
tgt, z, x, pos_z, pos_x, identity, identity_search, query_pos: Optional[Tensor] = None,\r\n                tgt_mask: Optional[Tensor] = None,\r\n                memory_mask: Optional[Tensor] = None,\r\n                tgt_key_padding_mask: Optional[Tensor] = None,\r\n                memory_key_padding_mask: Optional[Tensor] = None):\r\n        '''\r\n            Args:\r\n                z (torch.Tensor): (B, L_z, C)\r\n                x (torch.Tensor): (B, L_x, C)\r\n            Returns:\r\n                torch.Tensor: (B, num_queries, C)\r\n        '''\r\n        output = tgt\r\n        for layer in self.layers:\r\n            output = layer(output, z, x, query_pos, pos_z, pos_x, identity, identity_search,\r\n                           tgt_mask=tgt_mask,\r\n                           memory_mask=memory_mask,\r\n                           tgt_key_padding_mask=tgt_key_padding_mask,\r\n                           memory_key_padding_mask=memory_key_padding_mask)\r\n        output = self.norm(output)\r\n\r\n        return output\r\n\r\n\r\ndef build_decoder(decoder_layer, drop_path, dim, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate, z_size,\r\n                  x_size):\r\n    z_shape = [z_size, z_size]\r\n    x_shape = [x_size, x_size]\r\n    num_layers = decoder_layer\r\n    decoder_layers = []\r\n    for _ in range(num_layers):\r\n        decoder_layers.append(\r\n            TargetQueryDecoderLayer(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate,\r\n                                    drop_path=drop_path.allocate()))\r\n        drop_path.increase_depth()\r\n\r\n    decoder = TargetQueryDecoderBlock(dim, decoder_layers, num_layers)\r\n    return decoder\r\n\r\n\r\ndef generate_square_subsequent_mask(sz):\r\n    r\"\"\"Generate a square mask for the sequence. 
The masked positions are filled with float('-inf').\r\n        Unmasked positions are filled with float(0.0).\r\n    \"\"\"\r\n    mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\r\n    mask = mask.float().masked_fill(mask == 0, float(\r\n        '-inf')).masked_fill(mask == 1, float(0.0))\r\n    return mask\r\n\r\n\r\nclass Pix2Track(nn.Module):\r\n    def __init__(self, in_channel=64, feat_sz=20, feat_tz=10, range=2, pre_num=7, stride=16, encoder_layer=3, decoder_layer=3,\r\n                 bins=400, num_heads=12, mlp_ratio=2, qkv_bias=True, drop_rate=0.0, attn_drop=0.0,\r\n                 drop_path=nn.Identity):\r\n        super(Pix2Track, self).__init__()\r\n        self.bins = bins\r\n        self.range = range\r\n        self.pre_num = pre_num\r\n        self.word_embeddings = nn.Embedding(self.bins * self.range + 2, in_channel, padding_idx=self.bins * self.range, max_norm=1,\r\n                                            norm_type=2.0)\r\n\r\n        self.position_embeddings = nn.Embedding(\r\n            5, in_channel)\r\n        self.prev_position_embeddings = nn.Embedding(self.pre_num * 4, in_channel)\r\n        self.output_bias = torch.nn.Parameter(torch.zeros(self.bins * self.range + 2))\r\n\r\n        self.momentum_param = 0.25\r\n        self.identity_search = torch.nn.Parameter(torch.zeros(1, 1, 768))\r\n        self.identity_search = trunc_normal_(self.identity_search, std=.02)\r\n        self.encoder_layer = encoder_layer\r\n        self.drop_path = drop_path\r\n        self.tz = feat_tz * feat_tz\r\n        self.sz = feat_sz * feat_sz\r\n        trunc_normal_(self.word_embeddings.weight, std=.02)\r\n        if self.encoder_layer > 0:\r\n            self.encoder = build_encoder(encoder_layer, num_heads, mlp_ratio, qkv_bias,\r\n                                         drop_rate, attn_drop, in_channel, feat_tz, feat_sz, self.drop_path)\r\n        else:\r\n            self.encoder = None\r\n        self.decoder = build_decoder(decoder_layer, self.drop_path, in_channel, num_heads,\r\n                                     mlp_ratio, qkv_bias, drop_rate, attn_drop, feat_tz, feat_sz)\r\n        self.magic_num = (self.range-1) * 0.5\r\n\r\n    def forward(self, zx_feat, pos_z, pos_x, identity, seqs_input=None, stage=None):\r\n        emb_weight = self.word_embeddings.weight.clone()\r\n        share_weight = emb_weight.T\r\n\r\n        z_feat = zx_feat[:, :self.tz]\r\n        x_feat = zx_feat[:, self.tz:]\r\n\r\n        out_list = []\r\n        bs = zx_feat.shape[0]\r\n        if self.encoder != None:\r\n            z_feat, x_feat = self.encoder(z_feat, x_feat, None, None)\r\n        output_x_feat = x_feat.clone()\r\n\r\n        if stage == None:\r\n            seqs_input = seqs_input.to(torch.int64).to(zx_feat.device)\r\n            tgt = self.word_embeddings(seqs_input).permute(1, 0, 2)\r\n            query_embed_ = self.position_embeddings.weight.unsqueeze(1)\r\n            prev_embed = self.prev_position_embeddings.weight.unsqueeze(1)\r\n            query_embed = torch.cat([prev_embed, query_embed_], dim=0)\r\n            query_embed = query_embed.repeat(1, bs, 1)\r\n\r\n            decoder_feat_cls = self.decoder(tgt, z_feat, x_feat, pos_z, pos_x, identity, self.identity_search,\r\n                                            query_embed[:len(tgt)],\r\n                                            tgt_mask=generate_square_subsequent_mask(len(tgt)).to(tgt.device))\r\n\r\n            at = torch.matmul(decoder_feat_cls, share_weight)\r\n            at = at + 
self.output_bias\r\n            output = {'feat': at, \"state\": \"train\"}\r\n\r\n        else:\r\n            seqs_origin = seqs_input\r\n            start_token = torch.ones(bs, 1) * self.bins * self.range\r\n            start_token = start_token.to(seqs_origin)\r\n\r\n            real_seq = torch.cat([seqs_origin, start_token], dim=1)\r\n\r\n            seqs_input = real_seq.to(zx_feat.device).to(torch.int32)\r\n\r\n            for i in range(4):\r\n                tgt = self.word_embeddings(seqs_input).permute(1, 0, 2)\r\n                query_embed_ = self.position_embeddings.weight.unsqueeze(1)\r\n                prev_embed = self.prev_position_embeddings.weight.unsqueeze(1)\r\n                query_embed = torch.cat([prev_embed, query_embed_], dim=0)\r\n                query_embed = query_embed.repeat(1, bs, 1)\r\n\r\n                decoder_feat_cls = self.decoder(tgt, z_feat, x_feat, pos_z, pos_x, identity, self.identity_search,\r\n                                                query_embed[:len(tgt)],\r\n                                                tgt_mask=generate_square_subsequent_mask(len(seqs_input[0])).to(\r\n                                                    tgt.device))\r\n\r\n                out = torch.matmul(decoder_feat_cls.transpose(0, 1)[:, -1, :],\r\n                                   share_weight) + self.output_bias\r\n\r\n                out_list.append(out.unsqueeze(0))\r\n                out = out.softmax(-1)\r\n\r\n                value, extra_seq = out.topk(dim=-1, k=1)[0], out.topk(dim=-1, k=1)[1]\r\n                seqs_input = torch.cat([seqs_input, extra_seq], dim=-1)\r\n                if i == 0:\r\n                    seqs_output = extra_seq\r\n                    values = value\r\n                else:\r\n                    seqs_output = torch.cat([seqs_output, extra_seq], dim=-1)\r\n                    values = torch.cat([values, value], dim=-1)\r\n            if out_list:\r\n                feat = torch.cat(out_list)\r\n            output = {'seqs': seqs_output, 'class': values, 'feat': feat, \"state\": \"val/test\",\r\n                      \"x_feat\": output_x_feat.detach()}\r\n        return output\r\n\r\n\r\ndef build_pix_head(cfg, hidden_dim):\r\n    stride = cfg.MODEL.BACKBONE.STRIDE\r\n    in_channel = hidden_dim\r\n    feat_sz = int(cfg.DATA.SEARCH.SIZE / stride)\r\n    feat_tz = int(cfg.DATA.TEMPLATE.SIZE / stride)\r\n    decoder_layer = cfg.MODEL.DECODER_LAYER\r\n    encoder_layer = cfg.MODEL.ENCODER_LAYER\r\n    pre_num = cfg.MODEL.PRENUM\r\n    bins = cfg.MODEL.BINS\r\n    range = cfg.MODEL.RANGE\r\n    num_heads = cfg.MODEL.NUM_HEADS\r\n    mlp_ratio = cfg.MODEL.MLP_RATIO\r\n    qkv_bias = cfg.MODEL.QKV_BIAS\r\n    drop_rate = cfg.MODEL.DROP_RATE\r\n    attn_drop = cfg.MODEL.ATTN_DROP\r\n    drop_path = cfg.MODEL.DROP_PATH\r\n    drop_path_allocator = DropPathAllocator(drop_path)\r\n    pix_head = Pix2Track(in_channel=in_channel, feat_sz=feat_sz, feat_tz=feat_tz, range=range, pre_num=pre_num,\r\n                         stride=stride, encoder_layer=encoder_layer, decoder_layer=decoder_layer, bins=bins,\r\n                         num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop_rate=drop_rate,\r\n                         attn_drop=attn_drop, drop_path=drop_path_allocator)\r\n    return pix_head\r\n\r\n"
  },
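  {
    "path": "docs/sketches/topk_topp_sampling_demo.py",
    "content": "# Editor's illustrative sketch -- NOT part of the original repository.\n# head_seq.py defines top_k_top_p_filtering_batch() but Pix2Track decodes\n# greedily with topk(k=1). This sketch shows how the filter could be used to\n# sample coordinate tokens instead; the vocabulary size bins * range + 2\n# mirrors the word_embeddings table (coordinate bins plus start/padding ids).\n# Swapping sampling in for argmax is an assumption made for illustration only.\nimport torch\nimport torch.nn.functional as F\n\nfrom lib.models.layers.head_seq import top_k_top_p_filtering_batch\n\nbins, coord_range = 400, 2\nvocab_size = bins * coord_range + 2\nlogits = torch.randn(8, vocab_size)  # one decoding step for a batch of 8\n\n# Keep the 50 most likely tokens, then trim to a 0.9 cumulative-probability nucleus.\nfiltered = top_k_top_p_filtering_batch(logits.clone(), top_k=50, top_p=0.9)\nprobs = F.softmax(filtered, dim=-1)  # -inf logits become exactly 0 probability\ntokens = torch.multinomial(probs, num_samples=1)\nprint(tokens.shape)  # torch.Size([8, 1])\n"
  },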
  {
    "path": "lib/models/layers/mask_decoder.py",
    "content": "# -*- coding:utf-8 -*-\n# author  : Skye Song\n# file    : vit_decoder.py\n# Copyright (c) Skye-Song. All Rights Reserved\n\nimport torch\nimport torch.nn as nn\nfrom einops import rearrange\n\nfrom lib.utils.box_ops import box_xywh_to_cxywh, box_cxcywh_to_xyxy\nfrom ..mask_decoder.block import Block\nfrom ..mask_decoder.pos_embed import get_2d_sincos_pos_embed\n\nfrom external.PreciseRoIPooling.pytorch.prroi_pool import PrRoIPool2D\n\nfrom lib.utils.image import *\n\nclass MaskDecoder(nn.Module):\n\tdef __init__(self, mask_ratio=0.75, patch_size=16, num_patches=8 ** 2, embed_dim=1024, decoder_embed_dim=512,\n\t             decoder_depth=8, decoder_num_heads=16, pool_size=8,\n\t             mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False):\n\t\tsuper().__init__()\n\t\tself.mask_ratio = mask_ratio\n\t\tprint(self.mask_ratio)\n\t\t#self.mask_ratio = 0.75\n\n\t\tself.num_patches = num_patches\n\t\tself.patch_size = patch_size\n\n\t\tself.search_prroipool = PrRoIPool2D(pool_size, pool_size, spatial_scale=1.0)\n\n\t\tself.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True)\n\n\t\tself.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))\n\n\t\tself.decoder_pos_embed = nn.Parameter(torch.zeros(1, num_patches, decoder_embed_dim),\n\t\t                                      requires_grad=False)  # fixed sin-cos embedding\n\n\t\tself.decoder_blocks = nn.ModuleList([\n\t\t\tBlock(decoder_embed_dim, decoder_num_heads, mlp_ratio, qkv_bias=True, norm_layer=norm_layer)\n\t\t\tfor i in range(decoder_depth)])\n\n\t\tself.decoder_norm = norm_layer(decoder_embed_dim)\n\t\tself.decoder_pred = nn.Linear(decoder_embed_dim, patch_size ** 2 * 3, bias=True)  # decoder to patch\n\n\t\tself.norm_pix_loss = norm_pix_loss\n\n\t\tself.initialize_weights()\n\n\tdef initialize_weights(self):\n\t\t# initialize (and freeze) pos_embed by sin-cos embedding\n\t\tdecoder_pos_embed = get_2d_sincos_pos_embed(self.decoder_pos_embed.shape[-1],\n\t\t                                            int(self.num_patches ** .5), cls_token=False)\n\t\tself.decoder_pos_embed.data.copy_(torch.from_numpy(decoder_pos_embed).float().unsqueeze(0))\n\n\t\t# timm's trunc_normal_(std=.02) is effectively normal_(std=0.02) as cutoff is too big (2.)\n\t\ttorch.nn.init.normal_(self.mask_token, std=.02)\n\t\t# initialize nn.Linear and nn.LayerNorm\n\t\tself.apply(self._init_weights)\n\n\tdef _init_weights(self, m):\n\t\tif isinstance(m, nn.Linear):\n\t\t\t# we use xavier_uniform following official JAX ViT:\n\t\t\ttorch.nn.init.xavier_uniform_(m.weight)\n\t\t\tif isinstance(m, nn.Linear) and m.bias is not None:\n\t\t\t\tnn.init.constant_(m.bias, 0)\n\t\telif isinstance(m, nn.LayerNorm):\n\t\t\tnn.init.constant_(m.bias, 0)\n\t\t\tnn.init.constant_(m.weight, 1.0)\n\n\tdef random_masking(self, x):\n\t\t\"\"\"\n\t\tPerform per-sample random masking by per-sample shuffling.\n\t\tPer-sample shuffling is done by argsort random noise.\n\t\tx: [N, L, D], sequence\n\t\t\"\"\"\n\t\tN, L, D = x.shape  # batch, length, dim\n\t\tlen_keep = int(L * (1 - self.mask_ratio))\n\n\t\tnoise = torch.rand(N, L, device=x.device)  # noise in [0, 1]\n\n\t\t# sort noise for each sample\n\t\tids_shuffle = torch.argsort(noise, dim=1)  # ascend: small is keep, large is remove\n\t\tids_restore = torch.argsort(ids_shuffle, dim=1)\n\n\t\t# keep the first subset\n\t\tids_keep = ids_shuffle[:, :len_keep]\n\t\tx_keep = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))\n\n\t\t# generate the binary mask: 0 is keep, 1 is 
remove\n\t\tmask = torch.ones([N, L], device=x.device)\n\t\tmask[:, :len_keep] = 0\n\t\t# unshuffle to get the binary mask\n\t\tmask = torch.gather(mask, dim=1, index=ids_restore)\n\n\t\t# get the masked x\n\t\tmask_tokens = self.mask_token.repeat(x.shape[0], ids_restore.shape[1] - x_keep.shape[1], 1)\n\t\tx_ = torch.cat([x_keep, mask_tokens], dim=1)  # no cls token\n\t\tx_masked = torch.gather(x_, dim=1, index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2]))  # unshuffle\n\n\t\treturn x_masked, mask\n\n\tdef forward_decoder(self, x, eval=False):\n\t\t# embed tokens\n\n\t\tx = self.decoder_embed(x)\n\t\tmask = None\n\n\t\t# append mask tokens to sequence\n\t\tif not eval:\n\t\t\tx, mask = self.random_masking(x)\n\n\t\t# add pos embed\n\t\tx = x + self.decoder_pos_embed\n\n\t\t# apply Transformer blocks\n\t\tfor blk in self.decoder_blocks:\n\t\t\tx = blk(x)\n\t\tx = self.decoder_norm(x)\n\n\t\t# predictor projection\n\t\tx = self.decoder_pred(x)\n\t\treturn x, mask\n\n\tdef unpatchify(self, x):\n\t\t\"\"\"\n        x: (N, L, patch_size**2 *3)\n        imgs: (N, 3, H, W)\n        \"\"\"\n\t\tp = self.patch_size\n\t\th = w = int(x.shape[1] ** .5)\n\t\tassert h * w == x.shape[1]\n\n\t\tx = x.reshape(shape=(x.shape[0], h, w, p, p, 3))\n\t\tx = torch.einsum('nhwpqc->nchpwq', x)\n\t\timgs = x.reshape(shape=(x.shape[0], 3, h * p, h * p))\n\t\treturn imgs\n\n\tdef patchify(self, imgs):\n\t\t\"\"\"\n\t\timgs: (N, 3, H, W)\n\t\tx: (N, L, patch_size**2 *3)\n\t\t\"\"\"\n\t\tp = self.patch_size\n\t\tassert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0\n\n\t\th = w = imgs.shape[2] // p\n\t\tx = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p))\n\t\tx = torch.einsum('nchpwq->nhwpqc', x)\n\t\tx = x.reshape(shape=(imgs.shape[0], h * w, p ** 2 * 3))\n\n\t\treturn x\n\n\tdef forward_loss(self, imgs, pred, mask=None):\n\t\t\"\"\"\n\t\timgs: [N, 3, H, W]\n\t\tpred: [N, L, p*p*3]\n\t\tmask: [N, L], 0 is keep, 1 is remove,\n\t\t\"\"\"\n\t\ttarget = self.patchify(imgs)\n\t\tif self.norm_pix_loss:\n\t\t\tmean = target.mean(dim=-1, keepdim=True)\n\t\t\tvar = target.var(dim=-1, keepdim=True)\n\t\t\ttarget = (target - mean) / (var + 1.e-6) ** .5\n\n\t\tloss = (pred - target) ** 2\n\t\tloss = loss.mean(dim=-1)  # [N, L], mean loss per patch\n\t\tif mask == None:\n\t\t\tloss = loss.sum() / pred.shape[1] / pred.shape[0]  # mean loss on removed patches\n\t\telse:\n\t\t\tloss = loss.sum() / pred.shape[1] / pred.shape[0]\n\t\treturn loss\n\n\tdef crop_search_feat(self, search_feat, gt_bboxes):\n\n\t\tcrop_bboxes = box_xywh_to_cxywh(gt_bboxes)\n\t\tcrop_sz = torch.sqrt(gt_bboxes[:, 2] * gt_bboxes[:, 3]) * 2.0\n\t\tcrop_sz = torch.clamp(crop_sz, min=0., max=1.)\n\t\tcrop_bboxes[:, 2] = crop_bboxes[:, 3] = crop_sz\n\n\t\tcrop_bboxes = crop_bboxes * search_feat.shape[-1]\n\t\tcrop_bboxes = box_cxcywh_to_xyxy(crop_bboxes.clone().view(-1, 4))\n\t\tbatch_size = crop_bboxes.shape[0]\n\t\tbatch_index = torch.arange(batch_size, dtype=torch.float32).view(-1, 1).to(crop_bboxes.device)\n\n\t\ttarget_roi = torch.cat((batch_index, crop_bboxes), dim=1)\n\t\tsearch_box_feat = self.search_prroipool(search_feat, target_roi)\n\t\treturn search_box_feat\n\n\t\t# gt_bboxes = gt_bboxes * search_feat.shape[-1]\n\t\t# gt_bboxes = box_xywh_to_xyxy(gt_bboxes.clone().view(-1, 4))\n\t\t# batch_size = gt_bboxes.shape[0]\n\t\t# batch_index = torch.arange(batch_size, dtype=torch.float32).view(-1, 1).to(gt_bboxes.device)\n\t\t#\n\t\t# target_roi = torch.cat((batch_index, gt_bboxes), dim=1)\n\t\t# search_box_feat = 
self.search_prroipool(search_feat, target_roi)\n\t\t# return search_box_feat\n\n\tdef forward(self, x, images=None, gt_bboxes=None, eval=False,):\n\t\t# input x = [B,C,H,W]\n\t\t# input images = [b,3 h,w]\n\t\tif gt_bboxes is not None:\n\t\t\tx = self.crop_search_feat(x, gt_bboxes)\n\n\t\tx = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n\t\tpred, mask = self.forward_decoder(x, eval)  # [N, L, p*p*3]\n\t\tif eval:\n\t\t\treturn self.unpatchify(pred)\n\t\tif mask != None:\n\t\t\tloss = self.forward_loss(imgs=images, pred=pred, mask=mask)\n\t\telse:\n\t\t\tloss = self.forward_loss(imgs=images, pred=pred)\n\t\tpred = self.unpatchify(pred)\n\t\treturn pred, loss\n\n\ndef mask_decoder():\n\tmodel = MaskDecoder(\n\t\tmask_ratio=0.75, patch_size=16, num_patches=8 ** 2, embed_dim=1024, decoder_embed_dim=512, decoder_depth=8,\n\t\tdecoder_num_heads=16, mlp_ratio=4., norm_layer=nn.LayerNorm, norm_pix_loss=False)\n\treturn model\n\n\ndef build_maskdecoder(cfg, hidden_dim):\n\tpool_size = int(cfg.DATA.TEMPLATE.SIZE / cfg.MODEL.BACKBONE.PATCHSIZE)\n\n\tnum_patches = (cfg.DATA.TEMPLATE.SIZE // cfg.MODEL.BACKBONE.PATCHSIZE) ** 2\n\n\tmodel = MaskDecoder(\n\t\tmask_ratio=cfg.MODEL.DECODER.MASK_RATIO,\n\t\tpatch_size=cfg.MODEL.BACKBONE.PATCHSIZE,\n\t\tnum_patches=num_patches,\n\t\tembed_dim=hidden_dim,\n\t\tdecoder_embed_dim=cfg.MODEL.DECODER.EMBEDDIM,\n\t\tdecoder_depth=cfg.MODEL.DECODER.DEPTH,\n\t\tdecoder_num_heads=cfg.MODEL.DECODER.NUMHEADS,\n\t\tpool_size=pool_size,\n\t\tmlp_ratio=cfg.MODEL.DECODER.MLPRATIO,\n\t\tnorm_layer=nn.LayerNorm,\n\t\tnorm_pix_loss=False)\n\treturn model\n"
  },
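  {
    "path": "docs/sketches/patchify_roundtrip_demo.py",
    "content": "# Editor's illustrative sketch -- NOT part of the original repository.\n# Reimplements MaskDecoder.patchify()/unpatchify() from mask_decoder.py as\n# free functions (so the PreciseRoIPooling extension is not needed) and checks\n# that they are exact inverses: the decoder can therefore regress raw pixels\n# per patch and reshape them back into an image for the reconstruction loss.\nimport torch\n\n\ndef patchify(imgs, p):\n    # (N, 3, H, W) -> (N, L, p*p*3) with L = (H//p) * (W//p)\n    n, _, hw, _ = imgs.shape\n    h = w = hw // p\n    x = imgs.reshape(n, 3, h, p, w, p)\n    x = torch.einsum('nchpwq->nhwpqc', x)\n    return x.reshape(n, h * w, p ** 2 * 3)\n\n\ndef unpatchify(x, p):\n    # (N, L, p*p*3) -> (N, 3, H, W), assuming a square patch grid\n    n = x.shape[0]\n    h = w = int(x.shape[1] ** .5)\n    x = x.reshape(n, h, w, p, p, 3)\n    x = torch.einsum('nhwpqc->nchpwq', x)\n    return x.reshape(n, 3, h * p, w * p)\n\n\nimgs = torch.randn(2, 3, 128, 128)  # e.g. a template crop: an 8x8 grid of 16x16 patches\ntokens = patchify(imgs, 16)\nassert tokens.shape == (2, 64, 768)\nassert torch.equal(unpatchify(tokens, 16), imgs)\nprint('round trip ok:', tokens.shape)\n"
  },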
  {
    "path": "lib/models/layers/patch_embed.py",
    "content": "import torch.nn as nn\r\n\r\nfrom timm.models.layers import to_2tuple\r\n\r\n\r\nclass PatchEmbed(nn.Module):\r\n    \"\"\" 2D Image to Patch Embedding\r\n    \"\"\"\r\n\r\n    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):\r\n        super().__init__()\r\n        img_size = to_2tuple(img_size)\r\n        patch_size = to_2tuple(patch_size)\r\n        self.img_size = img_size\r\n        self.patch_size = patch_size\r\n        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])\r\n        self.num_patches = self.grid_size[0] * self.grid_size[1]\r\n        self.flatten = flatten\r\n\r\n        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\r\n        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()\r\n\r\n    def forward(self, x):\r\n        # allow different input size\r\n        # B, C, H, W = x.shape\r\n        # _assert(H == self.img_size[0], f\"Input image height ({H}) doesn't match model ({self.img_size[0]}).\")\r\n        # _assert(W == self.img_size[1], f\"Input image width ({W}) doesn't match model ({self.img_size[1]}).\")\r\n        x = self.proj(x)\r\n        if self.flatten:\r\n            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC\r\n        x = self.norm(x)\r\n        return x\r\n"
  },
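  {
    "path": "docs/sketches/patch_embed_demo.py",
    "content": "# Editor's illustrative sketch -- NOT part of the original repository.\n# Shows the token layout produced by PatchEmbed in patch_embed.py: a Conv2d\n# whose kernel and stride both equal patch_size tiles the image into\n# non-overlapping patches, so a 224x224 input with 16x16 patches becomes a\n# 14x14 grid flattened to 196 tokens of dimension embed_dim.\nimport torch\n\nfrom lib.models.layers.patch_embed import PatchEmbed\n\nembed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)\nx = torch.randn(2, 3, 224, 224)\ntokens = embed(x)  # BCHW -> BNC\nprint(embed.grid_size, embed.num_patches, tokens.shape)\n# (14, 14) 196 torch.Size([2, 196, 768])\n"
  },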
  {
    "path": "lib/models/layers/rpe.py",
    "content": "import torch\nimport torch.nn as nn\nfrom timm.models.layers import trunc_normal_\n\n\ndef generate_2d_relative_positional_encoding_index(z_shape, x_shape):\n    '''\n        z_shape: (z_h, z_w)\n        x_shape: (x_h, x_w)\n    '''\n    z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1]))\n    x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1]))\n\n    z_2d_index_h = z_2d_index_h.flatten(0)\n    z_2d_index_w = z_2d_index_w.flatten(0)\n    x_2d_index_h = x_2d_index_h.flatten(0)\n    x_2d_index_w = x_2d_index_w.flatten(0)\n\n    diff_h = z_2d_index_h[:, None] - x_2d_index_h[None, :]\n    diff_w = z_2d_index_w[:, None] - x_2d_index_w[None, :]\n\n    diff = torch.stack((diff_h, diff_w), dim=-1)\n    _, indices = torch.unique(diff.view(-1, 2), return_inverse=True, dim=0)\n    return indices.view(z_shape[0] * z_shape[1], x_shape[0] * x_shape[1])\n\n\ndef generate_2d_concatenated_self_attention_relative_positional_encoding_index(z_shape, x_shape):\n    '''\n        z_shape: (z_h, z_w)\n        x_shape: (x_h, x_w)\n    '''\n    z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1]))\n    x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1]))\n\n    z_2d_index_h = z_2d_index_h.flatten(0)\n    z_2d_index_w = z_2d_index_w.flatten(0)\n    x_2d_index_h = x_2d_index_h.flatten(0)\n    x_2d_index_w = x_2d_index_w.flatten(0)\n\n    concatenated_2d_index_h = torch.cat((z_2d_index_h, x_2d_index_h))\n    concatenated_2d_index_w = torch.cat((z_2d_index_w, x_2d_index_w))\n\n    diff_h = concatenated_2d_index_h[:, None] - concatenated_2d_index_h[None, :]\n    diff_w = concatenated_2d_index_w[:, None] - concatenated_2d_index_w[None, :]\n\n    z_len = z_shape[0] * z_shape[1]\n    x_len = x_shape[0] * x_shape[1]\n    a = torch.empty((z_len + x_len), dtype=torch.int64)\n    a[:z_len] = 0\n    a[z_len:] = 1\n    b=a[:, None].repeat(1, z_len + x_len)\n    c=a[None, :].repeat(z_len + x_len, 1)\n\n    diff = torch.stack((diff_h, diff_w, b, c), dim=-1)\n    _, indices = torch.unique(diff.view((z_len + x_len) * (z_len + x_len), 4), return_inverse=True, dim=0)\n    return indices.view((z_len + x_len), (z_len + x_len))\n\n\ndef generate_2d_concatenated_cross_attention_relative_positional_encoding_index(z_shape, x_shape):\n    '''\n        z_shape: (z_h, z_w)\n        x_shape: (x_h, x_w)\n    '''\n    z_2d_index_h, z_2d_index_w = torch.meshgrid(torch.arange(z_shape[0]), torch.arange(z_shape[1]))\n    x_2d_index_h, x_2d_index_w = torch.meshgrid(torch.arange(x_shape[0]), torch.arange(x_shape[1]))\n\n    z_2d_index_h = z_2d_index_h.flatten(0)\n    z_2d_index_w = z_2d_index_w.flatten(0)\n    x_2d_index_h = x_2d_index_h.flatten(0)\n    x_2d_index_w = x_2d_index_w.flatten(0)\n\n    concatenated_2d_index_h = torch.cat((z_2d_index_h, x_2d_index_h))\n    concatenated_2d_index_w = torch.cat((z_2d_index_w, x_2d_index_w))\n\n    diff_h = x_2d_index_h[:, None] - concatenated_2d_index_h[None, :]\n    diff_w = x_2d_index_w[:, None] - concatenated_2d_index_w[None, :]\n\n    z_len = z_shape[0] * z_shape[1]\n    x_len = x_shape[0] * x_shape[1]\n\n    a = torch.empty(z_len + x_len, dtype=torch.int64)\n    a[: z_len] = 0\n    a[z_len:] = 1\n    c = a[None, :].repeat(x_len, 1)\n\n    diff = torch.stack((diff_h, diff_w, c), dim=-1)\n    _, indices = torch.unique(diff.view(x_len * (z_len + x_len), 3), return_inverse=True, dim=0)\n    return indices.view(x_len, (z_len 
+ x_len))\n\n\nclass RelativePosition2DEncoder(nn.Module):\n    def __init__(self, num_heads, embed_size):\n        super(RelativePosition2DEncoder, self).__init__()\n        self.relative_position_bias_table = nn.Parameter(torch.empty((num_heads, embed_size)))\n        trunc_normal_(self.relative_position_bias_table, std=0.02)\n\n    def forward(self, attn_rpe_index):\n        '''\n            Args:\n                attn_rpe_index (torch.Tensor): (*), any shape containing indices, max(attn_rpe_index) < embed_size\n            Returns:\n                torch.Tensor: (1, num_heads, *)\n        '''\n        return self.relative_position_bias_table[:, attn_rpe_index].unsqueeze(0)\n"
  },
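  {
    "path": "examples/relative_position_bias_demo.py",
    "content": "# Minimal usage sketch (illustrative, not part of the tracker itself): pairs the\n# relative-positional-encoding index generators defined above with\n# RelativePosition2DEncoder. The import path and the feature-map sizes are\n# assumptions; adjust the import to this module's actual location.\nimport torch\n\nfrom lib.models.relative_position import (  # assumed module path\n    generate_2d_concatenated_self_attention_relative_positional_encoding_index,\n    RelativePosition2DEncoder,\n)\n\nz_shape, x_shape = (8, 8), (16, 16)  # template / search feature-map sizes (assumed)\nindex = generate_2d_concatenated_self_attention_relative_positional_encoding_index(z_shape, x_shape)\nseq_len = z_shape[0] * z_shape[1] + x_shape[0] * x_shape[1]  # 64 + 256 = 320 tokens\nassert index.shape == (seq_len, seq_len)\n\n# the bias table needs one row per distinct (dh, dw, source-region, target-region) tuple\nencoder = RelativePosition2DEncoder(num_heads=12, embed_size=int(index.max()) + 1)\nbias = encoder(index)  # (1, 12, 320, 320): added to attention logits before softmax\nassert bias.shape == (1, 12, seq_len, seq_len)\n"
  },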
  {
    "path": "lib/models/mask_decoder/__init__.py",
    "content": "# -*- coding:utf-8 -*-\n# author  : Skye Song\n# file    : __init__.py.py\n# Copyright (c) Skye-Song. All Rights Reserved\n"
  },
  {
    "path": "lib/models/mask_decoder/attention.py",
    "content": "# -*- coding:utf-8 -*-\n# author  : Skye Song\n# file    : attention.py\n# Copyright (c) Skye-Song. All Rights Reserved\nimport torch\nimport torch.nn as nn\nfrom einops import rearrange\n\nfrom lib.utils.image import *\n\n\nclass Attention(nn.Module):\n\tdef __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):\n\t\tsuper().__init__()\n\t\tself.num_heads = num_heads\n\t\thead_dim = dim // num_heads\n\t\tself.scale = head_dim ** -0.5\n\n\t\tself.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(attn_drop)\n\t\tself.proj = nn.Linear(dim, dim)\n\t\tself.proj_drop = nn.Dropout(proj_drop)\n\n\tdef forward(self, x, padding_mask=None, **kwargs):\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C//head)\n\n\t\tattn = (q @ k.transpose(-2, -1)) * self.scale  # (B, head, N, N)\n\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\n\t\tattn = attn.softmax(dim=-1)\n\t\tattn = self.attn_drop(attn)\n\n\t\tx = (attn @ v).transpose(1, 2).reshape(B, N, C)\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n\n\nclass ClsMixAttention(nn.Module):\n\tdef __init__(self,\n\t             dim,\n\t             num_heads,\n\t             qkv_bias=False,\n\t             attn_drop=0.,\n\t             proj_drop=0.,\n\t             ):\n\t\tsuper().__init__()\n\t\tself.num_heads = num_heads\n\t\thead_dim = dim // num_heads\n\t\tself.scale = head_dim ** -0.5\n\n\t\tself.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(attn_drop)\n\t\tself.proj = nn.Linear(dim, dim)\n\t\tself.proj_drop = nn.Dropout(proj_drop)\n\n\tdef forward(self, x, t_h, t_w, s_h, s_w, online_size=1, padding_mask=None):\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)\n\n\t\tq_cls, q_t, q_s = torch.split(q, [1, t_h * t_w * (1 + online_size), s_h * s_w], dim=2)\n\t\tk_cls, k_t, k_s = torch.split(k, [1, t_h * t_w * (1 + online_size), s_h * s_w], dim=2)\n\t\tv_cls, v_t, v_s = torch.split(v, [1, t_h * t_w * (1 + online_size), s_h * s_w], dim=2)\n\t\t# cls token attention\n\t\tattn = (q_cls @ k.transpose(-2, -1)) * self.scale  # (B, head, N_q, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = attn.softmax(dim=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_cls = rearrange(attn @ v, 'b h t d -> b t (h d)')\n\n\t\t# template attention\n\t\tattn = (q_t @ k_t.transpose(-2, -1)) * self.scale  # (B, head, N_q, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = attn.softmax(dim=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_t = rearrange(attn @ v_t, 'b h t d -> b t (h d)')\n\n\t\t# search region attention\n\t\tattn = (q_s @ k.transpose(-2, -1)) * self.scale  # (B, head, N_s, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = 
attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = attn.softmax(dim=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_s = rearrange(attn @ v, 'b h t d -> b t (h d)')\n\n\t\tx = torch.cat([x_cls, x_t, x_s], dim=1)\n\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n\n\nclass MixAttention(nn.Module):\n\tdef __init__(self,\n\t             dim,\n\t             num_heads,\n\t             qkv_bias=False,\n\t             attn_drop=0.,\n\t             proj_drop=0.,\n\t             ):\n\t\tsuper().__init__()\n\t\tself.num_heads = num_heads\n\t\thead_dim = dim // num_heads\n\t\tself.scale = head_dim ** -0.5\n\n\t\tself.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(attn_drop)\n\t\tself.proj = nn.Linear(dim, dim)\n\t\tself.proj_drop = nn.Dropout(proj_drop)\n\n\tdef forward(self, x, t_h, t_w, s_h, s_w, padding_mask=None):\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)\n\n\t\tq_t, q_s = torch.split(q, [t_h * t_w * 2, s_h * s_w], dim=2)\n\t\tk_t, k_s = torch.split(k, [t_h * t_w * 2, s_h * s_w], dim=2)\n\t\tv_t, v_s = torch.split(v, [t_h * t_w * 2, s_h * s_w], dim=2)\n\n\t\t# template attention\n\t\tattn = (q_t @ k_t.transpose(-2, -1)) * self.scale  # (B, head, N_q, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = attn.softmax(dim=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_t = rearrange(attn @ v_t, 'b h t d -> b t (h d)')\n\n\t\t# search region attention\n\t\tattn = (q_s @ k.transpose(-2, -1)) * self.scale  # (B, head, N_s, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = attn.softmax(dim=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_s = rearrange(attn @ v, 'b h t d -> b t (h d)')\n\n\t\tx = torch.cat([x_t, x_s], dim=1)\n\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n\n\nclass NottAttention(nn.Module):\n\tdef __init__(self,\n\t             dim,\n\t             num_heads,\n\t             qkv_bias=False,\n\t             attn_drop=0.,\n\t             proj_drop=0.,\n\t             ):\n\t\tsuper().__init__()\n\t\tself.num_heads = num_heads\n\t\thead_dim = dim // num_heads\n\t\tself.scale = head_dim ** -0.5\n\n\t\tself.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(attn_drop)\n\t\tself.proj = nn.Linear(dim, dim)\n\t\tself.proj_drop = nn.Dropout(proj_drop)\n\n\tdef forward(self, x, t_h, t_w, s_h, s_w, padding_mask=None):\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)\n\n\t\tq_t, q_s = torch.split(q, [t_h * t_w * 2, s_h * s_w], dim=2)\n\t\tk_t, k_s = torch.split(k, [t_h * t_w * 2, s_h * s_w], dim=2)\n\t\tv_t, v_s = torch.split(v, [t_h * t_w * 2, s_h * s_w], dim=2)\n\n\t\t# template attention\n\t\tattn = (q_t @ k_s.transpose(-2, -1)) * self.scale  # (B, head, N_q, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = 
attn.softmax(dim=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_t = rearrange(attn @ v_s, 'b h t d -> b t (h d)')\n\n\t\t# search region attention\n\t\tattn = (q_s @ k.transpose(-2, -1)) * self.scale  # (B, head, N_s, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = attn.softmax(dim=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_s = rearrange(attn @ v, 'b h t d -> b t (h d)')\n\n\t\tx = torch.cat([x_t, x_s], dim=1)\n\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n\n\nclass NossAttention(nn.Module):\n\tdef __init__(self,\n\t             dim,\n\t             num_heads,\n\t             qkv_bias=False,\n\t             attn_drop=0.,\n\t             proj_drop=0.,\n\t             ):\n\t\tsuper().__init__()\n\t\tself.num_heads = num_heads\n\t\thead_dim = dim // num_heads\n\t\tself.scale = head_dim ** -0.5\n\n\t\tself.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(attn_drop)\n\t\tself.proj = nn.Linear(dim, dim)\n\t\tself.proj_drop = nn.Dropout(proj_drop)\n\n\tdef forward(self, x, t_h, t_w, s_h, s_w, padding_mask=None):\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)\n\n\t\tq_t, q_s = torch.split(q, [t_h * t_w * 2, s_h * s_w], dim=2)\n\t\tk_t, k_s = torch.split(k, [t_h * t_w * 2, s_h * s_w], dim=2)\n\t\tv_t, v_s = torch.split(v, [t_h * t_w * 2, s_h * s_w], dim=2)\n\n\t\t# template attention\n\t\tattn = (q_t @ k.transpose(-2, -1)) * self.scale  # (B, head, N_q, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = attn.softmax(dim=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_t = rearrange(attn @ v, 'b h t d -> b t (h d)')\n\n\t\t# search region attention\n\t\tattn = (q_s @ k_t.transpose(-2, -1)) * self.scale  # (B, head, N_s, N)\n\t\tif padding_mask is not None:\n\t\t\tassert padding_mask.size()[0] == B\n\t\t\tassert padding_mask.size()[1] == N\n\t\t\tattn = attn.masked_fill(padding_mask.unsqueeze(1).unsqueeze(2), float(\"-inf\"))\n\t\tattn = attn.softmax(dim=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_s = rearrange(attn @ v_t, 'b h t d -> b t (h d)')\n\n\t\tx = torch.cat([x_t, x_s], dim=1)\n\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n\n\nclass CrossAttention(nn.Module):\n\tdef __init__(self,\n\t             dim,\n\t             num_heads,\n\t             qkv_bias=False,\n\t             attn_drop=0.,\n\t             proj_drop=0.,\n\t             ):\n\t\tsuper().__init__()\n\t\tself.num_heads = num_heads\n\t\thead_dim = dim // num_heads\n\t\tself.scale = head_dim ** -0.5\n\n\t\tself.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n\t\tself.attn_drop = nn.Dropout(attn_drop)\n\t\tself.proj = nn.Linear(dim, dim)\n\t\tself.proj_drop = nn.Dropout(proj_drop)\n\n\tdef forward(self, x, t_h, t_w, s_h, s_w):\n\t\tB, N, C = x.shape\n\t\tqkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n\t\tq, k, v = qkv[0], qkv[1], qkv[2]  # (B, head, N, C)\n\n\t\tq_t, q_s = torch.split(q, [t_h * t_w * 2, s_h * s_w], dim=2)\n\t\t# split k/v along the token dim (2) with the same layout as q\n\t\tk_t, k_s = torch.split(k, [t_h * t_w * 2, s_h * s_w], dim=2)\n\t\tv_t, v_s = torch.split(v, [t_h * t_w * 2, 
s_h * s_w], dim=2)\n\n\t\t# template attention\n\t\tattn = (q_t @ k_s.transpose(-2, -1)) * self.scale\n\t\tattn = attn.softmax(dim=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_t = rearrange(attn @ v_s, 'b h t d -> b t (h d)')\n\n\t\t# search region attention\n\t\tattn = (q_s @ k_t.transpose(-2, -1)) * self.scale\n\t\tattn = attn.softmax(dim=-1)\n\t\tattn = self.attn_drop(attn)\n\t\tx_s = rearrange(attn @ v_t, 'b h t d -> b t (h d)')\n\n\t\tx = torch.cat([x_t, x_s], dim=1)\n\n\t\tx = self.proj(x)\n\t\tx = self.proj_drop(x)\n\t\treturn x\n"
  },
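  {
    "path": "examples/mask_decoder_attention_demo.py",
    "content": "# Minimal usage sketch (illustrative): exercises Attention and MixAttention from\n# lib/models/mask_decoder/attention.py, assuming the repo root is on PYTHONPATH.\n# All sizes below are assumptions chosen for demonstration.\nimport torch\n\nfrom lib.models.mask_decoder.attention import Attention, MixAttention\n\nB, C, num_heads = 2, 768, 12\nt_h = t_w = 8   # template feature-map size\ns_h = s_w = 16  # search-region feature-map size\n\n# token layout expected by MixAttention: two template blocks, then the search block\nx = torch.randn(B, t_h * t_w * 2 + s_h * s_w, C)\nmix = MixAttention(dim=C, num_heads=num_heads)\nout = mix(x, t_h, t_w, s_h, s_w)  # templates attend to templates; search attends to all tokens\nassert out.shape == x.shape\n\n# plain self-attention over an arbitrary token sequence\nattn = Attention(dim=C, num_heads=num_heads)\nassert attn(torch.randn(B, 100, C)).shape == (B, 100, C)\n"
  },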
  {
    "path": "lib/models/mask_decoder/block.py",
    "content": "# -*- coding:utf-8 -*-\n# author  : Skye Song\n# file    : block.py\n# Copyright (c) Skye-Song. All Rights Reserved\n\nfrom .attention import *\nfrom .drop import DropPath\nfrom .mlp import Mlp\n\nclass Block(nn.Module):\n    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,\n                 drop_path=0., attention = \"Attention\", act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n        super().__init__()\n        if norm_layer is None:\n            norm_layer = nn.LayerNorm\n        self.norm1 = norm_layer(dim)\n\n        self.attn = globals()[attention](dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop,\n                                             proj_drop=drop)\n\n        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here\n        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n        self.norm2 = norm_layer(dim)\n        mlp_hidden_dim = int(dim * mlp_ratio)\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n    def forward(self, x, **kwargs):\n        x = x + self.drop_path(self.attn(self.norm1(x), **kwargs))\n        x = x + self.drop_path(self.mlp(self.norm2(x)))\n        return x"
  },
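  {
    "path": "examples/mask_decoder_block_demo.py",
    "content": "# Minimal usage sketch (illustrative): Block looks its attention class up by name\n# via globals(), so any class from attention.py can be plugged in. Dimensions are\n# assumptions chosen for demonstration.\nimport torch\n\nfrom lib.models.mask_decoder.block import Block\n\n# the default self-attention needs no geometry kwargs\nblk = Block(dim=256, num_heads=8, attention='Attention')\nx = torch.randn(2, 64, 256)\nassert blk(x).shape == x.shape\n\n# mixed template/search attention: the token-layout kwargs pass through **kwargs\nmix_blk = Block(dim=256, num_heads=8, attention='MixAttention')\ntokens = torch.randn(2, 8 * 8 * 2 + 16 * 16, 256)\nassert mix_blk(tokens, t_h=8, t_w=8, s_h=16, s_w=16).shape == tokens.shape\n"
  },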
  {
    "path": "lib/models/mask_decoder/drop.py",
    "content": "\"\"\" DropBlock, DropPath\n\nPyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.\n\nPapers:\nDropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)\n\nDeep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)\n\nCode:\nDropBlock impl inspired by two Tensorflow impl that I liked:\n - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74\n - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef drop_block_2d(\n        x, drop_prob: float = 0.1, block_size: int = 7,  gamma_scale: float = 1.0,\n        with_noise: bool = False, inplace: bool = False, batchwise: bool = False):\n    \"\"\" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf\n\n    DropBlock with an experimental gaussian noise option. This layer has been tested on a few training\n    runs with success, but needs further validation and possibly optimization for lower runtime impact.\n    \"\"\"\n    B, C, H, W = x.shape\n    total_size = W * H\n    clipped_block_size = min(block_size, min(W, H))\n    # seed_drop_rate, the gamma parameter\n    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (\n        (W - block_size + 1) * (H - block_size + 1))\n\n    # Forces the block to be inside the feature map.\n    w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device))\n    valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \\\n                  ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))\n    valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)\n\n    if batchwise:\n        # one mask for whole batch, quite a bit faster\n        uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)\n    else:\n        uniform_noise = torch.rand_like(x)\n    block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)\n    block_mask = -F.max_pool2d(\n        -block_mask,\n        kernel_size=clipped_block_size,  # block_size,\n        stride=1,\n        padding=clipped_block_size // 2)\n\n    if with_noise:\n        normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)\n        if inplace:\n            x.mul_(block_mask).add_(normal_noise * (1 - block_mask))\n        else:\n            x = x * block_mask + normal_noise * (1 - block_mask)\n    else:\n        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)\n        if inplace:\n            x.mul_(block_mask * normalize_scale)\n        else:\n            x = x * block_mask * normalize_scale\n    return x\n\n\ndef drop_block_fast_2d(\n        x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7,\n        gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False):\n    \"\"\" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf\n\n    DropBlock with an experimental gaussian noise option. 
Simplified from above without concern for valid\n    block mask at edges.\n    \"\"\"\n    B, C, H, W = x.shape\n    total_size = W * H\n    clipped_block_size = min(block_size, min(W, H))\n    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (\n            (W - block_size + 1) * (H - block_size + 1))\n\n    if batchwise:\n        # one mask for whole batch, quite a bit faster\n        block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma\n    else:\n        # mask per batch element\n        block_mask = torch.rand_like(x) < gamma\n    block_mask = F.max_pool2d(\n        block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)\n\n    if with_noise:\n        normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)\n        if inplace:\n            x.mul_(1. - block_mask).add_(normal_noise * block_mask)\n        else:\n            x = x * (1. - block_mask) + normal_noise * block_mask\n    else:\n        block_mask = 1 - block_mask\n        normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype)\n        if inplace:\n            x.mul_(block_mask * normalize_scale)\n        else:\n            x = x * block_mask * normalize_scale\n    return x\n\n\nclass DropBlock2d(nn.Module):\n    \"\"\" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf\n    \"\"\"\n    def __init__(self,\n                 drop_prob=0.1,\n                 block_size=7,\n                 gamma_scale=1.0,\n                 with_noise=False,\n                 inplace=False,\n                 batchwise=False,\n                 fast=True):\n        super(DropBlock2d, self).__init__()\n        self.drop_prob = drop_prob\n        self.gamma_scale = gamma_scale\n        self.block_size = block_size\n        self.with_noise = with_noise\n        self.inplace = inplace\n        self.batchwise = batchwise\n        self.fast = fast  # FIXME finish comparisons of fast vs not\n\n    def forward(self, x):\n        if not self.training or not self.drop_prob:\n            return x\n        if self.fast:\n            return drop_block_fast_2d(\n                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)\n        else:\n            return drop_block_2d(\n                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)\n\n\ndef drop_path(x, drop_prob: float = 0., training: bool = False):\n    \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n\n    This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,\n    the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...\n    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for\n    changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use\n    'survival rate' as the argument.\n\n    \"\"\"\n    if drop_prob == 0. 
or not training:\n        return x\n    keep_prob = 1 - drop_prob\n    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets\n    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)\n    random_tensor.floor_()  # binarize\n    output = x.div(keep_prob) * random_tensor\n    return output\n\nclass DropPath(nn.Module):\n    \"\"\"Drop paths (Stochastic Depth) per sample  (when applied in main path of residual blocks).\n    \"\"\"\n    def __init__(self, drop_prob=None):\n        super(DropPath, self).__init__()\n        self.drop_prob = drop_prob\n\n    def forward(self, x):\n        return drop_path(x, self.drop_prob, self.training)\n"
  },
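  {
    "path": "examples/drop_path_demo.py",
    "content": "# Behavioural sketch (illustrative) for DropPath from lib/models/mask_decoder/drop.py;\n# the drop probability and tensor shapes are assumptions.\nimport torch\n\nfrom lib.models.mask_decoder.drop import DropPath\n\ndp = DropPath(drop_prob=0.2)\nx = torch.ones(512, 4, 16)\n\ndp.train()\ny = dp(x)\n# per sample: either zeroed entirely or rescaled by 1/keep_prob, so the expected\n# value of the output matches the input\nvals = y[:, 0, 0]\nassert torch.all((vals == 0) | torch.isclose(vals, torch.tensor(1.0 / 0.8)))\n\ndp.eval()\nassert torch.equal(dp(x), x)  # identity at inference time\n"
  },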
  {
    "path": "lib/models/mask_decoder/mlp.py",
    "content": "\"\"\" MLP module w/ dropout and configurable activation layer\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\nfrom torch import nn as nn\nimport torch.nn.functional as F\n\nclass Mlp(nn.Module):\n    \"\"\" MLP as used in Vision Transformer, MLP-Mixer and related networks\n    \"\"\"\n    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        self.fc1 = nn.Linear(in_features, hidden_features)\n        self.act = act_layer()\n        self.fc2 = nn.Linear(hidden_features, out_features)\n        self.drop = nn.Dropout(drop)\n\n    def forward(self, x):\n        x = self.fc1(x)\n        x = self.act(x)\n        x = self.drop(x)\n        x = self.fc2(x)\n        x = self.drop(x)\n        return x\n\nclass MultiLayerMlp(nn.Module):\n    \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, BN=False):\n        super().__init__()\n        self.num_layers = num_layers\n        h = [hidden_dim] * (num_layers - 1)\n        if BN:\n            self.layers = nn.ModuleList(nn.Sequential(nn.Linear(n, k), nn.BatchNorm1d(k))\n                                        for n, k in zip([input_dim] + h, h + [output_dim]))\n        else:\n            self.layers = nn.ModuleList(nn.Linear(n, k)\n                                        for n, k in zip([input_dim] + h, h + [output_dim]))\n\n    def forward(self, x):\n        for i, layer in enumerate(self.layers):\n            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n        return x\n\nclass GluMlp(nn.Module):\n    \"\"\" MLP w/ GLU style gating\n    See: https://arxiv.org/abs/1612.08083, https://arxiv.org/abs/2002.05202\n    \"\"\"\n    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.Sigmoid, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        assert hidden_features % 2 == 0\n        self.fc1 = nn.Linear(in_features, hidden_features)\n        self.act = act_layer()\n        self.fc2 = nn.Linear(hidden_features // 2, out_features)\n        self.drop = nn.Dropout(drop)\n\n    def init_weights(self):\n        # override init of fc1 w/ gate portion set to weight near zero, bias=1\n        fc1_mid = self.fc1.bias.shape[0] // 2\n        nn.init.ones_(self.fc1.bias[fc1_mid:])\n        nn.init.normal_(self.fc1.weight[fc1_mid:], std=1e-6)\n\n    def forward(self, x):\n        x = self.fc1(x)\n        x, gates = x.chunk(2, dim=-1)\n        x = x * self.act(gates)\n        x = self.drop(x)\n        x = self.fc2(x)\n        x = self.drop(x)\n        return x\n\n\nclass GatedMlp(nn.Module):\n    \"\"\" MLP as used in gMLP\n    \"\"\"\n    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU,\n                 gate_layer=None, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        self.fc1 = nn.Linear(in_features, hidden_features)\n        self.act = act_layer()\n        if gate_layer is not None:\n            assert hidden_features % 2 == 0\n            self.gate = gate_layer(hidden_features)\n            hidden_features = hidden_features // 2  # FIXME base reduction on 
gate property?\n        else:\n            self.gate = nn.Identity()\n        self.fc2 = nn.Linear(hidden_features, out_features)\n        self.drop = nn.Dropout(drop)\n\n    def forward(self, x):\n        x = self.fc1(x)\n        x = self.act(x)\n        x = self.drop(x)\n        x = self.gate(x)\n        x = self.fc2(x)\n        x = self.drop(x)\n        return x\n\n\nclass ConvMlp(nn.Module):\n    \"\"\" MLP using 1x1 convs that keeps spatial dims\n    \"\"\"\n    def __init__(\n            self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)\n        self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()\n        self.act = act_layer()\n        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)\n        self.drop = nn.Dropout(drop)\n\n    def forward(self, x):\n        x = self.fc1(x)\n        x = self.norm(x)\n        x = self.act(x)\n        x = self.drop(x)\n        x = self.fc2(x)\n        return x\n"
  },
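  {
    "path": "examples/mask_decoder_mlp_demo.py",
    "content": "# Shape-check sketch (illustrative) for the MLP variants in\n# lib/models/mask_decoder/mlp.py; the widths below are assumptions.\nimport torch\n\nfrom lib.models.mask_decoder.mlp import Mlp, GluMlp, MultiLayerMlp\n\nx = torch.randn(2, 196, 384)\n\nmlp = Mlp(in_features=384, hidden_features=4 * 384)  # the usual transformer 4x expansion\nassert mlp(x).shape == x.shape\n\n# hidden features are chunked into value/gate halves, so they must be even\nglu = GluMlp(in_features=384, hidden_features=4 * 384)\nassert glu(x).shape == x.shape\n\nhead = MultiLayerMlp(input_dim=384, hidden_dim=256, output_dim=4, num_layers=3)\nassert head(x).shape == (2, 196, 4)  # e.g. a small box-regression head\n"
  },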
  {
    "path": "lib/models/mask_decoder/norm.py",
    "content": "# -*- coding:utf-8 -*-\n# author  : Skye Song\n# file    : norm.py\n# Copyright (c) Skye-Song. All Rights Reserved\n\nimport torch\n\nclass FrozenBatchNorm2d(torch.nn.Module):\n    \"\"\"\n    BatchNorm2d where the batch statistics and the affine parameters are fixed.\n\n    Copy-paste from torchvision.misc.ops with added eps before rqsrt,\n    without which any other models than torchvision.models.resnet[18,34,50,101]\n    produce nans.\n    \"\"\"\n\n    def __init__(self, n):\n        super(FrozenBatchNorm2d, self).__init__()\n        self.register_buffer(\"weight\", torch.ones(n))\n        self.register_buffer(\"bias\", torch.zeros(n))\n        self.register_buffer(\"running_mean\", torch.zeros(n))\n        self.register_buffer(\"running_var\", torch.ones(n))\n\n    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n                              missing_keys, unexpected_keys, error_msgs):\n        num_batches_tracked_key = prefix + 'num_batches_tracked'\n        if num_batches_tracked_key in state_dict:\n            del state_dict[num_batches_tracked_key]\n\n        super(FrozenBatchNorm2d, self)._load_from_state_dict(\n            state_dict, prefix, local_metadata, strict,\n            missing_keys, unexpected_keys, error_msgs)\n\n    def forward(self, x):\n        # move reshapes to the beginning\n        # to make it fuser-friendly\n        w = self.weight.reshape(1, -1, 1, 1)\n        b = self.bias.reshape(1, -1, 1, 1)\n        rv = self.running_var.reshape(1, -1, 1, 1)\n        rm = self.running_mean.reshape(1, -1, 1, 1)\n        eps = 1e-5\n        scale = w * (rv + eps).rsqrt()  # rsqrt(x): 1/sqrt(x), r: reciprocal\n        bias = b - rm * scale\n        return x * scale + bias"
  },
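  {
    "path": "examples/frozen_bn_demo.py",
    "content": "# Sanity-check sketch (illustrative): with identical statistics, FrozenBatchNorm2d\n# from lib/models/mask_decoder/norm.py should match torch.nn.BatchNorm2d in eval\n# mode (both use eps=1e-5). Channel count and input shape are assumptions.\nimport torch\n\nfrom lib.models.mask_decoder.norm import FrozenBatchNorm2d\n\nbn = torch.nn.BatchNorm2d(16).eval()\nwith torch.no_grad():\n    bn.running_mean.uniform_(-1.0, 1.0)\n    bn.running_var.uniform_(0.5, 2.0)\n    bn.weight.uniform_(0.5, 1.5)\n    bn.bias.uniform_(-0.5, 0.5)\n\nfrozen = FrozenBatchNorm2d(16)\nfrozen.load_state_dict(bn.state_dict())  # num_batches_tracked is dropped by the override\n\nx = torch.randn(2, 16, 8, 8)\nassert torch.allclose(frozen(x), bn(x), atol=1e-5)\n"
  },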
  {
    "path": "lib/models/mask_decoder/patch_embed.py",
    "content": "\"\"\" Image to Patch Embedding using Conv2d\n\nA convolution based approach to patchifying a 2D image w/ embedding projection.\n\nBased on the impl in https://github.com/google-research/vision_transformer\n\nHacked together by / Copyright 2020 Ross Wightman\n\"\"\"\n\nfrom torch import nn as nn\n\nfrom itertools import repeat\nimport collections.abc\n\n\n# From PyTorch internals\ndef _ntuple(n):\n    def parse(x):\n        if isinstance(x, collections.abc.Iterable):\n            return x\n        return tuple(repeat(x, n))\n    return parse\n\n\nto_1tuple = _ntuple(1)\nto_2tuple = _ntuple(2)\nto_3tuple = _ntuple(3)\nto_4tuple = _ntuple(4)\nto_ntuple = _ntuple\n\n\nclass PatchEmbed(nn.Module):\n    \"\"\" 2D Image to Patch Embedding\n    \"\"\"\n    def __init__(self, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True):\n        super().__init__()\n        self.flatten = flatten\n        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\n        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()\n\n    def forward(self, x):\n        x = self.proj(x)\n        if self.flatten:\n            x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC\n        x = self.norm(x)\n        return x\n"
  },
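  {
    "path": "examples/patch_embed_demo.py",
    "content": "# Minimal usage sketch (illustrative) for PatchEmbed in\n# lib/models/mask_decoder/patch_embed.py; image size and embed dim are assumptions.\nimport torch\n\nfrom lib.models.mask_decoder.patch_embed import PatchEmbed\n\nembed = PatchEmbed(patch_size=16, in_chans=3, embed_dim=768)\nimg = torch.randn(2, 3, 224, 224)\ntokens = embed(img)  # Conv2d patchify, then BCHW -> BNC flatten\nassert tokens.shape == (2, (224 // 16) ** 2, 768)  # 196 patch tokens\n"
  },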
  {
    "path": "lib/models/mask_decoder/pos_embed.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n# --------------------------------------------------------\n# Position embedding utils\n# --------------------------------------------------------\nimport numpy as np\nimport torch\n\n# --------------------------------------------------------\n# 2D sine-cosine position embedding\n# References:\n# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py\n# MoCo v3: https://github.com/facebookresearch/moco-v3\n# --------------------------------------------------------\ndef get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):\n    \"\"\"\n    grid_size: int of the grid height and width\n    return:\n    pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)\n    \"\"\"\n    grid_h = np.arange(grid_size, dtype=np.float32)\n    grid_w = np.arange(grid_size, dtype=np.float32)\n    grid = np.meshgrid(grid_w, grid_h)  # here w goes first\n    grid = np.stack(grid, axis=0)\n\n    grid = grid.reshape([2, 1, grid_size, grid_size])\n    pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)\n    if cls_token:\n        pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)\n    return pos_embed\n\n\ndef get_2d_sincos_pos_embed_from_grid(embed_dim, grid):\n    assert embed_dim % 2 == 0\n\n    # use half of dimensions to encode grid_h\n    emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0])  # (H*W, D/2)\n    emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1])  # (H*W, D/2)\n\n    emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)\n    return emb\n\n\ndef get_1d_sincos_pos_embed_from_grid(embed_dim, pos):\n    \"\"\"\n    embed_dim: output dimension for each position\n    pos: a list of positions to be encoded: size (M,)\n    out: (M, D)\n    \"\"\"\n    assert embed_dim % 2 == 0\n    omega = np.arange(embed_dim // 2, dtype=np.float)\n    omega /= embed_dim / 2.\n    omega = 1. 
/ 10000**omega  # (D/2,)\n\n    pos = pos.reshape(-1)  # (M,)\n    out = np.einsum('m,d->md', pos, omega)  # (M, D/2), outer product\n\n    emb_sin = np.sin(out) # (M, D/2)\n    emb_cos = np.cos(out) # (M, D/2)\n\n    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)\n    return emb\n\n\n\n# --------------------------------------------------------\n# Interpolate position embeddings for high-resolution\n# References:\n# DeiT: https://github.com/facebookresearch/deit\n# --------------------------------------------------------\ndef interpolate_pos_embed(model, checkpoint_model):\n    if 'pos_embed' in checkpoint_model:\n        pos_embed_checkpoint = checkpoint_model['pos_embed']\n        embedding_size = pos_embed_checkpoint.shape[-1]\n        num_patches = model.patch_embed.num_patches\n        num_extra_tokens = model.pos_embed.shape[-2] - num_patches\n        # height (== width) for the checkpoint position embedding\n        orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n        # height (== width) for the new position embedding\n        new_size = int(num_patches ** 0.5)\n        # class_token and dist_token are kept unchanged\n        if orig_size != new_size:\n            print(\"Position interpolate from %dx%d to %dx%d\" % (orig_size, orig_size, new_size, new_size))\n            extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n            # only the position tokens are interpolated\n            pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n            pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n            pos_tokens = torch.nn.functional.interpolate(\n                pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n            pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n            new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n            checkpoint_model['pos_embed'] = new_pos_embed\n"
  },
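  {
    "path": "examples/sincos_pos_embed_demo.py",
    "content": "# Minimal usage sketch (illustrative) for the sine-cosine embeddings in\n# lib/models/mask_decoder/pos_embed.py. grid_size=14 matches a 224px image with\n# 16px patches; all values are assumptions chosen for demonstration.\nimport torch\n\nfrom lib.models.mask_decoder.pos_embed import get_2d_sincos_pos_embed\n\npos = get_2d_sincos_pos_embed(embed_dim=768, grid_size=14, cls_token=True)\nassert pos.shape == (1 + 14 * 14, 768)  # the cls-token row is all zeros\n\n# typically copied into a frozen parameter of a ViT-style model\npos_embed = torch.nn.Parameter(torch.from_numpy(pos).float().unsqueeze(0), requires_grad=False)\nassert pos_embed.shape == (1, 197, 768)\n"
  },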
  {
    "path": "lib/models/mask_decoder/weight_init.py",
    "content": "import torch\nimport math\nimport warnings\n\nfrom torch.nn.init import _calculate_fan_in_and_fan_out\n\ndef _no_grad_trunc_normal_(tensor, mean, std, a, b):\n    # Cut & paste from PyTorch official master until it's in a few official releases - RW\n    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf\n    def norm_cdf(x):\n        # Computes standard normal cumulative distribution function\n        return (1. + math.erf(x / math.sqrt(2.))) / 2.\n\n    if (mean < a - 2 * std) or (mean > b + 2 * std):\n        warnings.warn(\"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. \"\n                      \"The distribution of values may be incorrect.\",\n                      stacklevel=2)\n\n    with torch.no_grad():\n        # Values are generated by using a truncated uniform distribution and\n        # then using the inverse CDF for the normal distribution.\n        # Get upper and lower cdf values\n        l = norm_cdf((a - mean) / std)\n        u = norm_cdf((b - mean) / std)\n\n        # Uniformly fill tensor with values from [l, u], then translate to\n        # [2l-1, 2u-1].\n        tensor.uniform_(2 * l - 1, 2 * u - 1)\n\n        # Use inverse cdf transform for normal distribution to get truncated\n        # standard normal\n        tensor.erfinv_()\n\n        # Transform to proper mean, std\n        tensor.mul_(std * math.sqrt(2.))\n        tensor.add_(mean)\n\n        # Clamp to ensure it's in the proper range\n        tensor.clamp_(min=a, max=b)\n        return tensor\n\n\ndef trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):\n    # type: (Tensor, float, float, float, float) -> Tensor\n    r\"\"\"Fills the input Tensor with values drawn from a truncated\n    normal distribution. The values are effectively drawn from the\n    normal distribution :math:`\\mathcal{N}(\\text{mean}, \\text{std}^2)`\n    with values outside :math:`[a, b]` redrawn until they are within\n    the bounds. The method used for generating the random values works\n    best when :math:`a \\leq \\text{mean} \\leq b`.\n    Args:\n        tensor: an n-dimensional `torch.Tensor`\n        mean: the mean of the normal distribution\n        std: the standard deviation of the normal distribution\n        a: the minimum cutoff value\n        b: the maximum cutoff value\n    Examples:\n        >>> w = torch.empty(3, 5)\n        >>> nn.init.trunc_normal_(w)\n    \"\"\"\n    return _no_grad_trunc_normal_(tensor, mean, std, a, b)\n\n\ndef variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):\n    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)\n    if mode == 'fan_in':\n        denom = fan_in\n    elif mode == 'fan_out':\n        denom = fan_out\n    elif mode == 'fan_avg':\n        denom = (fan_in + fan_out) / 2\n\n    variance = scale / denom\n\n    if distribution == \"truncated_normal\":\n        # constant is stddev of standard normal truncated to (-2, 2)\n        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)\n    elif distribution == \"normal\":\n        tensor.normal_(std=math.sqrt(variance))\n    elif distribution == \"uniform\":\n        bound = math.sqrt(3 * variance)\n        tensor.uniform_(-bound, bound)\n    else:\n        raise ValueError(f\"invalid distribution {distribution}\")\n\n\ndef lecun_normal_(tensor):\n    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')\n"
  },
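  {
    "path": "examples/weight_init_demo.py",
    "content": "# Minimal usage sketch (illustrative) for lib/models/mask_decoder/weight_init.py;\n# the tensor shape and std are assumptions.\nimport torch\n\nfrom lib.models.mask_decoder.weight_init import trunc_normal_, lecun_normal_\n\nw = torch.empty(768, 768)\ntrunc_normal_(w, std=0.02)  # N(0, 0.02^2) clamped to the default bounds [-2, 2]\nassert w.abs().max() <= 2.0\n\nlecun_normal_(w)  # variance scaling: std ~ sqrt(1 / fan_in), truncated normal\nassert torch.isfinite(w).all()\n"
  },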
  {
    "path": "lib/test/__init__.py",
    "content": ""
  },
  {
    "path": "lib/test/analysis/__init__.py",
    "content": ""
  },
  {
    "path": "lib/test/analysis/extract_results.py",
    "content": "import os\nimport sys\nimport numpy as np\nfrom lib.test.utils.load_text import load_text\nimport torch\nimport pickle\nfrom tqdm import tqdm\n\nenv_path = os.path.join(os.path.dirname(__file__), '../../..')\nif env_path not in sys.path:\n    sys.path.append(env_path)\n\nfrom lib.test.evaluation.environment import env_settings\n\n\ndef calc_err_center(pred_bb, anno_bb, normalized=False):\n    pred_center = pred_bb[:, :2] + 0.5 * (pred_bb[:, 2:] - 1.0)\n    anno_center = anno_bb[:, :2] + 0.5 * (anno_bb[:, 2:] - 1.0)\n\n    if normalized:\n        pred_center = pred_center / anno_bb[:, 2:]\n        anno_center = anno_center / anno_bb[:, 2:]\n\n    err_center = ((pred_center - anno_center)**2).sum(1).sqrt()\n    return err_center\n\n\ndef calc_iou_overlap(pred_bb, anno_bb):\n    tl = torch.max(pred_bb[:, :2], anno_bb[:, :2])\n    br = torch.min(pred_bb[:, :2] + pred_bb[:, 2:] - 1.0, anno_bb[:, :2] + anno_bb[:, 2:] - 1.0)\n    sz = (br - tl + 1.0).clamp(0)\n\n    # Area\n    intersection = sz.prod(dim=1)\n    union = pred_bb[:, 2:].prod(dim=1) + anno_bb[:, 2:].prod(dim=1) - intersection\n\n    return intersection / union\n\n\ndef calc_seq_err_robust(pred_bb, anno_bb, dataset, target_visible=None):\n    pred_bb = pred_bb.clone()\n\n    # Check if invalid values are present\n    if torch.isnan(pred_bb).any() or (pred_bb[:, 2:] < 0.0).any():\n        raise Exception('Error: Invalid results')\n\n    if torch.isnan(anno_bb).any():\n        if dataset == 'uav':\n            pass\n        else:\n            raise Exception('Warning: NaNs in annotation')\n\n    if (pred_bb[:, 2:] == 0.0).any():\n        for i in range(1, pred_bb.shape[0]):\n            if (pred_bb[i, 2:] == 0.0).any() and not torch.isnan(anno_bb[i, :]).any():\n                pred_bb[i, :] = pred_bb[i-1, :]\n\n    if pred_bb.shape[0] != anno_bb.shape[0]:\n        if dataset == 'lasot':\n            if pred_bb.shape[0] > anno_bb.shape[0]:\n                # For monkey-17, there is a mismatch for some trackers.\n                pred_bb = pred_bb[:anno_bb.shape[0], :]\n            else:\n                raise Exception('Mis-match in tracker prediction and GT lengths')\n        else:\n            # print('Warning: Mis-match in tracker prediction and GT lengths')\n            if pred_bb.shape[0] > anno_bb.shape[0]:\n                pred_bb = pred_bb[:anno_bb.shape[0], :]\n            else:\n                pad = torch.zeros((anno_bb.shape[0] - pred_bb.shape[0], 4)).type_as(pred_bb)\n                pred_bb = torch.cat((pred_bb, pad), dim=0)\n\n    pred_bb[0, :] = anno_bb[0, :]\n\n    if target_visible is not None:\n        target_visible = target_visible.bool()\n        valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2) & target_visible\n    else:\n        valid = ((anno_bb[:, 2:] > 0.0).sum(1) == 2)\n\n    err_center = calc_err_center(pred_bb, anno_bb)\n    err_center_normalized = calc_err_center(pred_bb, anno_bb, normalized=True)\n    err_overlap = calc_iou_overlap(pred_bb, anno_bb)\n\n    # handle invalid anno cases\n    if dataset in ['uav']:\n        err_center[~valid] = -1.0\n    else:\n        err_center[~valid] = float(\"Inf\")\n    err_center_normalized[~valid] = -1.0\n    err_overlap[~valid] = -1.0\n\n    if dataset == 'lasot':\n        err_center_normalized[~target_visible] = float(\"Inf\")\n        err_center[~target_visible] = float(\"Inf\")\n\n    if torch.isnan(err_overlap).any():\n        raise Exception('Nans in calculated overlap')\n    return err_overlap, err_center, err_center_normalized, valid\n\n\ndef 
extract_results(trackers, dataset, report_name, skip_missing_seq=False, plot_bin_gap=0.05,\n                    exclude_invalid_frames=False):\n    settings = env_settings()\n    eps = 1e-16\n\n    result_plot_path = os.path.join(settings.result_plot_path, report_name)\n\n    if not os.path.exists(result_plot_path):\n        os.makedirs(result_plot_path)\n\n    threshold_set_overlap = torch.arange(0.0, 1.0 + plot_bin_gap, plot_bin_gap, dtype=torch.float64)\n    threshold_set_center = torch.arange(0, 51, dtype=torch.float64)\n    threshold_set_center_norm = torch.arange(0, 51, dtype=torch.float64) / 100.0\n\n    avg_overlap_all = torch.zeros((len(dataset), len(trackers)), dtype=torch.float64)\n    ave_success_rate_plot_overlap = torch.zeros((len(dataset), len(trackers), threshold_set_overlap.numel()),\n                                                dtype=torch.float32)\n    ave_success_rate_plot_center = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),\n                                               dtype=torch.float32)\n    ave_success_rate_plot_center_norm = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),\n                                                    dtype=torch.float32)\n\n    valid_sequence = torch.ones(len(dataset), dtype=torch.uint8)\n\n    for seq_id, seq in enumerate(tqdm(dataset)):\n        # Load anno\n        anno_bb = torch.tensor(seq.ground_truth_rect)\n        target_visible = torch.tensor(seq.target_visible, dtype=torch.uint8) if seq.target_visible is not None else None\n        for trk_id, trk in enumerate(trackers):\n            # Load results\n            base_results_path = '{}/{}'.format(trk.results_dir, seq.name)\n            results_path = '{}.txt'.format(base_results_path)\n\n            if os.path.isfile(results_path):\n                pred_bb = torch.tensor(load_text(str(results_path), delimiter=('\\t', ','), dtype=np.float64))\n            else:\n                if skip_missing_seq:\n                    valid_sequence[seq_id] = 0\n                    break\n                else:\n                    raise Exception('Result not found. 
{}'.format(results_path))\n\n            # Calculate measures\n            err_overlap, err_center, err_center_normalized, valid_frame = calc_seq_err_robust(\n                pred_bb, anno_bb, seq.dataset, target_visible)\n\n            avg_overlap_all[seq_id, trk_id] = err_overlap[valid_frame].mean()\n\n            if exclude_invalid_frames:\n                seq_length = valid_frame.long().sum()\n            else:\n                seq_length = anno_bb.shape[0]\n\n            if seq_length <= 0:\n                raise Exception('Seq length zero')\n\n            ave_success_rate_plot_overlap[seq_id, trk_id, :] = (err_overlap.view(-1, 1) > threshold_set_overlap.view(1, -1)).sum(0).float() / seq_length\n            ave_success_rate_plot_center[seq_id, trk_id, :] = (err_center.view(-1, 1) <= threshold_set_center.view(1, -1)).sum(0).float() / seq_length\n            ave_success_rate_plot_center_norm[seq_id, trk_id, :] = (err_center_normalized.view(-1, 1) <= threshold_set_center_norm.view(1, -1)).sum(0).float() / seq_length\n\n    print('\\n\\nComputed results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))\n\n    # Prepare dictionary for saving data\n    seq_names = [s.name for s in dataset]\n    tracker_names = [{'name': t.name, 'param': t.parameter_name, 'run_id': t.run_id, 'disp_name': t.display_name}\n                     for t in trackers]\n\n    eval_data = {'sequences': seq_names, 'trackers': tracker_names,\n                 'valid_sequence': valid_sequence.tolist(),\n                 'ave_success_rate_plot_overlap': ave_success_rate_plot_overlap.tolist(),\n                 'ave_success_rate_plot_center': ave_success_rate_plot_center.tolist(),\n                 'ave_success_rate_plot_center_norm': ave_success_rate_plot_center_norm.tolist(),\n                 'avg_overlap_all': avg_overlap_all.tolist(),\n                 'threshold_set_overlap': threshold_set_overlap.tolist(),\n                 'threshold_set_center': threshold_set_center.tolist(),\n                 'threshold_set_center_norm': threshold_set_center_norm.tolist()}\n\n    with open(result_plot_path + '/eval_data.pkl', 'wb') as fh:\n        pickle.dump(eval_data, fh)\n\n    return eval_data\n"
  },
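  {
    "path": "examples/bbox_measures_demo.py",
    "content": "# Worked example (illustrative) for the per-frame measures in\n# lib/test/analysis/extract_results.py. Boxes are (x, y, w, h); the +/- 1.0 terms\n# treat coordinates as inclusive pixel indices. The boxes below are assumptions.\nimport torch\n\nfrom lib.test.analysis.extract_results import calc_err_center, calc_iou_overlap\n\npred = torch.tensor([[10.0, 10.0, 20.0, 20.0]])\nanno = torch.tensor([[15.0, 15.0, 20.0, 20.0]])\n\niou = calc_iou_overlap(pred, anno)  # 15x15 intersection over a union of 575\nassert torch.isclose(iou, torch.tensor([225.0 / 575.0]))\n\nerr = calc_err_center(pred, anno)  # centers (19.5, 19.5) vs (24.5, 24.5)\nassert torch.isclose(err, torch.tensor([50.0]).sqrt())\n"
  },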
  {
    "path": "lib/test/analysis/plot_results.py",
    "content": "import tikzplotlib\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport os\nimport torch\nimport pickle\nimport json\nfrom lib.test.evaluation.environment import env_settings\nfrom lib.test.analysis.extract_results import extract_results\n\n\ndef get_plot_draw_styles():\n    plot_draw_style = [{'color': (1.0, 0.0, 0.0), 'line_style': '-'},\n                       {'color': (0.0, 1.0, 0.0), 'line_style': '-'},\n                       {'color': (0.0, 0.0, 1.0), 'line_style': '-'},\n                       {'color': (0.0, 0.0, 0.0), 'line_style': '-'},\n                       {'color': (1.0, 0.0, 1.0), 'line_style': '-'},\n                       {'color': (0.0, 1.0, 1.0), 'line_style': '-'},\n                       {'color': (0.5, 0.5, 0.5), 'line_style': '-'},\n                       {'color': (136.0 / 255.0, 0.0, 21.0 / 255.0), 'line_style': '-'},\n                       {'color': (1.0, 127.0 / 255.0, 39.0 / 255.0), 'line_style': '-'},\n                       {'color': (0.0, 162.0 / 255.0, 232.0 / 255.0), 'line_style': '-'},\n                       {'color': (0.0, 0.5, 0.0), 'line_style': '-'},\n                       {'color': (1.0, 0.5, 0.2), 'line_style': '-'},\n                       {'color': (0.1, 0.4, 0.0), 'line_style': '-'},\n                       {'color': (0.6, 0.3, 0.9), 'line_style': '-'},\n                       {'color': (0.4, 0.7, 0.1), 'line_style': '-'},\n                       {'color': (0.2, 0.1, 0.7), 'line_style': '-'},\n                       {'color': (0.7, 0.6, 0.2), 'line_style': '-'}]\n\n    return plot_draw_style\n\n\ndef check_eval_data_is_valid(eval_data, trackers, dataset):\n    \"\"\" Checks if the pre-computed results are valid\"\"\"\n    seq_names = [s.name for s in dataset]\n    seq_names_saved = eval_data['sequences']\n\n    tracker_names_f = [(t.name, t.parameter_name, t.run_id) for t in trackers]\n    tracker_names_f_saved = [(t['name'], t['param'], t['run_id']) for t in eval_data['trackers']]\n\n    return seq_names == seq_names_saved and tracker_names_f == tracker_names_f_saved\n\n\ndef merge_multiple_runs(eval_data):\n    new_tracker_names = []\n    ave_success_rate_plot_overlap_merged = []\n    ave_success_rate_plot_center_merged = []\n    ave_success_rate_plot_center_norm_merged = []\n    avg_overlap_all_merged = []\n\n    ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap'])\n    ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center'])\n    ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm'])\n    avg_overlap_all = torch.tensor(eval_data['avg_overlap_all'])\n\n    trackers = eval_data['trackers']\n    merged = torch.zeros(len(trackers), dtype=torch.uint8)\n    for i in range(len(trackers)):\n        if merged[i]:\n            continue\n        base_tracker = trackers[i]\n        new_tracker_names.append(base_tracker)\n\n        match = [t['name'] == base_tracker['name'] and t['param'] == base_tracker['param'] for t in trackers]\n        match = torch.tensor(match)\n\n        ave_success_rate_plot_overlap_merged.append(ave_success_rate_plot_overlap[:, match, :].mean(1))\n        ave_success_rate_plot_center_merged.append(ave_success_rate_plot_center[:, match, :].mean(1))\n        ave_success_rate_plot_center_norm_merged.append(ave_success_rate_plot_center_norm[:, match, :].mean(1))\n        avg_overlap_all_merged.append(avg_overlap_all[:, match].mean(1))\n\n        merged[match] = 1\n\n    
ave_success_rate_plot_overlap_merged = torch.stack(ave_success_rate_plot_overlap_merged, dim=1)\n    ave_success_rate_plot_center_merged = torch.stack(ave_success_rate_plot_center_merged, dim=1)\n    ave_success_rate_plot_center_norm_merged = torch.stack(ave_success_rate_plot_center_norm_merged, dim=1)\n    avg_overlap_all_merged = torch.stack(avg_overlap_all_merged, dim=1)\n\n    eval_data['trackers'] = new_tracker_names\n    eval_data['ave_success_rate_plot_overlap'] = ave_success_rate_plot_overlap_merged.tolist()\n    eval_data['ave_success_rate_plot_center'] = ave_success_rate_plot_center_merged.tolist()\n    eval_data['ave_success_rate_plot_center_norm'] = ave_success_rate_plot_center_norm_merged.tolist()\n    eval_data['avg_overlap_all'] = avg_overlap_all_merged.tolist()\n\n    return eval_data\n\n\ndef get_tracker_display_name(tracker):\n    if tracker['disp_name'] is None:\n        if tracker['run_id'] is None:\n            disp_name = '{}_{}'.format(tracker['name'], tracker['param'])\n        else:\n            disp_name = '{}_{}_{:03d}'.format(tracker['name'], tracker['param'],\n                                              tracker['run_id'])\n    else:\n        disp_name = tracker['disp_name']\n\n    return  disp_name\n\n\ndef plot_draw_save(y, x, scores, trackers, plot_draw_styles, result_plot_path, plot_opts):\n    plt.rcParams['text.usetex']=True\n    plt.rcParams[\"font.family\"] = \"Times New Roman\"\n    # Plot settings\n    font_size = plot_opts.get('font_size', 20)\n    font_size_axis = plot_opts.get('font_size_axis', 20)\n    line_width = plot_opts.get('line_width', 2)\n    font_size_legend = plot_opts.get('font_size_legend', 20)\n\n    plot_type = plot_opts['plot_type']\n    legend_loc = plot_opts['legend_loc']\n\n    xlabel = plot_opts['xlabel']\n    ylabel = plot_opts['ylabel']\n    ylabel = \"%s\"%(ylabel.replace('%','\\%'))\n    xlim = plot_opts['xlim']\n    ylim = plot_opts['ylim']\n\n    title = r\"$\\bf{%s}$\" %(plot_opts['title'])\n\n    matplotlib.rcParams.update({'font.size': font_size})\n    matplotlib.rcParams.update({'axes.titlesize': font_size_axis})\n    matplotlib.rcParams.update({'axes.titleweight': 'black'})\n    matplotlib.rcParams.update({'axes.labelsize': font_size_axis})\n\n    fig, ax = plt.subplots()\n\n    index_sort = scores.argsort(descending=False)\n\n    plotted_lines = []\n    legend_text = []\n\n    for id, id_sort in enumerate(index_sort):\n        line = ax.plot(x.tolist(), y[id_sort, :].tolist(),\n                       linewidth=line_width,\n                       color=plot_draw_styles[index_sort.numel() - id - 1]['color'],\n                       linestyle=plot_draw_styles[index_sort.numel() - id - 1]['line_style'])\n\n        plotted_lines.append(line[0])\n\n        tracker = trackers[id_sort]\n        disp_name = get_tracker_display_name(tracker)\n\n        legend_text.append('{} [{:.1f}]'.format(disp_name, scores[id_sort]))\n\n    try:\n        # add bold to our method\n        for i in range(1,2):\n            legend_text[-i] = r'\\textbf{%s}'%(legend_text[-i])\n\n        ax.legend(plotted_lines[::-1], legend_text[::-1], loc=legend_loc, fancybox=False, edgecolor='black',\n                  fontsize=font_size_legend, framealpha=1.0)\n    except:\n        pass\n\n    ax.set(xlabel=xlabel,\n           ylabel=ylabel,\n           xlim=xlim, ylim=ylim,\n           title=title)\n\n    ax.grid(True, linestyle='-.')\n    fig.tight_layout()\n\n    tikzplotlib.save('{}/{}_plot.tex'.format(result_plot_path, plot_type))\n    
fig.savefig('{}/{}_plot.pdf'.format(result_plot_path, plot_type), dpi=300, format='pdf', transparent=True)\n    plt.draw()\n\n\ndef check_and_load_precomputed_results(trackers, dataset, report_name, force_evaluation=False, **kwargs):\n    # Load data\n    settings = env_settings()\n\n    # Load pre-computed results\n    result_plot_path = os.path.join(settings.result_plot_path, report_name)\n    eval_data_path = os.path.join(result_plot_path, 'eval_data.pkl')\n\n    if os.path.isfile(eval_data_path) and not force_evaluation:\n        with open(eval_data_path, 'rb') as fh:\n            eval_data = pickle.load(fh)\n    else:\n        # print('Pre-computed evaluation data not found. Computing results!')\n        eval_data = extract_results(trackers, dataset, report_name, **kwargs)\n\n    if not check_eval_data_is_valid(eval_data, trackers, dataset):\n        # print('Pre-computed evaluation data invalid. Re-computing results!')\n        eval_data = extract_results(trackers, dataset, report_name, **kwargs)\n        # pass\n    else:\n        # Update display names\n        tracker_names = [{'name': t.name, 'param': t.parameter_name, 'run_id': t.run_id, 'disp_name': t.display_name}\n                         for t in trackers]\n        eval_data['trackers'] = tracker_names\n    with open(eval_data_path, 'wb') as fh:\n        pickle.dump(eval_data, fh)\n    return eval_data\n\n\ndef get_auc_curve(ave_success_rate_plot_overlap, valid_sequence):\n    ave_success_rate_plot_overlap = ave_success_rate_plot_overlap[valid_sequence, :, :]\n    auc_curve = ave_success_rate_plot_overlap.mean(0) * 100.0\n    auc = auc_curve.mean(-1)\n\n    return auc_curve, auc\n\n\ndef get_prec_curve(ave_success_rate_plot_center, valid_sequence):\n    ave_success_rate_plot_center = ave_success_rate_plot_center[valid_sequence, :, :]\n    prec_curve = ave_success_rate_plot_center.mean(0) * 100.0\n    prec_score = prec_curve[:, 20]\n\n    return prec_curve, prec_score\n\n\ndef plot_results(trackers, dataset, report_name, merge_results=False,\n                 plot_types=('success',), force_evaluation=False, **kwargs):\n    \"\"\"\n    Plot results for the given trackers\n\n    args:\n        trackers - List of trackers to evaluate\n        dataset - List of sequences to evaluate\n        report_name - Name of the folder in env_settings.result_plot_path where the computed results and plots are saved\n        merge_results - If True, multiple random runs of non-deterministic trackers are averaged\n        plot_types - List of scores to display. 
Can contain 'success',\n                    'prec' (precision), and 'norm_prec' (normalized precision)\n    \"\"\"\n    # Load data\n    settings = env_settings()\n\n    plot_draw_styles = get_plot_draw_styles()\n\n    # Load pre-computed results\n    result_plot_path = os.path.join(settings.result_plot_path, report_name)\n    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, force_evaluation, **kwargs)\n\n    # Merge results from multiple runs\n    if merge_results:\n        eval_data = merge_multiple_runs(eval_data)\n\n    tracker_names = eval_data['trackers']\n\n    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)\n\n    print('\\nPlotting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))\n\n    print('\\nGenerating plots for: {}'.format(report_name))\n\n    # ********************************  Success Plot **************************************\n    if 'success' in plot_types:\n        ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap'])\n\n        # Index out valid sequences\n        auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence)\n        threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap'])\n\n        success_plot_opts = {'plot_type': 'success', 'legend_loc': 'lower left', 'xlabel': 'Overlap threshold',\n                             'ylabel': 'Overlap Precision [%]', 'xlim': (0, 1.0), 'ylim': (0, 88), 'title': 'Success'}\n        plot_draw_save(auc_curve, threshold_set_overlap, auc, tracker_names, plot_draw_styles, result_plot_path, success_plot_opts)\n\n    # ********************************  Precision Plot **************************************\n    if 'prec' in plot_types:\n        ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center'])\n\n        # Index out valid sequences\n        prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence)\n        threshold_set_center = torch.tensor(eval_data['threshold_set_center'])\n\n        precision_plot_opts = {'plot_type': 'precision', 'legend_loc': 'lower right',\n                               'xlabel': 'Location error threshold [pixels]', 'ylabel': 'Distance Precision [%]',\n                               'xlim': (0, 50), 'ylim': (0, 100), 'title': 'Precision plot'}\n        plot_draw_save(prec_curve, threshold_set_center, prec_score, tracker_names, plot_draw_styles, result_plot_path,\n                       precision_plot_opts)\n\n    # ********************************  Norm Precision Plot **************************************\n    if 'norm_prec' in plot_types:\n        ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm'])\n\n        # Index out valid sequences\n        prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center_norm, valid_sequence)\n        threshold_set_center_norm = torch.tensor(eval_data['threshold_set_center_norm'])\n\n        norm_precision_plot_opts = {'plot_type': 'norm_precision', 'legend_loc': 'lower right',\n                                    'xlabel': 'Location error threshold', 'ylabel': 'Distance Precision [%]',\n                                    'xlim': (0, 0.5), 'ylim': (0, 85), 'title': 'Normalized Precision'}\n        plot_draw_save(prec_curve, threshold_set_center_norm, prec_score, tracker_names, plot_draw_styles, result_plot_path,\n                       norm_precision_plot_opts)\n\n    
plt.show()\n\n\ndef generate_formatted_report(row_labels, scores, table_name=''):\n    name_width = max([len(d) for d in row_labels] + [len(table_name)]) + 5\n    min_score_width = 10\n\n    report_text = '\\n{label: <{width}} |'.format(label=table_name, width=name_width)\n\n    score_widths = [max(min_score_width, len(k) + 3) for k in scores.keys()]\n\n    for s, s_w in zip(scores.keys(), score_widths):\n        report_text = '{prev} {s: <{width}} |'.format(prev=report_text, s=s, width=s_w)\n\n    report_text = '{prev}\\n'.format(prev=report_text)\n\n    for trk_id, d_name in enumerate(row_labels):\n        # display name\n        report_text = '{prev}{tracker: <{width}} |'.format(prev=report_text, tracker=d_name,\n                                                           width=name_width)\n        for (score_type, score_value), s_w in zip(scores.items(), score_widths):\n            report_text = '{prev} {score: <{width}} |'.format(prev=report_text,\n                                                              score='{:0.2f}'.format(score_value[trk_id].item()),\n                                                              width=s_w)\n        report_text = '{prev}\\n'.format(prev=report_text)\n\n    return report_text\n\n\ndef print_results(trackers, dataset, report_name, merge_results=False,\n                  plot_types=('success',), **kwargs):\n    \"\"\" Print the results for the given trackers in a formatted table\n    args:\n        trackers - List of trackers to evaluate\n        dataset - List of sequences to evaluate\n        report_name - Name of the folder in env_settings.result_plot_path where the computed results and plots are saved\n        merge_results - If True, multiple random runs for non-deterministic trackers are averaged\n        plot_types - List of scores to display. 
Can contain 'success' (prints AUC, OP50, and OP75 scores),\n                    'prec' (prints precision score), and 'norm_prec' (prints normalized precision score)\n    \"\"\"\n    # Load pre-computed results\n    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs)\n\n    # Merge results from multiple runs\n    if merge_results:\n        eval_data = merge_multiple_runs(eval_data)\n\n    tracker_names = eval_data['trackers']\n    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)\n\n    print('\\nReporting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))\n\n    scores = {}\n\n    # ********************************  Success Plot **************************************\n    if 'success' in plot_types:\n        threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap'])\n        ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap'])\n\n        # Index out valid sequences\n        auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence)\n        scores['AUC'] = auc\n        scores['OP50'] = auc_curve[:, threshold_set_overlap == 0.50]\n        scores['OP75'] = auc_curve[:, threshold_set_overlap == 0.75]\n\n    # ********************************  Precision Plot **************************************\n    if 'prec' in plot_types:\n        ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center'])\n\n        # Index out valid sequences\n        prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence)\n        scores['Precision'] = prec_score\n\n    # ********************************  Norm Precision Plot *********************************\n    if 'norm_prec' in plot_types:\n        ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm'])\n\n        # Index out valid sequences\n        norm_prec_curve, norm_prec_score = get_prec_curve(ave_success_rate_plot_center_norm, valid_sequence)\n        scores['Norm Precision'] = norm_prec_score\n\n    # Print\n    tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names]\n    report_text = generate_formatted_report(tracker_disp_names, scores, table_name=report_name)\n    print(report_text)\n\n\ndef plot_got_success(trackers, report_name):\n    \"\"\" Plot success plot for GOT-10k dataset using the json reports.\n    Save the json reports from http://got-10k.aitestunion.com/leaderboard in the directory set to\n    env_settings.got_reports_path\n\n    The tracker name in the experiment file should be set to the name of the report file for that tracker,\n    e.g. 
DiMP50_report_2019_09_02_15_44_25 if the report is named DiMP50_report_2019_09_02_15_44_25.json\n\n    args:\n        trackers - List of trackers to evaluate\n        report_name - Name of the folder in env_settings.result_plot_path where the computed results and plots are saved\n    \"\"\"\n    # Load data\n    settings = env_settings()\n    plot_draw_styles = get_plot_draw_styles()\n\n    result_plot_path = os.path.join(settings.result_plot_path, report_name)\n\n    auc_curve = torch.zeros((len(trackers), 101))\n    scores = torch.zeros(len(trackers))\n\n    # Load results\n    tracker_names = []\n    for trk_id, trk in enumerate(trackers):\n        json_path = '{}/{}.json'.format(settings.got_reports_path, trk.name)\n\n        if os.path.isfile(json_path):\n            with open(json_path, 'r') as f:\n                eval_data = json.load(f)\n        else:\n            raise Exception('Report not found {}'.format(json_path))\n\n        if len(eval_data.keys()) > 1:\n            raise Exception('Expected a single tracker entry in {}'.format(json_path))\n\n        # First field is the tracker name. Index it out\n        eval_data = eval_data[list(eval_data.keys())[0]]\n        if 'succ_curve' in eval_data.keys():\n            curve = eval_data['succ_curve']\n            ao = eval_data['ao']\n        elif 'overall' in eval_data.keys() and 'succ_curve' in eval_data['overall'].keys():\n            curve = eval_data['overall']['succ_curve']\n            ao = eval_data['overall']['ao']\n        else:\n            raise Exception('Invalid JSON file {}'.format(json_path))\n\n        auc_curve[trk_id, :] = torch.tensor(curve) * 100.0\n        scores[trk_id] = ao * 100.0\n\n        tracker_names.append({'name': trk.name, 'param': trk.parameter_name, 'run_id': trk.run_id,\n                              'disp_name': trk.display_name})\n\n    threshold_set_overlap = torch.arange(0.0, 1.01, 0.01, dtype=torch.float64)\n\n    success_plot_opts = {'plot_type': 'success', 'legend_loc': 'lower left', 'xlabel': 'Overlap threshold',\n                         'ylabel': 'Overlap Precision [%]', 'xlim': (0, 1.0), 'ylim': (0, 100), 'title': 'Success plot'}\n    plot_draw_save(auc_curve, threshold_set_overlap, scores, tracker_names, plot_draw_styles, result_plot_path,\n                   success_plot_opts)\n    plt.show()\n\n\ndef print_per_sequence_results(trackers, dataset, report_name, merge_results=False,\n                               filter_criteria=None, **kwargs):\n    \"\"\" Print per-sequence results for the given trackers. Additionally, the sequences to list can be filtered using\n    the filter criteria.\n\n    args:\n        trackers - List of trackers to evaluate\n        dataset - List of sequences to evaluate\n        report_name - Name of the folder in env_settings.result_plot_path where the computed results and plots are saved\n        merge_results - If True, multiple random runs for non-deterministic trackers are averaged\n        filter_criteria - Filter which sequence results are reported. The following modes are supported\n                        None: No filtering. Display results for all sequences in dataset\n                        'ao_min': Only display sequences for which the minimum average overlap (AO) score over the\n                                  trackers is less than a threshold filter_criteria['threshold']. 
This mode can\n                                  be used to select sequences where at least one tracker performs poorly.\n                        'ao_max': Only display sequences for which the maximum average overlap (AO) score over the\n                                  trackers is less than a threshold filter_criteria['threshold']. This mode can\n                                  be used to select sequences where all trackers perform poorly.\n                        'delta_ao': Only display sequences for which the performance of different trackers varies by at\n                                    least filter_criteria['threshold'] in average overlap (AO) score. This mode can\n                                    be used to select sequences where the trackers behave very differently\n                                    from each other.\n    \"\"\"\n    # Load pre-computed results\n    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs)\n\n    # Merge results from multiple runs\n    if merge_results:\n        eval_data = merge_multiple_runs(eval_data)\n\n    tracker_names = eval_data['trackers']\n    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)\n    sequence_names = eval_data['sequences']\n    avg_overlap_all = torch.tensor(eval_data['avg_overlap_all']) * 100.0\n\n    # Filter sequences\n    if filter_criteria is not None:\n        if filter_criteria['mode'] == 'ao_min':\n            min_ao = avg_overlap_all.min(dim=1)[0]\n            valid_sequence = valid_sequence & (min_ao < filter_criteria['threshold'])\n        elif filter_criteria['mode'] == 'ao_max':\n            max_ao = avg_overlap_all.max(dim=1)[0]\n            valid_sequence = valid_sequence & (max_ao < filter_criteria['threshold'])\n        elif filter_criteria['mode'] == 'delta_ao':\n            min_ao = avg_overlap_all.min(dim=1)[0]\n            max_ao = avg_overlap_all.max(dim=1)[0]\n            valid_sequence = valid_sequence & ((max_ao - min_ao) > filter_criteria['threshold'])\n        else:\n            raise Exception('Unknown filter criteria mode {}'.format(filter_criteria['mode']))\n\n    avg_overlap_all = avg_overlap_all[valid_sequence, :]\n    sequence_names = [s + ' (ID={})'.format(i) for i, (s, v) in enumerate(zip(sequence_names, valid_sequence.tolist())) if v]\n\n    tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names]\n\n    scores_per_tracker = {k: avg_overlap_all[:, i] for i, k in enumerate(tracker_disp_names)}\n    report_text = generate_formatted_report(sequence_names, scores_per_tracker)\n\n    print(report_text)\n\n\ndef print_results_per_video(trackers, dataset, report_name, merge_results=False,\n                  plot_types=('success',), per_video=False, **kwargs):\n    \"\"\" Print the results for the given trackers in a formatted table\n    args:\n        trackers - List of trackers to evaluate\n        dataset - List of sequences to evaluate\n        report_name - Name of the folder in env_settings.result_plot_path where the computed results and plots are saved\n        merge_results - If True, multiple random runs for non-deterministic trackers are averaged\n        plot_types - List of scores to display. 
Can contain 'success' (prints AUC, OP50, and OP75 scores),\n                    'prec' (prints precision score), and 'norm_prec' (prints normalized precision score)\n    \"\"\"\n    # Load pre-computed results\n    eval_data = check_and_load_precomputed_results(trackers, dataset, report_name, **kwargs)\n\n    # Merge results from multiple runs\n    if merge_results:\n        eval_data = merge_multiple_runs(eval_data)\n\n    seq_lens = len(eval_data['sequences'])\n    eval_datas = [{} for _ in range(seq_lens)]\n    if per_video:\n        for key, value in eval_data.items():\n            if len(value) == seq_lens:\n                for i in range(seq_lens):\n                    eval_datas[i][key] = [value[i]]\n            else:\n                for i in range(seq_lens):\n                    eval_datas[i][key] = value\n\n    tracker_names = eval_data['trackers']\n    valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)\n\n    print('\\nReporting results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))\n\n    scores = {}\n\n    # ********************************  Success Plot **************************************\n    if 'success' in plot_types:\n        threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap'])\n        ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap'])\n\n        # Index out valid sequences\n        auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence)\n        scores['AUC'] = auc\n        scores['OP50'] = auc_curve[:, threshold_set_overlap == 0.50]\n        scores['OP75'] = auc_curve[:, threshold_set_overlap == 0.75]\n\n    # ********************************  Precision Plot **************************************\n    if 'prec' in plot_types:\n        ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center'])\n\n        # Index out valid sequences\n        prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence)\n        scores['Precision'] = prec_score\n\n    # ********************************  Norm Precision Plot *********************************\n    if 'norm_prec' in plot_types:\n        ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm'])\n\n        # Index out valid sequences\n        norm_prec_curve, norm_prec_score = get_prec_curve(ave_success_rate_plot_center_norm, valid_sequence)\n        scores['Norm Precision'] = norm_prec_score\n\n    # Print\n    tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names]\n    report_text = generate_formatted_report(tracker_disp_names, scores, table_name=report_name)\n    print(report_text)\n\n    if per_video:\n        for i in range(seq_lens):\n            eval_data = eval_datas[i]\n\n            print('\\n{} sequences'.format(eval_data['sequences'][0]))\n\n            scores = {}\n            valid_sequence = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)\n\n            # ********************************  Success Plot **************************************\n            if 'success' in plot_types:\n                threshold_set_overlap = torch.tensor(eval_data['threshold_set_overlap'])\n                ave_success_rate_plot_overlap = torch.tensor(eval_data['ave_success_rate_plot_overlap'])\n\n                # Index out valid sequences\n                auc_curve, auc = get_auc_curve(ave_success_rate_plot_overlap, valid_sequence)\n                
scores['AUC'] = auc\n                scores['OP50'] = auc_curve[:, threshold_set_overlap == 0.50]\n                scores['OP75'] = auc_curve[:, threshold_set_overlap == 0.75]\n\n            # ********************************  Precision Plot **************************************\n            if 'prec' in plot_types:\n                ave_success_rate_plot_center = torch.tensor(eval_data['ave_success_rate_plot_center'])\n\n                # Index out valid sequences\n                prec_curve, prec_score = get_prec_curve(ave_success_rate_plot_center, valid_sequence)\n                scores['Precision'] = prec_score\n\n            # ********************************  Norm Precision Plot *********************************\n            if 'norm_prec' in plot_types:\n                ave_success_rate_plot_center_norm = torch.tensor(eval_data['ave_success_rate_plot_center_norm'])\n\n                # Index out valid sequences\n                norm_prec_curve, norm_prec_score = get_prec_curve(ave_success_rate_plot_center_norm, valid_sequence)\n                scores['Norm Precision'] = norm_prec_score\n\n            # Print\n            tracker_disp_names = [get_tracker_display_name(trk) for trk in tracker_names]\n            report_text = generate_formatted_report(tracker_disp_names, scores, table_name=report_name)\n            print(report_text)\n"
  },
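  {
    "path": "examples/analysis_results_example.py",
    "content": "# Illustrative usage sketch for the reporting/plotting helpers above; not part of the\n# original toolkit. The tracker name, parameter name and report name below are\n# placeholders, and the analysis-module import path and the trackerlist signature are\n# assumed from this repo's layout -- adjust them to your setup.\nfrom lib.test.evaluation import get_dataset, trackerlist\nfrom lib.test.analysis.plot_results import plot_results, print_results\n\n# Assumes tracking results have already been produced under env_settings().results_path.\ntrackers = trackerlist(name='artrack', parameter_name='artrack_256', dataset_name='lasot',\n                       run_ids=None, display_name='ARTrack-256')\ndataset = get_dataset('lasot')\n\n# Tabulates AUC, OP50, OP75, precision and normalized precision.\nprint_results(trackers, dataset, 'lasot_report', merge_results=True,\n              plot_types=('success', 'prec', 'norm_prec'))\n\n# Saves success/precision curves as PDFs under env_settings().result_plot_path/lasot_report.\nplot_results(trackers, dataset, 'lasot_report', merge_results=True,\n             plot_types=('success', 'prec', 'norm_prec'))\n"
  },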
  {
    "path": "lib/test/evaluation/__init__.py",
    "content": "from .data import Sequence\nfrom .tracker import Tracker, trackerlist\nfrom .datasets import get_dataset\nfrom .environment import create_default_local_file_ITP_test"
  },
  {
    "path": "lib/test/evaluation/data.py",
    "content": "import numpy as np\nfrom lib.test.evaluation.environment import env_settings\nfrom lib.train.data.image_loader import imread_indexed\nfrom collections import OrderedDict\n\n\nclass BaseDataset:\n    \"\"\"Base class for all datasets.\"\"\"\n    def __init__(self):\n        self.env_settings = env_settings()\n\n    def __len__(self):\n        \"\"\"Overload this function in your dataset. This should return number of sequences in the dataset.\"\"\"\n        raise NotImplementedError\n\n    def get_sequence_list(self):\n        \"\"\"Overload this in your dataset. Should return the list of sequences in the dataset.\"\"\"\n        raise NotImplementedError\n\n\nclass Sequence:\n    \"\"\"Class for the sequence in an evaluation.\"\"\"\n    def __init__(self, name, frames, dataset, ground_truth_rect, ground_truth_seg=None, init_data=None,\n                 object_class=None, target_visible=None, object_ids=None, multiobj_mode=False):\n        self.name = name\n        self.frames = frames\n        self.dataset = dataset\n        self.ground_truth_rect = ground_truth_rect\n        self.ground_truth_seg = ground_truth_seg\n        self.object_class = object_class\n        self.target_visible = target_visible\n        self.object_ids = object_ids\n        self.multiobj_mode = multiobj_mode\n        self.init_data = self._construct_init_data(init_data)\n        self._ensure_start_frame()\n\n    def _ensure_start_frame(self):\n        # Ensure start frame is 0\n        start_frame = min(list(self.init_data.keys()))\n        if start_frame > 0:\n            self.frames = self.frames[start_frame:]\n            if self.ground_truth_rect is not None:\n                if isinstance(self.ground_truth_rect, (dict, OrderedDict)):\n                    for obj_id, gt in self.ground_truth_rect.items():\n                        self.ground_truth_rect[obj_id] = gt[start_frame:,:]\n                else:\n                    self.ground_truth_rect = self.ground_truth_rect[start_frame:,:]\n            if self.ground_truth_seg is not None:\n                self.ground_truth_seg = self.ground_truth_seg[start_frame:]\n                assert len(self.frames) == len(self.ground_truth_seg)\n\n            if self.target_visible is not None:\n                self.target_visible = self.target_visible[start_frame:]\n            self.init_data = {frame-start_frame: val for frame, val in self.init_data.items()}\n\n    def _construct_init_data(self, init_data):\n        if init_data is not None:\n            if not self.multiobj_mode:\n                assert self.object_ids is None or len(self.object_ids) == 1\n                for frame, init_val in init_data.items():\n                    if 'bbox' in init_val and isinstance(init_val['bbox'], (dict, OrderedDict)):\n                        init_val['bbox'] = init_val['bbox'][self.object_ids[0]]\n            # convert to list\n            for frame, init_val in init_data.items():\n                if 'bbox' in init_val:\n                    if isinstance(init_val['bbox'], (dict, OrderedDict)):\n                        init_val['bbox'] = OrderedDict({obj_id: list(init) for obj_id, init in init_val['bbox'].items()})\n                    else:\n                        init_val['bbox'] = list(init_val['bbox'])\n        else:\n            init_data = {0: dict()}     # Assume start from frame 0\n\n            if self.object_ids is not None:\n                init_data[0]['object_ids'] = self.object_ids\n\n            if self.ground_truth_rect is not None:\n                
if self.multiobj_mode:\n                    assert isinstance(self.ground_truth_rect, (dict, OrderedDict))\n                    init_data[0]['bbox'] = OrderedDict({obj_id: list(gt[0,:]) for obj_id, gt in self.ground_truth_rect.items()})\n                else:\n                    assert self.object_ids is None or len(self.object_ids) == 1\n                    if isinstance(self.ground_truth_rect, (dict, OrderedDict)):\n                        init_data[0]['bbox'] = list(self.ground_truth_rect[self.object_ids[0]][0, :])\n                    else:\n                        init_data[0]['bbox'] = list(self.ground_truth_rect[0,:])\n\n            if self.ground_truth_seg is not None:\n                init_data[0]['mask'] = self.ground_truth_seg[0]\n\n        return init_data\n\n    def init_info(self):\n        info = self.frame_info(frame_num=0)\n        return info\n\n    def frame_info(self, frame_num):\n        info = self.object_init_data(frame_num=frame_num)\n        return info\n\n    def init_bbox(self, frame_num=0):\n        return self.object_init_data(frame_num=frame_num).get('init_bbox')\n\n    def init_mask(self, frame_num=0):\n        return self.object_init_data(frame_num=frame_num).get('init_mask')\n\n    def get_info(self, keys, frame_num=None):\n        info = dict()\n        for k in keys:\n            val = self.get(k, frame_num=frame_num)\n            if val is not None:\n                info[k] = val\n        return info\n\n    def object_init_data(self, frame_num=None) -> dict:\n        if frame_num is None:\n            frame_num = 0\n        if frame_num not in self.init_data:\n            return dict()\n\n        init_data = dict()\n        for key, val in self.init_data[frame_num].items():\n            if val is None:\n                continue\n            init_data['init_'+key] = val\n\n        if 'init_mask' in init_data and init_data['init_mask'] is not None:\n            anno = imread_indexed(init_data['init_mask'])\n            if not self.multiobj_mode and self.object_ids is not None:\n                assert len(self.object_ids) == 1\n                anno = (anno == int(self.object_ids[0])).astype(np.uint8)\n            init_data['init_mask'] = anno\n\n        if self.object_ids is not None:\n            init_data['object_ids'] = self.object_ids\n            init_data['sequence_object_ids'] = self.object_ids\n\n        return init_data\n\n    def target_class(self, frame_num=None):\n        return self.object_class\n\n    def get(self, name, frame_num=None):\n        return getattr(self, name)(frame_num)\n\n    def __repr__(self):\n        return \"{self.__class__.__name__} {self.name}, length={len} frames\".format(self=self, len=len(self.frames))\n\n\n\nclass SequenceList(list):\n    \"\"\"List of sequences. 
Supports the addition operator to concatenate sequence lists.\"\"\"\n    def __getitem__(self, item):\n        if isinstance(item, str):\n            for seq in self:\n                if seq.name == item:\n                    return seq\n            raise IndexError('Sequence name not in the dataset.')\n        elif isinstance(item, int):\n            return super(SequenceList, self).__getitem__(item)\n        elif isinstance(item, (tuple, list)):\n            return SequenceList([super(SequenceList, self).__getitem__(i) for i in item])\n        else:\n            return SequenceList(super(SequenceList, self).__getitem__(item))\n\n    def __add__(self, other):\n        return SequenceList(super(SequenceList, self).__add__(other))\n\n    def copy(self):\n        return SequenceList(super(SequenceList, self).copy())"
  },
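  {
    "path": "examples/sequence_list_example.py",
    "content": "# Illustrative sketch of the Sequence/SequenceList API from lib/test/evaluation/data.py;\n# not part of the original toolkit. Uses synthetic data; the frame paths are never read.\nimport numpy as np\nfrom lib.test.evaluation.data import Sequence, SequenceList\n\n# A toy two-frame sequence with (x, y, w, h) ground-truth boxes.\ngt = np.array([[10, 20, 30, 40], [12, 22, 30, 40]], dtype=np.float64)\nseq = Sequence('toy-1', ['0001.jpg', '0002.jpg'], 'toy', gt)\nprint(seq.init_info())      # {'init_bbox': [10.0, 20.0, 30.0, 40.0]}\n\nseqs = SequenceList([seq])\nprint(seqs['toy-1'].name)   # lookup by sequence name\nprint(seqs[0].name)         # lookup by integer index\nprint(len(seqs + seqs))     # '+' concatenates SequenceLists -> 2\n"
  },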
  {
    "path": "lib/test/evaluation/datasets.py",
    "content": "from collections import namedtuple\nimport importlib\nfrom lib.test.evaluation.data import SequenceList\n\nDatasetInfo = namedtuple('DatasetInfo', ['module', 'class_name', 'kwargs'])\n\npt = \"lib.test.evaluation.%sdataset\"  # Useful abbreviations to reduce the clutter\n\ndataset_dict = dict(\n    otb=DatasetInfo(module=pt % \"otb\", class_name=\"OTBDataset\", kwargs=dict()),\n    nfs=DatasetInfo(module=pt % \"nfs\", class_name=\"NFSDataset\", kwargs=dict()),\n    uav=DatasetInfo(module=pt % \"uav\", class_name=\"UAVDataset\", kwargs=dict()),\n    tc128=DatasetInfo(module=pt % \"tc128\", class_name=\"TC128Dataset\", kwargs=dict()),\n    tc128ce=DatasetInfo(module=pt % \"tc128ce\", class_name=\"TC128CEDataset\", kwargs=dict()),\n    trackingnet=DatasetInfo(module=pt % \"trackingnet\", class_name=\"TrackingNetDataset\", kwargs=dict()),\n    got10k_test=DatasetInfo(module=pt % \"got10k\", class_name=\"GOT10KDataset\", kwargs=dict(split='test')),\n    got10k_val=DatasetInfo(module=pt % \"got10k\", class_name=\"GOT10KDataset\", kwargs=dict(split='val')),\n    got10k_ltrval=DatasetInfo(module=pt % \"got10k\", class_name=\"GOT10KDataset\", kwargs=dict(split='ltrval')),\n    lasot=DatasetInfo(module=pt % \"lasot\", class_name=\"LaSOTDataset\", kwargs=dict()),\n    lasot_lmdb=DatasetInfo(module=pt % \"lasot_lmdb\", class_name=\"LaSOTlmdbDataset\", kwargs=dict()),\n\n    vot18=DatasetInfo(module=pt % \"vot\", class_name=\"VOTDataset\", kwargs=dict()),\n    vot22=DatasetInfo(module=pt % \"vot\", class_name=\"VOTDataset\", kwargs=dict(year=22)),\n    itb=DatasetInfo(module=pt % \"itb\", class_name=\"ITBDataset\", kwargs=dict()),\n    tnl2k=DatasetInfo(module=pt % \"tnl2k\", class_name=\"TNL2kDataset\", kwargs=dict()),\n    lasot_extension_subset=DatasetInfo(module=pt % \"lasotextensionsubset\", class_name=\"LaSOTExtensionSubsetDataset\",\n                                       kwargs=dict()),\n)\n\n\ndef load_dataset(name: str):\n    \"\"\" Import and load a single dataset.\"\"\"\n    name = name.lower()\n    dset_info = dataset_dict.get(name)\n    if dset_info is None:\n        raise ValueError('Unknown dataset \\'%s\\'' % name)\n\n    m = importlib.import_module(dset_info.module)\n    dataset = getattr(m, dset_info.class_name)(**dset_info.kwargs)  # Call the constructor\n    return dataset.get_sequence_list()\n\n\ndef get_dataset(*args):\n    \"\"\" Get a single or set of datasets.\"\"\"\n    dset = SequenceList()\n    for name in args:\n        dset.extend(load_dataset(name))\n    return dset"
  },
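  {
    "path": "examples/get_dataset_example.py",
    "content": "# Illustrative sketch of the dataset registry in lib/test/evaluation/datasets.py;\n# not part of the original toolkit. Requires the corresponding dataset paths to be\n# configured in lib/test/evaluation/local.py first.\nfrom lib.test.evaluation import get_dataset\n\n# Load a single benchmark ...\ndataset = get_dataset('lasot')\nprint(len(dataset), dataset[0])\n\n# ... or pool several benchmarks into one SequenceList.\ncombined = get_dataset('got10k_test', 'trackingnet')\nprint(len(combined))\n"
  },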
  {
    "path": "lib/test/evaluation/environment.py",
    "content": "import importlib\nimport os\n\n\nclass EnvSettings:\n    def __init__(self):\n        test_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n\n        self.results_path = '{}/tracking_results/'.format(test_path)\n        self.segmentation_path = '{}/segmentation_results/'.format(test_path)\n        self.network_path = '{}/networks/'.format(test_path)\n        self.result_plot_path = '{}/result_plots/'.format(test_path)\n        self.otb_path = ''\n        self.nfs_path = ''\n        self.uav_path = ''\n        self.tpl_path = ''\n        self.vot_path = ''\n        self.got10k_path = ''\n        self.lasot_path = ''\n        self.trackingnet_path = ''\n        self.davis_dir = ''\n        self.youtubevos_dir = ''\n\n        self.got_packed_results_path = ''\n        self.got_reports_path = ''\n        self.tn_packed_results_path = ''\n\n\ndef create_default_local_file():\n    comment = {'results_path': 'Where to store tracking results',\n               'network_path': 'Where tracking networks are stored.'}\n\n    path = os.path.join(os.path.dirname(__file__), 'local.py')\n    with open(path, 'w') as f:\n        settings = EnvSettings()\n\n        f.write('from test.evaluation.environment import EnvSettings\\n\\n')\n        f.write('def local_env_settings():\\n')\n        f.write('    settings = EnvSettings()\\n\\n')\n        f.write('    # Set your local paths here.\\n\\n')\n\n        for attr in dir(settings):\n            comment_str = None\n            if attr in comment:\n                comment_str = comment[attr]\n            attr_val = getattr(settings, attr)\n            if not attr.startswith('__') and not callable(attr_val):\n                if comment_str is None:\n                    f.write('    settings.{} = \\'{}\\'\\n'.format(attr, attr_val))\n                else:\n                    f.write('    settings.{} = \\'{}\\'    # {}\\n'.format(attr, attr_val, comment_str))\n        f.write('\\n    return settings\\n\\n')\n\n\nclass EnvSettings_ITP:\n    def __init__(self, workspace_dir, data_dir, save_dir):\n        self.prj_dir = workspace_dir\n        self.save_dir = save_dir\n        self.results_path = os.path.join(save_dir, 'test/tracking_results')\n        self.segmentation_path = os.path.join(save_dir, 'test/segmentation_results')\n        self.network_path = os.path.join(save_dir, 'test/networks')\n        self.result_plot_path = os.path.join(save_dir, 'test/result_plots')\n        self.otb_path = os.path.join(data_dir, 'otb')\n        self.nfs_path = os.path.join(data_dir, 'nfs')\n        self.uav_path = os.path.join(data_dir, 'uav')\n        self.tc128_path = os.path.join(data_dir, 'TC128')\n        self.tpl_path = ''\n        self.vot_path = os.path.join(data_dir, 'VOT2019')\n        self.got10k_path = os.path.join(data_dir, 'got10k')\n        self.got10k_lmdb_path = os.path.join(data_dir, 'got10k_lmdb')\n        self.lasot_path = os.path.join(data_dir, 'lasot')\n        self.lasot_lmdb_path = os.path.join(data_dir, 'lasot_lmdb')\n        self.trackingnet_path = os.path.join(data_dir, 'trackingnet')\n        self.vot18_path = os.path.join(data_dir, 'vot2018')\n        self.vot22_path = os.path.join(data_dir, 'vot2022')\n        self.itb_path = os.path.join(data_dir, 'itb')\n        self.tnl2k_path = os.path.join(data_dir, 'tnl2k')\n        self.lasot_extension_subset_path_path = os.path.join(data_dir, 'lasot_extension_subset')\n        self.davis_dir = ''\n        self.youtubevos_dir = ''\n\n        self.got_packed_results_path = 
''\n        self.got_reports_path = ''\n        self.tn_packed_results_path = ''\n\n\ndef create_default_local_file_ITP_test(workspace_dir, data_dir, save_dir):\n    comment = {'results_path': 'Where to store tracking results',\n               'network_path': 'Where tracking networks are stored.'}\n\n    path = os.path.join(os.path.dirname(__file__), 'local.py')\n    with open(path, 'w') as f:\n        settings = EnvSettings_ITP(workspace_dir, data_dir, save_dir)\n\n        f.write('from lib.test.evaluation.environment import EnvSettings\\n\\n')\n        f.write('def local_env_settings():\\n')\n        f.write('    settings = EnvSettings()\\n\\n')\n        f.write('    # Set your local paths here.\\n\\n')\n\n        for attr in dir(settings):\n            comment_str = None\n            if attr in comment:\n                comment_str = comment[attr]\n            attr_val = getattr(settings, attr)\n            if not attr.startswith('__') and not callable(attr_val):\n                if comment_str is None:\n                    f.write('    settings.{} = \\'{}\\'\\n'.format(attr, attr_val))\n                else:\n                    f.write('    settings.{} = \\'{}\\'    # {}\\n'.format(attr, attr_val, comment_str))\n        f.write('\\n    return settings\\n\\n')\n\n\ndef env_settings():\n    env_module_name = 'lib.test.evaluation.local'\n    try:\n        env_module = importlib.import_module(env_module_name)\n        return env_module.local_env_settings()\n    except ImportError:\n        env_file = os.path.join(os.path.dirname(__file__), 'local.py')\n\n        # Create a default file\n        create_default_local_file()\n        raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\\n Go to \"{}\" and set all the paths you need. '\n                           'Then try to run again.'.format(env_file))"
  },
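  {
    "path": "examples/environment_setup_example.py",
    "content": "# Illustrative one-time setup sketch for lib/test/evaluation/environment.py; not part\n# of the original toolkit. The three directories below are placeholders for your machine.\nfrom lib.test.evaluation import create_default_local_file_ITP_test\n\n# Writes lib/test/evaluation/local.py with every path pre-filled from data_dir/save_dir.\ncreate_default_local_file_ITP_test(workspace_dir='/path/to/ARTrack',\n                                   data_dir='/path/to/data',\n                                   save_dir='/path/to/output')\n\n# After (optionally) editing local.py, env_settings() returns the configured paths.\nfrom lib.test.evaluation.environment import env_settings\nprint(env_settings().results_path)\n"
  },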
  {
    "path": "lib/test/evaluation/got10kdataset.py",
    "content": "import numpy as np\nfrom lib.test.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom lib.test.utils.load_text import load_text\nimport os\n\n\nclass GOT10KDataset(BaseDataset):\n    \"\"\" GOT-10k dataset.\n\n    Publication:\n        GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n        Lianghua Huang, Xin Zhao, and Kaiqi Huang\n        arXiv:1810.11981, 2018\n        https://arxiv.org/pdf/1810.11981.pdf\n\n    Download dataset from http://got-10k.aitestunion.com/downloads\n    \"\"\"\n    def __init__(self, split):\n        super().__init__()\n        # Split can be test, val, or ltrval (a validation split consisting of videos from the official train set)\n        if split == 'test' or split == 'val':\n            self.base_path = os.path.join(self.env_settings.got10k_path, split)\n        else:\n            self.base_path = os.path.join(self.env_settings.got10k_path, 'train')\n\n        self.sequence_list = self._get_sequence_list(split)\n        self.split = split\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n    def _construct_sequence(self, sequence_name):\n        anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64)\n\n        frames_path = '{}/{}'.format(self.base_path, sequence_name)\n        frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(\".jpg\")]\n        frame_list.sort(key=lambda f: int(f[:-4]))\n        frames_list = [os.path.join(frames_path, frame) for frame in frame_list]\n\n        return Sequence(sequence_name, frames_list, 'got10k', ground_truth_rect.reshape(-1, 4))\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _get_sequence_list(self, split):\n        with open('{}/list.txt'.format(self.base_path)) as f:\n            sequence_list = f.read().splitlines()\n\n        if split == 'ltrval':\n            with open('{}/got10k_val_split.txt'.format(self.env_settings.dataspec_path)) as f:\n                seq_ids = f.read().splitlines()\n\n            sequence_list = [sequence_list[int(x)] for x in seq_ids]\n        return sequence_list\n"
  },
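  {
    "path": "examples/got10k_split_example.py",
    "content": "# Illustrative sketch of GOT10KDataset's split handling; not part of the original\n# toolkit. Requires got10k_path to be set in lib/test/evaluation/local.py.\nfrom lib.test.evaluation import get_dataset\n\n# 'got10k_test' and 'got10k_val' read <got10k_path>/test and /val respectively;\n# 'got10k_ltrval' is a validation split carved out of the official train set.\ndataset = get_dataset('got10k_val')\nfor seq in dataset[:3]:\n    # init_bbox() returns the first-frame annotation used to initialize a tracker.\n    print(seq.name, len(seq.frames), seq.init_bbox())\n"
  },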
  {
    "path": "lib/test/evaluation/itbdataset.py",
    "content": "import numpy as np\r\nfrom lib.test.evaluation.data import Sequence, BaseDataset, SequenceList\r\nfrom lib.test.utils.load_text import load_text\r\nimport os\r\n\r\n\r\nclass ITBDataset(BaseDataset):\r\n    \"\"\" NUS-PRO dataset\r\n    \"\"\"\r\n\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.base_path = self.env_settings.itb_path\r\n        self.sequence_info_list = self._get_sequence_info_list(self.base_path)\r\n\r\n    def get_sequence_list(self):\r\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])\r\n\r\n    def _construct_sequence(self, sequence_info):\r\n        sequence_path = sequence_info['path']\r\n        nz = sequence_info['nz']\r\n        ext = sequence_info['ext']\r\n        start_frame = sequence_info['startFrame']\r\n        end_frame = sequence_info['endFrame']\r\n\r\n        init_omit = 0\r\n        if 'initOmit' in sequence_info:\r\n            init_omit = sequence_info['initOmit']\r\n\r\n        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,\r\n                                                                           sequence_path=sequence_path, frame=frame_num,\r\n                                                                           nz=nz, ext=ext) for frame_num in\r\n                  range(start_frame + init_omit, end_frame + 1)]\r\n\r\n        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])\r\n\r\n        # NOTE: NUS has some weird annos which panda cannot handle\r\n        ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy')\r\n        return Sequence(sequence_info['name'], frames, 'otb', ground_truth_rect[init_omit:, :],\r\n                        object_class=sequence_info['object_class'])\r\n\r\n    def __len__(self):\r\n        return len(self.sequence_info_list)\r\n\r\n    def get_fileNames(self, rootdir):\r\n        fs = []\r\n        fs_all = []\r\n        for root, dirs, files in os.walk(rootdir, topdown=True):\r\n            files.sort()\r\n            files.sort(key=len)\r\n            if files is not None:\r\n                for name in files:\r\n                    _, ending = os.path.splitext(name)\r\n                    if ending == \".jpg\":\r\n                        _, root_ = os.path.split(root)\r\n                        fs.append(os.path.join(root_, name))\r\n                        fs_all.append(os.path.join(root, name))\r\n\r\n        return fs_all, fs\r\n\r\n    def _get_sequence_info_list(self, base_path):\r\n        sequence_info_list = []\r\n        for scene in os.listdir(base_path):\r\n            if '.' in scene:\r\n                continue\r\n            videos = os.listdir(os.path.join(base_path, scene))\r\n            for video in videos:\r\n                _, fs = self.get_fileNames(os.path.join(base_path, scene, video))\r\n                video_tmp = {\"name\": video, \"path\": scene + '/' + video, \"startFrame\": 1, \"endFrame\": len(fs),\r\n                             \"nz\": len(fs[0].split('/')[-1].split('.')[0]), \"ext\": \"jpg\",\r\n                             \"anno_path\": scene + '/' + video + \"/groundtruth.txt\",\r\n                             \"object_class\": \"unknown\"}\r\n                sequence_info_list.append(video_tmp)\r\n\r\n        return sequence_info_list  # sequence_info_list_50 #\r\n"
  },
  {
    "path": "lib/test/evaluation/lasot_lmdbdataset.py",
    "content": "from lib.test.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom lib.utils.lmdb_utils import *\n\n'''2021.1.27 LaSOT dataset using lmdb data'''\n\n\nclass LaSOTlmdbDataset(BaseDataset):\n    \"\"\"\n    LaSOT test set consisting of 280 videos (see Protocol-II in the LaSOT paper)\n\n    Publication:\n        LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n        Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n        CVPR, 2019\n        https://arxiv.org/pdf/1809.07845.pdf\n\n    Download the dataset from https://cis.temple.edu/lasot/download.html\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.lasot_lmdb_path\n        self.sequence_list = self._get_sequence_list()\n        self.clean_list = self.clean_seq_list()\n\n    def clean_seq_list(self):\n        clean_lst = []\n        for i in range(len(self.sequence_list)):\n            cls, _ = self.sequence_list[i].split('-')\n            clean_lst.append(cls)\n        return clean_lst\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n    def _construct_sequence(self, sequence_name):\n        class_name = sequence_name.split('-')[0]\n        anno_path = str('{}/{}/groundtruth.txt'.format(class_name, sequence_name))\n        # decode the groundtruth\n        gt_str_list = decode_str(self.base_path, anno_path).split('\\n')[:-1]  # the last line is empty\n        gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n        ground_truth_rect = np.array(gt_list).astype(np.float64)\n        # decode occlusion file\n        occlusion_label_path = str('{}/{}/full_occlusion.txt'.format(class_name, sequence_name))\n        occ_list = list(map(int, decode_str(self.base_path, occlusion_label_path).split(',')))\n        full_occlusion = np.array(occ_list).astype(np.float64)\n        # decode out of view file\n        out_of_view_label_path = str('{}/{}/out_of_view.txt'.format(class_name, sequence_name))\n        out_of_view_list = list(map(int, decode_str(self.base_path, out_of_view_label_path).split(',')))\n        out_of_view = np.array(out_of_view_list).astype(np.float64)\n\n        target_visible = np.logical_and(full_occlusion == 0, out_of_view == 0)\n\n        frames_path = '{}/{}/img'.format(class_name, sequence_name)\n\n        frames_list = [[self.base_path, '{}/{:08d}.jpg'.format(frames_path, frame_number)] for frame_number in range(1, ground_truth_rect.shape[0] + 1)]\n\n        target_class = class_name\n        return Sequence(sequence_name, frames_list, 'lasot', ground_truth_rect.reshape(-1, 4),\n                        object_class=target_class, target_visible=target_visible)\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _get_sequence_list(self):\n        sequence_list = ['airplane-1',\n                         'airplane-9',\n                         'airplane-13',\n                         'airplane-15',\n                         'basketball-1',\n                         'basketball-6',\n                         'basketball-7',\n                         'basketball-11',\n                         'bear-2',\n                         'bear-4',\n                         'bear-6',\n                         'bear-17',\n                         'bicycle-2',\n                         'bicycle-7',\n                         'bicycle-9',\n               
          'bicycle-18',\n                         'bird-2',\n                         'bird-3',\n                         'bird-15',\n                         'bird-17',\n                         'boat-3',\n                         'boat-4',\n                         'boat-12',\n                         'boat-17',\n                         'book-3',\n                         'book-10',\n                         'book-11',\n                         'book-19',\n                         'bottle-1',\n                         'bottle-12',\n                         'bottle-14',\n                         'bottle-18',\n                         'bus-2',\n                         'bus-5',\n                         'bus-17',\n                         'bus-19',\n                         'car-2',\n                         'car-6',\n                         'car-9',\n                         'car-17',\n                         'cat-1',\n                         'cat-3',\n                         'cat-18',\n                         'cat-20',\n                         'cattle-2',\n                         'cattle-7',\n                         'cattle-12',\n                         'cattle-13',\n                         'spider-14',\n                         'spider-16',\n                         'spider-18',\n                         'spider-20',\n                         'coin-3',\n                         'coin-6',\n                         'coin-7',\n                         'coin-18',\n                         'crab-3',\n                         'crab-6',\n                         'crab-12',\n                         'crab-18',\n                         'surfboard-12',\n                         'surfboard-4',\n                         'surfboard-5',\n                         'surfboard-8',\n                         'cup-1',\n                         'cup-4',\n                         'cup-7',\n                         'cup-17',\n                         'deer-4',\n                         'deer-8',\n                         'deer-10',\n                         'deer-14',\n                         'dog-1',\n                         'dog-7',\n                         'dog-15',\n                         'dog-19',\n                         'guitar-3',\n                         'guitar-8',\n                         'guitar-10',\n                         'guitar-16',\n                         'person-1',\n                         'person-5',\n                         'person-10',\n                         'person-12',\n                         'pig-2',\n                         'pig-10',\n                         'pig-13',\n                         'pig-18',\n                         'rubicCube-1',\n                         'rubicCube-6',\n                         'rubicCube-14',\n                         'rubicCube-19',\n                         'swing-10',\n                         'swing-14',\n                         'swing-17',\n                         'swing-20',\n                         'drone-13',\n                         'drone-15',\n                         'drone-2',\n                         'drone-7',\n                         'pool-12',\n                         'pool-15',\n                         'pool-3',\n                         'pool-7',\n                         'rabbit-10',\n                         'rabbit-13',\n                         'rabbit-17',\n                         'rabbit-19',\n                         'racing-10',\n                         'racing-15',\n                 
        'racing-16',\n                         'racing-20',\n                         'robot-1',\n                         'robot-19',\n                         'robot-5',\n                         'robot-8',\n                         'sepia-13',\n                         'sepia-16',\n                         'sepia-6',\n                         'sepia-8',\n                         'sheep-3',\n                         'sheep-5',\n                         'sheep-7',\n                         'sheep-9',\n                         'skateboard-16',\n                         'skateboard-19',\n                         'skateboard-3',\n                         'skateboard-8',\n                         'tank-14',\n                         'tank-16',\n                         'tank-6',\n                         'tank-9',\n                         'tiger-12',\n                         'tiger-18',\n                         'tiger-4',\n                         'tiger-6',\n                         'train-1',\n                         'train-11',\n                         'train-20',\n                         'train-7',\n                         'truck-16',\n                         'truck-3',\n                         'truck-6',\n                         'truck-7',\n                         'turtle-16',\n                         'turtle-5',\n                         'turtle-8',\n                         'turtle-9',\n                         'umbrella-17',\n                         'umbrella-19',\n                         'umbrella-2',\n                         'umbrella-9',\n                         'yoyo-15',\n                         'yoyo-17',\n                         'yoyo-19',\n                         'yoyo-7',\n                         'zebra-10',\n                         'zebra-14',\n                         'zebra-16',\n                         'zebra-17',\n                         'elephant-1',\n                         'elephant-12',\n                         'elephant-16',\n                         'elephant-18',\n                         'goldfish-3',\n                         'goldfish-7',\n                         'goldfish-8',\n                         'goldfish-10',\n                         'hat-1',\n                         'hat-2',\n                         'hat-5',\n                         'hat-18',\n                         'kite-4',\n                         'kite-6',\n                         'kite-10',\n                         'kite-15',\n                         'motorcycle-1',\n                         'motorcycle-3',\n                         'motorcycle-9',\n                         'motorcycle-18',\n                         'mouse-1',\n                         'mouse-8',\n                         'mouse-9',\n                         'mouse-17',\n                         'flag-3',\n                         'flag-9',\n                         'flag-5',\n                         'flag-2',\n                         'frog-3',\n                         'frog-4',\n                         'frog-20',\n                         'frog-9',\n                         'gametarget-1',\n                         'gametarget-2',\n                         'gametarget-7',\n                         'gametarget-13',\n                         'hand-2',\n                         'hand-3',\n                         'hand-9',\n                         'hand-16',\n                         'helmet-5',\n                         'helmet-11',\n                         'helmet-19',\n                    
     'helmet-13',\n                         'licenseplate-6',\n                         'licenseplate-12',\n                         'licenseplate-13',\n                         'licenseplate-15',\n                         'electricfan-1',\n                         'electricfan-10',\n                         'electricfan-18',\n                         'electricfan-20',\n                         'chameleon-3',\n                         'chameleon-6',\n                         'chameleon-11',\n                         'chameleon-20',\n                         'crocodile-3',\n                         'crocodile-4',\n                         'crocodile-10',\n                         'crocodile-14',\n                         'gecko-1',\n                         'gecko-5',\n                         'gecko-16',\n                         'gecko-19',\n                         'fox-2',\n                         'fox-3',\n                         'fox-5',\n                         'fox-20',\n                         'giraffe-2',\n                         'giraffe-10',\n                         'giraffe-13',\n                         'giraffe-15',\n                         'gorilla-4',\n                         'gorilla-6',\n                         'gorilla-9',\n                         'gorilla-13',\n                         'hippo-1',\n                         'hippo-7',\n                         'hippo-9',\n                         'hippo-20',\n                         'horse-1',\n                         'horse-4',\n                         'horse-12',\n                         'horse-15',\n                         'kangaroo-2',\n                         'kangaroo-5',\n                         'kangaroo-11',\n                         'kangaroo-14',\n                         'leopard-1',\n                         'leopard-7',\n                         'leopard-16',\n                         'leopard-20',\n                         'lion-1',\n                         'lion-5',\n                         'lion-12',\n                         'lion-20',\n                         'lizard-1',\n                         'lizard-3',\n                         'lizard-6',\n                         'lizard-13',\n                         'microphone-2',\n                         'microphone-6',\n                         'microphone-14',\n                         'microphone-16',\n                         'monkey-3',\n                         'monkey-4',\n                         'monkey-9',\n                         'monkey-17',\n                         'shark-2',\n                         'shark-3',\n                         'shark-5',\n                         'shark-6',\n                         'squirrel-8',\n                         'squirrel-11',\n                         'squirrel-13',\n                         'squirrel-19',\n                         'volleyball-1',\n                         'volleyball-13',\n                         'volleyball-18',\n                         'volleyball-19']\n        return sequence_list\n"
  },
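  {
    "path": "examples/visibility_flags_example.py",
    "content": "# Worked sketch of how the LaSOT loaders above derive per-frame visibility from the\n# full_occlusion and out_of_view annotation files (synthetic data; not part of the toolkit).\nimport numpy as np\n\nfull_occlusion = np.array([0, 1, 0, 0], dtype=np.float64)\nout_of_view = np.array([0, 0, 1, 0], dtype=np.float64)\n\n# A frame counts as visible only if it is neither fully occluded nor out of view.\ntarget_visible = np.logical_and(full_occlusion == 0, out_of_view == 0)\nprint(target_visible)  # [ True False False  True]\n"
  },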
  {
    "path": "lib/test/evaluation/lasotdataset.py",
    "content": "import numpy as np\nfrom lib.test.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom lib.test.utils.load_text import load_text\n\n\nclass LaSOTDataset(BaseDataset):\n    \"\"\"\n    LaSOT test set consisting of 280 videos (see Protocol-II in the LaSOT paper)\n\n    Publication:\n        LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n        Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n        CVPR, 2019\n        https://arxiv.org/pdf/1809.07845.pdf\n\n    Download the dataset from https://cis.temple.edu/lasot/download.html\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.lasot_path\n        self.sequence_list = self._get_sequence_list()\n        self.clean_list = self.clean_seq_list()\n\n    def clean_seq_list(self):\n        clean_lst = []\n        for i in range(len(self.sequence_list)):\n            cls, _ = self.sequence_list[i].split('-')\n            clean_lst.append(cls)\n        return  clean_lst\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n    def _construct_sequence(self, sequence_name):\n        class_name = sequence_name.split('-')[0]\n        anno_path = '{}/{}/{}/groundtruth.txt'.format(self.base_path, class_name, sequence_name)\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64)\n\n        occlusion_label_path = '{}/{}/{}/full_occlusion.txt'.format(self.base_path, class_name, sequence_name)\n\n        # NOTE: pandas backed seems super super slow for loading occlusion/oov masks\n        full_occlusion = load_text(str(occlusion_label_path), delimiter=',', dtype=np.float64, backend='numpy')\n\n        out_of_view_label_path = '{}/{}/{}/out_of_view.txt'.format(self.base_path, class_name, sequence_name)\n        out_of_view = load_text(str(out_of_view_label_path), delimiter=',', dtype=np.float64, backend='numpy')\n\n        target_visible = np.logical_and(full_occlusion == 0, out_of_view == 0)\n\n        frames_path = '{}/{}/{}/img'.format(self.base_path, class_name, sequence_name)\n\n        frames_list = ['{}/{:08d}.jpg'.format(frames_path, frame_number) for frame_number in range(1, ground_truth_rect.shape[0] + 1)]\n\n        target_class = class_name\n        return Sequence(sequence_name, frames_list, 'lasot', ground_truth_rect.reshape(-1, 4),\n                        object_class=target_class, target_visible=target_visible)\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _get_sequence_list(self):\n        sequence_list = ['airplane-1',\n                         'airplane-9',\n                         'airplane-13',\n                         'airplane-15',\n                         'basketball-1',\n                         'basketball-6',\n                         'basketball-7',\n                         'basketball-11',\n                         'bear-2',\n                         'bear-4',\n                         'bear-6',\n                         'bear-17',\n                         'bicycle-2',\n                         'bicycle-7',\n                         'bicycle-9',\n                         'bicycle-18',\n                         'bird-2',\n                         'bird-3',\n                         'bird-15',\n                         'bird-17',\n                         'boat-3',\n                         'boat-4',\n                   
      'boat-12',\n                         'boat-17',\n                         'book-3',\n                         'book-10',\n                         'book-11',\n                         'book-19',\n                         'bottle-1',\n                         'bottle-12',\n                         'bottle-14',\n                         'bottle-18',\n                         'bus-2',\n                         'bus-5',\n                         'bus-17',\n                         'bus-19',\n                         'car-2',\n                         'car-6',\n                         'car-9',\n                         'car-17',\n                         'cat-1',\n                         'cat-3',\n                         'cat-18',\n                         'cat-20',\n                         'cattle-2',\n                         'cattle-7',\n                         'cattle-12',\n                         'cattle-13',\n                         'spider-14',\n                         'spider-16',\n                         'spider-18',\n                         'spider-20',\n                         'coin-3',\n                         'coin-6',\n                         'coin-7',\n                         'coin-18',\n                         'crab-3',\n                         'crab-6',\n                         'crab-12',\n                         'crab-18',\n                         'surfboard-12',\n                         'surfboard-4',\n                         'surfboard-5',\n                         'surfboard-8',\n                         'cup-1',\n                         'cup-4',\n                         'cup-7',\n                         'cup-17',\n                         'deer-4',\n                         'deer-8',\n                         'deer-10',\n                         'deer-14',\n                         'dog-1',\n                         'dog-7',\n                         'dog-15',\n                         'dog-19',\n                         'guitar-3',\n                         'guitar-8',\n                         'guitar-10',\n                         'guitar-16',\n                         'person-1',\n                         'person-5',\n                         'person-10',\n                         'person-12',\n                         'pig-2',\n                         'pig-10',\n                         'pig-13',\n                         'pig-18',\n                         'rubicCube-1',\n                         'rubicCube-6',\n                         'rubicCube-14',\n                         'rubicCube-19',\n                         'swing-10',\n                         'swing-14',\n                         'swing-17',\n                         'swing-20',\n                         'drone-13',\n                         'drone-15',\n                         'drone-2',\n                         'drone-7',\n                         'pool-12',\n                         'pool-15',\n                         'pool-3',\n                         'pool-7',\n                         'rabbit-10',\n                         'rabbit-13',\n                         'rabbit-17',\n                         'rabbit-19',\n                         'racing-10',\n                         'racing-15',\n                         'racing-16',\n                         'racing-20',\n                         'robot-1',\n                         'robot-19',\n                         'robot-5',\n                         'robot-8',\n                         'sepia-13',\n              
           'sepia-16',\n                         'sepia-6',\n                         'sepia-8',\n                         'sheep-3',\n                         'sheep-5',\n                         'sheep-7',\n                         'sheep-9',\n                         'skateboard-16',\n                         'skateboard-19',\n                         'skateboard-3',\n                         'skateboard-8',\n                         'tank-14',\n                         'tank-16',\n                         'tank-6',\n                         'tank-9',\n                         'tiger-12',\n                         'tiger-18',\n                         'tiger-4',\n                         'tiger-6',\n                         'train-1',\n                         'train-11',\n                         'train-20',\n                         'train-7',\n                         'truck-16',\n                         'truck-3',\n                         'truck-6',\n                         'truck-7',\n                         'turtle-16',\n                         'turtle-5',\n                         'turtle-8',\n                         'turtle-9',\n                         'umbrella-17',\n                         'umbrella-19',\n                         'umbrella-2',\n                         'umbrella-9',\n                         'yoyo-15',\n                         'yoyo-17',\n                         'yoyo-19',\n                         'yoyo-7',\n                         'zebra-10',\n                         'zebra-14',\n                         'zebra-16',\n                         'zebra-17',\n                         'elephant-1',\n                         'elephant-12',\n                         'elephant-16',\n                         'elephant-18',\n                         'goldfish-3',\n                         'goldfish-7',\n                         'goldfish-8',\n                         'goldfish-10',\n                         'hat-1',\n                         'hat-2',\n                         'hat-5',\n                         'hat-18',\n                         'kite-4',\n                         'kite-6',\n                         'kite-10',\n                         'kite-15',\n                         'motorcycle-1',\n                         'motorcycle-3',\n                         'motorcycle-9',\n                         'motorcycle-18',\n                         'mouse-1',\n                         'mouse-8',\n                         'mouse-9',\n                         'mouse-17',\n                         'flag-3',\n                         'flag-9',\n                         'flag-5',\n                         'flag-2',\n                         'frog-3',\n                         'frog-4',\n                         'frog-20',\n                         'frog-9',\n                         'gametarget-1',\n                         'gametarget-2',\n                         'gametarget-7',\n                         'gametarget-13',\n                         'hand-2',\n                         'hand-3',\n                         'hand-9',\n                         'hand-16',\n                         'helmet-5',\n                         'helmet-11',\n                         'helmet-19',\n                         'helmet-13',\n                         'licenseplate-6',\n                         'licenseplate-12',\n                         'licenseplate-13',\n                         'licenseplate-15',\n                         'electricfan-1',\n                     
    'electricfan-10',\n                         'electricfan-18',\n                         'electricfan-20',\n                         'chameleon-3',\n                         'chameleon-6',\n                         'chameleon-11',\n                         'chameleon-20',\n                         'crocodile-3',\n                         'crocodile-4',\n                         'crocodile-10',\n                         'crocodile-14',\n                         'gecko-1',\n                         'gecko-5',\n                         'gecko-16',\n                         'gecko-19',\n                         'fox-2',\n                         'fox-3',\n                         'fox-5',\n                         'fox-20',\n                         'giraffe-2',\n                         'giraffe-10',\n                         'giraffe-13',\n                         'giraffe-15',\n                         'gorilla-4',\n                         'gorilla-6',\n                         'gorilla-9',\n                         'gorilla-13',\n                         'hippo-1',\n                         'hippo-7',\n                         'hippo-9',\n                         'hippo-20',\n                         'horse-1',\n                         'horse-4',\n                         'horse-12',\n                         'horse-15',\n                         'kangaroo-2',\n                         'kangaroo-5',\n                         'kangaroo-11',\n                         'kangaroo-14',\n                         'leopard-1',\n                         'leopard-7',\n                         'leopard-16',\n                         'leopard-20',\n                         'lion-1',\n                         'lion-5',\n                         'lion-12',\n                         'lion-20',\n                         'lizard-1',\n                         'lizard-3',\n                         'lizard-6',\n                         'lizard-13',\n                         'microphone-2',\n                         'microphone-6',\n                         'microphone-14',\n                         'microphone-16',\n                         'monkey-3',\n                         'monkey-4',\n                         'monkey-9',\n                         'monkey-17',\n                         'shark-2',\n                         'shark-3',\n                         'shark-5',\n                         'shark-6',\n                         'squirrel-8',\n                         'squirrel-11',\n                         'squirrel-13',\n                         'squirrel-19',\n                         'volleyball-1',\n                         'volleyball-13',\n                         'volleyball-18',\n                         'volleyball-19']\n        return sequence_list\n"
  },
  {
    "path": "lib/test/evaluation/lasotextensionsubsetdataset.py",
    "content": "import numpy as np\r\nfrom lib.test.evaluation.data import Sequence, BaseDataset, SequenceList\r\nfrom lib.test.utils.load_text import load_text\r\n\r\n\r\nclass LaSOTExtensionSubsetDataset(BaseDataset):\r\n    \"\"\"\r\n    LaSOT test set consisting of 280 videos (see Protocol-II in the LaSOT paper)\r\n    Publication:\r\n        LaSOT: A High-quality Large-scale Single Object Tracking Benchmark\r\n        Heng Fan, Hexin Bai, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Harshit, Mingzhen Huang, Juehuan Liu,\r\n        Yong Xu, Chunyuan Liao, Lin Yuan, Haibin Ling\r\n        IJCV, 2020\r\n        https://arxiv.org/pdf/2009.03465.pdf\r\n    Download the dataset from http://vision.cs.stonybrook.edu/~lasot/download.html\r\n    \"\"\"\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.base_path = self.env_settings.lasot_extension_subset_path\r\n        self.sequence_list = self._get_sequence_list()\r\n        self.clean_list = self.clean_seq_list()\r\n\r\n    def clean_seq_list(self):\r\n        clean_lst = []\r\n        for i in range(len(self.sequence_list)):\r\n            cls, _ = self.sequence_list[i].split('-')\r\n            clean_lst.append(cls)\r\n        return  clean_lst\r\n\r\n    def get_sequence_list(self):\r\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\r\n\r\n    def _construct_sequence(self, sequence_name):\r\n        class_name = sequence_name.split('-')[0]\r\n        anno_path = '{}/{}/{}/groundtruth.txt'.format(self.base_path, class_name, sequence_name)\r\n\r\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64)\r\n\r\n        occlusion_label_path = '{}/{}/{}/full_occlusion.txt'.format(self.base_path, class_name, sequence_name)\r\n\r\n        # NOTE: pandas backed seems super super slow for loading occlusion/oov masks\r\n        full_occlusion = load_text(str(occlusion_label_path), delimiter=',', dtype=np.float64, backend='numpy')\r\n\r\n        out_of_view_label_path = '{}/{}/{}/out_of_view.txt'.format(self.base_path, class_name, sequence_name)\r\n        out_of_view = load_text(str(out_of_view_label_path), delimiter=',', dtype=np.float64, backend='numpy')\r\n\r\n        target_visible = np.logical_and(full_occlusion == 0, out_of_view == 0)\r\n\r\n        frames_path = '{}/{}/{}/img'.format(self.base_path, class_name, sequence_name)\r\n\r\n        frames_list = ['{}/{:08d}.jpg'.format(frames_path, frame_number) for frame_number in range(1, ground_truth_rect.shape[0] + 1)]\r\n\r\n        target_class = class_name\r\n        return Sequence(sequence_name, frames_list, 'lasot_extension_subset', ground_truth_rect.reshape(-1, 4),\r\n                        object_class=target_class, target_visible=target_visible)\r\n\r\n    def __len__(self):\r\n        return len(self.sequence_list)\r\n\r\n    def _get_sequence_list(self):\r\n        sequence_list = ['atv-1',\r\n                         'atv-2',\r\n                         'atv-3',\r\n                         'atv-4',\r\n                         'atv-5',\r\n                         'atv-6',\r\n                         'atv-7',\r\n                         'atv-8',\r\n                         'atv-9',\r\n                         'atv-10',\r\n                         'badminton-1',\r\n                         'badminton-2',\r\n                         'badminton-3',\r\n                         'badminton-4',\r\n                         'badminton-5',\r\n                         'badminton-6',\r\n                
         'badminton-7',\r\n                         'badminton-8',\r\n                         'badminton-9',\r\n                         'badminton-10',\r\n                         'cosplay-1',\r\n                         'cosplay-10',\r\n                         'cosplay-2',\r\n                         'cosplay-3',\r\n                         'cosplay-4',\r\n                         'cosplay-5',\r\n                         'cosplay-6',\r\n                         'cosplay-7',\r\n                         'cosplay-8',\r\n                         'cosplay-9',\r\n                         'dancingshoe-1',\r\n                         'dancingshoe-2',\r\n                         'dancingshoe-3',\r\n                         'dancingshoe-4',\r\n                         'dancingshoe-5',\r\n                         'dancingshoe-6',\r\n                         'dancingshoe-7',\r\n                         'dancingshoe-8',\r\n                         'dancingshoe-9',\r\n                         'dancingshoe-10',\r\n                         'footbag-1',\r\n                         'footbag-2',\r\n                         'footbag-3',\r\n                         'footbag-4',\r\n                         'footbag-5',\r\n                         'footbag-6',\r\n                         'footbag-7',\r\n                         'footbag-8',\r\n                         'footbag-9',\r\n                         'footbag-10',\r\n                         'frisbee-1',\r\n                         'frisbee-2',\r\n                         'frisbee-3',\r\n                         'frisbee-4',\r\n                         'frisbee-5',\r\n                         'frisbee-6',\r\n                         'frisbee-7',\r\n                         'frisbee-8',\r\n                         'frisbee-9',\r\n                         'frisbee-10',\r\n                         'jianzi-1',\r\n                         'jianzi-2',\r\n                         'jianzi-3',\r\n                         'jianzi-4',\r\n                         'jianzi-5',\r\n                         'jianzi-6',\r\n                         'jianzi-7',\r\n                         'jianzi-8',\r\n                         'jianzi-9',\r\n                         'jianzi-10',\r\n                         'lantern-1',\r\n                         'lantern-2',\r\n                         'lantern-3',\r\n                         'lantern-4',\r\n                         'lantern-5',\r\n                         'lantern-6',\r\n                         'lantern-7',\r\n                         'lantern-8',\r\n                         'lantern-9',\r\n                         'lantern-10',\r\n                         'misc-1',\r\n                         'misc-2',\r\n                         'misc-3',\r\n                         'misc-4',\r\n                         'misc-5',\r\n                         'misc-6',\r\n                         'misc-7',\r\n                         'misc-8',\r\n                         'misc-9',\r\n                         'misc-10',\r\n                         'opossum-1',\r\n                         'opossum-2',\r\n                         'opossum-3',\r\n                         'opossum-4',\r\n                         'opossum-5',\r\n                         'opossum-6',\r\n                         'opossum-7',\r\n                         'opossum-8',\r\n                         'opossum-9',\r\n                         'opossum-10',\r\n                         'paddle-1',\r\n                         'paddle-2',\r\n                         
'paddle-3',\r\n                         'paddle-4',\r\n                         'paddle-5',\r\n                         'paddle-6',\r\n                         'paddle-7',\r\n                         'paddle-8',\r\n                         'paddle-9',\r\n                         'paddle-10',\r\n                         'raccoon-1',\r\n                         'raccoon-2',\r\n                         'raccoon-3',\r\n                         'raccoon-4',\r\n                         'raccoon-5',\r\n                         'raccoon-6',\r\n                         'raccoon-7',\r\n                         'raccoon-8',\r\n                         'raccoon-9',\r\n                         'raccoon-10',\r\n                         'rhino-1',\r\n                         'rhino-2',\r\n                         'rhino-3',\r\n                         'rhino-4',\r\n                         'rhino-5',\r\n                         'rhino-6',\r\n                         'rhino-7',\r\n                         'rhino-8',\r\n                         'rhino-9',\r\n                         'rhino-10',\r\n                         'skatingshoe-1',\r\n                         'skatingshoe-2',\r\n                         'skatingshoe-3',\r\n                         'skatingshoe-4',\r\n                         'skatingshoe-5',\r\n                         'skatingshoe-6',\r\n                         'skatingshoe-7',\r\n                         'skatingshoe-8',\r\n                         'skatingshoe-9',\r\n                         'skatingshoe-10',\r\n                         'wingsuit-1',\r\n                         'wingsuit-2',\r\n                         'wingsuit-3',\r\n                         'wingsuit-4',\r\n                         'wingsuit-5',\r\n                         'wingsuit-6',\r\n                         'wingsuit-7',\r\n                         'wingsuit-8',\r\n                         'wingsuit-9',\r\n                         'wingsuit-10']\r\n        return sequence_list"
  },
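  {
    "path": "docs/examples/lasot_ext_usage_sketch.py",
    "content": "# Editor's note: a minimal hypothetical usage sketch, NOT a file from the\n# original ARTrack repo. It shows how LaSOTExtensionSubsetDataset (above) is\n# typically consumed, assuming the repo root is on PYTHONPATH and\n# settings.lasot_extension_subset_path in local.py points at the extracted data.\nfrom lib.test.evaluation.lasotextensionsubsetdataset import LaSOTExtensionSubsetDataset\n\nif __name__ == '__main__':\n    dataset = LaSOTExtensionSubsetDataset()\n    sequences = dataset.get_sequence_list()\n    # 15 unseen classes x 10 sequences = 150 videos in the extension subset\n    print('num sequences:', len(sequences))\n    seq = sequences[0]\n    # frame paths follow '<base>/<class>/<class-idx>/img/00000001.jpg' (8-digit, 1-based)\n    print(seq.name, 'num frames:', len(seq.frames))\n    print('first frame path:', seq.frames[0])\n"
  },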
  {
    "path": "lib/test/evaluation/local.py",
    "content": "from lib.test.evaluation.environment import EnvSettings\n\ndef local_env_settings():\n    settings = EnvSettings()\n\n    # Set your local paths here.\n\n    settings.davis_dir = ''\n    settings.got10k_lmdb_path = '/home/wangguijie/code/Siamese-ResNet-track/data/got10k_lmdb'\n    settings.got10k_path = '/home/baiyifan/GOT-10k/'\n    settings.got_packed_results_path = ''\n    settings.got_reports_path = ''\n    settings.itb_path = '/home/wangguijie/code/Siamese-ResNet-track/data/itb'\n    settings.lasot_extension_subset_path_path = '/home/wangguijie/code/Siamese-ResNet-track/data/lasot_extension_subset'\n    settings.lasot_lmdb_path = '/home/wangguijie/code/Siamese-ResNet-track/data/lasot_lmdb'\n    settings.lasot_path = '/home/wangguijie/code/Siamese-ResNet-track/data/lasot'\n    settings.network_path = '/data1/baiyifan/artrackv2_256_got/'    # Where tracking networks are stored.\n    settings.nfs_path = '/home/wangguijie/code/Siamese-ResNet-track/data/nfs'\n    settings.otb_path = '/home/wangguijie/code/Siamese-ResNet-track/data/otb'\n    settings.prj_dir = '/home/baiyifan/code/AR2_github/ARTrack-main/'\n    settings.result_plot_path = '/data1/baiyifan/artrackv2_256_got/'\n    settings.results_path = '/data1/baiyifan/artrackv2_256_got/'    # Where to store tracking results\n    settings.save_dir = '/data1/baiyifan/artrackv2_256_got/'\n    settings.segmentation_path = '/home/wangguijie/code/Siamese-ResNet-track/output/test/segmentation_results'\n    settings.tc128_path = '/home/wangguijie/code/Siamese-ResNet-track/data/TC128'\n    settings.tn_packed_results_path = ''\n    settings.tnl2k_path = '/home/wangguijie/code/Siamese-ResNet-track/data/tnl2k'\n    settings.tpl_path = ''\n    settings.trackingnet_path = '/home/wangguijie/code/Siamese-ResNet-track/data/trackingnet'\n    settings.uav_path = '/home/wangguijie/code/Siamese-ResNet-track/data/uav'\n    settings.vot18_path = '/home/wangguijie/code/Siamese-ResNet-track/data/vot2018'\n    settings.vot22_path = '/home/wangguijie/code/Siamese-ResNet-track/data/vot2022'\n    settings.vot_path = '/home/wangguijie/code/Siamese-ResNet-track/data/VOT2019'\n    settings.youtubevos_dir = ''\n\n    return settings\n\n"
  },
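  {
    "path": "docs/examples/check_local_paths_sketch.py",
    "content": "# Editor's note: a hypothetical sanity-check sketch, NOT a file from the\n# original ARTrack repo. The attribute names set in lib/test/evaluation/local.py\n# must match exactly what the dataset classes read (e.g.\n# lasotextensionsubsetdataset.py reads env_settings.lasot_extension_subset_path,\n# which is why the misspelled lasot_extension_subset_path_path would break it).\n# This sketch flags empty or missing dataset directories before an evaluation run.\nimport os\nfrom lib.test.evaluation.local import local_env_settings\n\nif __name__ == '__main__':\n    settings = local_env_settings()\n    for name in ['lasot_path', 'lasot_extension_subset_path', 'nfs_path', 'otb_path']:\n        path = getattr(settings, name, '')\n        status = 'ok' if path and os.path.isdir(path) else 'MISSING'\n        print('{:30s} {:8s} {}'.format(name, status, path))\n"
  },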
  {
    "path": "lib/test/evaluation/nfsdataset.py",
    "content": "import numpy as np\nfrom lib.test.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom lib.test.utils.load_text import load_text\n\n\nclass NFSDataset(BaseDataset):\n    \"\"\" NFS dataset.\n    Publication:\n        Need for Speed: A Benchmark for Higher Frame Rate Object Tracking\n        H. Kiani Galoogahi, A. Fagg, C. Huang, D. Ramanan, and S.Lucey\n        ICCV, 2017\n        http://openaccess.thecvf.com/content_ICCV_2017/papers/Galoogahi_Need_for_Speed_ICCV_2017_paper.pdf\n    Download the dataset from http://ci2cv.net/nfs/index.html\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.nfs_path\n        self.sequence_info_list = self._get_sequence_info_list()\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])\n\n    def _construct_sequence(self, sequence_info):\n        sequence_path = sequence_info['path']\n        nz = sequence_info['nz']\n        ext = sequence_info['ext']\n        start_frame = sequence_info['startFrame']\n        end_frame = sequence_info['endFrame']\n\n        init_omit = 0\n        if 'initOmit' in sequence_info:\n            init_omit = sequence_info['initOmit']\n\n        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, \n        sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]\n\n        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])\n\n        ground_truth_rect = load_text(str(anno_path), delimiter='\\t', dtype=np.float64)\n\n        return Sequence(sequence_info['name'], frames, 'nfs', ground_truth_rect[init_omit:,:],\n                        object_class=sequence_info['object_class'])\n\n    def __len__(self):\n        return len(self.sequence_info_list)\n\n    def _get_sequence_info_list(self):\n        sequence_info_list = [\n            {\"name\": \"nfs_Gymnastics\", \"path\": \"sequences/Gymnastics\", \"startFrame\": 1, \"endFrame\": 368, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_Gymnastics.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_MachLoop_jet\", \"path\": \"sequences/MachLoop_jet\", \"startFrame\": 1, \"endFrame\": 99, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_MachLoop_jet.txt\", \"object_class\": \"aircraft\", 'occlusion': False},\n            {\"name\": \"nfs_Skiing_red\", \"path\": \"sequences/Skiing_red\", \"startFrame\": 1, \"endFrame\": 69, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_Skiing_red.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_Skydiving\", \"path\": \"sequences/Skydiving\", \"startFrame\": 1, \"endFrame\": 196, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_Skydiving.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_airboard_1\", \"path\": \"sequences/airboard_1\", \"startFrame\": 1, \"endFrame\": 425, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_airboard_1.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_airplane_landing\", \"path\": \"sequences/airplane_landing\", \"startFrame\": 1, \"endFrame\": 81, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_airplane_landing.txt\", \"object_class\": \"aircraft\", 'occlusion': False},\n            {\"name\": \"nfs_airtable_3\", \"path\": \"sequences/airtable_3\", 
\"startFrame\": 1, \"endFrame\": 482, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_airtable_3.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_1\", \"path\": \"sequences/basketball_1\", \"startFrame\": 1, \"endFrame\": 282, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_1.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_2\", \"path\": \"sequences/basketball_2\", \"startFrame\": 1, \"endFrame\": 102, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_2.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_3\", \"path\": \"sequences/basketball_3\", \"startFrame\": 1, \"endFrame\": 421, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_3.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_6\", \"path\": \"sequences/basketball_6\", \"startFrame\": 1, \"endFrame\": 224, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_6.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_basketball_7\", \"path\": \"sequences/basketball_7\", \"startFrame\": 1, \"endFrame\": 240, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_7.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_basketball_player\", \"path\": \"sequences/basketball_player\", \"startFrame\": 1, \"endFrame\": 369, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_player.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_basketball_player_2\", \"path\": \"sequences/basketball_player_2\", \"startFrame\": 1, \"endFrame\": 437, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_basketball_player_2.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_beach_flipback_person\", \"path\": \"sequences/beach_flipback_person\", \"startFrame\": 1, \"endFrame\": 61, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_beach_flipback_person.txt\", \"object_class\": \"person head\", 'occlusion': False},\n            {\"name\": \"nfs_bee\", \"path\": \"sequences/bee\", \"startFrame\": 1, \"endFrame\": 45, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bee.txt\", \"object_class\": \"insect\", 'occlusion': False},\n            {\"name\": \"nfs_biker_acrobat\", \"path\": \"sequences/biker_acrobat\", \"startFrame\": 1, \"endFrame\": 128, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_acrobat.txt\", \"object_class\": \"bicycle\", 'occlusion': False},\n            {\"name\": \"nfs_biker_all_1\", \"path\": \"sequences/biker_all_1\", \"startFrame\": 1, \"endFrame\": 113, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_all_1.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_biker_head_2\", \"path\": \"sequences/biker_head_2\", \"startFrame\": 1, \"endFrame\": 132, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_head_2.txt\", \"object_class\": \"person head\", 'occlusion': False},\n            {\"name\": \"nfs_biker_head_3\", \"path\": \"sequences/biker_head_3\", \"startFrame\": 1, \"endFrame\": 254, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_head_3.txt\", \"object_class\": \"person head\", 'occlusion': False},\n            {\"name\": \"nfs_biker_upper_body\", \"path\": \"sequences/biker_upper_body\", \"startFrame\": 1, \"endFrame\": 194, 
\"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_upper_body.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_biker_whole_body\", \"path\": \"sequences/biker_whole_body\", \"startFrame\": 1, \"endFrame\": 572, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_biker_whole_body.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_billiard_2\", \"path\": \"sequences/billiard_2\", \"startFrame\": 1, \"endFrame\": 604, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_2.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_billiard_3\", \"path\": \"sequences/billiard_3\", \"startFrame\": 1, \"endFrame\": 698, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_3.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_billiard_6\", \"path\": \"sequences/billiard_6\", \"startFrame\": 1, \"endFrame\": 771, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_6.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_billiard_7\", \"path\": \"sequences/billiard_7\", \"startFrame\": 1, \"endFrame\": 724, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_7.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_billiard_8\", \"path\": \"sequences/billiard_8\", \"startFrame\": 1, \"endFrame\": 778, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_billiard_8.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_bird_2\", \"path\": \"sequences/bird_2\", \"startFrame\": 1, \"endFrame\": 476, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bird_2.txt\", \"object_class\": \"bird\", 'occlusion': False},\n            {\"name\": \"nfs_book\", \"path\": \"sequences/book\", \"startFrame\": 1, \"endFrame\": 288, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_book.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_bottle\", \"path\": \"sequences/bottle\", \"startFrame\": 1, \"endFrame\": 2103, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bottle.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_bowling_1\", \"path\": \"sequences/bowling_1\", \"startFrame\": 1, \"endFrame\": 303, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_1.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_bowling_2\", \"path\": \"sequences/bowling_2\", \"startFrame\": 1, \"endFrame\": 710, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_2.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_bowling_3\", \"path\": \"sequences/bowling_3\", \"startFrame\": 1, \"endFrame\": 271, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_3.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_bowling_6\", \"path\": \"sequences/bowling_6\", \"startFrame\": 1, \"endFrame\": 260, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_6.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_bowling_ball\", \"path\": \"sequences/bowling_ball\", \"startFrame\": 1, \"endFrame\": 275, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bowling_ball.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_bunny\", \"path\": \"sequences/bunny\", \"startFrame\": 1, 
\"endFrame\": 705, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_bunny.txt\", \"object_class\": \"mammal\", 'occlusion': False},\n            {\"name\": \"nfs_car\", \"path\": \"sequences/car\", \"startFrame\": 1, \"endFrame\": 2020, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car.txt\", \"object_class\": \"car\", 'occlusion': True},\n            {\"name\": \"nfs_car_camaro\", \"path\": \"sequences/car_camaro\", \"startFrame\": 1, \"endFrame\": 36, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_camaro.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_drifting\", \"path\": \"sequences/car_drifting\", \"startFrame\": 1, \"endFrame\": 173, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_drifting.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_jumping\", \"path\": \"sequences/car_jumping\", \"startFrame\": 1, \"endFrame\": 22, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_jumping.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_rc_rolling\", \"path\": \"sequences/car_rc_rolling\", \"startFrame\": 1, \"endFrame\": 62, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_rc_rolling.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_rc_rotating\", \"path\": \"sequences/car_rc_rotating\", \"startFrame\": 1, \"endFrame\": 80, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_rc_rotating.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_side\", \"path\": \"sequences/car_side\", \"startFrame\": 1, \"endFrame\": 108, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_side.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_car_white\", \"path\": \"sequences/car_white\", \"startFrame\": 1, \"endFrame\": 2063, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_car_white.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_cheetah\", \"path\": \"sequences/cheetah\", \"startFrame\": 1, \"endFrame\": 167, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_cheetah.txt\", \"object_class\": \"mammal\", 'occlusion': True},\n            {\"name\": \"nfs_cup\", \"path\": \"sequences/cup\", \"startFrame\": 1, \"endFrame\": 1281, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_cup.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_cup_2\", \"path\": \"sequences/cup_2\", \"startFrame\": 1, \"endFrame\": 182, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_cup_2.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_dog\", \"path\": \"sequences/dog\", \"startFrame\": 1, \"endFrame\": 1030, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dog.txt\", \"object_class\": \"dog\", 'occlusion': True},\n            {\"name\": \"nfs_dog_1\", \"path\": \"sequences/dog_1\", \"startFrame\": 1, \"endFrame\": 168, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dog_1.txt\", \"object_class\": \"dog\", 'occlusion': False},\n            {\"name\": \"nfs_dog_2\", \"path\": \"sequences/dog_2\", \"startFrame\": 1, \"endFrame\": 594, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dog_2.txt\", \"object_class\": \"dog\", 'occlusion': True},\n            {\"name\": \"nfs_dog_3\", \"path\": \"sequences/dog_3\", \"startFrame\": 1, \"endFrame\": 200, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": 
\"anno/nfs_dog_3.txt\", \"object_class\": \"dog\", 'occlusion': False},\n            {\"name\": \"nfs_dogs\", \"path\": \"sequences/dogs\", \"startFrame\": 1, \"endFrame\": 198, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dogs.txt\", \"object_class\": \"dog\", 'occlusion': True},\n            {\"name\": \"nfs_dollar\", \"path\": \"sequences/dollar\", \"startFrame\": 1, \"endFrame\": 1426, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_dollar.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_drone\", \"path\": \"sequences/drone\", \"startFrame\": 1, \"endFrame\": 70, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_drone.txt\", \"object_class\": \"aircraft\", 'occlusion': False},\n            {\"name\": \"nfs_ducks_lake\", \"path\": \"sequences/ducks_lake\", \"startFrame\": 1, \"endFrame\": 107, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_ducks_lake.txt\", \"object_class\": \"bird\", 'occlusion': False},\n            {\"name\": \"nfs_exit\", \"path\": \"sequences/exit\", \"startFrame\": 1, \"endFrame\": 359, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_exit.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_first\", \"path\": \"sequences/first\", \"startFrame\": 1, \"endFrame\": 435, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_first.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_flower\", \"path\": \"sequences/flower\", \"startFrame\": 1, \"endFrame\": 448, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_flower.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_footbal_skill\", \"path\": \"sequences/footbal_skill\", \"startFrame\": 1, \"endFrame\": 131, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_footbal_skill.txt\", \"object_class\": \"ball\", 'occlusion': True},\n            {\"name\": \"nfs_helicopter\", \"path\": \"sequences/helicopter\", \"startFrame\": 1, \"endFrame\": 310, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_helicopter.txt\", \"object_class\": \"aircraft\", 'occlusion': False},\n            {\"name\": \"nfs_horse_jumping\", \"path\": \"sequences/horse_jumping\", \"startFrame\": 1, \"endFrame\": 117, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_horse_jumping.txt\", \"object_class\": \"horse\", 'occlusion': True},\n            {\"name\": \"nfs_horse_running\", \"path\": \"sequences/horse_running\", \"startFrame\": 1, \"endFrame\": 139, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_horse_running.txt\", \"object_class\": \"horse\", 'occlusion': False},\n            {\"name\": \"nfs_iceskating_6\", \"path\": \"sequences/iceskating_6\", \"startFrame\": 1, \"endFrame\": 603, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_iceskating_6.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_jellyfish_5\", \"path\": \"sequences/jellyfish_5\", \"startFrame\": 1, \"endFrame\": 746, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_jellyfish_5.txt\", \"object_class\": \"invertebrate\", 'occlusion': False},\n            {\"name\": \"nfs_kid_swing\", \"path\": \"sequences/kid_swing\", \"startFrame\": 1, \"endFrame\": 169, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_kid_swing.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_motorcross\", \"path\": \"sequences/motorcross\", \"startFrame\": 1, \"endFrame\": 39, \"nz\": 5, \"ext\": \"jpg\", 
\"anno_path\": \"anno/nfs_motorcross.txt\", \"object_class\": \"vehicle\", 'occlusion': True},\n            {\"name\": \"nfs_motorcross_kawasaki\", \"path\": \"sequences/motorcross_kawasaki\", \"startFrame\": 1, \"endFrame\": 65, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_motorcross_kawasaki.txt\", \"object_class\": \"vehicle\", 'occlusion': False},\n            {\"name\": \"nfs_parkour\", \"path\": \"sequences/parkour\", \"startFrame\": 1, \"endFrame\": 58, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_parkour.txt\", \"object_class\": \"person head\", 'occlusion': False},\n            {\"name\": \"nfs_person_scooter\", \"path\": \"sequences/person_scooter\", \"startFrame\": 1, \"endFrame\": 413, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_person_scooter.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_pingpong_2\", \"path\": \"sequences/pingpong_2\", \"startFrame\": 1, \"endFrame\": 1277, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_pingpong_2.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_pingpong_7\", \"path\": \"sequences/pingpong_7\", \"startFrame\": 1, \"endFrame\": 1290, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_pingpong_7.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_pingpong_8\", \"path\": \"sequences/pingpong_8\", \"startFrame\": 1, \"endFrame\": 296, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_pingpong_8.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_purse\", \"path\": \"sequences/purse\", \"startFrame\": 1, \"endFrame\": 968, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_purse.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_rubber\", \"path\": \"sequences/rubber\", \"startFrame\": 1, \"endFrame\": 1328, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_rubber.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_running\", \"path\": \"sequences/running\", \"startFrame\": 1, \"endFrame\": 677, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_running.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_running_100_m\", \"path\": \"sequences/running_100_m\", \"startFrame\": 1, \"endFrame\": 313, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_running_100_m.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_running_100_m_2\", \"path\": \"sequences/running_100_m_2\", \"startFrame\": 1, \"endFrame\": 337, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_running_100_m_2.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_running_2\", \"path\": \"sequences/running_2\", \"startFrame\": 1, \"endFrame\": 363, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_running_2.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_shuffleboard_1\", \"path\": \"sequences/shuffleboard_1\", \"startFrame\": 1, \"endFrame\": 42, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_1.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffleboard_2\", \"path\": \"sequences/shuffleboard_2\", \"startFrame\": 1, \"endFrame\": 41, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_2.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": 
\"nfs_shuffleboard_4\", \"path\": \"sequences/shuffleboard_4\", \"startFrame\": 1, \"endFrame\": 62, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_4.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffleboard_5\", \"path\": \"sequences/shuffleboard_5\", \"startFrame\": 1, \"endFrame\": 32, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_5.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffleboard_6\", \"path\": \"sequences/shuffleboard_6\", \"startFrame\": 1, \"endFrame\": 52, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffleboard_6.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffletable_2\", \"path\": \"sequences/shuffletable_2\", \"startFrame\": 1, \"endFrame\": 372, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffletable_2.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffletable_3\", \"path\": \"sequences/shuffletable_3\", \"startFrame\": 1, \"endFrame\": 368, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffletable_3.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_shuffletable_4\", \"path\": \"sequences/shuffletable_4\", \"startFrame\": 1, \"endFrame\": 101, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_shuffletable_4.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_ski_long\", \"path\": \"sequences/ski_long\", \"startFrame\": 1, \"endFrame\": 274, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_ski_long.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_ball\", \"path\": \"sequences/soccer_ball\", \"startFrame\": 1, \"endFrame\": 163, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_ball.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_ball_2\", \"path\": \"sequences/soccer_ball_2\", \"startFrame\": 1, \"endFrame\": 1934, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_ball_2.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_ball_3\", \"path\": \"sequences/soccer_ball_3\", \"startFrame\": 1, \"endFrame\": 1381, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_ball_3.txt\", \"object_class\": \"ball\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_player_2\", \"path\": \"sequences/soccer_player_2\", \"startFrame\": 1, \"endFrame\": 475, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_player_2.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_soccer_player_3\", \"path\": \"sequences/soccer_player_3\", \"startFrame\": 1, \"endFrame\": 319, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_soccer_player_3.txt\", \"object_class\": \"person\", 'occlusion': True},\n            {\"name\": \"nfs_stop_sign\", \"path\": \"sequences/stop_sign\", \"startFrame\": 1, \"endFrame\": 302, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_stop_sign.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_suv\", \"path\": \"sequences/suv\", \"startFrame\": 1, \"endFrame\": 2584, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_suv.txt\", \"object_class\": \"car\", 'occlusion': False},\n            {\"name\": \"nfs_tiger\", \"path\": \"sequences/tiger\", \"startFrame\": 1, \"endFrame\": 1556, 
\"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_tiger.txt\", \"object_class\": \"mammal\", 'occlusion': False},\n            {\"name\": \"nfs_walking\", \"path\": \"sequences/walking\", \"startFrame\": 1, \"endFrame\": 555, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_walking.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_walking_3\", \"path\": \"sequences/walking_3\", \"startFrame\": 1, \"endFrame\": 1427, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_walking_3.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_water_ski_2\", \"path\": \"sequences/water_ski_2\", \"startFrame\": 1, \"endFrame\": 47, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_water_ski_2.txt\", \"object_class\": \"person\", 'occlusion': False},\n            {\"name\": \"nfs_yoyo\", \"path\": \"sequences/yoyo\", \"startFrame\": 1, \"endFrame\": 67, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_yoyo.txt\", \"object_class\": \"other\", 'occlusion': False},\n            {\"name\": \"nfs_zebra_fish\", \"path\": \"sequences/zebra_fish\", \"startFrame\": 1, \"endFrame\": 671, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"anno/nfs_zebra_fish.txt\", \"object_class\": \"fish\", 'occlusion': False},\n        ]\n\n        return sequence_info_list"
  },
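  {
    "path": "docs/examples/frame_path_format_sketch.py",
    "content": "# Editor's note: a hypothetical standalone sketch, NOT a file from the original\n# ARTrack repo. It demonstrates the nested format spec that NFSDataset (above)\n# and OTBDataset (below) use to build zero-padded frame paths: '{frame:0{nz}}'\n# pads the frame number to nz digits, so nz=5 yields 00001.jpg. The example\n# paths below are made up for illustration.\ntemplate = '{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'\n\nif __name__ == '__main__':\n    for frame_num in (1, 42, 1277):\n        print(template.format(base_path='/data/nfs', sequence_path='sequences/pingpong_2',\n                              frame=frame_num, nz=5, ext='jpg'))\n    # -> /data/nfs/sequences/pingpong_2/00001.jpg, .../00042.jpg, .../01277.jpg\n"
  },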
  {
    "path": "lib/test/evaluation/otbdataset.py",
    "content": "import numpy as np\nfrom lib.test.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom lib.test.utils.load_text import load_text\n\n\nclass OTBDataset(BaseDataset):\n    \"\"\" OTB-2015 dataset\n    Publication:\n        Object Tracking Benchmark\n        Wu, Yi, Jongwoo Lim, and Ming-hsuan Yan\n        TPAMI, 2015\n        http://faculty.ucmerced.edu/mhyang/papers/pami15_tracking_benchmark.pdf\n    Download the dataset from http://cvlab.hanyang.ac.kr/tracker_benchmark/index.html\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.otb_path\n        self.sequence_info_list = self._get_sequence_info_list()\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])\n\n    def _construct_sequence(self, sequence_info):\n        sequence_path = sequence_info['path']\n        nz = sequence_info['nz']\n        ext = sequence_info['ext']\n        start_frame = sequence_info['startFrame']\n        end_frame = sequence_info['endFrame']\n\n        init_omit = 0\n        if 'initOmit' in sequence_info:\n            init_omit = sequence_info['initOmit']\n\n        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, \n        sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]\n\n        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])\n\n        # NOTE: OTB has some weird annos which panda cannot handle\n        ground_truth_rect = load_text(str(anno_path), delimiter=(',', None), dtype=np.float64, backend='numpy')\n\n        return Sequence(sequence_info['name'], frames, 'otb', ground_truth_rect[init_omit:,:],\n                        object_class=sequence_info['object_class'])\n\n    def __len__(self):\n        return len(self.sequence_info_list)\n\n    def _get_sequence_info_list(self):\n        sequence_info_list = [\n            {\"name\": \"Basketball\", \"path\": \"Basketball/img\", \"startFrame\": 1, \"endFrame\": 725, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Basketball/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Biker\", \"path\": \"Biker/img\", \"startFrame\": 1, \"endFrame\": 142, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Biker/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Bird1\", \"path\": \"Bird1/img\", \"startFrame\": 1, \"endFrame\": 408, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Bird1/groundtruth_rect.txt\",\n             \"object_class\": \"bird\"},\n            {\"name\": \"Bird2\", \"path\": \"Bird2/img\", \"startFrame\": 1, \"endFrame\": 99, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Bird2/groundtruth_rect.txt\",\n             \"object_class\": \"bird\"},\n            {\"name\": \"BlurBody\", \"path\": \"BlurBody/img\", \"startFrame\": 1, \"endFrame\": 334, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurBody/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"BlurCar1\", \"path\": \"BlurCar1/img\", \"startFrame\": 247, \"endFrame\": 988, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurCar1/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"BlurCar2\", \"path\": \"BlurCar2/img\", \"startFrame\": 1, \"endFrame\": 585, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurCar2/groundtruth_rect.txt\",\n 
            \"object_class\": \"car\"},\n            {\"name\": \"BlurCar3\", \"path\": \"BlurCar3/img\", \"startFrame\": 3, \"endFrame\": 359, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurCar3/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"BlurCar4\", \"path\": \"BlurCar4/img\", \"startFrame\": 18, \"endFrame\": 397, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurCar4/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"BlurFace\", \"path\": \"BlurFace/img\", \"startFrame\": 1, \"endFrame\": 493, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurFace/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"BlurOwl\", \"path\": \"BlurOwl/img\", \"startFrame\": 1, \"endFrame\": 631, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"BlurOwl/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Board\", \"path\": \"Board/img\", \"startFrame\": 1, \"endFrame\": 698, \"nz\": 5, \"ext\": \"jpg\", \"anno_path\": \"Board/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Bolt\", \"path\": \"Bolt/img\", \"startFrame\": 1, \"endFrame\": 350, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Bolt/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Bolt2\", \"path\": \"Bolt2/img\", \"startFrame\": 1, \"endFrame\": 293, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Bolt2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Box\", \"path\": \"Box/img\", \"startFrame\": 1, \"endFrame\": 1161, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Box/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Boy\", \"path\": \"Boy/img\", \"startFrame\": 1, \"endFrame\": 602, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Boy/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Car1\", \"path\": \"Car1/img\", \"startFrame\": 1, \"endFrame\": 1020, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Car1/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"Car2\", \"path\": \"Car2/img\", \"startFrame\": 1, \"endFrame\": 913, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Car2/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"Car24\", \"path\": \"Car24/img\", \"startFrame\": 1, \"endFrame\": 3059, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Car24/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"Car4\", \"path\": \"Car4/img\", \"startFrame\": 1, \"endFrame\": 659, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Car4/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"CarDark\", \"path\": \"CarDark/img\", \"startFrame\": 1, \"endFrame\": 393, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"CarDark/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"CarScale\", \"path\": \"CarScale/img\", \"startFrame\": 1, \"endFrame\": 252, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"CarScale/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"ClifBar\", \"path\": \"ClifBar/img\", \"startFrame\": 1, \"endFrame\": 472, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"ClifBar/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n        
    {\"name\": \"Coke\", \"path\": \"Coke/img\", \"startFrame\": 1, \"endFrame\": 291, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Coke/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Couple\", \"path\": \"Couple/img\", \"startFrame\": 1, \"endFrame\": 140, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Couple/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Coupon\", \"path\": \"Coupon/img\", \"startFrame\": 1, \"endFrame\": 327, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Coupon/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Crossing\", \"path\": \"Crossing/img\", \"startFrame\": 1, \"endFrame\": 120, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Crossing/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Crowds\", \"path\": \"Crowds/img\", \"startFrame\": 1, \"endFrame\": 347, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Crowds/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Dancer\", \"path\": \"Dancer/img\", \"startFrame\": 1, \"endFrame\": 225, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dancer/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Dancer2\", \"path\": \"Dancer2/img\", \"startFrame\": 1, \"endFrame\": 150, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dancer2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"David\", \"path\": \"David/img\", \"startFrame\": 300, \"endFrame\": 770, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"David/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"David2\", \"path\": \"David2/img\", \"startFrame\": 1, \"endFrame\": 537, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"David2/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"David3\", \"path\": \"David3/img\", \"startFrame\": 1, \"endFrame\": 252, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"David3/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Deer\", \"path\": \"Deer/img\", \"startFrame\": 1, \"endFrame\": 71, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Deer/groundtruth_rect.txt\",\n             \"object_class\": \"mammal\"},\n            {\"name\": \"Diving\", \"path\": \"Diving/img\", \"startFrame\": 1, \"endFrame\": 215, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Diving/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Dog\", \"path\": \"Dog/img\", \"startFrame\": 1, \"endFrame\": 127, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dog/groundtruth_rect.txt\",\n             \"object_class\": \"dog\"},\n            {\"name\": \"Dog1\", \"path\": \"Dog1/img\", \"startFrame\": 1, \"endFrame\": 1350, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dog1/groundtruth_rect.txt\",\n             \"object_class\": \"dog\"},\n            {\"name\": \"Doll\", \"path\": \"Doll/img\", \"startFrame\": 1, \"endFrame\": 3872, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Doll/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"DragonBaby\", \"path\": \"DragonBaby/img\", \"startFrame\": 1, \"endFrame\": 113, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"DragonBaby/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Dudek\", 
\"path\": \"Dudek/img\", \"startFrame\": 1, \"endFrame\": 1145, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Dudek/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"FaceOcc1\", \"path\": \"FaceOcc1/img\", \"startFrame\": 1, \"endFrame\": 892, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"FaceOcc1/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"FaceOcc2\", \"path\": \"FaceOcc2/img\", \"startFrame\": 1, \"endFrame\": 812, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"FaceOcc2/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Fish\", \"path\": \"Fish/img\", \"startFrame\": 1, \"endFrame\": 476, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Fish/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"FleetFace\", \"path\": \"FleetFace/img\", \"startFrame\": 1, \"endFrame\": 707, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"FleetFace/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Football\", \"path\": \"Football/img\", \"startFrame\": 1, \"endFrame\": 362, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Football/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Football1\", \"path\": \"Football1/img\", \"startFrame\": 1, \"endFrame\": 74, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Football1/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Freeman1\", \"path\": \"Freeman1/img\", \"startFrame\": 1, \"endFrame\": 326, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Freeman1/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Freeman3\", \"path\": \"Freeman3/img\", \"startFrame\": 1, \"endFrame\": 460, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Freeman3/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Freeman4\", \"path\": \"Freeman4/img\", \"startFrame\": 1, \"endFrame\": 283, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Freeman4/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Girl\", \"path\": \"Girl/img\", \"startFrame\": 1, \"endFrame\": 500, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Girl/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Girl2\", \"path\": \"Girl2/img\", \"startFrame\": 1, \"endFrame\": 1500, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Girl2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Gym\", \"path\": \"Gym/img\", \"startFrame\": 1, \"endFrame\": 767, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Gym/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human2\", \"path\": \"Human2/img\", \"startFrame\": 1, \"endFrame\": 1128, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human3\", \"path\": \"Human3/img\", \"startFrame\": 1, \"endFrame\": 1698, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human3/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human4_2\", \"path\": \"Human4/img\", \"startFrame\": 1, \"endFrame\": 667, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human4/groundtruth_rect.2.txt\",\n             \"object_class\": \"person\"},\n            
{\"name\": \"Human5\", \"path\": \"Human5/img\", \"startFrame\": 1, \"endFrame\": 713, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human5/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human6\", \"path\": \"Human6/img\", \"startFrame\": 1, \"endFrame\": 792, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human6/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human7\", \"path\": \"Human7/img\", \"startFrame\": 1, \"endFrame\": 250, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human7/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human8\", \"path\": \"Human8/img\", \"startFrame\": 1, \"endFrame\": 128, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human8/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Human9\", \"path\": \"Human9/img\", \"startFrame\": 1, \"endFrame\": 305, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Human9/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Ironman\", \"path\": \"Ironman/img\", \"startFrame\": 1, \"endFrame\": 166, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Ironman/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Jogging_1\", \"path\": \"Jogging/img\", \"startFrame\": 1, \"endFrame\": 307, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Jogging/groundtruth_rect.1.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Jogging_2\", \"path\": \"Jogging/img\", \"startFrame\": 1, \"endFrame\": 307, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Jogging/groundtruth_rect.2.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Jump\", \"path\": \"Jump/img\", \"startFrame\": 1, \"endFrame\": 122, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Jump/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Jumping\", \"path\": \"Jumping/img\", \"startFrame\": 1, \"endFrame\": 313, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Jumping/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"KiteSurf\", \"path\": \"KiteSurf/img\", \"startFrame\": 1, \"endFrame\": 84, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"KiteSurf/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Lemming\", \"path\": \"Lemming/img\", \"startFrame\": 1, \"endFrame\": 1336, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Lemming/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Liquor\", \"path\": \"Liquor/img\", \"startFrame\": 1, \"endFrame\": 1741, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Liquor/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Man\", \"path\": \"Man/img\", \"startFrame\": 1, \"endFrame\": 134, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Man/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Matrix\", \"path\": \"Matrix/img\", \"startFrame\": 1, \"endFrame\": 100, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Matrix/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Mhyang\", \"path\": \"Mhyang/img\", \"startFrame\": 1, \"endFrame\": 1490, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Mhyang/groundtruth_rect.txt\",\n             \"object_class\": 
\"face\"},\n            {\"name\": \"MotorRolling\", \"path\": \"MotorRolling/img\", \"startFrame\": 1, \"endFrame\": 164, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"MotorRolling/groundtruth_rect.txt\",\n             \"object_class\": \"vehicle\"},\n            {\"name\": \"MountainBike\", \"path\": \"MountainBike/img\", \"startFrame\": 1, \"endFrame\": 228, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"MountainBike/groundtruth_rect.txt\",\n             \"object_class\": \"bicycle\"},\n            {\"name\": \"Panda\", \"path\": \"Panda/img\", \"startFrame\": 1, \"endFrame\": 1000, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Panda/groundtruth_rect.txt\",\n             \"object_class\": \"mammal\"},\n            {\"name\": \"RedTeam\", \"path\": \"RedTeam/img\", \"startFrame\": 1, \"endFrame\": 1918, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"RedTeam/groundtruth_rect.txt\",\n             \"object_class\": \"vehicle\"},\n            {\"name\": \"Rubik\", \"path\": \"Rubik/img\", \"startFrame\": 1, \"endFrame\": 1997, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Rubik/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Shaking\", \"path\": \"Shaking/img\", \"startFrame\": 1, \"endFrame\": 365, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Shaking/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Singer1\", \"path\": \"Singer1/img\", \"startFrame\": 1, \"endFrame\": 351, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Singer1/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Singer2\", \"path\": \"Singer2/img\", \"startFrame\": 1, \"endFrame\": 366, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Singer2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skater\", \"path\": \"Skater/img\", \"startFrame\": 1, \"endFrame\": 160, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skater/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skater2\", \"path\": \"Skater2/img\", \"startFrame\": 1, \"endFrame\": 435, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skater2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skating1\", \"path\": \"Skating1/img\", \"startFrame\": 1, \"endFrame\": 400, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skating1/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skating2_1\", \"path\": \"Skating2/img\", \"startFrame\": 1, \"endFrame\": 473, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skating2/groundtruth_rect.1.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skating2_2\", \"path\": \"Skating2/img\", \"startFrame\": 1, \"endFrame\": 473, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skating2/groundtruth_rect.2.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Skiing\", \"path\": \"Skiing/img\", \"startFrame\": 1, \"endFrame\": 81, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Skiing/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Soccer\", \"path\": \"Soccer/img\", \"startFrame\": 1, \"endFrame\": 392, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Soccer/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Subway\", \"path\": \"Subway/img\", \"startFrame\": 1, \"endFrame\": 175, \"nz\": 4, \"ext\": \"jpg\", 
\"anno_path\": \"Subway/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Surfer\", \"path\": \"Surfer/img\", \"startFrame\": 1, \"endFrame\": 376, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Surfer/groundtruth_rect.txt\",\n             \"object_class\": \"person head\"},\n            {\"name\": \"Suv\", \"path\": \"Suv/img\", \"startFrame\": 1, \"endFrame\": 945, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Suv/groundtruth_rect.txt\",\n             \"object_class\": \"car\"},\n            {\"name\": \"Sylvester\", \"path\": \"Sylvester/img\", \"startFrame\": 1, \"endFrame\": 1345, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Sylvester/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Tiger1\", \"path\": \"Tiger1/img\", \"startFrame\": 1, \"endFrame\": 354, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Tiger1/groundtruth_rect.txt\", \"initOmit\": 5,\n             \"object_class\": \"other\"},\n            {\"name\": \"Tiger2\", \"path\": \"Tiger2/img\", \"startFrame\": 1, \"endFrame\": 365, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Tiger2/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Toy\", \"path\": \"Toy/img\", \"startFrame\": 1, \"endFrame\": 271, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Toy/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Trans\", \"path\": \"Trans/img\", \"startFrame\": 1, \"endFrame\": 124, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Trans/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Trellis\", \"path\": \"Trellis/img\", \"startFrame\": 1, \"endFrame\": 569, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Trellis/groundtruth_rect.txt\",\n             \"object_class\": \"face\"},\n            {\"name\": \"Twinnings\", \"path\": \"Twinnings/img\", \"startFrame\": 1, \"endFrame\": 472, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Twinnings/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Vase\", \"path\": \"Vase/img\", \"startFrame\": 1, \"endFrame\": 271, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Vase/groundtruth_rect.txt\",\n             \"object_class\": \"other\"},\n            {\"name\": \"Walking\", \"path\": \"Walking/img\", \"startFrame\": 1, \"endFrame\": 412, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Walking/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Walking2\", \"path\": \"Walking2/img\", \"startFrame\": 1, \"endFrame\": 500, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Walking2/groundtruth_rect.txt\",\n             \"object_class\": \"person\"},\n            {\"name\": \"Woman\", \"path\": \"Woman/img\", \"startFrame\": 1, \"endFrame\": 597, \"nz\": 4, \"ext\": \"jpg\", \"anno_path\": \"Woman/groundtruth_rect.txt\",\n             \"object_class\": \"person\"}\n        ]\n    \n        return sequence_info_list"
  },
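  {
    "path": "examples/otb_dataset_sketch.py",
    "content": "# Hypothetical usage sketch (not an original repo file): it assumes the OTB\n# sequence list above is exposed as OTBDataset in lib/test/evaluation/otbdataset.py,\n# the usual STARK-style layout, and that dataset paths are configured in local.py.\nfrom lib.test.evaluation.otbdataset import OTBDataset\n\n# get_sequence_list() turns sequence_info_list into Sequence objects.\ndataset = OTBDataset().get_sequence_list()\nprint('OTB sequences:', len(dataset))\nfor seq in dataset:\n    # each Sequence carries the frame paths and one ground-truth box per frame\n    print(seq.name, len(seq.frames), 'frames', seq.ground_truth_rect.shape)\n"
  },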
  {
    "path": "lib/test/evaluation/running.py",
    "content": "import numpy as np\nimport multiprocessing\nimport os\nimport sys\nfrom itertools import product\nfrom collections import OrderedDict\nfrom lib.test.evaluation import Sequence, Tracker\nimport torch\n\n\ndef _save_tracker_output(seq: Sequence, tracker: Tracker, output: dict):\n    \"\"\"Saves the output of the tracker.\"\"\"\n\n    if not os.path.exists(tracker.results_dir):\n        print(\"create tracking result dir:\", tracker.results_dir)\n        os.makedirs(tracker.results_dir)\n    if seq.dataset in ['trackingnet', 'got10k']:\n        if not os.path.exists(os.path.join(tracker.results_dir, seq.dataset)):\n            os.makedirs(os.path.join(tracker.results_dir, seq.dataset))\n    '''2021.1.5 create new folder for these two datasets'''\n    if seq.dataset in ['trackingnet', 'got10k']:\n        base_results_path = os.path.join(tracker.results_dir, seq.dataset, seq.name)\n    else:\n        base_results_path = os.path.join(tracker.results_dir, seq.name)\n\n    def save_bb(file, data):\n        tracked_bb = np.array(data).astype(int)\n        np.savetxt(file, tracked_bb, delimiter='\\t', fmt='%d')\n\n    def save_time(file, data):\n        exec_times = np.array(data).astype(float)\n        np.savetxt(file, exec_times, delimiter='\\t', fmt='%f')\n\n    def save_score(file, data):\n        scores = np.array(data).astype(float)\n        np.savetxt(file, scores, delimiter='\\t', fmt='%.2f')\n\n    def _convert_dict(input_dict):\n        data_dict = {}\n        for elem in input_dict:\n            for k, v in elem.items():\n                if k in data_dict.keys():\n                    data_dict[k].append(v)\n                else:\n                    data_dict[k] = [v, ]\n        return data_dict\n\n    for key, data in output.items():\n        # If data is empty\n        if not data:\n            continue\n\n        if key == 'target_bbox':\n            if isinstance(data[0], (dict, OrderedDict)):\n                data_dict = _convert_dict(data)\n\n                for obj_id, d in data_dict.items():\n                    bbox_file = '{}_{}.txt'.format(base_results_path, obj_id)\n                    save_bb(bbox_file, d)\n            else:\n                # Single-object mode\n                bbox_file = '{}.txt'.format(base_results_path)\n                save_bb(bbox_file, data)\n\n        if key == 'all_boxes':\n            if isinstance(data[0], (dict, OrderedDict)):\n                data_dict = _convert_dict(data)\n\n                for obj_id, d in data_dict.items():\n                    bbox_file = '{}_{}_all_boxes.txt'.format(base_results_path, obj_id)\n                    save_bb(bbox_file, d)\n            else:\n                # Single-object mode\n                bbox_file = '{}_all_boxes.txt'.format(base_results_path)\n                save_bb(bbox_file, data)\n\n        if key == 'all_scores':\n            if isinstance(data[0], (dict, OrderedDict)):\n                data_dict = _convert_dict(data)\n\n                for obj_id, d in data_dict.items():\n                    bbox_file = '{}_{}_all_scores.txt'.format(base_results_path, obj_id)\n                    save_score(bbox_file, d)\n            else:\n                # Single-object mode\n                print(\"saving scores...\")\n                bbox_file = '{}_all_scores.txt'.format(base_results_path)\n                save_score(bbox_file, data)\n\n        elif key == 'time':\n            if isinstance(data[0], dict):\n                data_dict = _convert_dict(data)\n\n                for obj_id, d in 
data_dict.items():\n                    timings_file = '{}_{}_time.txt'.format(base_results_path, obj_id)\n                    save_time(timings_file, d)\n            else:\n                timings_file = '{}_time.txt'.format(base_results_path)\n                save_time(timings_file, data)\n\n\ndef run_sequence(seq: Sequence, tracker: Tracker, debug=False, num_gpu=8):\n    \"\"\"Runs a tracker on a sequence.\"\"\"\n    '''2021.1.2 Add multiple gpu support'''\n    try:\n        worker_name = multiprocessing.current_process().name\n        worker_id = int(worker_name[worker_name.find('-') + 1:]) - 1\n        gpu_id = worker_id % num_gpu\n        torch.cuda.set_device(gpu_id)\n    except Exception:\n        # the main process has no numeric worker suffix; keep the default CUDA device\n        pass\n\n    def _results_exist():\n        if seq.object_ids is None:\n            if seq.dataset in ['trackingnet', 'got10k']:\n                base_results_path = os.path.join(tracker.results_dir, seq.dataset, seq.name)\n                bbox_file = '{}.txt'.format(base_results_path)\n            else:\n                bbox_file = '{}/{}.txt'.format(tracker.results_dir, seq.name)\n            return os.path.isfile(bbox_file)\n        else:\n            bbox_files = ['{}/{}_{}.txt'.format(tracker.results_dir, seq.name, obj_id) for obj_id in seq.object_ids]\n            missing = [not os.path.isfile(f) for f in bbox_files]\n            return sum(missing) == 0\n\n    if _results_exist() and not debug:\n        print('FPS: {}'.format(-1))\n        return\n\n    print('Tracker: {} {} {}, Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name))\n\n    if debug:\n        output = tracker.run_sequence(seq, debug=debug)\n    else:\n        try:\n            output = tracker.run_sequence(seq, debug=debug)\n        except Exception as e:\n            print(e)\n            return\n\n    sys.stdout.flush()\n\n    if isinstance(output['time'][0], (dict, OrderedDict)):\n        exec_time = sum([sum(times.values()) for times in output['time']])\n        num_frames = len(output['time'])\n    else:\n        exec_time = sum(output['time'])\n        num_frames = len(output['time'])\n\n    print('FPS: {}'.format(num_frames / exec_time))\n\n    if not debug:\n        _save_tracker_output(seq, tracker, output)\n\n\ndef run_dataset(dataset, trackers, debug=False, threads=0, num_gpus=8):\n    \"\"\"Runs a list of trackers on a dataset.\n    args:\n        dataset: List of Sequence instances, forming a dataset.\n        trackers: List of Tracker instances.\n        debug: Debug level.\n        threads: Number of threads to use (default 0).\n    \"\"\"\n    multiprocessing.set_start_method('spawn', force=True)\n\n    print('Evaluating {:4d} trackers on {:5d} sequences'.format(len(trackers), len(dataset)))\n\n    if threads == 0:\n        mode = 'sequential'\n    else:\n        mode = 'parallel'\n\n    if mode == 'sequential':\n        for seq in dataset:\n            for tracker_info in trackers:\n                run_sequence(seq, tracker_info, debug=debug)\n    elif mode == 'parallel':\n        param_list = [(seq, tracker_info, debug, num_gpus) for seq, tracker_info in product(dataset, trackers)]\n        with multiprocessing.Pool(processes=threads) as pool:\n            pool.starmap(run_sequence, param_list)\n    print('Done')\n"
  },
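  {
    "path": "examples/run_dataset_sketch.py",
    "content": "# Hypothetical driver sketch (not an original repo file) showing how run_dataset\n# above is typically called. 'artrack' / 'artrack_256_full' are assumed names; it\n# also assumes get_dataset and trackerlist are re-exported by lib.test.evaluation,\n# as in STARK-style repos.\nfrom lib.test.evaluation import get_dataset, trackerlist\nfrom lib.test.evaluation.running import run_dataset\n\ntrackers = trackerlist('artrack', 'artrack_256_full', dataset_name='otb')\ndataset = get_dataset('otb')\n# threads=0 runs sequentially in this process; threads>0 spawns a Pool, and\n# run_sequence pins each worker to a GPU (worker_id % num_gpus).\nrun_dataset(dataset, trackers, debug=False, threads=0, num_gpus=1)\n"
  },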
  {
    "path": "lib/test/evaluation/tc128cedataset.py",
    "content": "import numpy as np\nfrom lib.test.evaluation.data import Sequence, BaseDataset, SequenceList\nimport os\nimport glob\nimport six\n\n\nclass TC128CEDataset(BaseDataset):\n    \"\"\"\n    TC-128 Dataset (78 newly added sequences)\n    modified from the implementation in got10k-toolkit (https://github.com/got-10k/toolkit)\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.tc128_path\n        self.anno_files = sorted(glob.glob(\n            os.path.join(self.base_path, '*/*_gt.txt')))\n        \"\"\"filter the newly added sequences (_ce)\"\"\"\n        self.anno_files = [s for s in self.anno_files if \"_ce\" in s]\n        self.seq_dirs = [os.path.dirname(f) for f in self.anno_files]\n        self.seq_names = [os.path.basename(d) for d in self.seq_dirs]\n        # valid frame range for each sequence\n        self.range_files = [glob.glob(os.path.join(d, '*_frames.txt'))[0] for d in self.seq_dirs]\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.seq_names])\n\n    def _construct_sequence(self, sequence_name):\n        if isinstance(sequence_name, six.string_types):\n            if not sequence_name in self.seq_names:\n                raise Exception('Sequence {} not found.'.format(sequence_name))\n            index = self.seq_names.index(sequence_name)\n        # load valid frame range\n        frames = np.loadtxt(self.range_files[index], dtype=int, delimiter=',')\n        img_files = [os.path.join(self.seq_dirs[index], 'img/%04d.jpg' % f) for f in range(frames[0], frames[1] + 1)]\n\n        # load annotations\n        anno = np.loadtxt(self.anno_files[index], delimiter=',')\n        assert len(img_files) == len(anno)\n        assert anno.shape[1] == 4\n\n        # return img_files, anno\n        return Sequence(sequence_name, img_files, 'tc128', anno.reshape(-1, 4))\n\n    def __len__(self):\n        return len(self.seq_names)\n"
  },
  {
    "path": "lib/test/evaluation/tc128dataset.py",
    "content": "import numpy as np\nfrom lib.test.evaluation.data import Sequence, BaseDataset, SequenceList\nimport os\nimport glob\nimport six\n\n\nclass TC128Dataset(BaseDataset):\n    \"\"\"\n    TC-128 Dataset\n    modified from the implementation in got10k-toolkit (https://github.com/got-10k/toolkit)\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.tc128_path\n        self.anno_files = sorted(glob.glob(\n            os.path.join(self.base_path, '*/*_gt.txt')))\n        self.seq_dirs = [os.path.dirname(f) for f in self.anno_files]\n        self.seq_names = [os.path.basename(d) for d in self.seq_dirs]\n        # valid frame range for each sequence\n        self.range_files = [glob.glob(os.path.join(d, '*_frames.txt'))[0] for d in self.seq_dirs]\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.seq_names])\n\n    def _construct_sequence(self, sequence_name):\n        if isinstance(sequence_name, six.string_types):\n            if not sequence_name in self.seq_names:\n                raise Exception('Sequence {} not found.'.format(sequence_name))\n            index = self.seq_names.index(sequence_name)\n        # load valid frame range\n        frames = np.loadtxt(self.range_files[index], dtype=int, delimiter=',')\n        img_files = [os.path.join(self.seq_dirs[index], 'img/%04d.jpg' % f) for f in range(frames[0], frames[1] + 1)]\n\n        # load annotations\n        anno = np.loadtxt(self.anno_files[index], delimiter=',')\n        assert len(img_files) == len(anno)\n        assert anno.shape[1] == 4\n\n        # return img_files, anno\n        return Sequence(sequence_name, img_files, 'tc128', anno.reshape(-1, 4))\n\n    def __len__(self):\n        return len(self.seq_names)\n"
  },
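  {
    "path": "examples/tc128_datasets_sketch.py",
    "content": "# Hypothetical sketch (not an original repo file) contrasting the two TC-128\n# loaders above: TC128Dataset enumerates every '*_gt.txt' annotation, while\n# TC128CEDataset keeps only sequences whose name contains '_ce'.\nfrom lib.test.evaluation.tc128dataset import TC128Dataset\nfrom lib.test.evaluation.tc128cedataset import TC128CEDataset\n\nprint('TC-128 sequences:', len(TC128Dataset()))\nprint('newly added (_ce) sequences:', len(TC128CEDataset()))\n"
  },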
  {
    "path": "lib/test/evaluation/tnl2kdataset.py",
    "content": "import os\r\n\r\nimport numpy as np\r\nfrom lib.test.evaluation.data import Sequence, BaseDataset, SequenceList\r\nfrom lib.test.utils.load_text import load_text, load_str\r\n\r\n############\r\n# current 00000492.png of test_015_Sord_video_Q01_done is damaged and replaced by a copy of 00000491.png\r\n############\r\n\r\n\r\nclass TNL2kDataset(BaseDataset):\r\n    \"\"\"\r\n    TNL2k test set\r\n    \"\"\"\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.base_path = self.env_settings.tnl2k_path\r\n        self.sequence_list = self._get_sequence_list()\r\n\r\n    def get_sequence_list(self):\r\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\r\n\r\n    def _construct_sequence(self, sequence_name):\r\n        # class_name = sequence_name.split('-')[0]\r\n        anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)\r\n\r\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64)\r\n\r\n        text_dsp_path = '{}/{}/language.txt'.format(self.base_path, sequence_name)\r\n        text_dsp = load_str(text_dsp_path)\r\n\r\n        frames_path = '{}/{}/imgs'.format(self.base_path, sequence_name)\r\n        frames_list = [f for f in os.listdir(frames_path)]\r\n        frames_list = sorted(frames_list)\r\n        frames_list = ['{}/{}'.format(frames_path, frame_i) for frame_i in frames_list]\r\n\r\n        # target_class = class_name\r\n        return Sequence(sequence_name, frames_list, 'tnl2k', ground_truth_rect.reshape(-1, 4), text_dsp=text_dsp)\r\n\r\n    def __len__(self):\r\n        return len(self.sequence_list)\r\n\r\n    def _get_sequence_list(self):\r\n        sequence_list = []\r\n        for seq in os.listdir(self.base_path):\r\n            if os.path.isdir(os.path.join(self.base_path, seq)):\r\n                sequence_list.append(seq)\r\n\r\n        return sequence_list\r\n"
  },
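  {
    "path": "examples/tnl2k_language_sketch.py",
    "content": "# Hypothetical sketch (not an original repo file): TNL2kDataset above attaches a\n# natural-language description to each sequence via the text_dsp keyword; this\n# assumes Sequence stores it as a text_dsp attribute.\nfrom lib.test.evaluation.tnl2kdataset import TNL2kDataset\n\nfor seq in TNL2kDataset().get_sequence_list():\n    print(seq.name, '->', seq.text_dsp)\n"
  },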
  {
    "path": "lib/test/evaluation/tracker.py",
    "content": "import importlib\nimport os\nfrom collections import OrderedDict\nfrom lib.test.evaluation.environment import env_settings\nimport time\nimport cv2 as cv\n\nfrom lib.utils.lmdb_utils import decode_img\nfrom pathlib import Path\nimport numpy as np\n\n\ndef trackerlist(name: str, parameter_name: str, dataset_name: str, run_ids = None, display_name: str = None,\n                result_only=False):\n    \"\"\"Generate list of trackers.\n    args:\n        name: Name of tracking method.\n        parameter_name: Name of parameter file.\n        run_ids: A single or list of run_ids.\n        display_name: Name to be displayed in the result plots.\n    \"\"\"\n    if run_ids is None or isinstance(run_ids, int):\n        run_ids = [run_ids]\n    return [Tracker(name, parameter_name, dataset_name, run_id, display_name, result_only) for run_id in run_ids]\n\n\nclass Tracker:\n    \"\"\"Wraps the tracker for evaluation and running purposes.\n    args:\n        name: Name of tracking method.\n        parameter_name: Name of parameter file.\n        run_id: The run id.\n        display_name: Name to be displayed in the result plots.\n    \"\"\"\n\n    def __init__(self, name: str, parameter_name: str, dataset_name: str, run_id: int = None, display_name: str = None,\n                 result_only=False):\n        assert run_id is None or isinstance(run_id, int)\n\n        self.name = name\n        self.parameter_name = parameter_name\n        self.dataset_name = dataset_name\n        self.run_id = run_id\n        self.display_name = display_name\n\n        env = env_settings()\n        if self.run_id is None:\n            self.results_dir = '{}/{}/{}'.format(env.results_path, self.name, self.parameter_name)\n        else:\n            self.results_dir = '{}/{}/{}_{:03d}'.format(env.results_path, self.name, self.parameter_name, self.run_id)\n        if result_only:\n            self.results_dir = '{}/{}'.format(env.results_path, self.name)\n\n        tracker_module_abspath = os.path.abspath(os.path.join(os.path.dirname(__file__),\n                                                              '..', 'tracker', '%s.py' % self.name))\n        if os.path.isfile(tracker_module_abspath):\n            tracker_module = importlib.import_module('lib.test.tracker.{}'.format(self.name))\n            self.tracker_class = tracker_module.get_tracker_class()\n        else:\n            self.tracker_class = None\n\n    def create_tracker(self, params):\n        tracker = self.tracker_class(params, self.dataset_name)\n        return tracker\n\n    def run_sequence(self, seq, debug=None):\n        \"\"\"Run tracker on sequence.\n        args:\n            seq: Sequence to run the tracker on.\n            visualization: Set visualization flag (None means default value specified in the parameters).\n            debug: Set debug level (None means default value specified in the parameters).\n            multiobj_mode: Which mode to use for multiple objects.\n        \"\"\"\n        params = self.get_parameters()\n\n        debug_ = debug\n        if debug is None:\n            debug_ = getattr(params, 'debug', 0)\n\n        params.debug = debug_\n\n        # Get init information\n        init_info = seq.init_info()\n\n        tracker = self.create_tracker(params)\n\n        output = self._track_sequence(tracker, seq, init_info)\n        return output\n\n    def _track_sequence(self, tracker, seq, init_info):\n        # Define outputs\n        # Each field in output is a list containing tracker prediction for each 
frame.\n\n        # In case of single object tracking mode:\n        # target_bbox[i] is the predicted bounding box for frame i\n        # time[i] is the processing time for frame i\n\n        # In case of multi object tracking mode:\n        # target_bbox[i] is an OrderedDict, where target_bbox[i][obj_id] is the predicted box for target obj_id in\n        # frame i\n        # time[i] is either the processing time for frame i, or an OrderedDict containing processing times for each\n        # object in frame i\n\n        output = {'target_bbox': [],\n                  'time': []}\n        if tracker.params.save_all_boxes:\n            output['all_boxes'] = []\n            output['all_scores'] = []\n\n        def _store_outputs(tracker_out: dict, defaults=None):\n            defaults = {} if defaults is None else defaults\n            for key in output.keys():\n                val = tracker_out.get(key, defaults.get(key, None))\n                if key in tracker_out or val is not None:\n                    output[key].append(val)\n\n        # Initialize\n        image = self._read_image(seq.frames[0])\n\n        start_time = time.time()\n        out = tracker.initialize(image, init_info)\n        if out is None:\n            out = {}\n\n        prev_output = OrderedDict(out)\n        init_default = {'target_bbox': init_info.get('init_bbox'),\n                        'time': time.time() - start_time}\n        if tracker.params.save_all_boxes:\n            init_default['all_boxes'] = out['all_boxes']\n            init_default['all_scores'] = out['all_scores']\n\n        _store_outputs(out, init_default)\n\n        for frame_num, frame_path in enumerate(seq.frames[1:], start=1):\n            image = self._read_image(frame_path)\n\n            start_time = time.time()\n\n            info = seq.frame_info(frame_num)\n            info['previous_output'] = prev_output\n\n            if len(seq.ground_truth_rect) > 1:\n                info['gt_bbox'] = seq.ground_truth_rect[frame_num]\n            out = tracker.track(image, info)\n            prev_output = OrderedDict(out)\n            _store_outputs(out, {'time': time.time() - start_time})\n\n        for key in ['target_bbox', 'all_boxes', 'all_scores']:\n            if key in output and len(output[key]) <= 1:\n                output.pop(key)\n\n        return output\n\n    def run_video(self, videofilepath, optional_box=None, debug=None, visdom_info=None, save_results=False):\n        \"\"\"Run the tracker on the given video file.\n        args:\n            debug: Debug level.\n        \"\"\"\n\n        params = self.get_parameters()\n\n        debug_ = debug\n        if debug is None:\n            debug_ = getattr(params, 'debug', 0)\n        params.debug = debug_\n\n        params.tracker_name = self.name\n        params.param_name = self.parameter_name\n        # self._init_visdom(visdom_info, debug_)\n\n        multiobj_mode = getattr(params, 'multiobj_mode', getattr(self.tracker_class, 'multiobj_mode', 'default'))\n\n        if multiobj_mode == 'default':\n            tracker = self.create_tracker(params)\n\n        elif multiobj_mode == 'parallel':\n            # NOTE: MultiObjectWrapper is not imported in this module, so 'parallel' mode is unavailable as written\n            tracker = MultiObjectWrapper(self.tracker_class, params, self.visdom, fast_load=True)\n        else:\n            raise ValueError('Unknown multi object mode {}'.format(multiobj_mode))\n\n        assert os.path.isfile(videofilepath), \"Invalid param {}, videofilepath must be a valid videofile\".format(videofilepath)\n\n        output_boxes = []\n\n        cap = cv.VideoCapture(videofilepath)\n        display_name = 'Display: ' + tracker.params.tracker_name\n        cv.namedWindow(display_name, cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO)\n        cv.resizeWindow(display_name, 960, 720)\n        success, frame = cap.read()\n        cv.imshow(display_name, frame)\n\n        def _build_init_info(box):\n            return {'init_bbox': box}\n\n        if success is not True:\n            print(\"Read frame from {} failed.\".format(videofilepath))\n            exit(-1)\n        if optional_box is not None:\n            assert isinstance(optional_box, (list, tuple))\n            assert len(optional_box) == 4, \"valid box's format is [x,y,w,h]\"\n            tracker.initialize(frame, _build_init_info(optional_box))\n            output_boxes.append(optional_box)\n        else:\n            while True:\n                # cv.waitKey()\n                frame_disp = frame.copy()\n\n                cv.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL,\n                           1.5, (0, 0, 0), 1)\n\n                x, y, w, h = cv.selectROI(display_name, frame_disp, fromCenter=False)\n                init_state = [x, y, w, h]\n                tracker.initialize(frame, _build_init_info(init_state))\n                output_boxes.append(init_state)\n                break\n\n        while True:\n            ret, frame = cap.read()\n\n            if frame is None:\n                break\n\n            frame_disp = frame.copy()\n\n            # Draw box\n            out = tracker.track(frame)\n            state = [int(s) for s in out['target_bbox']]\n            output_boxes.append(state)\n\n            cv.rectangle(frame_disp, (state[0], state[1]), (state[2] + state[0], state[3] + state[1]),\n                         (0, 255, 0), 5)\n\n            font_color = (0, 0, 0)\n            cv.putText(frame_disp, 'Tracking!', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n            cv.putText(frame_disp, 'Press r to reset', (20, 55), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n            cv.putText(frame_disp, 'Press q to quit', (20, 80), cv.FONT_HERSHEY_COMPLEX_SMALL, 1,\n                       font_color, 1)\n\n            # Display the resulting frame\n            cv.imshow(display_name, frame_disp)\n            key = cv.waitKey(1)\n            if key == ord('q'):\n                break\n            elif key == ord('r'):\n                ret, frame = cap.read()\n                frame_disp = frame.copy()\n\n                cv.putText(frame_disp, 'Select target ROI and press ENTER', (20, 30), cv.FONT_HERSHEY_COMPLEX_SMALL, 1.5,\n                           (0, 0, 0), 1)\n\n                cv.imshow(display_name, frame_disp)\n                x, y, w, h = cv.selectROI(display_name, frame_disp, fromCenter=False)\n                init_state = [x, y, w, h]\n                tracker.initialize(frame, _build_init_info(init_state))\n                output_boxes.append(init_state)\n\n        # When everything done, release the capture\n        cap.release()\n        cv.destroyAllWindows()\n\n        if save_results:\n            if not os.path.exists(self.results_dir):\n                os.makedirs(self.results_dir)\n            video_name = Path(videofilepath).stem\n            base_results_path = os.path.join(self.results_dir, 'video_{}'.format(video_name))\n\n            tracked_bb = np.array(output_boxes).astype(int)\n            bbox_file = '{}.txt'.format(base_results_path)\n            np.savetxt(bbox_file, tracked_bb, delimiter='\\t', fmt='%d')\n\n\n    def get_parameters(self):\n        \"\"\"Get parameters.\"\"\"\n        param_module = importlib.import_module('lib.test.parameter.{}'.format(self.name))\n        params = param_module.parameters(self.parameter_name)\n        return params\n\n    def _read_image(self, image_file: str):\n        if isinstance(image_file, str):\n            im = cv.imread(image_file)\n            return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n        elif isinstance(image_file, list) and len(image_file) == 2:\n            return decode_img(image_file[0], image_file[1])\n        else:\n            raise ValueError(\"type of image_file should be str or list\")\n\n\n\n"
  },
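  {
    "path": "examples/run_video_sketch.py",
    "content": "# Hypothetical sketch (not an original repo file): runs Tracker.run_video from\n# tracker.py above on a local file. The tracker/parameter names and the video\n# path are placeholders; optional_box is [x, y, w, h] in pixels, and save_results\n# writes the tracked boxes under results_dir as video_<name>.txt.\nfrom lib.test.evaluation.tracker import Tracker\n\ntracker = Tracker('artrack', 'artrack_256_full', dataset_name='otb')\ntracker.run_video('/path/to/video.mp4', optional_box=[100, 120, 50, 80], save_results=True)\n"
  },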
  {
    "path": "lib/test/evaluation/trackingnetdataset.py",
    "content": "import numpy as np\nfrom lib.test.evaluation.data import Sequence, BaseDataset, SequenceList\nimport os\nfrom lib.test.utils.load_text import load_text\n\n\nclass TrackingNetDataset(BaseDataset):\n    \"\"\" TrackingNet test set.\n\n    Publication:\n        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n        Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n        ECCV, 2018\n        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.trackingnet_path\n\n        sets = 'TEST'\n        if not isinstance(sets, (list, tuple)):\n            if sets == 'TEST':\n                sets = ['TEST']\n            elif sets == 'TRAIN':\n                sets = ['TRAIN_{}'.format(i) for i in range(5)]\n\n        self.sequence_list = self._list_sequences(self.base_path, sets)\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(set, seq_name) for set, seq_name in self.sequence_list])\n\n    def _construct_sequence(self, set, sequence_name):\n        anno_path = '{}/{}/anno/{}.txt'.format(self.base_path, set, sequence_name)\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy')\n\n        frames_path = '{}/{}/frames/{}'.format(self.base_path, set, sequence_name)\n        frame_list = [frame for frame in os.listdir(frames_path) if frame.endswith(\".jpg\")]\n        frame_list.sort(key=lambda f: int(f[:-4]))\n        frames_list = [os.path.join(frames_path, frame) for frame in frame_list]\n\n        return Sequence(sequence_name, frames_list, 'trackingnet', ground_truth_rect.reshape(-1, 4))\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _list_sequences(self, root, set_ids):\n        sequence_list = []\n\n        for s in set_ids:\n            anno_dir = os.path.join(root, s, \"anno\")\n            sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')]\n\n            sequence_list += sequences_cur_set\n\n        return sequence_list\n"
  },
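  {
    "path": "examples/trackingnet_sketch.py",
    "content": "# Hypothetical sketch (not an original repo file): lists the TrackingNet TEST\n# split discovered by TrackingNetDataset above (one (set, name) pair per\n# anno/*.txt file).\nfrom lib.test.evaluation.trackingnetdataset import TrackingNetDataset\n\ndataset = TrackingNetDataset()\nprint('TrackingNet TEST sequences:', len(dataset))\nfor set_name, seq_name in dataset.sequence_list[:5]:\n    print(set_name, seq_name)\n"
  },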
  {
    "path": "lib/test/evaluation/uavdataset.py",
    "content": "import numpy as np\nfrom lib.test.evaluation.data import Sequence, BaseDataset, SequenceList\nfrom lib.test.utils.load_text import load_text\n\n\nclass UAVDataset(BaseDataset):\n    \"\"\" UAV123 dataset.\n    Publication:\n        A Benchmark and Simulator for UAV Tracking.\n        Matthias Mueller, Neil Smith and Bernard Ghanem\n        ECCV, 2016\n        https://ivul.kaust.edu.sa/Documents/Publications/2016/A%20Benchmark%20and%20Simulator%20for%20UAV%20Tracking.pdf\n    Download the dataset from https://ivul.kaust.edu.sa/Pages/pub-benchmark-simulator-uav.aspx\n    \"\"\"\n    def __init__(self):\n        super().__init__()\n        self.base_path = self.env_settings.uav_path\n        self.sequence_info_list = self._get_sequence_info_list()\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_info_list])\n\n    def _construct_sequence(self, sequence_info):\n        sequence_path = sequence_info['path']\n        nz = sequence_info['nz']\n        ext = sequence_info['ext']\n        start_frame = sequence_info['startFrame']\n        end_frame = sequence_info['endFrame']\n\n        init_omit = 0\n        if 'initOmit' in sequence_info:\n            init_omit = sequence_info['initOmit']\n\n        frames = ['{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(base_path=self.base_path, \n        sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext) for frame_num in range(start_frame+init_omit, end_frame+1)]\n\n        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])\n\n        ground_truth_rect = load_text(str(anno_path), delimiter=',', dtype=np.float64, backend='numpy')\n\n        return Sequence(sequence_info['name'], frames, 'uav', ground_truth_rect[init_omit:,:],\n                        object_class=sequence_info['object_class'])\n\n    def __len__(self):\n        return len(self.sequence_info_list)\n\n    def _get_sequence_info_list(self):\n        sequence_info_list = [\n            {\"name\": \"uav_bike1\", \"path\": \"data_seq/UAV123/bike1\", \"startFrame\": 1, \"endFrame\": 3085, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bike1.txt\", \"object_class\": \"vehicle\"},\n            {\"name\": \"uav_bike2\", \"path\": \"data_seq/UAV123/bike2\", \"startFrame\": 1, \"endFrame\": 553, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bike2.txt\", \"object_class\": \"vehicle\"},\n            {\"name\": \"uav_bike3\", \"path\": \"data_seq/UAV123/bike3\", \"startFrame\": 1, \"endFrame\": 433, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bike3.txt\", \"object_class\": \"vehicle\"},\n            {\"name\": \"uav_bird1_1\", \"path\": \"data_seq/UAV123/bird1\", \"startFrame\": 1, \"endFrame\": 253, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bird1_1.txt\", \"object_class\": \"bird\"},\n            {\"name\": \"uav_bird1_2\", \"path\": \"data_seq/UAV123/bird1\", \"startFrame\": 775, \"endFrame\": 1477, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bird1_2.txt\", \"object_class\": \"bird\"},\n            {\"name\": \"uav_bird1_3\", \"path\": \"data_seq/UAV123/bird1\", \"startFrame\": 1573, \"endFrame\": 2437, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/bird1_3.txt\", \"object_class\": \"bird\"},\n            {\"name\": \"uav_boat1\", \"path\": \"data_seq/UAV123/boat1\", \"startFrame\": 1, \"endFrame\": 901, \"nz\": 6,\n          
   \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat1.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat2\", \"path\": \"data_seq/UAV123/boat2\", \"startFrame\": 1, \"endFrame\": 799, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat2.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat3\", \"path\": \"data_seq/UAV123/boat3\", \"startFrame\": 1, \"endFrame\": 901, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat3.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat4\", \"path\": \"data_seq/UAV123/boat4\", \"startFrame\": 1, \"endFrame\": 553, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat4.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat5\", \"path\": \"data_seq/UAV123/boat5\", \"startFrame\": 1, \"endFrame\": 505, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat5.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat6\", \"path\": \"data_seq/UAV123/boat6\", \"startFrame\": 1, \"endFrame\": 805, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat6.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat7\", \"path\": \"data_seq/UAV123/boat7\", \"startFrame\": 1, \"endFrame\": 535, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat7.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat8\", \"path\": \"data_seq/UAV123/boat8\", \"startFrame\": 1, \"endFrame\": 685, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat8.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_boat9\", \"path\": \"data_seq/UAV123/boat9\", \"startFrame\": 1, \"endFrame\": 1399, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/boat9.txt\", \"object_class\": \"vessel\"},\n            {\"name\": \"uav_building1\", \"path\": \"data_seq/UAV123/building1\", \"startFrame\": 1, \"endFrame\": 469, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building1.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_building2\", \"path\": \"data_seq/UAV123/building2\", \"startFrame\": 1, \"endFrame\": 577, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building2.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_building3\", \"path\": \"data_seq/UAV123/building3\", \"startFrame\": 1, \"endFrame\": 829, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building3.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_building4\", \"path\": \"data_seq/UAV123/building4\", \"startFrame\": 1, \"endFrame\": 787, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building4.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_building5\", \"path\": \"data_seq/UAV123/building5\", \"startFrame\": 1, \"endFrame\": 481, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/building5.txt\", \"object_class\": \"other\"},\n            {\"name\": \"uav_car1_1\", \"path\": \"data_seq/UAV123/car1\", \"startFrame\": 1, \"endFrame\": 751, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car1_1.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car1_2\", \"path\": \"data_seq/UAV123/car1\", \"startFrame\": 751, \"endFrame\": 1627, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": 
\"anno/UAV123/car1_2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car1_3\", \"path\": \"data_seq/UAV123/car1\", \"startFrame\": 1627, \"endFrame\": 2629, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car1_3.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car10\", \"path\": \"data_seq/UAV123/car10\", \"startFrame\": 1, \"endFrame\": 1405, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car10.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car11\", \"path\": \"data_seq/UAV123/car11\", \"startFrame\": 1, \"endFrame\": 337, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car11.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car12\", \"path\": \"data_seq/UAV123/car12\", \"startFrame\": 1, \"endFrame\": 499, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car12.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car13\", \"path\": \"data_seq/UAV123/car13\", \"startFrame\": 1, \"endFrame\": 415, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car13.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car14\", \"path\": \"data_seq/UAV123/car14\", \"startFrame\": 1, \"endFrame\": 1327, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car14.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car15\", \"path\": \"data_seq/UAV123/car15\", \"startFrame\": 1, \"endFrame\": 469, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car15.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car16_1\", \"path\": \"data_seq/UAV123/car16\", \"startFrame\": 1, \"endFrame\": 415, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car16_1.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car16_2\", \"path\": \"data_seq/UAV123/car16\", \"startFrame\": 415, \"endFrame\": 1993, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car16_2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car17\", \"path\": \"data_seq/UAV123/car17\", \"startFrame\": 1, \"endFrame\": 1057, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car17.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car18\", \"path\": \"data_seq/UAV123/car18\", \"startFrame\": 1, \"endFrame\": 1207, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car18.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car1_s\", \"path\": \"data_seq/UAV123/car1_s\", \"startFrame\": 1, \"endFrame\": 1475, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car1_s.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car2\", \"path\": \"data_seq/UAV123/car2\", \"startFrame\": 1, \"endFrame\": 1321, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car2_s\", \"path\": \"data_seq/UAV123/car2_s\", \"startFrame\": 1, \"endFrame\": 320, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car2_s.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car3\", \"path\": \"data_seq/UAV123/car3\", \"startFrame\": 1, \"endFrame\": 1717, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car3.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car3_s\", \"path\": 
\"data_seq/UAV123/car3_s\", \"startFrame\": 1, \"endFrame\": 1300, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car3_s.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car4\", \"path\": \"data_seq/UAV123/car4\", \"startFrame\": 1, \"endFrame\": 1345, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car4.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car4_s\", \"path\": \"data_seq/UAV123/car4_s\", \"startFrame\": 1, \"endFrame\": 830, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car4_s.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car5\", \"path\": \"data_seq/UAV123/car5\", \"startFrame\": 1, \"endFrame\": 745, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car5.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_1\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 1, \"endFrame\": 487, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_1.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_2\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 487, \"endFrame\": 1807, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_3\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 1807, \"endFrame\": 2953, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_3.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_4\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 2953, \"endFrame\": 3925, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_4.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car6_5\", \"path\": \"data_seq/UAV123/car6\", \"startFrame\": 3925, \"endFrame\": 4861, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car6_5.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car7\", \"path\": \"data_seq/UAV123/car7\", \"startFrame\": 1, \"endFrame\": 1033, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car7.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car8_1\", \"path\": \"data_seq/UAV123/car8\", \"startFrame\": 1, \"endFrame\": 1357, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car8_1.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car8_2\", \"path\": \"data_seq/UAV123/car8\", \"startFrame\": 1357, \"endFrame\": 2575, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car8_2.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_car9\", \"path\": \"data_seq/UAV123/car9\", \"startFrame\": 1, \"endFrame\": 1879, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/car9.txt\", \"object_class\": \"car\"},\n            {\"name\": \"uav_group1_1\", \"path\": \"data_seq/UAV123/group1\", \"startFrame\": 1, \"endFrame\": 1333, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group1_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group1_2\", \"path\": \"data_seq/UAV123/group1\", \"startFrame\": 1333, \"endFrame\": 2515, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group1_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group1_3\", \"path\": \"data_seq/UAV123/group1\", \"startFrame\": 2515, \"endFrame\": 3925, \"nz\": 6,\n             
\"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group1_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group1_4\", \"path\": \"data_seq/UAV123/group1\", \"startFrame\": 3925, \"endFrame\": 4873, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group1_4.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group2_1\", \"path\": \"data_seq/UAV123/group2\", \"startFrame\": 1, \"endFrame\": 907, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group2_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group2_2\", \"path\": \"data_seq/UAV123/group2\", \"startFrame\": 907, \"endFrame\": 1771, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group2_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group2_3\", \"path\": \"data_seq/UAV123/group2\", \"startFrame\": 1771, \"endFrame\": 2683, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group2_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group3_1\", \"path\": \"data_seq/UAV123/group3\", \"startFrame\": 1, \"endFrame\": 1567, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group3_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group3_2\", \"path\": \"data_seq/UAV123/group3\", \"startFrame\": 1567, \"endFrame\": 2827, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group3_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group3_3\", \"path\": \"data_seq/UAV123/group3\", \"startFrame\": 2827, \"endFrame\": 4369, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group3_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_group3_4\", \"path\": \"data_seq/UAV123/group3\", \"startFrame\": 4369, \"endFrame\": 5527, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/group3_4.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person1\", \"path\": \"data_seq/UAV123/person1\", \"startFrame\": 1, \"endFrame\": 799, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person10\", \"path\": \"data_seq/UAV123/person10\", \"startFrame\": 1, \"endFrame\": 1021, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person10.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person11\", \"path\": \"data_seq/UAV123/person11\", \"startFrame\": 1, \"endFrame\": 721, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person11.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person12_1\", \"path\": \"data_seq/UAV123/person12\", \"startFrame\": 1, \"endFrame\": 601, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person12_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person12_2\", \"path\": \"data_seq/UAV123/person12\", \"startFrame\": 601, \"endFrame\": 1621, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person12_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person13\", \"path\": \"data_seq/UAV123/person13\", \"startFrame\": 1, \"endFrame\": 883, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person13.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person14_1\", \"path\": \"data_seq/UAV123/person14\", 
\"startFrame\": 1, \"endFrame\": 847, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person14_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person14_2\", \"path\": \"data_seq/UAV123/person14\", \"startFrame\": 847, \"endFrame\": 1813, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person14_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person14_3\", \"path\": \"data_seq/UAV123/person14\", \"startFrame\": 1813, \"endFrame\": 2923,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person14_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person15\", \"path\": \"data_seq/UAV123/person15\", \"startFrame\": 1, \"endFrame\": 1339, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person15.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person16\", \"path\": \"data_seq/UAV123/person16\", \"startFrame\": 1, \"endFrame\": 1147, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person16.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person17_1\", \"path\": \"data_seq/UAV123/person17\", \"startFrame\": 1, \"endFrame\": 1501, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person17_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person17_2\", \"path\": \"data_seq/UAV123/person17\", \"startFrame\": 1501, \"endFrame\": 2347,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person17_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person18\", \"path\": \"data_seq/UAV123/person18\", \"startFrame\": 1, \"endFrame\": 1393, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person18.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person19_1\", \"path\": \"data_seq/UAV123/person19\", \"startFrame\": 1, \"endFrame\": 1243, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person19_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person19_2\", \"path\": \"data_seq/UAV123/person19\", \"startFrame\": 1243, \"endFrame\": 2791,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person19_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person19_3\", \"path\": \"data_seq/UAV123/person19\", \"startFrame\": 2791, \"endFrame\": 4357,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person19_3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person1_s\", \"path\": \"data_seq/UAV123/person1_s\", \"startFrame\": 1, \"endFrame\": 1600, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person1_s.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person2_1\", \"path\": \"data_seq/UAV123/person2\", \"startFrame\": 1, \"endFrame\": 1189, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person2_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person2_2\", \"path\": \"data_seq/UAV123/person2\", \"startFrame\": 1189, \"endFrame\": 2623, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person2_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person20\", \"path\": \"data_seq/UAV123/person20\", \"startFrame\": 1, \"endFrame\": 1783, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person20.txt\", 
\"object_class\": \"person\"},\n            {\"name\": \"uav_person21\", \"path\": \"data_seq/UAV123/person21\", \"startFrame\": 1, \"endFrame\": 487, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person21.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person22\", \"path\": \"data_seq/UAV123/person22\", \"startFrame\": 1, \"endFrame\": 199, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person22.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person23\", \"path\": \"data_seq/UAV123/person23\", \"startFrame\": 1, \"endFrame\": 397, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person23.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person2_s\", \"path\": \"data_seq/UAV123/person2_s\", \"startFrame\": 1, \"endFrame\": 250, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person2_s.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person3\", \"path\": \"data_seq/UAV123/person3\", \"startFrame\": 1, \"endFrame\": 643, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person3_s\", \"path\": \"data_seq/UAV123/person3_s\", \"startFrame\": 1, \"endFrame\": 505, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person3_s.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person4_1\", \"path\": \"data_seq/UAV123/person4\", \"startFrame\": 1, \"endFrame\": 1501, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person4_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person4_2\", \"path\": \"data_seq/UAV123/person4\", \"startFrame\": 1501, \"endFrame\": 2743, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person4_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person5_1\", \"path\": \"data_seq/UAV123/person5\", \"startFrame\": 1, \"endFrame\": 877, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person5_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person5_2\", \"path\": \"data_seq/UAV123/person5\", \"startFrame\": 877, \"endFrame\": 2101, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person5_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person6\", \"path\": \"data_seq/UAV123/person6\", \"startFrame\": 1, \"endFrame\": 901, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person6.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person7_1\", \"path\": \"data_seq/UAV123/person7\", \"startFrame\": 1, \"endFrame\": 1249, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person7_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person7_2\", \"path\": \"data_seq/UAV123/person7\", \"startFrame\": 1249, \"endFrame\": 2065, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person7_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person8_1\", \"path\": \"data_seq/UAV123/person8\", \"startFrame\": 1, \"endFrame\": 1075, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person8_1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person8_2\", \"path\": \"data_seq/UAV123/person8\", \"startFrame\": 1075, \"endFrame\": 1525, \"nz\": 6,\n             
\"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person8_2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_person9\", \"path\": \"data_seq/UAV123/person9\", \"startFrame\": 1, \"endFrame\": 661, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/person9.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_truck1\", \"path\": \"data_seq/UAV123/truck1\", \"startFrame\": 1, \"endFrame\": 463, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck1.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_truck2\", \"path\": \"data_seq/UAV123/truck2\", \"startFrame\": 1, \"endFrame\": 385, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck2.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_truck3\", \"path\": \"data_seq/UAV123/truck3\", \"startFrame\": 1, \"endFrame\": 535, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck3.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_truck4_1\", \"path\": \"data_seq/UAV123/truck4\", \"startFrame\": 1, \"endFrame\": 577, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck4_1.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_truck4_2\", \"path\": \"data_seq/UAV123/truck4\", \"startFrame\": 577, \"endFrame\": 1261, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/truck4_2.txt\", \"object_class\": \"truck\"},\n            {\"name\": \"uav_uav1_1\", \"path\": \"data_seq/UAV123/uav1\", \"startFrame\": 1, \"endFrame\": 1555, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav1_1.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav1_2\", \"path\": \"data_seq/UAV123/uav1\", \"startFrame\": 1555, \"endFrame\": 2377, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav1_2.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav1_3\", \"path\": \"data_seq/UAV123/uav1\", \"startFrame\": 2473, \"endFrame\": 3469, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav1_3.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav2\", \"path\": \"data_seq/UAV123/uav2\", \"startFrame\": 1, \"endFrame\": 133, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav2.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav3\", \"path\": \"data_seq/UAV123/uav3\", \"startFrame\": 1, \"endFrame\": 265, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav3.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav4\", \"path\": \"data_seq/UAV123/uav4\", \"startFrame\": 1, \"endFrame\": 157, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav4.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav5\", \"path\": \"data_seq/UAV123/uav5\", \"startFrame\": 1, \"endFrame\": 139, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav5.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav6\", \"path\": \"data_seq/UAV123/uav6\", \"startFrame\": 1, \"endFrame\": 109, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav6.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav7\", \"path\": \"data_seq/UAV123/uav7\", \"startFrame\": 1, \"endFrame\": 373, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav7.txt\", 
\"object_class\": \"aircraft\"},\n            {\"name\": \"uav_uav8\", \"path\": \"data_seq/UAV123/uav8\", \"startFrame\": 1, \"endFrame\": 301, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/uav8.txt\", \"object_class\": \"aircraft\"},\n            {\"name\": \"uav_wakeboard1\", \"path\": \"data_seq/UAV123/wakeboard1\", \"startFrame\": 1, \"endFrame\": 421, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard1.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard10\", \"path\": \"data_seq/UAV123/wakeboard10\", \"startFrame\": 1, \"endFrame\": 469,\n             \"nz\": 6, \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard10.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard2\", \"path\": \"data_seq/UAV123/wakeboard2\", \"startFrame\": 1, \"endFrame\": 733, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard2.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard3\", \"path\": \"data_seq/UAV123/wakeboard3\", \"startFrame\": 1, \"endFrame\": 823, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard3.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard4\", \"path\": \"data_seq/UAV123/wakeboard4\", \"startFrame\": 1, \"endFrame\": 697, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard4.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard5\", \"path\": \"data_seq/UAV123/wakeboard5\", \"startFrame\": 1, \"endFrame\": 1675, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard5.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard6\", \"path\": \"data_seq/UAV123/wakeboard6\", \"startFrame\": 1, \"endFrame\": 1165, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard6.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard7\", \"path\": \"data_seq/UAV123/wakeboard7\", \"startFrame\": 1, \"endFrame\": 199, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard7.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard8\", \"path\": \"data_seq/UAV123/wakeboard8\", \"startFrame\": 1, \"endFrame\": 1543, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard8.txt\", \"object_class\": \"person\"},\n            {\"name\": \"uav_wakeboard9\", \"path\": \"data_seq/UAV123/wakeboard9\", \"startFrame\": 1, \"endFrame\": 355, \"nz\": 6,\n             \"ext\": \"jpg\", \"anno_path\": \"anno/UAV123/wakeboard9.txt\", \"object_class\": \"person\"}\n        ]\n\n        return sequence_info_list"
  },
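  {
    "path": "docs/examples/uav123_frame_paths_sketch.py",
    "content": "# Editor's note: illustrative sketch, not part of the original repository.\n# Shows how one UAV123 sequence record above expands into frame paths: frames\n# run from startFrame to endFrame, zero-padded to nz digits, with the given\n# extension. base_path is an assumed placeholder for the configured dataset root.\nseq = {'name': 'uav_person9', 'path': 'data_seq/UAV123/person9', 'startFrame': 1,\n       'endFrame': 661, 'nz': 6, 'ext': 'jpg'}\nbase_path = '/path/to/UAV123'\nframes = ['{}/{}/{:0{}d}.{}'.format(base_path, seq['path'], n, seq['nz'], seq['ext'])\n          for n in range(seq['startFrame'], seq['endFrame'] + 1)]\nprint(frames[0])    # /path/to/UAV123/data_seq/UAV123/person9/000001.jpg\nprint(len(frames))  # 661\n"
  },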
  {
    "path": "lib/test/evaluation/votdataset.py",
    "content": "from typing import Union, TextIO\n\nimport numpy as np\nfrom numba import jit\n\nfrom lib.test.evaluation.data import SequenceList, BaseDataset, Sequence\n\n\nclass VOTDataset(BaseDataset):\n    \"\"\"\n    VOT2018 dataset\n\n    Publication:\n        The sixth Visual Object Tracking VOT2018 challenge results.\n        Matej Kristan, Ales Leonardis, Jiri Matas, Michael Felsberg, Roman Pfugfelder, Luka Cehovin Zajc, Tomas Vojir,\n        Goutam Bhat, Alan Lukezic et al.\n        ECCV, 2018\n        https://prints.vicos.si/publications/365\n\n    Download the dataset from http://www.votchallenge.net/vot2018/dataset.html\n    \"\"\"\n    def __init__(self, year=18):\n        super().__init__()\n        self.year = year\n        if year == 18:\n            self.base_path = self.env_settings.vot18_path\n        elif year == 20:\n            self.base_path = self.env_settings.vot20_path\n        elif year == 22:\n            self.base_path = self.env_settings.vot22_path\n        self.sequence_list = self._get_sequence_list(year)\n\n    def get_sequence_list(self):\n        return SequenceList([self._construct_sequence(s) for s in self.sequence_list])\n\n    def _construct_sequence(self, sequence_name):\n        sequence_path = sequence_name\n        nz = 8\n        ext = 'jpg'\n        start_frame = 1\n\n        anno_path = '{}/{}/groundtruth.txt'.format(self.base_path, sequence_name)\n\n        if self.year == 18 or self.year == 22:\n            try:\n                ground_truth_rect = np.loadtxt(str(anno_path), dtype=np.float64)\n            except:\n                ground_truth_rect = np.loadtxt(str(anno_path), delimiter=',', dtype=np.float64)\n\n            end_frame = ground_truth_rect.shape[0]\n\n            frames = ['{base_path}/{sequence_path}/color/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,\n                      sequence_path=sequence_path, frame=frame_num, nz=nz, ext=ext)\n                      for frame_num in range(start_frame, end_frame+1)]\n\n            # Convert gt\n            if ground_truth_rect.shape[1] > 4:\n                gt_x_all = ground_truth_rect[:, [0, 2, 4, 6]]\n                gt_y_all = ground_truth_rect[:, [1, 3, 5, 7]]\n\n                x1 = np.amin(gt_x_all, 1).reshape(-1,1)\n                y1 = np.amin(gt_y_all, 1).reshape(-1,1)\n                x2 = np.amax(gt_x_all, 1).reshape(-1,1)\n                y2 = np.amax(gt_y_all, 1).reshape(-1,1)\n\n                ground_truth_rect = np.concatenate((x1, y1, x2-x1, y2-y1), 1)\n\n        elif self.year == 20:\n            ground_truth_rect = read_file(str(anno_path))\n            ground_truth_rect = np.array(ground_truth_rect, dtype=np.float64)\n            end_frame = ground_truth_rect.shape[0]\n\n            frames = ['{base_path}/{sequence_path}/color/{frame:0{nz}}.{ext}'.format(base_path=self.base_path,\n                                                                                     sequence_path=sequence_path,\n                                                                                     frame=frame_num, nz=nz, ext=ext)\n                      for frame_num in range(start_frame, end_frame + 1)]\n\n        else:\n            raise NotImplementedError\n\n        return Sequence(sequence_name, frames, 'vot', ground_truth_rect)\n\n    def __len__(self):\n        return len(self.sequence_list)\n\n    def _get_sequence_list(self, year):\n        if year == 18:\n            sequence_list= ['ants1',\n                            'ants3',\n                            'bag',\n      
                      'ball1',\n                            'ball2',\n                            'basketball',\n                            'birds1',\n                            'blanket',\n                            'bmx',\n                            'bolt1',\n                            'bolt2',\n                            'book',\n                            'butterfly',\n                            'car1',\n                            'conduction1',\n                            'crabs1',\n                            'crossing',\n                            'dinosaur',\n                            'drone_across',\n                            'drone_flip',\n                            'drone1',\n                            'fernando',\n                            'fish1',\n                            'fish2',\n                            'fish3',\n                            'flamingo1',\n                            'frisbee',\n                            'girl',\n                            'glove',\n                            'godfather',\n                            'graduate',\n                            'gymnastics1',\n                            'gymnastics2',\n                            'gymnastics3',\n                            'hand',\n                            'handball1',\n                            'handball2',\n                            'helicopter',\n                            'iceskater1',\n                            'iceskater2',\n                            'leaves',\n                            'matrix',\n                            'motocross1',\n                            'motocross2',\n                            'nature',\n                            'pedestrian1',\n                            'rabbit',\n                            'racing',\n                            'road',\n                            'shaking',\n                            'sheep',\n                            'singer2',\n                            'singer3',\n                            'soccer1',\n                            'soccer2',\n                            'soldier',\n                            'tiger',\n                            'traffic',\n                            'wiper',\n                            'zebrafish1']\n        elif year == 20:\n\n            sequence_list= ['agility',\n                            'ants1',\n                            'ball2',\n                            'ball3',\n                            'basketball',\n                            'birds1',\n                            'bolt1',\n                            'book',\n                            'butterfly',\n                            'car1',\n                            'conduction1',\n                            'crabs1',\n                            'dinosaur',\n                            'dribble',\n                            'drone1',\n                            'drone_across',\n                            'drone_flip',\n                            'fernando',\n                            'fish1',\n                            'fish2',\n                            'flamingo1',\n                            'frisbee',\n                            'girl',\n                            'glove',\n                            'godfather',\n                            'graduate',\n                            'gymnastics1',\n                            'gymnastics2',\n                            'gymnastics3',\n                            'hand',\n                            
'hand02',\n                            'hand2',\n                            'handball1',\n                            'handball2',\n                            'helicopter',\n                            'iceskater1',\n                            'iceskater2',\n                            'lamb',\n                            'leaves',\n                            'marathon',\n                            'matrix',\n                            'monkey',\n                            'motocross1',\n                            'nature',\n                            'polo',\n                            'rabbit',\n                            'rabbit2',\n                            'road',\n                            'rowing',\n                            'shaking',\n                            'singer2',\n                            'singer3',\n                            'soccer1',\n                            'soccer2',\n                            'soldier',\n                            'surfing',\n                            'tiger',\n                            'wheel',\n                            'wiper',\n                            'zebrafish1']\n        elif year == 22:\n            sequence_list= ['agility',\n                            'animal',\n                            'ants1',\n                            'bag',\n                            'ball2',\n                            'ball3',\n                            'basketball',\n                            'birds1',\n                            'birds2',\n                            'bolt1',\n                            'book',\n                            'bubble',\n                            'butterfly',\n                            'car1',\n                            'conduction1',\n                            'crabs1',\n                            'dinosaur',\n                            'diver',\n                            'drone1',\n                            'drone_across',\n                            'fernando',\n                            'fish1',\n                            'fish2',\n                            'flamingo1',\n                            'frisbee',\n                            'girl',\n                            'graduate',\n                            'gymnastics1',\n                            'gymnastics2',\n                            'gymnastics3',\n                            'hand',\n                            'hand2',\n                            'handball1',\n                            'handball2',\n                            'helicopter',\n                            'iceskater1',\n                            'iceskater2',\n                            'kangaroo',\n                            'lamb',\n                            'leaves',\n                            'marathon',\n                            'matrix',\n                            'monkey',\n                            'motocross1',\n                            'nature',\n                            'polo',\n                            'rabbit',\n                            'rabbit2',\n                            'rowing',\n                            'shaking',\n                            'singer2',\n                            'singer3',\n                            'snake',\n                            'soccer1',\n                            'soccer2',\n                            'soldier',\n                            'surfing',\n                            'tennis',\n                            'tiger',\n       
                     'wheel',\n                            'wiper',\n                            'zebrafish1']\n\n        else:\n            raise NotImplementedError\n\n        return sequence_list\n\n\ndef parse(string):\n    \"\"\"\n    parse string to the appropriate region format and return region object\n    \"\"\"\n    from vot.region.shapes import Rectangle, Polygon, Mask\n\n    if string[0] == 'm':\n        # input is a mask - decode it\n        m_, offset_, region = create_mask_from_string(string[1:].split(','))\n        # return Mask(m_, offset=offset_)\n        return region\n    else:\n        # input is not a mask - check if special, rectangle or polygon\n        raise NotImplementedError\n\n\ndef read_file(fp: Union[str, TextIO]):\n    if isinstance(fp, str):\n        with open(fp) as file:\n            lines = file.readlines()\n    else:\n        lines = fp.readlines()\n\n    regions = []\n    # iterate over all lines in the file\n    for i, line in enumerate(lines):\n        regions.append(parse(line.strip()))\n    return regions\n\n\ndef create_mask_from_string(mask_encoding):\n    \"\"\"\n    mask_encoding: a string in the following format: x0, y0, w, h, RLE\n    output: mask, offset\n    mask: 2-D binary mask, size defined in the mask encoding\n    offset: (x, y) offset of the mask in the image coordinates\n    \"\"\"\n    elements = [int(el) for el in mask_encoding]\n    tl_x, tl_y, region_w, region_h = elements[:4]\n    rle = np.array([el for el in elements[4:]], dtype=np.int32)\n\n    # create mask from RLE within target region\n    mask = rle_to_mask(rle, region_w, region_h)\n    region = [tl_x, tl_y, region_w, region_h]\n\n    return mask, (tl_x, tl_y), region\n\n@jit(nopython=True)\ndef rle_to_mask(rle, width, height):\n    \"\"\"\n    rle: input rle mask encoding\n    each evenly-indexed element represents number of consecutive 0s\n    each oddly indexed element represents number of consecutive 1s\n    width and height are dimensions of the mask\n    output: 2-D binary mask\n    \"\"\"\n    # allocate list of zeros\n    v = [0] * (width * height)\n\n    # set id of the last different element to the beginning of the vector\n    idx_ = 0\n    for i in range(len(rle)):\n        if i % 2 != 0:\n            # write as many 1s as RLE says (zeros are already in the vector)\n            for j in range(rle[i]):\n                v[idx_+j] = 1\n        idx_ += rle[i]\n\n    # reshape the flat run-length vector into the 2-D binary mask\n    return np.array(v, dtype=np.uint8).reshape((height, width))"
  },
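  {
    "path": "docs/examples/vot_rle_decode_sketch.py",
    "content": "# Editor's note: illustrative sketch, not part of the original repository.\n# Demonstrates the run-length layout documented in votdataset.rle_to_mask:\n# even indices are runs of 0s, odd indices are runs of 1s, laid out row-major.\nimport numpy as np\n\ndef rle_to_mask_plain(rle, width, height):\n    # pure-NumPy variant of rle_to_mask (no numba), same encoding\n    v = np.zeros(width * height, dtype=np.uint8)\n    idx = 0\n    for i, run in enumerate(rle):\n        if i % 2 != 0:  # odd index -> run of 1s\n            v[idx:idx + run] = 1\n        idx += run\n    return v.reshape((height, width))\n\n# one 0 followed by two 1s on a 3x1 grid -> [[0 1 1]]\nprint(rle_to_mask_plain([1, 2], 3, 1))\n"
  },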
  {
    "path": "lib/test/parameter/__init__.py",
    "content": ""
  },
  {
    "path": "lib/test/parameter/artrack.py",
    "content": "from lib.test.utils import TrackerParams\nimport os\nfrom lib.test.evaluation.environment import env_settings\nfrom lib.config.artrack.config import cfg, update_config_from_file\n\n\ndef parameters(yaml_name: str):\n    params = TrackerParams()\n    prj_dir = env_settings().prj_dir\n    save_dir = env_settings().save_dir\n    # update default config from yaml file\n    yaml_file = os.path.join(prj_dir, 'experiments/artrack/%s.yaml' % yaml_name)\n    update_config_from_file(yaml_file)\n    params.cfg = cfg\n    print(\"test config: \", cfg)\n\n    # template and search region\n    params.template_factor = cfg.TEST.TEMPLATE_FACTOR\n    params.template_size = cfg.TEST.TEMPLATE_SIZE\n    params.search_factor = cfg.TEST.SEARCH_FACTOR\n    params.search_size = cfg.TEST.SEARCH_SIZE\n\n    # Network checkpoint path\n    params.checkpoint = os.path.join(save_dir, \"checkpoints/train/artrack/%s/ARTrack_ep%04d.pth.tar\" %\n                                     (yaml_name, cfg.TEST.EPOCH))\n\n    # whether to save boxes from all queries\n    params.save_all_boxes = False\n\n    return params\n"
  },
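  {
    "path": "docs/examples/checkpoint_path_sketch.py",
    "content": "# Editor's note: illustrative sketch, not part of the original repository.\n# The parameter modules resolve checkpoints to\n# <save_dir>/checkpoints/train/<script>/<yaml_name>/<Name>_ep<epoch>.pth.tar;\n# save_dir, yaml_name and epoch below are assumed placeholder values.\nimport os\n\nsave_dir = '/path/to/output'\nyaml_name = 'artrack_256_full'\nepoch = 60\nckpt = os.path.join(save_dir, 'checkpoints/train/artrack/%s/ARTrack_ep%04d.pth.tar'\n                    % (yaml_name, epoch))\nprint(ckpt)  # .../checkpoints/train/artrack/artrack_256_full/ARTrack_ep0060.pth.tar\n"
  },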
  {
    "path": "lib/test/parameter/artrack_seq.py",
    "content": "from lib.test.utils import TrackerParams\r\nimport os\r\nfrom lib.test.evaluation.environment import env_settings\r\nfrom lib.config.artrack_seq.config import cfg, update_config_from_file\r\n\r\n\r\ndef parameters(yaml_name: str):\r\n    params = TrackerParams()\r\n    prj_dir = env_settings().prj_dir\r\n    save_dir = env_settings().save_dir\r\n    # update default config from yaml file\r\n    yaml_file = os.path.join(prj_dir, 'experiments/artrack_seq/%s.yaml' % yaml_name)\r\n    update_config_from_file(yaml_file)\r\n    params.cfg = cfg\r\n    print(\"test config: \", cfg)\r\n\r\n    # template and search region\r\n    params.template_factor = cfg.TEST.TEMPLATE_FACTOR\r\n    params.template_size = cfg.TEST.TEMPLATE_SIZE\r\n    params.search_factor = cfg.TEST.SEARCH_FACTOR\r\n    params.search_size = cfg.TEST.SEARCH_SIZE\r\n\r\n    # Network checkpoint path\r\n    params.checkpoint = os.path.join(save_dir, \"checkpoints/train/artrack_seq/%s/ARTrackSeq_ep%04d.pth.tar\" %\r\n                                     (yaml_name, cfg.TEST.EPOCH))\r\n\r\n    # whether to save boxes from all queries\r\n    params.save_all_boxes = False\r\n\r\n    return params\r\n"
  },
  {
    "path": "lib/test/parameter/artrackv2.py",
    "content": "from lib.test.utils import TrackerParams\nimport os\nfrom lib.test.evaluation.environment import env_settings\nfrom lib.config.artrackv2.config import cfg, update_config_from_file\n\n\ndef parameters(yaml_name: str):\n    params = TrackerParams()\n    prj_dir = env_settings().prj_dir\n    save_dir = env_settings().save_dir\n    # update default config from yaml file\n    yaml_file = os.path.join(prj_dir, 'experiments/artrackv2/%s.yaml' % yaml_name)\n    update_config_from_file(yaml_file)\n    params.cfg = cfg\n    print(\"test config: \", cfg)\n\n    # template and search region\n    params.template_factor = cfg.TEST.TEMPLATE_FACTOR\n    params.template_size = cfg.TEST.TEMPLATE_SIZE\n    params.search_factor = cfg.TEST.SEARCH_FACTOR\n    params.search_size = cfg.TEST.SEARCH_SIZE\n\n    # Network checkpoint path\n    params.checkpoint = os.path.join(save_dir, \"checkpoints/train/artrackv2/%s/ARTrackV2_ep%04d.pth.tar\" %\n                                     (yaml_name, cfg.TEST.EPOCH))\n\n    # whether to save boxes from all queries\n    params.save_all_boxes = False\n\n    return params\n"
  },
  {
    "path": "lib/test/parameter/artrackv2_seq.py",
    "content": "from lib.test.utils import TrackerParams\nimport os\nfrom lib.test.evaluation.environment import env_settings\nfrom lib.config.artrackv2_seq.config import cfg, update_config_from_file\n\n\ndef parameters(yaml_name: str):\n    params = TrackerParams()\n    prj_dir = env_settings().prj_dir\n    save_dir = env_settings().save_dir\n    # update default config from yaml file\n    yaml_file = os.path.join(prj_dir, 'experiments/artrackv2_seq/%s.yaml' % yaml_name)\n    update_config_from_file(yaml_file)\n    params.cfg = cfg\n    print(\"test config: \", cfg)\n\n    # template and search region\n    params.template_factor = cfg.TEST.TEMPLATE_FACTOR\n    params.template_size = cfg.TEST.TEMPLATE_SIZE\n    params.search_factor = cfg.TEST.SEARCH_FACTOR\n    params.search_size = cfg.TEST.SEARCH_SIZE\n\n    # Network checkpoint path\n    params.checkpoint = os.path.join(save_dir, \"checkpoints/train/artrackv2_seq/%s/ARTrackV2Seq_ep%04d.pth.tar\" %\n                                     (yaml_name, cfg.TEST.EPOCH))\n\n    # whether to save boxes from all queries\n    params.save_all_boxes = False\n\n    return params\n"
  },
  {
    "path": "lib/test/tracker/__init__.py",
    "content": ""
  },
  {
    "path": "lib/test/tracker/artrack.py",
    "content": "import math\n\nfrom lib.models.artrack import build_artrack\nfrom lib.test.tracker.basetracker import BaseTracker\nimport torch\n\nfrom lib.test.tracker.vis_utils import gen_visualization\nfrom lib.test.utils.hann import hann2d\nfrom lib.train.data.processing_utils import sample_target\n# for debug\nimport cv2\nimport os\n\nfrom lib.test.tracker.data_utils import Preprocessor\nfrom lib.utils.box_ops import clip_box\nfrom lib.utils.ce_utils import generate_mask_cond\nimport random\n\nclass RandomErasing(object):\n    def __init__(self, EPSILON=0.5, sl=0.02, sh=0.33, r1=0.3, mean=[0.4914, 0.4822, 0.4465]):\n        self.EPSILON = EPSILON\n        self.mean = mean\n        self.sl = sl\n        self.sh = sh\n        self.r1 = r1\n\n    def __call__(self, img):\n\n        if random.uniform(0, 1) > self.EPSILON:\n            return img\n\n        for attempt in range(100):\n            print(img.size())\n            area = img.size()[1] * img.size()[2]\n\n            target_area = random.uniform(self.sl, self.sh) * area\n            aspect_ratio = random.uniform(self.r1, 1 / self.r1)\n\n            h = int(round(math.sqrt(target_area * aspect_ratio)))\n            w = int(round(math.sqrt(target_area / aspect_ratio)))\n\n            if w < img.size()[2] and h < img.size()[1]:\n                x1 = random.randint(0, img.size()[1] - h)\n                y1 = random.randint(0, img.size()[2] - w)\n                if img.size()[0] == 3:\n                    # img[0, x1:x1+h, y1:y1+w] = random.uniform(0, 1)\n                    # img[1, x1:x1+h, y1:y1+w] = random.uniform(0, 1)\n                    # img[2, x1:x1+h, y1:y1+w] = random.uniform(0, 1)\n                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]\n                    img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]\n                    img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]\n                    # img[:, x1:x1+h, y1:y1+w] = torch.from_numpy(np.random.rand(3, h, w))\n                else:\n                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[1]\n                    # img[0, x1:x1+h, y1:y1+w] = torch.from_numpy(np.random.rand(1, h, w))\n                return img\n\n        return img\n\n\nclass ARTrack(BaseTracker):\n    def __init__(self, params, dataset_name):\n        super(ARTrack, self).__init__(params)\n        network = build_artrack(params.cfg, training=False)\n        print(self.params.checkpoint)\n        network.load_state_dict(torch.load(self.params.checkpoint, map_location='cpu')['net'], strict=True)\n        self.cfg = params.cfg\n        self.bins = self.cfg.MODEL.BINS\n        self.network = network.cuda()\n        self.network.eval()\n        self.preprocessor = Preprocessor()\n        self.state = None\n        self.range = self.cfg.MODEL.RANGE\n\n        self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE\n        # motion constrain\n        self.output_window = hann2d(torch.tensor([self.feat_sz, self.feat_sz]).long(), centered=True).cuda()\n\n        # for debug\n        self.debug = params.debug\n        self.use_visdom = params.debug\n        self.frame_id = 0\n        self.erase = RandomErasing()\n        if self.debug:\n            if not self.use_visdom:\n                self.save_dir = \"debug\"\n                if not os.path.exists(self.save_dir):\n                    os.makedirs(self.save_dir)\n            else:\n                # self.add_hook()\n                self._init_visdom(None, 1)\n        # for save boxes from all queries\n        self.save_all_boxes = 
params.save_all_boxes\n        self.z_dict1 = {}\n\n    def initialize(self, image, info: dict):\n        # forward the template once\n\n        z_patch_arr, resize_factor, z_amask_arr = sample_target(image, info['init_bbox'], self.params.template_factor,\n                                                    output_sz=self.params.template_size)#output_sz=self.params.template_size\n        self.z_patch_arr = z_patch_arr\n        template = self.preprocessor.process(z_patch_arr, z_amask_arr)\n        with torch.no_grad():\n            self.z_dict1 = template\n\n        self.box_mask_z = None\n        #if self.cfg.MODEL.BACKBONE.CE_LOC:\n        #    template_bbox = self.transform_bbox_to_crop(info['init_bbox'], resize_factor,\n        #                                                template.tensors.device).squeeze(1)\n        #    self.box_mask_z = generate_mask_cond(self.cfg, 1, template.tensors.device, template_bbox)\n\n        # save states\n        self.state = info['init_bbox']\n        self.frame_id = 0\n        if self.save_all_boxes:\n            '''save all predicted boxes'''\n            all_boxes_save = info['init_bbox'] * self.cfg.MODEL.NUM_OBJECT_QUERIES\n            return {\"all_boxes\": all_boxes_save}\n\n    def track(self, image, info: dict = None):\n        magic_num = (self.range - 1) * 0.5\n        H, W, _ = image.shape\n        self.frame_id += 1\n        x_patch_arr, resize_factor, x_amask_arr = sample_target(image, self.state, self.params.search_factor,\n                                                                output_sz=self.params.search_size)  # (x1, y1, w, h)\n        search = self.preprocessor.process(x_patch_arr, x_amask_arr)\n\n\n        with torch.no_grad():\n            x_dict = search\n            # merge the template and the search\n            # run the transformer\n            out_dict = self.network.forward(\n                template=self.z_dict1.tensors, search=x_dict.tensors)\n\n        # add hann windows\n        # pred_score_map = out_dict['score_map']\n        # response = self.output_window * pred_score_map\n        # pred_boxes = self.network.box_head.cal_bbox(response, out_dict['size_map'], out_dict['offset_map'])\n        # pred_boxes = pred_boxes.view(-1, 4)\n\n        pred_boxes = out_dict['seqs'][:, 0:4] / (self.bins - 1) - magic_num\n        pred_boxes = pred_boxes.view(-1, 4).mean(dim=0)\n        pred_new = pred_boxes\n        pred_new[2] = pred_boxes[2] - pred_boxes[0]\n        pred_new[3] = pred_boxes[3] - pred_boxes[1]\n        pred_new[0] = pred_boxes[0] + pred_boxes[2]/2\n        pred_new[1] = pred_boxes[1] + pred_boxes[3]/2\n\n        pred_boxes = (pred_new * self.params.search_size / resize_factor).tolist()\n\n        # Baseline: Take the mean of all pred boxes as the final result\n        #pred_box = (pred_boxes.mean(\n        #    dim=0) * self.params.search_size / resize_factor).tolist()  # (cx, cy, w, h) [0,1]\n        # get the final box result\n        self.state = clip_box(self.map_box_back(pred_boxes, resize_factor), H, W, margin=10)\n\n        # for debug\n        if self.debug:\n            if not self.use_visdom:\n                x1, y1, w, h = self.state\n                image_BGR = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n                cv2.rectangle(image_BGR, (int(x1),int(y1)), (int(x1+w),int(y1+h)), color=(0,0,255), thickness=2)\n                save_path = os.path.join(self.save_dir, \"%04d.jpg\" % self.frame_id)\n                cv2.imwrite(save_path, image_BGR)\n            else:\n                
self.visdom.register((image, info['gt_bbox'].tolist(), self.state), 'Tracking', 1, 'Tracking')\n\n                self.visdom.register(torch.from_numpy(x_patch_arr).permute(2, 0, 1), 'image', 1, 'search_region')\n                self.visdom.register(torch.from_numpy(self.z_patch_arr).permute(2, 0, 1), 'image', 1, 'template')\n                # note: ARTrack's head outputs coordinate tokens and no 'score_map', so\n                # pred_score_map is undefined here; the heatmap views are disabled.\n                # self.visdom.register(pred_score_map.view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map')\n                # self.visdom.register((pred_score_map * self.output_window).view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map_hann')\n\n                if 'removed_indexes_s' in out_dict and out_dict['removed_indexes_s']:\n                    removed_indexes_s = out_dict['removed_indexes_s']\n                    removed_indexes_s = [removed_indexes_s_i.cpu().numpy() for removed_indexes_s_i in removed_indexes_s]\n                    masked_search = gen_visualization(x_patch_arr, removed_indexes_s)\n                    self.visdom.register(torch.from_numpy(masked_search).permute(2, 0, 1), 'image', 1, 'masked_search')\n\n                while self.pause_mode:\n                    if self.step:\n                        self.step = False\n                        break\n\n        if self.save_all_boxes:\n            '''save all predictions'''\n            all_boxes = self.map_box_back_batch(pred_boxes * self.params.search_size / resize_factor, resize_factor)\n            all_boxes_save = all_boxes.view(-1).tolist()  # (4N, )\n            return {\"target_bbox\": self.state,\n                    \"all_boxes\": all_boxes_save}\n        else:\n            return {\"target_bbox\": self.state}\n\n    def map_box_back(self, pred_box: list, resize_factor: float):\n        cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3]\n        cx, cy, w, h = pred_box\n        half_side = 0.5 * self.params.search_size / resize_factor\n        cx_real = cx + (cx_prev - half_side)\n        cy_real = cy + (cy_prev - half_side)\n        #cx_real = cx + cx_prev\n        #cy_real = cy + cy_prev\n        return [cx_real - 0.5 * w, cy_real - 0.5 * h, w, h]\n\n    def map_box_back_batch(self, pred_box: torch.Tensor, resize_factor: float):\n        cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3]\n        cx, cy, w, h = pred_box.unbind(-1) # (N,4) --> (N,)\n        half_side = 0.5 * self.params.search_size / resize_factor\n        cx_real = cx + (cx_prev - half_side)\n        cy_real = cy + (cy_prev - half_side)\n        return torch.stack([cx_real - 0.5 * w, cy_real - 0.5 * h, w, h], dim=-1)\n\n    def add_hook(self):\n        conv_features, enc_attn_weights, dec_attn_weights = [], [], []\n\n        for i in range(12):\n            self.network.backbone.blocks[i].attn.register_forward_hook(\n                # lambda self, input, output: enc_attn_weights.append(output[1])\n                lambda self, input, output: enc_attn_weights.append(output[1])\n            )\n\n        self.enc_attn_weights = enc_attn_weights\n\n\ndef get_tracker_class():\n    return ARTrack\n"
  },
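  {
    "path": "docs/examples/map_box_back_sketch.py",
    "content": "# Editor's note: illustrative sketch, not part of the original repository.\n# Replays ARTrack.map_box_back with plain numbers: the network's (cx, cy, w, h)\n# lives in search-crop pixel coordinates; because the crop is centered on the\n# previous state, shifting by (prev_center - half_side) recovers image coords.\n\ndef map_box_back(state, pred_box, search_size, resize_factor):\n    cx_prev = state[0] + 0.5 * state[2]\n    cy_prev = state[1] + 0.5 * state[3]\n    cx, cy, w, h = pred_box\n    half_side = 0.5 * search_size / resize_factor\n    cx_real = cx + (cx_prev - half_side)\n    cy_real = cy + (cy_prev - half_side)\n    return [cx_real - 0.5 * w, cy_real - 0.5 * h, w, h]\n\n# assumed numbers: previous state (x1, y1, w, h) = (100, 100, 50, 50) and a\n# 256px crop at resize_factor 1.0; a prediction at the crop center (128, 128)\n# maps back onto the previous box center (125, 125)\nprint(map_box_back([100, 100, 50, 50], [128, 128, 50, 50], 256, 1.0))  # [100.0, 100.0, 50, 50]\n"
  },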
  {
    "path": "lib/test/tracker/artrack_seq.py",
    "content": "import math\r\n\r\nfrom lib.models.artrack_seq import build_artrack_seq\r\nfrom lib.test.tracker.basetracker import BaseTracker\r\nimport torch\r\n\r\nfrom lib.test.tracker.vis_utils import gen_visualization\r\nfrom lib.test.utils.hann import hann2d\r\nfrom lib.train.data.processing_utils import sample_target, transform_image_to_crop\r\n# for debug\r\nimport cv2\r\nimport os\r\n\r\nfrom lib.test.tracker.data_utils import Preprocessor\r\nfrom lib.utils.box_ops import clip_box\r\nfrom lib.utils.ce_utils import generate_mask_cond\r\n\r\n\r\nclass ARTrackSeq(BaseTracker):\r\n    def __init__(self, params, dataset_name):\r\n        super(ARTrackSeq, self).__init__(params)\r\n        network = build_artrack_seq(params.cfg, training=False)\r\n        print(self.params.checkpoint)\r\n        network.load_state_dict(torch.load(self.params.checkpoint, map_location='cpu')['net'], strict=True)\r\n        self.cfg = params.cfg\r\n        self.bins = self.cfg.MODEL.BINS\r\n        self.network = network.cuda()\r\n        self.network.eval()\r\n        self.preprocessor = Preprocessor()\r\n        self.state = None\r\n\r\n        self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE\r\n        # motion constrain\r\n        self.output_window = hann2d(torch.tensor([self.feat_sz, self.feat_sz]).long(), centered=True).cuda()\r\n\r\n        # for debug\r\n        self.debug = params.debug\r\n        self.use_visdom = params.debug\r\n        self.frame_id = 0\r\n        if self.debug:\r\n            if not self.use_visdom:\r\n                self.save_dir = \"debug\"\r\n                if not os.path.exists(self.save_dir):\r\n                    os.makedirs(self.save_dir)\r\n            else:\r\n                # self.add_hook()\r\n                self._init_visdom(None, 1)\r\n        # for save boxes from all queries\r\n        self.save_all_boxes = params.save_all_boxes\r\n        self.z_dict1 = {}\r\n        self.store_result = None\r\n        self.save_all = 7\r\n        self.x_feat = None\r\n        self.update = None\r\n        self.update_threshold = 5.0\r\n        self.update_intervals = 1\r\n\r\n    def initialize(self, image, info: dict):\r\n        # forward the template once\r\n        self.x_feat = None\r\n\r\n        z_patch_arr, resize_factor, z_amask_arr = sample_target(image, info['init_bbox'], self.params.template_factor,\r\n                                                                output_sz=self.params.template_size)  # output_sz=self.params.template_size\r\n        self.z_patch_arr = z_patch_arr\r\n        template = self.preprocessor.process(z_patch_arr, z_amask_arr)\r\n        with torch.no_grad():\r\n            self.z_dict1 = template\r\n\r\n        self.box_mask_z = None\r\n        # if self.cfg.MODEL.BACKBONE.CE_LOC:\r\n        #    template_bbox = self.transform_bbox_to_crop(info['init_bbox'], resize_factor,\r\n        #                                                template.tensors.device).squeeze(1)\r\n        #    self.box_mask_z = generate_mask_cond(self.cfg, 1, template.tensors.device, template_bbox)\r\n\r\n        # save states\r\n        self.state = info['init_bbox']\r\n        self.store_result = [info['init_bbox'].copy()]\r\n        for i in range(self.save_all - 1):\r\n            self.store_result.append(info['init_bbox'].copy())\r\n        self.frame_id = 0\r\n        self.update = None\r\n        if self.save_all_boxes:\r\n            '''save all predicted boxes'''\r\n            all_boxes_save = info['init_bbox'] * 
self.cfg.MODEL.NUM_OBJECT_QUERIES\r\n            return {\"all_boxes\": all_boxes_save}\r\n\r\n    def track(self, image, info: dict = None):\r\n        H, W, _ = image.shape\r\n        self.frame_id += 1\r\n        x_patch_arr, resize_factor, x_amask_arr = sample_target(image, self.state, self.params.search_factor,\r\n                                                                output_sz=self.params.search_size)  # (x1, y1, w, h)\r\n        for i in range(len(self.store_result)):\r\n            box_temp = self.store_result[i].copy()\r\n            box_out_i = transform_image_to_crop(torch.Tensor(self.store_result[i]), torch.Tensor(self.state),\r\n                                                resize_factor,\r\n                                                torch.Tensor([self.cfg.TEST.SEARCH_SIZE, self.cfg.TEST.SEARCH_SIZE]),\r\n                                                normalize=True)\r\n            box_out_i[2] = box_out_i[2] + box_out_i[0]\r\n            box_out_i[3] = box_out_i[3] + box_out_i[1]\r\n            box_out_i = box_out_i.clamp(min=-0.5, max=1.5)\r\n            box_out_i = (box_out_i + 0.5) * (self.bins - 1)\r\n            if i == 0:\r\n                seqs_out = box_out_i\r\n            else:\r\n                seqs_out = torch.cat((seqs_out, box_out_i), dim=-1)\r\n        seqs_out = seqs_out.unsqueeze(0)\r\n        search = self.preprocessor.process(x_patch_arr, x_amask_arr)\r\n        with torch.no_grad():\r\n            x_dict = search\r\n            # merge the template and the search\r\n            # run the transformer\r\n            out_dict = self.network.forward(\r\n                template=self.z_dict1.tensors, search=x_dict.tensors,\r\n                seq_input=seqs_out, stage=\"sequence\", search_feature=self.x_feat, update=None)\r\n\r\n        self.x_feat = out_dict['x_feat']\r\n\r\n        pred_boxes = out_dict['seqs'][:, 0:4] / (self.bins - 1) - 0.5\r\n        pred_boxes = pred_boxes.view(-1, 4).mean(dim=0)\r\n        pred_new = pred_boxes\r\n        pred_new[2] = pred_boxes[2] - pred_boxes[0]\r\n        pred_new[3] = pred_boxes[3] - pred_boxes[1]\r\n        pred_new[0] = pred_boxes[0] + pred_new[2] / 2\r\n        pred_new[1] = pred_boxes[1] + pred_new[3] / 2\r\n        pred_boxes = (pred_new * self.params.search_size / resize_factor).tolist()\r\n\r\n        # Baseline: Take the mean of all pred boxes as the final result\r\n        # pred_box = (pred_boxes.mean(\r\n        #    dim=0) * self.params.search_size / resize_factor).tolist()  # (cx, cy, w, h) [0,1]\r\n        # get the final box result\r\n        self.state = clip_box(self.map_box_back(pred_boxes, resize_factor), H, W, margin=10)\r\n        if len(self.store_result) < self.save_all:\r\n            self.store_result.append(self.state.copy())\r\n        else:\r\n            for i in range(self.save_all):\r\n                if i != self.save_all - 1:\r\n                    self.store_result[i] = self.store_result[i + 1]\r\n                else:\r\n                    self.store_result[i] = self.state.copy()\r\n\r\n        # for debug\r\n        if self.debug:\r\n            if not self.use_visdom:\r\n                x1, y1, w, h = self.state\r\n                image_BGR = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\r\n                cv2.rectangle(image_BGR, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color=(0, 0, 255), thickness=2)\r\n                save_path = os.path.join(self.save_dir, \"%04d.jpg\" % self.frame_id)\r\n                cv2.imwrite(save_path, image_BGR)\r\n            
else:\r\n                self.visdom.register((image, info['gt_bbox'].tolist(), self.state), 'Tracking', 1, 'Tracking')\r\n\r\n                self.visdom.register(torch.from_numpy(x_patch_arr).permute(2, 0, 1), 'image', 1, 'search_region')\r\n                self.visdom.register(torch.from_numpy(self.z_patch_arr).permute(2, 0, 1), 'image', 1, 'template')\r\n                # note: ARTrackSeq's head outputs coordinate tokens and no 'score_map', so\r\n                # pred_score_map is undefined here; the heatmap views are disabled.\r\n                # self.visdom.register(pred_score_map.view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map')\r\n                # self.visdom.register((pred_score_map * self.output_window).view(self.feat_sz, self.feat_sz), 'heatmap',\r\n                #                      1, 'score_map_hann')\r\n\r\n                if 'removed_indexes_s' in out_dict and out_dict['removed_indexes_s']:\r\n                    removed_indexes_s = out_dict['removed_indexes_s']\r\n                    removed_indexes_s = [removed_indexes_s_i.cpu().numpy() for removed_indexes_s_i in removed_indexes_s]\r\n                    masked_search = gen_visualization(x_patch_arr, removed_indexes_s)\r\n                    self.visdom.register(torch.from_numpy(masked_search).permute(2, 0, 1), 'image', 1, 'masked_search')\r\n\r\n                while self.pause_mode:\r\n                    if self.step:\r\n                        self.step = False\r\n                        break\r\n\r\n        if self.save_all_boxes:\r\n            '''save all predictions'''\r\n            all_boxes = self.map_box_back_batch(pred_boxes * self.params.search_size / resize_factor, resize_factor)\r\n            all_boxes_save = all_boxes.view(-1).tolist()  # (4N, )\r\n            return {\"target_bbox\": self.state,\r\n                    \"all_boxes\": all_boxes_save}\r\n        else:\r\n            return {\"target_bbox\": self.state}\r\n\r\n    def map_box_back(self, pred_box: list, resize_factor: float):\r\n        cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3]\r\n        cx, cy, w, h = pred_box\r\n        half_side = 0.5 * self.params.search_size / resize_factor\r\n        cx_real = cx + (cx_prev - half_side)\r\n        cy_real = cy + (cy_prev - half_side)\r\n        # cx_real = cx + cx_prev\r\n        # cy_real = cy + cy_prev\r\n        return [cx_real - 0.5 * w, cy_real - 0.5 * h, w, h]\r\n\r\n    def map_box_back_batch(self, pred_box: torch.Tensor, resize_factor: float):\r\n        cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3]\r\n        cx, cy, w, h = pred_box.unbind(-1)  # (N,4) --> (N,)\r\n        half_side = 0.5 * self.params.search_size / resize_factor\r\n        cx_real = cx + (cx_prev - half_side)\r\n        cy_real = cy + (cy_prev - half_side)\r\n        return torch.stack([cx_real - 0.5 * w, cy_real - 0.5 * h, w, h], dim=-1)\r\n\r\n    def add_hook(self):\r\n        conv_features, enc_attn_weights, dec_attn_weights = [], [], []\r\n\r\n        for i in range(12):\r\n            self.network.backbone.blocks[i].attn.register_forward_hook(\r\n                # lambda self, input, output: enc_attn_weights.append(output[1])\r\n                lambda self, input, output: enc_attn_weights.append(output[1])\r\n            )\r\n\r\n        self.enc_attn_weights = enc_attn_weights\r\n\r\n\r\ndef get_tracker_class():\r\n    return ARTrackSeq\r\n"
  },
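  {
    "path": "docs/examples/seq_token_quantization_sketch.py",
    "content": "# Editor's note: illustrative sketch, not part of the original repository.\n# ARTrackSeq.track feeds previous boxes to the model as discrete tokens: each\n# normalized (x1, y1, x2, y2) coordinate is clamped to [-0.5, 1.5] and mapped\n# to a bin via (coord + 0.5) * (bins - 1); predictions are decoded with the\n# inverse, seqs / (bins - 1) - 0.5. bins = 400 is an assumed config value.\nimport torch\n\nbins = 400\nbox = torch.tensor([0.25, 0.25, 0.75, 1.75])  # normalized x1, y1, x2, y2\nbox = box.clamp(min=-0.5, max=1.5)            # coordinates outside the crop saturate\ntokens = (box + 0.5) * (bins - 1)\nprint(tokens)                     # tensor([299.2500, 299.2500, 498.7500, 798.0000])\nprint(tokens / (bins - 1) - 0.5)  # recovers the clamped box\n"
  },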
  {
    "path": "lib/test/tracker/artrackv2.py",
    "content": "import math\n\nfrom lib.models.artrackv2 import build_artrackv2\nfrom lib.test.tracker.basetracker import BaseTracker\nimport torch\n\nfrom lib.test.tracker.vis_utils import gen_visualization\nfrom lib.test.utils.hann import hann2d\nfrom lib.train.data.processing_utils import sample_target\n# for debug\nimport cv2\nimport os\n\nfrom lib.test.tracker.data_utils import Preprocessor\nfrom lib.utils.box_ops import clip_box\nfrom lib.utils.ce_utils import generate_mask_cond\nimport random\n\n\nclass RandomErasing(object):\n    def __init__(self, EPSILON=0.5, sl=0.02, sh=0.33, r1=0.3, mean=[0.4914, 0.4822, 0.4465]):\n        self.EPSILON = EPSILON\n        self.mean = mean\n        self.sl = sl\n        self.sh = sh\n        self.r1 = r1\n\n    def __call__(self, img):\n\n        if random.uniform(0, 1) > self.EPSILON:\n            return img\n\n        for attempt in range(100):\n            print(img.size())\n            area = img.size()[1] * img.size()[2]\n\n            target_area = random.uniform(self.sl, self.sh) * area\n            aspect_ratio = random.uniform(self.r1, 1 / self.r1)\n\n            h = int(round(math.sqrt(target_area * aspect_ratio)))\n            w = int(round(math.sqrt(target_area / aspect_ratio)))\n\n            if w < img.size()[2] and h < img.size()[1]:\n                x1 = random.randint(0, img.size()[1] - h)\n                y1 = random.randint(0, img.size()[2] - w)\n                if img.size()[0] == 3:\n                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]\n                    img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]\n                    img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]\n                else:\n                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[1]\n                return img\n\n        return img\n\n\nclass ARTrackV2(BaseTracker):\n    def __init__(self, params, dataset_name):\n        super(ARTrackV2, self).__init__(params)\n        network = build_artrackv2(params.cfg, training=False)\n        network.load_state_dict(torch.load(self.params.checkpoint, map_location='cpu')['net'], strict=True)\n        self.cfg = params.cfg\n        self.bins = self.cfg.MODEL.BINS\n        self.network = network.cuda()\n        self.network.eval()\n        self.preprocessor = Preprocessor()\n        self.state = None\n        self.update_ = False\n\n        self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE\n        # motion constrain\n        self.output_window = hann2d(torch.tensor([self.feat_sz, self.feat_sz]).long(), centered=True).cuda()\n\n        # for debug\n        self.debug = params.debug\n        self.use_visdom = params.debug\n        self.frame_id = 0\n        self.erase = RandomErasing()\n        if self.debug:\n            if not self.use_visdom:\n                self.save_dir = \"debug\"\n                if not os.path.exists(self.save_dir):\n                    os.makedirs(self.save_dir)\n            else:\n                # self.add_hook()\n                self._init_visdom(None, 1)\n        # for save boxes from all queries\n        self.save_all_boxes = params.save_all_boxes\n        self.z_dict1 = {}\n\n    def initialize(self, image, info: dict):\n        # forward the template once\n\n        z_patch_arr, resize_factor, z_amask_arr = sample_target(image, info['init_bbox'], self.params.template_factor,\n                                                                output_sz=self.params.template_size)  # output_sz=self.params.template_size\n        self.z_patch_arr = z_patch_arr\n   
     template = self.preprocessor.process(z_patch_arr, z_amask_arr)\n        with torch.no_grad():\n            # initialize dynamic template as template in first frame\n            self.z_dict1 = template\n            self.z_dict2 = template\n\n        self.box_mask_z = None\n\n        self.state = info['init_bbox']\n        self.frame_id = 0\n        if self.save_all_boxes:\n            '''save all predicted boxes'''\n            all_boxes_save = info['init_bbox'] * self.cfg.MODEL.NUM_OBJECT_QUERIES\n            return {\"all_boxes\": all_boxes_save}\n\n    def track(self, image, info: dict = None):\n        H, W, _ = image.shape\n        self.frame_id += 1\n        x_patch_arr, resize_factor, x_amask_arr = sample_target(image, self.state, self.params.search_factor,\n                                                                output_sz=self.params.search_size)  # (x1, y1, w, h)\n        search = self.preprocessor.process(x_patch_arr, x_amask_arr)\n\n        with torch.no_grad():\n            x_dict = search\n            # merge the template and the search\n            # run the transformer\n            if self.update_:\n                template = torch.concat([self.z_dict1.tensors.unsqueeze(0), self.z_dict2.unsqueeze(0)], dim=0)\n            else:\n                template = torch.concat([self.z_dict1.tensors.unsqueeze(0), self.z_dict2.tensors.unsqueeze(0)], dim=0)\n            out_dict = self.network.forward(\n                template=template, search=x_dict.tensors, ce_template_mask=self.box_mask_z)\n\n        pred_boxes = out_dict['seqs'][:, 0:4] / (self.bins - 1) - 0.5\n        pred_boxes = pred_boxes.view(-1, 4).mean(dim=0)\n        pred_new = pred_boxes\n\n        pred_new[2] = pred_boxes[2] - pred_boxes[0]\n        pred_new[3] = pred_boxes[3] - pred_boxes[1]\n        pred_new[0] = pred_boxes[0] + pred_boxes[2] / 2\n        pred_new[1] = pred_boxes[1] + pred_boxes[3] / 2\n\n        pred_boxes = (pred_new * self.params.search_size / resize_factor).tolist()\n\n        # Baseline: Take the mean of all pred boxes as the final result\n        # pred_box = (pred_boxes.mean(\n        #    dim=0) * self.params.search_size / resize_factor).tolist()  # (cx, cy, w, h) [0,1]\n        # get the final box result\n        self.state = clip_box(self.map_box_back(pred_boxes, resize_factor), H, W, margin=10)\n\n        # for debug\n        if self.debug:\n            if not self.use_visdom:\n                x1, y1, w, h = self.state\n                image_BGR = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n                cv2.rectangle(image_BGR, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)), color=(0, 0, 255), thickness=2)\n                save_path = os.path.join(self.save_dir, \"%04d.jpg\" % self.frame_id)\n                cv2.imwrite(save_path, image_BGR)\n            else:\n                self.visdom.register((image, info['gt_bbox'].tolist(), self.state), 'Tracking', 1, 'Tracking')\n\n                self.visdom.register(torch.from_numpy(x_patch_arr).permute(2, 0, 1), 'image', 1, 'search_region')\n                self.visdom.register(torch.from_numpy(self.z_patch_arr).permute(2, 0, 1), 'image', 1, 'template')\n                # note: ARTrackV2 predicts coordinate tokens and returns no 'score_map', so\n                # pred_score_map is undefined here; the heatmap views are disabled.\n                # self.visdom.register(pred_score_map.view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map')\n                # self.visdom.register((pred_score_map * self.output_window).view(self.feat_sz, self.feat_sz), 'heatmap',\n                #                      1, 'score_map_hann')\n\n                if 'removed_indexes_s' in out_dict and out_dict['removed_indexes_s']:\n                    removed_indexes_s = out_dict['removed_indexes_s']\n                    removed_indexes_s = [removed_indexes_s_i.cpu().numpy() for removed_indexes_s_i in removed_indexes_s]\n                    masked_search = gen_visualization(x_patch_arr, removed_indexes_s)\n                    self.visdom.register(torch.from_numpy(masked_search).permute(2, 0, 1), 'image', 1, 'masked_search')\n\n                while self.pause_mode:\n                    if self.step:\n                        self.step = False\n                        break\n\n        if self.save_all_boxes:\n            '''save all predictions'''\n            all_boxes = self.map_box_back_batch(pred_boxes * self.params.search_size / resize_factor, resize_factor)\n            all_boxes_save = all_boxes.view(-1).tolist()  # (4N, )\n            return {\"target_bbox\": self.state,\n                    \"all_boxes\": all_boxes_save}\n        else:\n            return {\"target_bbox\": self.state}\n\n    def map_box_back(self, pred_box: list, resize_factor: float):\n        cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3]\n        cx, cy, w, h = pred_box\n        half_side = 0.5 * self.params.search_size / resize_factor\n        cx_real = cx + (cx_prev - half_side)\n        cy_real = cy + (cy_prev - half_side)\n        # cx_real = cx + cx_prev\n        # cy_real = cy + cy_prev\n        return [cx_real - 0.5 * w, cy_real - 0.5 * h, w, h]\n\n    def map_box_back_batch(self, pred_box: torch.Tensor, resize_factor: float):\n        cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3]\n        cx, cy, w, h = pred_box.unbind(-1)  # (N,4) --> (N,)\n        half_side = 0.5 * self.params.search_size / resize_factor\n        cx_real = cx + (cx_prev - half_side)\n        cy_real = cy + (cy_prev - half_side)\n        return torch.stack([cx_real - 0.5 * w, cy_real - 0.5 * h, w, h], dim=-1)\n\n    def add_hook(self):\n        conv_features, enc_attn_weights, dec_attn_weights = [], [], []\n\n        for i in range(12):\n            self.network.backbone.blocks[i].attn.register_forward_hook(\n                # lambda self, input, output: enc_attn_weights.append(output[1])\n                lambda self, input, output: enc_attn_weights.append(output[1])\n            )\n\n        self.enc_attn_weights = enc_attn_weights\n\n\ndef get_tracker_class():\n    return ARTrackV2\n"
  },
  {
    "path": "lib/test/tracker/artrackv2_seq.py",
    "content": "import math\n\nfrom lib.models.artrackv2_seq import build_artrackv2_seq\nfrom lib.test.tracker.basetracker import BaseTracker\nimport torch\n\nfrom lib.test.tracker.vis_utils import gen_visualization\nfrom lib.test.utils.hann import hann2d\nfrom lib.train.data.processing_utils import sample_target, transform_image_to_crop\n# for debug\nimport cv2\nimport os\n\nfrom lib.test.tracker.data_utils import Preprocessor\nfrom lib.utils.box_ops import clip_box\nfrom lib.utils.ce_utils import generate_mask_cond\n\n\nclass ARTrackV2Seq(BaseTracker):\n    def __init__(self, params, dataset_name):\n        super(ARTrackV2Seq, self).__init__(params)\n        network = build_artrackv2_seq(params.cfg, training=False)\n        network.load_state_dict(torch.load(self.params.checkpoint, map_location='cpu')['net'], strict=True)\n\n        self.cfg = params.cfg\n        self.bins = params.cfg.MODEL.BINS\n        self.network = network.cuda()\n        self.network.eval()\n        self.preprocessor = Preprocessor()\n        self.state = None\n        self.dz_feat = None\n\n        self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE\n        # motion constrain\n        self.output_window = hann2d(torch.tensor([self.feat_sz, self.feat_sz]).long(), centered=True).cuda()\n\n        # for debug\n        self.debug = params.debug\n        self.use_visdom = params.debug\n        self.frame_id = 0\n        if self.debug:\n            if not self.use_visdom:\n                self.save_dir = \"debug\"\n                if not os.path.exists(self.save_dir):\n                    os.makedirs(self.save_dir)\n            else:\n                # self.add_hook()\n                self._init_visdom(None, 1)\n        # for save boxes from all queries\n        self.save_all_boxes = params.save_all_boxes\n        self.z_dict1 = {}\n        self.store_result = None\n        self.prenum = params.cfg.MODEL.PRENUM\n        self.range = params.cfg.MODEL.RANGE\n        self.x_feat = None\n\n    def initialize(self, image, info: dict):\n        # forward the template once\n        self.x_feat = None\n        self.update_ = False\n\n        z_patch_arr, resize_factor, z_amask_arr = sample_target(image, info['init_bbox'], self.params.template_factor,\n                                                                output_sz=self.params.template_size)  # output_sz=self.params.template_size\n        self.z_patch_arr = z_patch_arr\n        template = self.preprocessor.process(z_patch_arr, z_amask_arr)\n        with torch.no_grad():\n            self.z_dict1 = template\n            self.z_dict2 = template\n            self.dz_feat = None\n\n        self.box_mask_z = None\n\n        # save states\n        self.state = info['init_bbox']\n        self.store_result = [info['init_bbox'].copy()]\n        for i in range(self.prenum - 1):\n            self.store_result.append(info['init_bbox'].copy())\n        self.frame_id = 0\n        if self.save_all_boxes:\n            '''save all predicted boxes'''\n            all_boxes_save = info['init_bbox'] * self.cfg.MODEL.NUM_OBJECT_QUERIES\n            return {\"all_boxes\": all_boxes_save}\n\n    def track(self, image, info: dict = None):\n        H, W, _ = image.shape\n        self.frame_id += 1\n        x_patch_arr, resize_factor, x_amask_arr = sample_target(image, self.state, self.params.search_factor,\n                                                                output_sz=self.params.search_size)  # (x1, y1, w, h)\n        if self.dz_feat == None:\n            
self.dz_feat = self.network.backbone.patch_embed(self.z_dict2.tensors)\n        for i in range(len(self.store_result)):\n            box_temp = self.store_result[i].copy()\n            box_out_i = transform_image_to_crop(torch.Tensor(self.store_result[i]), torch.Tensor(self.state),\n                                                resize_factor,\n                                                torch.Tensor([self.cfg.TEST.SEARCH_SIZE, self.cfg.TEST.SEARCH_SIZE]),\n                                                normalize=True)\n            box_out_i[2] = box_out_i[2] + box_out_i[0]\n            box_out_i[3] = box_out_i[3] + box_out_i[1]\n            box_out_i = box_out_i.clamp(min=-0.5, max=1.5)\n            box_out_i = (box_out_i + 0.5) * (self.bins - 1)\n            if i == 0:\n                seqs_out = box_out_i\n            else:\n                seqs_out = torch.cat((seqs_out, box_out_i), dim=-1)\n\n        seqs_out = seqs_out.unsqueeze(0)\n\n        search = self.preprocessor.process(x_patch_arr, x_amask_arr)\n\n        with torch.no_grad():\n            x_dict = search\n            # merge the template and the search\n            # run the transformer\n            if self.update_:\n                template = torch.concat([self.z_dict1.tensors.unsqueeze(1), self.z_dict2.unsqueeze(1)], dim=1)\n            else:\n                template = torch.concat([self.z_dict1.tensors.unsqueeze(1), self.z_dict2.tensors.unsqueeze(1)], dim=1)\n            out_dict = self.network.forward(\n                template=template, dz_feat=self.dz_feat, search=x_dict.tensors, ce_template_mask=self.box_mask_z,\n                seq_input=seqs_out, stage=\"sequence\", search_feature=self.x_feat)\n\n        self.dz_feat = out_dict['dz_feat']\n        self.x_feat = out_dict['x_feat']\n\n        pred_boxes = (out_dict['seqs'][:, 0:4] + 0.5) / (self.bins - 1) - 0.5\n\n        pred_feat = out_dict['feat']\n        pred = pred_feat.permute(1, 0, 2).reshape(-1, self.bins * self.range + 6)\n\n        pred = pred_feat[0:4, :, 0:self.bins * self.range]\n\n        out = pred.softmax(-1).to(pred)\n        mul = torch.range((-1 * self.range * 0.5 + 0.5) + 1 / (self.bins * self.range), (self.range * 0.5 + 0.5) - 1 / (self.bins * self.range), 2 / (self.bins * self.range)).to(pred)\n\n        ans = out * mul\n        ans = ans.sum(dim=-1)\n        ans = ans.permute(1, 0).to(pred)\n\n        pred_boxes = (ans + pred_boxes) / 2\n\n        pred_boxes = pred_boxes.view(-1, 4).mean(dim=0)\n\n        pred_new = pred_boxes\n        pred_new[2] = pred_boxes[2] - pred_boxes[0]\n        pred_new[3] = pred_boxes[3] - pred_boxes[1]\n        pred_new[0] = pred_boxes[0] + pred_new[2] / 2\n        pred_new[1] = pred_boxes[1] + pred_new[3] / 2\n\n        pred_boxes = (pred_new * self.params.search_size / resize_factor).tolist()\n\n        self.state = clip_box(self.map_box_back(pred_boxes, resize_factor), H, W, margin=10)\n\n        if len(self.store_result) < self.prenum:\n            self.store_result.append(self.state.copy())\n        else:\n            for i in range(self.prenum):\n                if i != self.prenum - 1:\n                    self.store_result[i] = self.store_result[i + 1]\n                else:\n                    self.store_result[i] = self.state.copy()\n\n        # for debug\n        if self.debug:\n            if not self.use_visdom:\n                x1, y1, w, h = self.state\n                image_BGR = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n                cv2.rectangle(image_BGR, (int(x1), int(y1)), (int(x1 + w), 
int(y1 + h)), color=(0, 0, 255), thickness=2)\n                save_path = os.path.join(self.save_dir, \"%04d.jpg\" % self.frame_id)\n                cv2.imwrite(save_path, image_BGR)\n            else:\n                self.visdom.register((image, info['gt_bbox'].tolist(), self.state), 'Tracking', 1, 'Tracking')\n\n                self.visdom.register(torch.from_numpy(x_patch_arr).permute(2, 0, 1), 'image', 1, 'search_region')\n                self.visdom.register(torch.from_numpy(self.z_patch_arr).permute(2, 0, 1), 'image', 1, 'template')\n                self.visdom.register(pred_score_map.view(self.feat_sz, self.feat_sz), 'heatmap', 1, 'score_map')\n                self.visdom.register((pred_score_map * self.output_window).view(self.feat_sz, self.feat_sz), 'heatmap',\n                                     1, 'score_map_hann')\n\n                if 'removed_indexes_s' in out_dict and out_dict['removed_indexes_s']:\n                    removed_indexes_s = out_dict['removed_indexes_s']\n                    removed_indexes_s = [removed_indexes_s_i.cpu().numpy() for removed_indexes_s_i in removed_indexes_s]\n                    masked_search = gen_visualization(x_patch_arr, removed_indexes_s)\n                    self.visdom.register(torch.from_numpy(masked_search).permute(2, 0, 1), 'image', 1, 'masked_search')\n\n                while self.pause_mode:\n                    if self.step:\n                        self.step = False\n                        break\n\n        if self.save_all_boxes:\n            '''save all predictions'''\n            all_boxes = self.map_box_back_batch(pred_boxes * self.params.search_size / resize_factor, resize_factor)\n            all_boxes_save = all_boxes.view(-1).tolist()  # (4N, )\n            return {\"target_bbox\": self.state,\n                    \"all_boxes\": all_boxes_save}\n        else:\n            return {\"target_bbox\": self.state}\n\n    def map_box_back(self, pred_box: list, resize_factor: float):\n        cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3]\n        cx, cy, w, h = pred_box\n        half_side = 0.5 * self.params.search_size / resize_factor\n        cx_real = cx + (cx_prev - half_side)\n        cy_real = cy + (cy_prev - half_side)\n        # cx_real = cx + cx_prev\n        # cy_real = cy + cy_prev\n        return [cx_real - 0.5 * w, cy_real - 0.5 * h, w, h]\n\n    def map_box_back_batch(self, pred_box: torch.Tensor, resize_factor: float):\n        cx_prev, cy_prev = self.state[0] + 0.5 * self.state[2], self.state[1] + 0.5 * self.state[3]\n        cx, cy, w, h = pred_box.unbind(-1)  # (N,4) --> (N,)\n        half_side = 0.5 * self.params.search_size / resize_factor\n        cx_real = cx + (cx_prev - half_side)\n        cy_real = cy + (cy_prev - half_side)\n        return torch.stack([cx_real - 0.5 * w, cy_real - 0.5 * h, w, h], dim=-1)\n\n    def add_hook(self):\n        conv_features, enc_attn_weights, dec_attn_weights = [], [], []\n\n        for i in range(12):\n            self.network.backbone.blocks[i].attn.register_forward_hook(\n                # lambda self, input, output: enc_attn_weights.append(output[1])\n                lambda self, input, output: enc_attn_weights.append(output[1])\n            )\n\n        self.enc_attn_weights = enc_attn_weights\n\n\ndef get_tracker_class():\n    return ARTrackV2Seq\n"
  },
  {
    "path": "lib/test/tracker/basetracker.py",
    "content": "import time\n\nimport torch\nfrom _collections import OrderedDict\n\nfrom lib.train.data.processing_utils import transform_image_to_crop\nfrom lib.vis.visdom_cus import Visdom\n\n\nclass BaseTracker:\n    \"\"\"Base class for all trackers.\"\"\"\n\n    def __init__(self, params):\n        self.params = params\n        self.visdom = None\n\n    def predicts_segmentation_mask(self):\n        return False\n\n    def initialize(self, image, info: dict) -> dict:\n        \"\"\"Overload this function in your tracker. This should initialize the model.\"\"\"\n        raise NotImplementedError\n\n    def track(self, image, info: dict = None) -> dict:\n        \"\"\"Overload this function in your tracker. This should track in the frame and update the model.\"\"\"\n        raise NotImplementedError\n\n    def visdom_draw_tracking(self, image, box, segmentation=None):\n        if isinstance(box, OrderedDict):\n            box = [v for k, v in box.items()]\n        else:\n            box = (box,)\n        if segmentation is None:\n            self.visdom.register((image, *box), 'Tracking', 1, 'Tracking')\n        else:\n            self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking')\n\n    def transform_bbox_to_crop(self, box_in, resize_factor, device, box_extract=None, crop_type='template'):\n        # box_in: list [x1, y1, w, h], not normalized\n        # box_extract: same as box_in\n        # out bbox: Torch.tensor [1, 1, 4], x1y1wh, normalized\n        if crop_type == 'template':\n            crop_sz = torch.Tensor([self.params.template_size, self.params.template_size])\n        elif crop_type == 'search':\n            crop_sz = torch.Tensor([self.params.search_size, self.params.search_size])\n        else:\n            raise NotImplementedError\n\n        box_in = torch.tensor(box_in)\n        if box_extract is None:\n            box_extract = box_in\n        else:\n            box_extract = torch.tensor(box_extract)\n        template_bbox = transform_image_to_crop(box_in, box_extract, resize_factor, crop_sz, normalize=True)\n        template_bbox = template_bbox.view(1, 1, 4).to(device)\n\n        return template_bbox\n\n    def _init_visdom(self, visdom_info, debug):\n        visdom_info = {} if visdom_info is None else visdom_info\n        self.pause_mode = False\n        self.step = False\n        self.next_seq = False\n        if debug > 0 and visdom_info.get('use_visdom', True):\n            try:\n                self.visdom = Visdom(debug, {'handler': self._visdom_ui_handler, 'win_id': 'Tracking'},\n                                     visdom_info=visdom_info)\n\n                # # Show help\n                # help_text = 'You can pause/unpause the tracker by pressing ''space'' with the ''Tracking'' window ' \\\n                #             'selected. During paused mode, you can track for one frame by pressing the right arrow key.' \\\n                #             'To enable/disable plotting of a data block, tick/untick the corresponding entry in ' \\\n                #             'block list.'\n                # self.visdom.register(help_text, 'text', 1, 'Help')\n            except:\n                time.sleep(0.5)\n                print('!!! WARNING: Visdom could not start, so using matplotlib visualization instead !!!\\n'\n                      '!!! 
Start Visdom in a separate terminal window by typing \\'visdom\\' !!!')\n\n    def _visdom_ui_handler(self, data):\n        if data['event_type'] == 'KeyPress':\n            if data['key'] == ' ':\n                self.pause_mode = not self.pause_mode\n\n            elif data['key'] == 'ArrowRight' and self.pause_mode:\n                self.step = True\n\n            elif data['key'] == 'n':\n                self.next_seq = True\n"
  },
  {
    "path": "lib/test/tracker/data_utils.py",
    "content": "import torch\nimport numpy as np\nfrom lib.utils.misc import NestedTensor\n\n\nclass Preprocessor(object):\n    def __init__(self):\n        self.mean = torch.tensor([0.485, 0.456, 0.406]).view((1, 3, 1, 1)).cuda()\n        self.std = torch.tensor([0.229, 0.224, 0.225]).view((1, 3, 1, 1)).cuda()\n\n    def process(self, img_arr: np.ndarray, amask_arr: np.ndarray):\n        # Deal with the image patch\n        img_tensor = torch.tensor(img_arr).cuda().float().permute((2,0,1)).unsqueeze(dim=0)\n        img_tensor_norm = ((img_tensor / 255.0) - self.mean) / self.std  # (1,3,H,W)\n        # Deal with the attention mask\n        amask_tensor = torch.from_numpy(amask_arr).to(torch.bool).cuda().unsqueeze(dim=0)  # (1,H,W)\n        return NestedTensor(img_tensor_norm, amask_tensor)\n\n\nclass PreprocessorX(object):\n    def __init__(self):\n        self.mean = torch.tensor([0.485, 0.456, 0.406]).view((1, 3, 1, 1)).cuda()\n        self.std = torch.tensor([0.229, 0.224, 0.225]).view((1, 3, 1, 1)).cuda()\n\n    def process(self, img_arr: np.ndarray, amask_arr: np.ndarray):\n        # Deal with the image patch\n        img_tensor = torch.tensor(img_arr).cuda().float().permute((2,0,1)).unsqueeze(dim=0)\n        img_tensor_norm = ((img_tensor / 255.0) - self.mean) / self.std  # (1,3,H,W)\n        # Deal with the attention mask\n        amask_tensor = torch.from_numpy(amask_arr).to(torch.bool).cuda().unsqueeze(dim=0)  # (1,H,W)\n        return img_tensor_norm, amask_tensor\n\n\nclass PreprocessorX_onnx(object):\n    def __init__(self):\n        self.mean = np.array([0.485, 0.456, 0.406]).reshape((1, 3, 1, 1))\n        self.std = np.array([0.229, 0.224, 0.225]).reshape((1, 3, 1, 1))\n\n    def process(self, img_arr: np.ndarray, amask_arr: np.ndarray):\n        \"\"\"img_arr: (H,W,3), amask_arr: (H,W)\"\"\"\n        # Deal with the image patch\n        img_arr_4d = img_arr[np.newaxis, :, :, :].transpose(0, 3, 1, 2)\n        img_arr_4d = (img_arr_4d / 255.0 - self.mean) / self.std  # (1, 3, H, W)\n        # Deal with the attention mask\n        amask_arr_3d = amask_arr[np.newaxis, :, :]  # (1,H,W)\n        return img_arr_4d.astype(np.float32), amask_arr_3d.astype(np.bool)\n"
  },
  {
    "path": "lib/test/tracker/vis_utils.py",
    "content": "import numpy as np\r\n\r\n\r\n############## used for visulize eliminated tokens #################\r\ndef get_keep_indices(decisions):\r\n    keep_indices = []\r\n    for i in range(3):\r\n        if i == 0:\r\n            keep_indices.append(decisions[i])\r\n        else:\r\n            keep_indices.append(keep_indices[-1][decisions[i]])\r\n    return keep_indices\r\n\r\n\r\ndef gen_masked_tokens(tokens, indices, alpha=0.2):\r\n    # indices = [i for i in range(196) if i not in indices]\r\n    indices = indices[0].astype(int)\r\n    tokens = tokens.copy()\r\n    tokens[indices] = alpha * tokens[indices] + (1 - alpha) * 255\r\n    return tokens\r\n\r\n\r\ndef recover_image(tokens, H, W, Hp, Wp, patch_size):\r\n    # image: (C, 196, 16, 16)\r\n    image = tokens.reshape(Hp, Wp, patch_size, patch_size, 3).swapaxes(1, 2).reshape(H, W, 3)\r\n    return image\r\n\r\n\r\ndef pad_img(img):\r\n    height, width, channels = img.shape\r\n    im_bg = np.ones((height, width + 8, channels)) * 255\r\n    im_bg[0:height, 0:width, :] = img\r\n    return im_bg\r\n\r\n\r\ndef gen_visualization(image, mask_indices, patch_size=16):\r\n    # image [224, 224, 3]\r\n    # mask_indices, list of masked token indices\r\n\r\n    # mask mask_indices need to cat\r\n    # mask_indices = mask_indices[::-1]\r\n    num_stages = len(mask_indices)\r\n    for i in range(1, num_stages):\r\n        mask_indices[i] = np.concatenate([mask_indices[i-1], mask_indices[i]], axis=1)\r\n\r\n    # keep_indices = get_keep_indices(decisions)\r\n    image = np.asarray(image)\r\n    H, W, C = image.shape\r\n    Hp, Wp = H // patch_size, W // patch_size\r\n    image_tokens = image.reshape(Hp, patch_size, Wp, patch_size, 3).swapaxes(1, 2).reshape(Hp * Wp, patch_size, patch_size, 3)\r\n\r\n    stages = [\r\n        recover_image(gen_masked_tokens(image_tokens, mask_indices[i]), H, W, Hp, Wp, patch_size)\r\n        for i in range(num_stages)\r\n    ]\r\n    imgs = [image] + stages\r\n    imgs = [pad_img(img) for img in imgs]\r\n    viz = np.concatenate(imgs, axis=1)\r\n    return viz\r\n"
  },
  {
    "path": "lib/test/utils/__init__.py",
    "content": "from .params import TrackerParams, FeatureParams, Choice"
  },
  {
    "path": "lib/test/utils/_init_paths.py",
    "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path as osp\nimport sys\n\n\ndef add_path(path):\n    if path not in sys.path:\n        sys.path.insert(0, path)\n\n\nthis_dir = osp.dirname(__file__)\n\nprj_path = osp.join(this_dir, '..', '..', '..')\nadd_path(prj_path)\n"
  },
  {
    "path": "lib/test/utils/hann.py",
    "content": "import torch\nimport math\nimport torch.nn.functional as F\n\n\ndef hann1d(sz: int, centered = True) -> torch.Tensor:\n    \"\"\"1D cosine window.\"\"\"\n    if centered:\n        return 0.5 * (1 - torch.cos((2 * math.pi / (sz + 1)) * torch.arange(1, sz + 1).float()))\n    w = 0.5 * (1 + torch.cos((2 * math.pi / (sz + 2)) * torch.arange(0, sz//2 + 1).float()))\n    return torch.cat([w, w[1:sz-sz//2].flip((0,))])\n\n\ndef hann2d(sz: torch.Tensor, centered = True) -> torch.Tensor:\n    \"\"\"2D cosine window.\"\"\"\n    return hann1d(sz[0].item(), centered).reshape(1, 1, -1, 1) * hann1d(sz[1].item(), centered).reshape(1, 1, 1, -1)\n\n\ndef hann2d_bias(sz: torch.Tensor, ctr_point: torch.Tensor, centered = True) -> torch.Tensor:\n    \"\"\"2D cosine window.\"\"\"\n    distance = torch.stack([ctr_point, sz-ctr_point], dim=0)\n    max_distance, _ = distance.max(dim=0)\n\n    hann1d_x = hann1d(max_distance[0].item() * 2, centered)\n    hann1d_x = hann1d_x[max_distance[0] - distance[0, 0]: max_distance[0] + distance[1, 0]]\n    hann1d_y = hann1d(max_distance[1].item() * 2, centered)\n    hann1d_y = hann1d_y[max_distance[1] - distance[0, 1]: max_distance[1] + distance[1, 1]]\n\n    return hann1d_y.reshape(1, 1, -1, 1) * hann1d_x.reshape(1, 1, 1, -1)\n\n\n\ndef hann2d_clipped(sz: torch.Tensor, effective_sz: torch.Tensor, centered = True) -> torch.Tensor:\n    \"\"\"1D clipped cosine window.\"\"\"\n\n    # Ensure that the difference is even\n    effective_sz += (effective_sz - sz) % 2\n    effective_window = hann1d(effective_sz[0].item(), True).reshape(1, 1, -1, 1) * hann1d(effective_sz[1].item(), True).reshape(1, 1, 1, -1)\n\n    pad = (sz - effective_sz) // 2\n\n    window = F.pad(effective_window, (pad[1].item(), pad[1].item(), pad[0].item(), pad[0].item()), 'replicate')\n\n    if centered:\n        return window\n    else:\n        mid = (sz / 2).int()\n        window_shift_lr = torch.cat((window[:, :, :, mid[1]:], window[:, :, :, :mid[1]]), 3)\n        return torch.cat((window_shift_lr[:, :, mid[0]:, :], window_shift_lr[:, :, :mid[0], :]), 2)\n\n\ndef gauss_fourier(sz: int, sigma: float, half: bool = False) -> torch.Tensor:\n    if half:\n        k = torch.arange(0, int(sz/2+1))\n    else:\n        k = torch.arange(-int((sz-1)/2), int(sz/2+1))\n    return (math.sqrt(2*math.pi) * sigma / sz) * torch.exp(-2 * (math.pi * sigma * k.float() / sz)**2)\n\n\ndef gauss_spatial(sz, sigma, center=0, end_pad=0):\n    k = torch.arange(-(sz-1)/2, (sz+1)/2+end_pad)\n    return torch.exp(-1.0/(2*sigma**2) * (k - center)**2)\n\n\ndef label_function(sz: torch.Tensor, sigma: torch.Tensor):\n    return gauss_fourier(sz[0].item(), sigma[0].item()).reshape(1, 1, -1, 1) * gauss_fourier(sz[1].item(), sigma[1].item(), True).reshape(1, 1, 1, -1)\n\ndef label_function_spatial(sz: torch.Tensor, sigma: torch.Tensor, center: torch.Tensor = torch.zeros(2), end_pad: torch.Tensor = torch.zeros(2)):\n    \"\"\"The origin is in the middle of the image.\"\"\"\n    return gauss_spatial(sz[0].item(), sigma[0].item(), center[0], end_pad[0].item()).reshape(1, 1, -1, 1) * \\\n           gauss_spatial(sz[1].item(), sigma[1].item(), center[1], end_pad[1].item()).reshape(1, 1, 1, -1)\n\n\ndef cubic_spline_fourier(f, a):\n    \"\"\"The continuous Fourier transform of a cubic spline kernel.\"\"\"\n\n    bf = (6*(1 - torch.cos(2 * math.pi * f)) + 3*a*(1 - torch.cos(4 * math.pi * f))\n           - (6 + 8*a)*math.pi*f*torch.sin(2 * math.pi * f) - 2*a*math.pi*f*torch.sin(4 * math.pi * f)) \\\n         / (4 * math.pi**4 * 
f**4)\n\n    bf[f == 0] = 1\n\n    return bf\n\ndef max2d(a: torch.Tensor) -> (torch.Tensor, torch.Tensor):\n    \"\"\"Computes maximum and argmax in the last two dimensions.\"\"\"\n\n    max_val_row, argmax_row = torch.max(a, dim=-2)\n    max_val, argmax_col = torch.max(max_val_row, dim=-1)\n    argmax_row = argmax_row.view(argmax_col.numel(),-1)[torch.arange(argmax_col.numel()), argmax_col.view(-1)]\n    argmax_row = argmax_row.reshape(argmax_col.shape)\n    argmax = torch.cat((argmax_row.unsqueeze(-1), argmax_col.unsqueeze(-1)), -1)\n    return max_val, argmax\n"
  },
  {
    "path": "lib/test/utils/load_text.py",
    "content": "import numpy as np\nimport pandas as pd\n\n\ndef load_text_numpy(path, delimiter, dtype):\n    if isinstance(delimiter, (tuple, list)):\n        for d in delimiter:\n            try:\n                ground_truth_rect = np.loadtxt(path, delimiter=d, dtype=dtype)\n                return ground_truth_rect\n            except:\n                pass\n\n        raise Exception('Could not read file {}'.format(path))\n    else:\n        ground_truth_rect = np.loadtxt(path, delimiter=delimiter, dtype=dtype)\n        return ground_truth_rect\n\n\ndef load_text_pandas(path, delimiter, dtype):\n    if isinstance(delimiter, (tuple, list)):\n        for d in delimiter:\n            try:\n                ground_truth_rect = pd.read_csv(path, delimiter=d, header=None, dtype=dtype, na_filter=False,\n                                                low_memory=False).values\n                return ground_truth_rect\n            except Exception as e:\n                pass\n\n        raise Exception('Could not read file {}'.format(path))\n    else:\n        ground_truth_rect = pd.read_csv(path, delimiter=delimiter, header=None, dtype=dtype, na_filter=False,\n                                        low_memory=False).values\n        return ground_truth_rect\n\n\ndef load_text(path, delimiter=' ', dtype=np.float32, backend='numpy'):\n    if backend == 'numpy':\n        return load_text_numpy(path, delimiter, dtype)\n    elif backend == 'pandas':\n        return load_text_pandas(path, delimiter, dtype)\n\n\ndef load_str(path):\n    with open(path, \"r\") as f:\n        text_str = f.readline().strip().lower()\n    return text_str\n"
  },
  {
    "path": "lib/test/utils/params.py",
    "content": "from lib.utils import TensorList\nimport random\n\n\nclass TrackerParams:\n    \"\"\"Class for tracker parameters.\"\"\"\n    def set_default_values(self, default_vals: dict):\n        for name, val in default_vals.items():\n            if not hasattr(self, name):\n                setattr(self, name, val)\n\n    def get(self, name: str, *default):\n        \"\"\"Get a parameter value with the given name. If it does not exists, it return the default value given as a\n        second argument or returns an error if no default value is given.\"\"\"\n        if len(default) > 1:\n            raise ValueError('Can only give one default value.')\n\n        if not default:\n            return getattr(self, name)\n\n        return getattr(self, name, default[0])\n\n    def has(self, name: str):\n        \"\"\"Check if there exist a parameter with the given name.\"\"\"\n        return hasattr(self, name)\n\n\nclass FeatureParams:\n    \"\"\"Class for feature specific parameters\"\"\"\n    def __init__(self, *args, **kwargs):\n        if len(args) > 0:\n            raise ValueError\n\n        for name, val in kwargs.items():\n            if isinstance(val, list):\n                setattr(self, name, TensorList(val))\n            else:\n                setattr(self, name, val)\n\n\ndef Choice(*args):\n    \"\"\"Can be used to sample random parameter values.\"\"\"\n    return random.choice(args)\n"
  },
  {
    "path": "lib/test/utils/transform_got10k.py",
    "content": "import numpy as np\nimport os\nimport shutil\nimport argparse\nimport _init_paths\nfrom lib.test.evaluation.environment import env_settings\n\n\ndef transform_got10k(tracker_name, cfg_name):\n    env = env_settings()\n    result_dir = env.results_path\n    src_dir = os.path.join(result_dir, \"%s/%s/got10k/\" % (tracker_name, cfg_name))\n    dest_dir = os.path.join(result_dir, \"%s/%s/got10k_submit/\" % (tracker_name, cfg_name))\n    if not os.path.exists(dest_dir):\n        os.makedirs(dest_dir)\n    items = os.listdir(src_dir)\n    for item in items:\n        if \"all\" in item:\n            continue\n        src_path = os.path.join(src_dir, item)\n        if \"time\" not in item:\n            seq_name = item.replace(\".txt\", '')\n            seq_dir = os.path.join(dest_dir, seq_name)\n            if not os.path.exists(seq_dir):\n                os.makedirs(seq_dir)\n            new_item = item.replace(\".txt\", '_001.txt')\n            dest_path = os.path.join(seq_dir, new_item)\n            bbox_arr = np.loadtxt(src_path, dtype=np.int, delimiter='\\t')\n            np.savetxt(dest_path, bbox_arr, fmt='%d', delimiter=',')\n        else:\n            seq_name = item.replace(\"_time.txt\", '')\n            seq_dir = os.path.join(dest_dir, seq_name)\n            if not os.path.exists(seq_dir):\n                os.makedirs(seq_dir)\n            dest_path = os.path.join(seq_dir, item)\n            os.system(\"cp %s %s\" % (src_path, dest_path))\n    # make zip archive\n    shutil.make_archive(src_dir, \"zip\", src_dir)\n    shutil.make_archive(dest_dir, \"zip\", dest_dir)\n    # Remove the original files\n    shutil.rmtree(src_dir)\n    shutil.rmtree(dest_dir)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='transform got10k results.')\n    parser.add_argument('--tracker_name', type=str, help='Name of tracking method.')\n    parser.add_argument('--cfg_name', type=str, help='Name of config file.')\n\n    args = parser.parse_args()\n    transform_got10k(args.tracker_name, args.cfg_name)\n\n"
  },
  {
    "path": "lib/test/utils/transform_trackingnet.py",
    "content": "import numpy as np\nimport os\nimport shutil\nimport argparse\nimport _init_paths\nfrom lib.test.evaluation.environment import env_settings\n\n\ndef transform_trackingnet(tracker_name, cfg_name):\n    env = env_settings()\n    result_dir = env.results_path\n    src_dir = os.path.join(result_dir, \"%s/%s/trackingnet/\" % (tracker_name, cfg_name))\n    dest_dir = os.path.join(result_dir, \"%s/%s/trackingnet_submit/\" % (tracker_name, cfg_name))\n    if not os.path.exists(dest_dir):\n        os.makedirs(dest_dir)\n    items = os.listdir(src_dir)\n    for item in items:\n        if \"all\" in item:\n            continue\n        if \"time\" not in item:\n            src_path = os.path.join(src_dir, item)\n            dest_path = os.path.join(dest_dir, item)\n            bbox_arr = np.loadtxt(src_path, dtype=np.int, delimiter='\\t')\n            np.savetxt(dest_path, bbox_arr, fmt='%d', delimiter=',')\n    # make zip archive\n    shutil.make_archive(src_dir, \"zip\", src_dir)\n    shutil.make_archive(dest_dir, \"zip\", dest_dir)\n    # Remove the original files\n    shutil.rmtree(src_dir)\n    shutil.rmtree(dest_dir)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='transform trackingnet results.')\n    parser.add_argument('--tracker_name', type=str, help='Name of tracking method.')\n    parser.add_argument('--cfg_name', type=str, help='Name of config file.')\n\n    args = parser.parse_args()\n    transform_trackingnet(args.tracker_name, args.cfg_name)\n"
  },
  {
    "path": "lib/train/__init__.py",
    "content": "from .admin.multigpu import MultiGPU\n"
  },
  {
    "path": "lib/train/_init_paths.py",
    "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path as osp\nimport sys\n\n\ndef add_path(path):\n    if path not in sys.path:\n        sys.path.insert(0, path)\n\n\nthis_dir = osp.dirname(__file__)\n\nprj_path = osp.join(this_dir, '../..')\nadd_path(prj_path)\n"
  },
  {
    "path": "lib/train/actors/__init__.py",
    "content": "from .base_actor import BaseActor\nfrom .artrack import ARTrackActor\nfrom .artrack_seq import ARTrackSeqActor\nfrom .artrackv2 import ARTrackV2Actor\nfrom .artrackv2_seq import ARTrackV2SeqActor\n"
  },
  {
    "path": "lib/train/actors/artrack.py",
    "content": "from . import BaseActor\nfrom lib.utils.misc import NestedTensor\nfrom lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy\nimport torch\nimport math\nimport numpy as np\nfrom lib.utils.merge import merge_template_search\nfrom ...utils.heapmap_utils import generate_heatmap\nfrom ...utils.ce_utils import generate_mask_cond, adjust_keep_rate\n\ndef fp16_clamp(x, min=None, max=None):\n    if not x.is_cuda and x.dtype == torch.float16:\n        # clamp for cpu float16, tensor fp16 has no clamp implementation\n        return x.float().clamp(min, max).half()\n\n    return x.clamp(min, max)\n    \ndef generate_sa_simdr(joints):\n    '''\n    :param joints:  [num_joints, 3]\n    :param joints_vis: [num_joints, 3]\n    :return: target, target_weight(1: visible, 0: invisible)\n    '''\n    num_joints = 48\n    image_size = [256, 256]\n    simdr_split_ratio = 1.5625\n    sigma = 6\n\n    target_x1 = np.zeros((num_joints,\n                              int(image_size[0] * simdr_split_ratio)),\n                             dtype=np.float32)\n    target_y1 = np.zeros((num_joints,\n                              int(image_size[1] * simdr_split_ratio)),\n                             dtype=np.float32)\n    target_x2 = np.zeros((num_joints,\n                              int(image_size[0] * simdr_split_ratio)),\n                             dtype=np.float32)\n    target_y2 = np.zeros((num_joints,\n                              int(image_size[1] * simdr_split_ratio)),\n                             dtype=np.float32)\n    zero_4_begin = np.zeros((num_joints, 1), dtype=np.float32)\n\n    tmp_size = sigma * 3\n\n    for joint_id in range(num_joints):\n\n        mu_x1 = joints[joint_id][0]\n        mu_y1 = joints[joint_id][1]\n        mu_x2 = joints[joint_id][2]\n        mu_y2 = joints[joint_id][3]\n\n        x1 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32)\n        y1 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32)\n        x2 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32)\n        y2 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32)\n\n        target_x1[joint_id] = (np.exp(- ((x1 - mu_x1) ** 2) / (2 * sigma ** 2))) / (\n                        sigma * np.sqrt(np.pi * 2))\n        target_y1[joint_id] = (np.exp(- ((y1 - mu_y1) ** 2) / (2 * sigma ** 2))) / (\n                        sigma * np.sqrt(np.pi * 2))\n        target_x2[joint_id] = (np.exp(- ((x2 - mu_x2) ** 2) / (2 * sigma ** 2))) / (\n                        sigma * np.sqrt(np.pi * 2))\n        target_y2[joint_id] = (np.exp(- ((y2 - mu_y2) ** 2) / (2 * sigma ** 2))) / (\n                        sigma * np.sqrt(np.pi * 2))\n    return target_x1, target_y1, target_x2, target_y2\n\n# angle cost\ndef SIoU_loss(test1, test2, theta=4):\n    eps = 1e-7\n    cx_pred = (test1[:, 0] + test1[:, 2]) / 2\n    cy_pred = (test1[:, 1] + test1[:, 3]) / 2\n    cx_gt = (test2[:, 0] + test2[:, 2]) / 2\n    cy_gt = (test2[:, 1] + test2[:, 3]) / 2\n\n    dist = ((cx_pred - cx_gt)**2 + (cy_pred - cy_gt)**2) ** 0.5\n    ch = torch.max(cy_gt, cy_pred) - torch.min(cy_gt, cy_pred)\n    x = ch / (dist + eps)\n\n    angle = 1 - 2*torch.sin(torch.arcsin(x)-torch.pi/4)**2\n    # distance cost\n    xmin = torch.min(test1[:, 0], test2[:, 0])\n    xmax = torch.max(test1[:, 2], test2[:, 2])\n    ymin = torch.min(test1[:, 1], test2[:, 1])\n    ymax = torch.max(test1[:, 3], test2[:, 3])\n    cw = xmax - xmin\n    ch = ymax - ymin\n    px = ((cx_gt - cx_pred) / (cw+eps))**2\n    
py = ((cy_gt - cy_pred) / (ch+eps))**2\n    gama = 2 - angle\n    dis = (1 - torch.exp(-1 * gama * px)) + (1 - torch.exp(-1 * gama * py))\n\n    #shape cost\n    w_pred = test1[:, 2] - test1[:, 0]\n    h_pred = test1[:, 3] - test1[:, 1]\n    w_gt = test2[:, 2] - test2[:, 0]\n    h_gt = test2[:, 3] - test2[:, 1]\n    ww = torch.abs(w_pred - w_gt) / (torch.max(w_pred, w_gt) + eps)\n    wh = torch.abs(h_gt - h_pred) / (torch.max(h_gt, h_pred) + eps)\n    omega = (1 - torch.exp(-1 * wh)) ** theta + (1 - torch.exp(-1 * ww)) ** theta\n\n    #IoU loss\n    lt = torch.max(test1[..., :2], test2[..., :2])  # [B, rows, 2]\n    rb = torch.min(test1[..., 2:], test2[..., 2:])  # [B, rows, 2]\n\n    wh = fp16_clamp(rb - lt, min=0)\n    overlap = wh[..., 0] * wh[..., 1]\n    area1 = (test1[..., 2] - test1[..., 0]) * (\n            test1[..., 3] - test1[..., 1])\n    area2 = (test2[..., 2] - test2[..., 0]) * (\n            test2[..., 3] - test2[..., 1])\n    iou = overlap / (area1 + area2 - overlap)\n\n    SIoU = 1 - iou + (omega + dis) / 2\n    return SIoU, iou\n    \ndef ciou(pred, target, eps=1e-7):\n    # overlap\n    lt = torch.max(pred[:, :2], target[:, :2])\n    rb = torch.min(pred[:, 2:], target[:, 2:])\n    wh = (rb - lt).clamp(min=0)\n    overlap = wh[:, 0] * wh[:, 1]\n\n    # union\n    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])\n    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])\n    union = ap + ag - overlap + eps\n\n    # IoU\n    ious = overlap / union\n\n    # enclose area\n    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])\n    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])\n    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)\n\n    cw = enclose_wh[:, 0]\n    ch = enclose_wh[:, 1]\n\n    c2 = cw**2 + ch**2 + eps\n\n    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]\n    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]\n    b2_x1, b2_y1 = target[:, 0], target[:, 1]\n    b2_x2, b2_y2 = target[:, 2], target[:, 3]\n\n    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\n    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\n\n    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4\n    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4\n    rho2 = left + right\n\n    factor = 4 / math.pi**2\n    v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\n\n    # CIoU\n    cious = ious - (rho2 / c2 + v**2 / (1 - ious + v))\n    return cious, ious\n\nclass ARTrackActor(BaseActor):\n    \"\"\" Actor for training ARTrack models \"\"\"\n\n    def __init__(self, net, objective, loss_weight, settings, bins, search_size, cfg=None):\n        super().__init__(net, objective)\n        self.loss_weight = loss_weight\n        self.settings = settings\n        self.bs = self.settings.batchsize  # batch size\n        self.cfg = cfg\n        self.bins = bins\n        self.range = self.cfg.MODEL.RANGE\n        self.search_size = search_size\n        self.logsoftmax = torch.nn.LogSoftmax(dim=1)\n        self.focal = None\n        self.loss_weight['KL'] = 100\n        self.loss_weight['focal'] = 2\n\n    def __call__(self, data):\n        \"\"\"\n        args:\n            data - The input data, should contain the fields 'template', 'search', 'gt_bbox'.\n            template_images: (N_t, batch, 3, H, W)\n            search_images: (N_s, batch, 3, H, W)\n        returns:\n            loss    - the training loss\n            status  -  dict containing detailed losses\n        \"\"\"\n        # forward pass\n        out_dict = self.forward_pass(data)\n\n        # compute losses\n   
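# compute_losses below combines token cross-entropy with SIoU and L1 losses on the\n   # box decoded (via soft-argmax) from the coordinate-token distribution\n   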
     loss, status = self.compute_losses(out_dict, data)\n\n        return loss, status\n\n    def forward_pass(self, data):\n        # currently only support 1 template and 1 search region\n        assert len(data['template_images']) == 1\n        assert len(data['search_images']) == 1\n\n        template_list = []\n        for i in range(self.settings.num_template):\n            template_img_i = data['template_images'][i].view(-1,\n                                                             *data['template_images'].shape[2:])  # (batch, 3, 128, 128)\n            template_list.append(template_img_i)\n\n        search_img = data['search_images'][0].view(-1, *data['search_images'].shape[2:])  # (batch, 3, 320, 320)\n\n        if len(template_list) == 1:\n            template_list = template_list[0]\n        gt_bbox = data['search_anno'][-1]\n        begin = self.bins * self.range\n        end = self.bins * self.range + 1\n\n        magic_num = (self.range - 1) * 0.5\n        gt_bbox[:, 2] = gt_bbox[:, 0] + gt_bbox[:, 2]\n        gt_bbox[:, 3] = gt_bbox[:, 1] + gt_bbox[:, 3]\n        gt_bbox = gt_bbox.clamp(min=(-1*magic_num), max=(1+magic_num))\n        data['real_bbox'] = gt_bbox\n\n        seq_ori = (gt_bbox + magic_num) * (self.bins - 1)\n\n        seq_ori = seq_ori.int().to(search_img)\n        B = seq_ori.shape[0]\n        seq_input = torch.cat([torch.ones((B, 1)).to(search_img) * begin, seq_ori], dim=1)\n        seq_output = torch.cat([seq_ori, torch.ones((B, 1)).to(search_img) * end], dim=1)\n        data['seq_input'] = seq_input\n        data['seq_output'] = seq_output\n        out_dict = self.net(template=template_list,\n                            search=search_img,\n                            seq_input=seq_input)\n\n        return out_dict\n\n    def compute_losses(self, pred_dict, gt_dict, return_status=True):\n        bins = self.bins\n        magic_num = (self.range - 1) * 0.5\n        seq_output = gt_dict['seq_output']\n        pred_feat = pred_dict[\"feat\"]\n        if self.focal is None:\n            weight = torch.ones(bins*self.range+2) * 1\n            weight[bins*self.range+1] = 0.1\n            weight[bins*self.range] = 0.1\n            weight = weight.to(pred_feat)  # keep the class weights on the prediction's device\n            self.klloss = torch.nn.KLDivLoss(reduction='none').to(pred_feat)\n\n            self.focal = torch.nn.CrossEntropyLoss(weight=weight, reduction='mean').to(pred_feat)  # size_average is deprecated\n        # compute varifocal loss\n        pred = pred_feat.permute(1, 0, 2).reshape(-1, bins*self.range+2)  # class dim must match the weight length\n        target = seq_output.reshape(-1).to(torch.int64)\n        varifocal_loss = self.focal(pred, target)\n        # compute giou and L1 loss\n        beta = 1\n        pred = pred_feat[0:4, :, 0:bins*self.range] * beta\n        target = seq_output[:, 0:4].to(pred_feat)\n\n        out = pred.softmax(-1).to(pred)\n        mul = torch.range((-1*magic_num+1/(self.bins*self.range)), (1+magic_num-1/(self.bins*self.range)), 2/(self.bins*self.range)).to(pred)\n        ans = out * mul\n        ans = ans.sum(dim=-1)\n        ans = ans.permute(1, 0).to(pred)\n        target = target / (bins - 1) - magic_num\n        extra_seq = ans\n        extra_seq = extra_seq.to(pred)\n        sious, iou = SIoU_loss(extra_seq, target, 4)\n        sious = sious.mean()\n        siou_loss = sious\n        l1_loss = self.objective['l1'](extra_seq, target)\n\n        loss = self.loss_weight['giou'] * siou_loss + self.loss_weight['l1'] * l1_loss + self.loss_weight['focal'] * varifocal_loss\n\n        if return_status:\n            # status for log\n           
 mean_iou = iou.detach().mean()\n            status = {\"Loss/total\": loss.item(),\n                      \"Loss/giou\": siou_loss.item(),\n                      \"Loss/l1\": l1_loss.item(),\n                      \"Loss/location\": varifocal_loss.item(),\n                      \"IoU\": mean_iou.item()}\n            return loss, status\n        else:\n            return loss\n"
  },
  {
    "path": "lib/train/actors/artrack_seq.py",
    "content": "from . import BaseActor\r\nfrom lib.utils.misc import NestedTensor\r\nfrom lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy\r\nimport torch\r\nimport math\r\nimport numpy as np\r\nimport numpy\r\nimport cv2\r\nimport torch.nn.functional as F\r\nimport torchvision.transforms.functional as tvisf\r\nimport lib.train.data.bounding_box_utils as bbutils\r\nfrom lib.utils.merge import merge_template_search\r\nfrom torch.distributions.categorical import Categorical\r\nfrom ...utils.heapmap_utils import generate_heatmap\r\nfrom ...utils.ce_utils import generate_mask_cond, adjust_keep_rate\r\n\r\n\r\ndef IoU(rect1, rect2):\r\n    \"\"\" caculate interection over union\r\n    Args:\r\n        rect1: (x1, y1, x2, y2)\r\n        rect2: (x1, y1, x2, y2)\r\n    Returns:\r\n        iou\r\n    \"\"\"\r\n    # overlap\r\n    x1, y1, x2, y2 = rect1[0], rect1[1], rect1[2], rect1[3]\r\n    tx1, ty1, tx2, ty2 = rect2[0], rect2[1], rect2[2], rect2[3]\r\n\r\n    xx1 = np.maximum(tx1, x1)\r\n    yy1 = np.maximum(ty1, y1)\r\n    xx2 = np.minimum(tx2, x2)\r\n    yy2 = np.minimum(ty2, y2)\r\n\r\n    ww = np.maximum(0, xx2 - xx1)\r\n    hh = np.maximum(0, yy2 - yy1)\r\n\r\n    area = (x2 - x1) * (y2 - y1)\r\n    target_a = (tx2 - tx1) * (ty2 - ty1)\r\n    inter = ww * hh\r\n    iou = inter / (area + target_a - inter)\r\n    return iou\r\n\r\n\r\ndef fp16_clamp(x, min=None, max=None):\r\n    if not x.is_cuda and x.dtype == torch.float16:\r\n        # clamp for cpu float16, tensor fp16 has no clamp implementation\r\n        return x.float().clamp(min, max).half()\r\n\r\n    return x.clamp(min, max)\r\n\r\n\r\ndef generate_sa_simdr(joints):\r\n    '''\r\n    :param joints:  [num_joints, 3]\r\n    :param joints_vis: [num_joints, 3]\r\n    :return: target, target_weight(1: visible, 0: invisible)\r\n    '''\r\n    num_joints = 48\r\n    image_size = [256, 256]\r\n    simdr_split_ratio = 1.5625\r\n    sigma = 6\r\n\r\n    target_x1 = np.zeros((num_joints,\r\n                          int(image_size[0] * simdr_split_ratio)),\r\n                         dtype=np.float32)\r\n    target_y1 = np.zeros((num_joints,\r\n                          int(image_size[1] * simdr_split_ratio)),\r\n                         dtype=np.float32)\r\n    target_x2 = np.zeros((num_joints,\r\n                          int(image_size[0] * simdr_split_ratio)),\r\n                         dtype=np.float32)\r\n    target_y2 = np.zeros((num_joints,\r\n                          int(image_size[1] * simdr_split_ratio)),\r\n                         dtype=np.float32)\r\n    zero_4_begin = np.zeros((num_joints, 1), dtype=np.float32)\r\n\r\n    tmp_size = sigma * 3\r\n\r\n    for joint_id in range(num_joints):\r\n        mu_x1 = joints[joint_id][0]\r\n        mu_y1 = joints[joint_id][1]\r\n        mu_x2 = joints[joint_id][2]\r\n        mu_y2 = joints[joint_id][3]\r\n\r\n        x1 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32)\r\n        y1 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32)\r\n        x2 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32)\r\n        y2 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32)\r\n\r\n        target_x1[joint_id] = (np.exp(- ((x1 - mu_x1) ** 2) / (2 * sigma ** 2))) / (\r\n                sigma * np.sqrt(np.pi * 2))\r\n        target_y1[joint_id] = (np.exp(- ((y1 - mu_y1) ** 2) / (2 * sigma ** 2))) / (\r\n                sigma * np.sqrt(np.pi * 2))\r\n        target_x2[joint_id] = (np.exp(- ((x2 - mu_x2) ** 2) / (2 * 
sigma ** 2))) / (\r\n                sigma * np.sqrt(np.pi * 2))\r\n        target_y2[joint_id] = (np.exp(- ((y2 - mu_y2) ** 2) / (2 * sigma ** 2))) / (\r\n                sigma * np.sqrt(np.pi * 2))\r\n    return target_x1, target_y1, target_x2, target_y2\r\n\r\n\r\n# angle cost\r\ndef SIoU_loss(test1, test2, theta=4):\r\n    eps = 1e-7\r\n    cx_pred = (test1[:, 0] + test1[:, 2]) / 2\r\n    cy_pred = (test1[:, 1] + test1[:, 3]) / 2\r\n    cx_gt = (test2[:, 0] + test2[:, 2]) / 2\r\n    cy_gt = (test2[:, 1] + test2[:, 3]) / 2\r\n\r\n    dist = ((cx_pred - cx_gt) ** 2 + (cy_pred - cy_gt) ** 2) ** 0.5\r\n    ch = torch.max(cy_gt, cy_pred) - torch.min(cy_gt, cy_pred)\r\n    x = ch / (dist + eps)\r\n\r\n    angle = 1 - 2 * torch.sin(torch.arcsin(x) - torch.pi / 4) ** 2\r\n    # distance cost\r\n    xmin = torch.min(test1[:, 0], test2[:, 0])\r\n    xmax = torch.max(test1[:, 2], test2[:, 2])\r\n    ymin = torch.min(test1[:, 1], test2[:, 1])\r\n    ymax = torch.max(test1[:, 3], test2[:, 3])\r\n    cw = xmax - xmin\r\n    ch = ymax - ymin\r\n    px = ((cx_gt - cx_pred) / (cw + eps)) ** 2\r\n    py = ((cy_gt - cy_pred) / (ch + eps)) ** 2\r\n    gama = 2 - angle\r\n    dis = (1 - torch.exp(-1 * gama * px)) + (1 - torch.exp(-1 * gama * py))\r\n\r\n    # shape cost\r\n    w_pred = test1[:, 2] - test1[:, 0]\r\n    h_pred = test1[:, 3] - test1[:, 1]\r\n    w_gt = test2[:, 2] - test2[:, 0]\r\n    h_gt = test2[:, 3] - test2[:, 1]\r\n    ww = torch.abs(w_pred - w_gt) / (torch.max(w_pred, w_gt) + eps)\r\n    wh = torch.abs(h_gt - h_pred) / (torch.max(h_gt, h_pred) + eps)\r\n    omega = (1 - torch.exp(-1 * wh)) ** theta + (1 - torch.exp(-1 * ww)) ** theta\r\n\r\n    # IoU loss\r\n    lt = torch.max(test1[..., :2], test2[..., :2])  # [B, rows, 2]\r\n    rb = torch.min(test1[..., 2:], test2[..., 2:])  # [B, rows, 2]\r\n\r\n    wh = fp16_clamp(rb - lt, min=0)\r\n    overlap = wh[..., 0] * wh[..., 1]\r\n    area1 = (test1[..., 2] - test1[..., 0]) * (\r\n            test1[..., 3] - test1[..., 1])\r\n    area2 = (test2[..., 2] - test2[..., 0]) * (\r\n            test2[..., 3] - test2[..., 1])\r\n    iou = overlap / (area1 + area2 - overlap)\r\n\r\n    SIoU = 1 - iou + (omega + dis) / 2\r\n    return SIoU, iou\r\n\r\n\r\ndef ciou(pred, target, eps=1e-7):\r\n    # overlap\r\n    lt = torch.max(pred[:, :2], target[:, :2])\r\n    rb = torch.min(pred[:, 2:], target[:, 2:])\r\n    wh = (rb - lt).clamp(min=0)\r\n    overlap = wh[:, 0] * wh[:, 1]\r\n\r\n    # union\r\n    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])\r\n    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])\r\n    union = ap + ag - overlap + eps\r\n\r\n    # IoU\r\n    ious = overlap / union\r\n\r\n    # enclose area\r\n    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])\r\n    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])\r\n    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)\r\n\r\n    cw = enclose_wh[:, 0]\r\n    ch = enclose_wh[:, 1]\r\n\r\n    c2 = cw ** 2 + ch ** 2 + eps\r\n\r\n    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]\r\n    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]\r\n    b2_x1, b2_y1 = target[:, 0], target[:, 1]\r\n    b2_x2, b2_y2 = target[:, 2], target[:, 3]\r\n\r\n    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\r\n    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\r\n\r\n    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4\r\n    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4\r\n    rho2 = left + right\r\n\r\n    factor = 4 / math.pi ** 2\r\n    v = factor * torch.pow(torch.atan(w2 / h2) - 
torch.atan(w1 / h1), 2)\r\n\r\n    # CIoU\r\n    cious = ious - (rho2 / c2 + v ** 2 / (1 - ious + v))\r\n    return cious, ious\r\n\r\n\r\nclass ARTrackSeqActor(BaseActor):\r\n    \"\"\" Actor for training OSTrack models \"\"\"\r\n\r\n    def __init__(self, net, objective, loss_weight, settings, bins, search_size, cfg=None):\r\n        super().__init__(net, objective)\r\n        self.loss_weight = loss_weight\r\n        self.settings = settings\r\n        self.bs = self.settings.batchsize  # batch size\r\n        self.cfg = cfg\r\n        self.bins = bins\r\n        self.search_size = search_size\r\n        self.logsoftmax = torch.nn.LogSoftmax(dim=1)\r\n        self.focal = None\r\n        self.range = cfg.MODEL.RANGE\r\n        self.pre_num = cfg.MODEL.PRENUM\r\n        self.loss_weight['KL'] = 0\r\n        self.loss_weight['focal'] = 0\r\n        self.pre_bbox = None\r\n        self.x_feat_rem = None\r\n        self.update_rem = None\r\n\r\n    def __call__(self, data):\r\n        \"\"\"\r\n        args:\r\n            data - The input data, should contain the fields 'template', 'search', 'gt_bbox'.\r\n            template_images: (N_t, batch, 3, H, W)\r\n            search_images: (N_s, batch, 3, H, W)\r\n        returns:\r\n            loss    - the training loss\r\n            status  -  dict containing detailed losses\r\n        \"\"\"\r\n        # forward pass\r\n        out_dict = self.forward_pass(data)\r\n\r\n        # compute losses\r\n        loss, status = self.compute_losses(out_dict, data)\r\n\r\n        return loss, status\r\n\r\n    def _bbox_clip(self, cx, cy, width, height, boundary):\r\n        cx = max(0, min(cx, boundary[1]))\r\n        cy = max(0, min(cy, boundary[0]))\r\n        width = max(10, min(width, boundary[1]))\r\n        height = max(10, min(height, boundary[0]))\r\n        return cx, cy, width, height\r\n\r\n    def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans):\r\n        \"\"\"\r\n        args:\r\n            im: bgr based image\r\n            pos: center position\r\n            model_sz: exemplar size\r\n            s_z: original size\r\n            avg_chans: channel average\r\n        \"\"\"\r\n        if isinstance(pos, float):\r\n            pos = [pos, pos]\r\n        sz = original_sz\r\n        im_sz = im.shape\r\n        c = (original_sz + 1) / 2\r\n        # context_xmin = round(pos[0] - c) # py2 and py3 round\r\n        context_xmin = np.floor(pos[0] - c + 0.5)\r\n        context_xmax = context_xmin + sz - 1\r\n        # context_ymin = round(pos[1] - c)\r\n        context_ymin = np.floor(pos[1] - c + 0.5)\r\n        context_ymax = context_ymin + sz - 1\r\n        left_pad = int(max(0., -context_xmin))\r\n        top_pad = int(max(0., -context_ymin))\r\n        right_pad = int(max(0., context_xmax - im_sz[1] + 1))\r\n        bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))\r\n\r\n        context_xmin = context_xmin + left_pad\r\n        context_xmax = context_xmax + left_pad\r\n        context_ymin = context_ymin + top_pad\r\n        context_ymax = context_ymax + top_pad\r\n\r\n        r, c, k = im.shape\r\n        if any([top_pad, bottom_pad, left_pad, right_pad]):\r\n            size = (r + top_pad + bottom_pad, c + left_pad + right_pad, k)\r\n            te_im = np.zeros(size, np.uint8)\r\n            te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im\r\n            if top_pad:\r\n                te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans\r\n            if bottom_pad:\r\n                te_im[r + 
top_pad:, left_pad:left_pad + c, :] = avg_chans\r\n            if left_pad:\r\n                te_im[:, 0:left_pad, :] = avg_chans\r\n            if right_pad:\r\n                te_im[:, c + left_pad:, :] = avg_chans\r\n            im_patch = te_im[int(context_ymin):int(context_ymax + 1),\r\n                       int(context_xmin):int(context_xmax + 1), :]\r\n        else:\r\n            im_patch = im[int(context_ymin):int(context_ymax + 1),\r\n                       int(context_xmin):int(context_xmax + 1), :]\r\n\r\n        if not np.array_equal(model_sz, original_sz):\r\n            try:\r\n                im_patch = cv2.resize(im_patch, (model_sz, model_sz))\r\n            except:\r\n                return None\r\n        im_patch = im_patch.transpose(2, 0, 1)\r\n        im_patch = im_patch[np.newaxis, :, :, :]\r\n        im_patch = im_patch.astype(np.float32)\r\n        im_patch = torch.from_numpy(im_patch)\r\n        im_patch = im_patch.cuda()\r\n        return im_patch\r\n\r\n    def batch_init(self, images, template_bbox, initial_bbox) -> dict:\r\n        self.frame_num = 1\r\n        self.device = 'cuda'\r\n        # Convert bbox (x1, y1, w, h) -> (cx, cy, w, h)\r\n\r\n        template_bbox = bbutils.batch_xywh2center2(template_bbox)  # ndarray:(2*num_seq,4)\r\n        initial_bbox = bbutils.batch_xywh2center2(initial_bbox)  # ndarray:(2*num_seq,4)\r\n        self.center_pos = initial_bbox[:, :2]  # ndarray:(2*num_seq,2)\r\n        self.size = initial_bbox[:, 2:]  # ndarray:(2*num_seq,2)\r\n        self.pre_bbox = initial_bbox\r\n        for i in range(self.pre_num - 1):\r\n            self.pre_bbox = numpy.concatenate((self.pre_bbox, initial_bbox), axis=1)\r\n        # print(self.pre_bbox.shape)\r\n\r\n        template_factor = self.cfg.DATA.TEMPLATE.FACTOR\r\n        w_z = template_bbox[:, 2] * template_factor  # ndarray:(2*num_seq)\r\n        h_z = template_bbox[:, 3] * template_factor  # ndarray:(2*num_seq)\r\n        s_z = np.ceil(np.sqrt(w_z * h_z))  # ndarray:(2*num_seq)\r\n\r\n        self.channel_average = []\r\n        for img in images:\r\n            self.channel_average.append(np.mean(img, axis=(0, 1)))\r\n        self.channel_average = np.array(self.channel_average)  # ndarray:(2*num_seq,3)\r\n\r\n        # get crop\r\n        z_crop_list = []\r\n        for i in range(len(images)):\r\n            here_crop = self.get_subwindow(images[i], template_bbox[i, :2],\r\n                                           self.cfg.DATA.TEMPLATE.SIZE, s_z[i], self.channel_average[i])\r\n            z_crop = here_crop.float().mul(1.0 / 255.0).clamp(0.0, 1.0)\r\n            self.mean = [0.485, 0.456, 0.406]\r\n            self.std = [0.229, 0.224, 0.225]\r\n            self.inplace = False\r\n            z_crop[0] = tvisf.normalize(z_crop[0], self.mean, self.std, self.inplace)\r\n            z_crop_list.append(z_crop.clone())\r\n        z_crop = torch.cat(z_crop_list, dim=0)  # Tensor(2*num_seq,3,128,128)\r\n\r\n        self.update_rem = None\r\n\r\n        out = {'template_images': z_crop}\r\n        return out\r\n\r\n    def batch_track(self, img, gt_boxes, template, action_mode='max') -> dict:\r\n        search_factor = self.cfg.DATA.SEARCH.FACTOR\r\n        w_x = self.size[:, 0] * search_factor\r\n        h_x = self.size[:, 1] * search_factor\r\n        s_x = np.ceil(np.sqrt(w_x * h_x))\r\n\r\n        gt_boxes_corner = bbutils.batch_xywh2corner(gt_boxes)  # ndarray:(2*num_seq,4)\r\n\r\n        x_crop_list = []\r\n        gt_in_crop_list = []\r\n        pre_seq_list = []\r\n        
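# below: tokenized previous-box inputs (pre_seq_in_list) and cached backbone features (x_feat_list)\r\n        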
pre_seq_in_list = []\r\n        x_feat_list = []\r\n\r\n        magic_num = (self.range - 1) * 0.5\r\n        for i in range(len(img)):\r\n            channel_avg = np.mean(img[i], axis=(0, 1))\r\n            x_crop = self.get_subwindow(img[i], self.center_pos[i], self.cfg.DATA.SEARCH.SIZE,\r\n                                        round(s_x[i]), channel_avg)\r\n            if x_crop == None:\r\n                return None\r\n            for q in range(self.pre_num):\r\n                pre_seq_temp = bbutils.batch_center2corner(self.pre_bbox[:, 0 + 4 * q:4 + 4 * q])\r\n                if q == 0:\r\n                    pre_seq = pre_seq_temp\r\n                else:\r\n                    pre_seq = numpy.concatenate((pre_seq, pre_seq_temp), axis=1)\r\n\r\n            if gt_boxes_corner is not None and np.sum(np.abs(gt_boxes_corner[i] - np.zeros(4))) > 10:\r\n                pre_in = np.zeros(4 * self.pre_num)\r\n                for w in range(self.pre_num):\r\n\r\n                    pre_in[0 + w * 4:2 + w * 4] = pre_seq[i, 0 + w * 4:2 + w * 4] - self.center_pos[i]\r\n                    pre_in[2 + w * 4:4 + w * 4] = pre_seq[i, 2 + w * 4:4 + w * 4] - self.center_pos[i]\r\n                    pre_in[0 + w * 4:4 + w * 4] = pre_in[0 + w * 4:4 + w * 4] * (\r\n                                self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2\r\n                    pre_in[0 + w * 4:4 + w * 4] = pre_in[0 + w * 4:4 + w * 4] / self.cfg.DATA.SEARCH.SIZE\r\n\r\n                pre_seq_list.append(pre_in)\r\n                gt_in_crop = np.zeros(4)\r\n                gt_in_crop[:2] = gt_boxes_corner[i, :2] - self.center_pos[i]\r\n                gt_in_crop[2:] = gt_boxes_corner[i, 2:] - self.center_pos[i]\r\n                gt_in_crop = gt_in_crop * (self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2\r\n                gt_in_crop[2:] = gt_in_crop[2:] - gt_in_crop[:2]  # (x1,y1,x2,y2) to (x1,y1,w,h)\r\n                gt_in_crop_list.append(gt_in_crop)\r\n            else:\r\n                pre_in = np.zeros(4 * self.pre_num)\r\n                pre_seq_list.append(pre_in)\r\n                gt_in_crop_list.append(np.zeros(4))\r\n            pre_seq_input = torch.from_numpy(pre_in).clamp(-1 * magic_num, 1 + magic_num)\r\n            pre_seq_input = (pre_seq_input + 0.5) * (self.bins - 1)\r\n            pre_seq_in_list.append(pre_seq_input.clone())\r\n            x_crop = x_crop.float().mul(1.0 / 255.0).clamp(0.0, 1.0)\r\n            x_crop[0] = tvisf.normalize(x_crop[0], self.mean, self.std, self.inplace)\r\n            x_crop_list.append(x_crop.clone())\r\n\r\n        x_crop = torch.cat(x_crop_list, dim=0)\r\n        pre_seq_output = torch.cat(pre_seq_in_list, dim=0).reshape(-1, 4 * self.pre_num)\r\n\r\n        outputs = self.net(template, x_crop, seq_input=pre_seq_output, head_type=None, stage=\"batch_track\",\r\n                           search_feature=self.x_feat_rem, update=None)\r\n        selected_indices = outputs['seqs'].detach()\r\n        x_feat = outputs['x_feat'].detach().cpu()\r\n        self.x_feat_rem = x_feat.clone()\r\n        x_feat_list.append(x_feat.clone())\r\n\r\n        pred_bbox = selected_indices[:, 0:4].data.cpu().numpy()\r\n        bbox = (pred_bbox / (self.bins - 1) - magic_num) * s_x.reshape(-1, 1)\r\n        cx = bbox[:, 0] + self.center_pos[:, 0] - s_x / 2\r\n        cy = bbox[:, 1] + self.center_pos[:, 1] - s_x / 2\r\n        width = bbox[:, 2] - bbox[:, 0]\r\n        height = bbox[:, 3] - bbox[:, 1]\r\n        cx = cx + width / 2\r\n  
      cy = cy + height / 2\r\n\r\n        for i in range(len(img)):\r\n            cx[i], cy[i], width[i], height[i] = self._bbox_clip(cx[i], cy[i], width[i],\r\n                                                                height[i], img[i].shape[:2])\r\n        self.center_pos = np.stack([cx, cy], 1)\r\n        self.size = np.stack([width, height], 1)\r\n        for e in range(self.pre_num):\r\n            if e != self.pre_num - 1:\r\n                self.pre_bbox[:, 0 + e * 4:4 + e * 4] = self.pre_bbox[:, 4 + e * 4:8 + e * 4]\r\n            else:\r\n                self.pre_bbox[:, 0 + e * 4:4 + e * 4] = numpy.stack([cx, cy, width, height], 1)\r\n\r\n        bbox = np.stack([cx - width / 2, cy - height / 2, width, height], 1)\r\n\r\n        out = {\r\n            'search_images': x_crop,\r\n            'pred_bboxes': bbox,\r\n            'selected_indices': selected_indices.cpu(),\r\n            'gt_in_crop': torch.tensor(np.stack(gt_in_crop_list, axis=0), dtype=torch.float),\r\n            'pre_seq': torch.tensor(np.stack(pre_seq_list, axis=0), dtype=torch.float),\r\n            'x_feat': torch.tensor([item.cpu().detach().numpy() for item in x_feat_list], dtype=torch.float),\r\n        }\r\n\r\n        return out\r\n\r\n    def explore(self, data):\r\n        results = {}\r\n        search_images_list = []\r\n        search_anno_list = []\r\n        iou_list = []\r\n        pre_seq_list = []\r\n        x_feat_list = []\r\n\r\n        num_frames = data['num_frames']\r\n        images = data['search_images']\r\n        gt_bbox = data['search_annos']\r\n        template = data['template_images']\r\n        template_bbox = data['template_annos']\r\n\r\n        template = template\r\n        template_bbox = template_bbox\r\n        template_bbox = np.array(template_bbox)\r\n        num_seq = len(num_frames)\r\n\r\n        for idx in range(np.max(num_frames)):\r\n            here_images = [img[idx] for img in images]  # S, N\r\n            here_gt_bbox = np.array([gt[idx] for gt in gt_bbox])\r\n\r\n            here_images = here_images\r\n            here_gt_bbox = np.concatenate([here_gt_bbox], 0)\r\n\r\n            if idx == 0:\r\n                outputs_template = self.batch_init(template, template_bbox, here_gt_bbox)\r\n                results['template_images'] = outputs_template['template_images']\r\n\r\n            else:\r\n                outputs = self.batch_track(here_images, here_gt_bbox, outputs_template['template_images'],\r\n                                           action_mode='half')\r\n                if outputs == None:\r\n                    return None\r\n\r\n                x_feat = outputs['x_feat']\r\n                pred_bbox = outputs['pred_bboxes']\r\n                search_images_list.append(outputs['search_images'])\r\n                search_anno_list.append(outputs['gt_in_crop'])\r\n                if len(outputs['pre_seq']) != 8:\r\n                    print(outputs['pre_seq'])\r\n                    print(len(outputs['pre_seq']))\r\n                    print(idx)\r\n                    print(data['num_frames'])\r\n                    print(data['search_annos'])\r\n                    return None\r\n                pre_seq_list.append(outputs['pre_seq'])\r\n                pred_bbox_corner = bbutils.batch_xywh2corner(pred_bbox)\r\n                gt_bbox_corner = bbutils.batch_xywh2corner(here_gt_bbox)\r\n                here_iou = []\r\n                for i in range(num_seq):\r\n                    bbox_iou = IoU(pred_bbox_corner[i], gt_bbox_corner[i])\r\n  
                  here_iou.append(bbox_iou)\r\n                iou_list.append(here_iou)\r\n                x_feat_list.append(x_feat.clone())\r\n\r\n        results['x_feat'] = torch.cat([torch.stack(x_feat_list)], dim=2)\r\n\r\n        results['search_images'] = torch.cat([torch.stack(search_images_list)],\r\n                                             dim=1)\r\n        results['search_anno'] = torch.cat([torch.stack(search_anno_list)],\r\n                                           dim=1)\r\n        results['pre_seq'] = torch.cat([torch.stack(pre_seq_list)], dim=1)\r\n\r\n        iou_tensor = torch.tensor(iou_list, dtype=torch.float)\r\n        results['baseline_iou'] = torch.cat([iou_tensor[:, :num_seq]], dim=1)\r\n\r\n\r\n        return results\r\n\r\n    def forward_pass(self, data):\r\n        # currently only support 1 template and 1 search region\r\n        assert len(data['template_images']) == 1\r\n        assert len(data['search_images']) == 1\r\n\r\n        template_list = []\r\n        for i in range(self.settings.num_template):\r\n            template_img_i = data['template_images'][i].view(-1,\r\n                                                             *data['template_images'].shape[2:])  # (batch, 3, 128, 128)\r\n            template_list.append(template_img_i)\r\n\r\n        search_img = data['search_images'][0].view(-1, *data['search_images'].shape[2:])  # (batch, 3, 320, 320)\r\n\r\n        box_mask_z = None\r\n        ce_keep_rate = None\r\n        if self.cfg.MODEL.BACKBONE.CE_LOC:\r\n            box_mask_z = generate_mask_cond(self.cfg, template_list[0].shape[0], template_list[0].device,\r\n                                            data['template_anno'][0])\r\n\r\n            ce_start_epoch = self.cfg.TRAIN.CE_START_EPOCH\r\n            ce_warm_epoch = self.cfg.TRAIN.CE_WARM_EPOCH\r\n            ce_keep_rate = adjust_keep_rate(data['epoch'], warmup_epochs=ce_start_epoch,\r\n                                            total_epochs=ce_start_epoch + ce_warm_epoch,\r\n                                            ITERS_PER_EPOCH=1,\r\n                                            base_keep_rate=self.cfg.MODEL.BACKBONE.CE_KEEP_RATIO[0])\r\n\r\n        if len(template_list) == 1:\r\n            template_list = template_list[0]\r\n        gt_bbox = data['search_anno'][-1]\r\n        begin = self.bins\r\n        end = self.bins + 1\r\n        gt_bbox[:, 2] = gt_bbox[:, 0] + gt_bbox[:, 2]\r\n        gt_bbox[:, 3] = gt_bbox[:, 1] + gt_bbox[:, 3]\r\n        gt_bbox = gt_bbox.clamp(min=0.5, max=1.5)\r\n        data['real_bbox'] = gt_bbox\r\n        seq_ori = gt_bbox * (self.bins - 1)\r\n        seq_ori = seq_ori.int().to(search_img)\r\n        B = seq_ori.shape[0]\r\n        seq_input = torch.cat([torch.ones((B, 1)).to(search_img) * begin, seq_ori], dim=1)\r\n        seq_output = torch.cat([seq_ori, torch.ones((B, 1)).to(search_img) * end], dim=1)\r\n        data['seq_input'] = seq_input\r\n        data['seq_output'] = seq_output\r\n        out_dict = self.net(template=template_list,\r\n                            search=search_img,\r\n                            ce_template_mask=box_mask_z,\r\n                            ce_keep_rate=ce_keep_rate,\r\n                            return_last_attn=False,\r\n                            seq_input=seq_input)\r\n\r\n        return out_dict\r\n\r\n    def compute_sequence_losses(self, data):\r\n        num_frames = data['search_images'].shape[0]\r\n        template_images = data['template_images'].repeat(num_frames, 1, 1, 1, 1)\r\n   
     template_images = template_images.view(-1, *template_images.size()[2:])\r\n        search_images = data['search_images'].reshape(-1, *data['search_images'].size()[2:])\r\n        search_anno = data['search_anno'].reshape(-1, *data['search_anno'].size()[2:])\r\n\r\n        magic_num = (self.range - 1) * 0.5\r\n        self.loss_weight['focal'] = 0\r\n        pre_seq = data['pre_seq'].reshape(-1, 4 * self.pre_num)\r\n        x_feat = data['x_feat'].reshape(-1, *data['x_feat'].size()[2:])\r\n        pre_seq = pre_seq.clamp(-1 * magic_num, 1 + magic_num)\r\n        pre_seq = (pre_seq + magic_num) * (self.bins - 1)\r\n\r\n        outputs = self.net(template_images, search_images, seq_input=pre_seq, stage=\"forward_pass\",\r\n                           search_feature=x_feat, update=None)\r\n\r\n        pred_feat = outputs[\"feat\"]\r\n        # generate labels\r\n        if self.focal is None:\r\n            weight = torch.ones(self.bins * self.range + 2) * 1\r\n            weight[self.bins * self.range + 1] = 0.1\r\n            weight[self.bins * self.range] = 0.1\r\n            weight = weight.to(pred_feat)  # Tensor.to is not in-place; keep the moved copy\r\n            self.focal = torch.nn.CrossEntropyLoss(weight=weight, reduction='mean').to(pred_feat)\r\n\r\n        search_anno[:, 2] = search_anno[:, 2] + search_anno[:, 0]\r\n        search_anno[:, 3] = search_anno[:, 3] + search_anno[:, 1]\r\n        target = (search_anno / self.cfg.DATA.SEARCH.SIZE + 0.5) * (self.bins - 1)\r\n\r\n        target = target.clamp(min=0.0, max=(self.bins * self.range - 0.0001))\r\n        target_iou = target\r\n        target = target.reshape(-1).to(torch.int64)\r\n        pred = pred_feat.permute(1, 0, 2).reshape(-1, self.bins * self.range + 2)\r\n        varifocal_loss = self.focal(pred, target)\r\n        pred = pred_feat[0:4, :, 0:self.bins * self.range]\r\n        target = target_iou[:, 0:4].to(pred_feat) / (self.bins - 1) - magic_num\r\n        # soft-argmax: expected bin center under the predicted distribution\r\n        out = pred.softmax(-1).to(pred)\r\n        mul = torch.arange(-1 * magic_num + 1 / (self.bins * self.range), 1 + magic_num, 2 / (self.bins * self.range)).to(pred)  # bin centers; replaces deprecated torch.range, arange's exclusive end yields the same values\r\n        ans = out * mul\r\n        ans = ans.sum(dim=-1)\r\n        ans = ans.permute(1, 0).to(pred)\r\n        extra_seq = ans.to(pred)\r\n\r\n        cious, iou = SIoU_loss(extra_seq, target, 4)\r\n        cious = cious.mean()\r\n\r\n        giou_loss = cious\r\n        loss_bb = self.loss_weight['giou'] * giou_loss + self.loss_weight[\r\n            'focal'] * varifocal_loss\r\n\r\n        total_losses = loss_bb\r\n\r\n        mean_iou = iou.detach().mean()\r\n        status = {\"Loss/total\": total_losses.item(),\r\n                  \"Loss/giou\": giou_loss.item(),\r\n                  \"Loss/location\": varifocal_loss.item(),\r\n                  \"IoU\": mean_iou.item()}\r\n\r\n        return total_losses, status\r\n\r\n"
  },
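  {
    "path": "docs/examples/coordinate_discretization_sketch.py",
    "content": "\"\"\"Hypothetical illustration file, not part of the original repo.\n\nA minimal, self-contained sketch of the coordinate pipeline used by\nbatch_track in the sequence actors above: previous corner boxes are shifted\ninto the search crop, rescaled to the crop resolution, normalized to\nroughly [0, 1], clamped to [-magic_num, 1 + magic_num], and mapped to\ncontinuous token ids over the bins vocabulary. The constants below are\nillustrative stand-ins for cfg.DATA.SEARCH.SIZE, cfg.MODEL.BINS and\ncfg.MODEL.RANGE.\n\"\"\"\nimport numpy as np\nimport torch\n\nSEARCH_SIZE = 256   # stand-in for cfg.DATA.SEARCH.SIZE\nBINS = 400          # stand-in for self.bins\nRANGE = 2           # stand-in for self.range\nMAGIC_NUM = (RANGE - 1) * 0.5\n\n\ndef box_to_token_ids(corners, center_pos, s_x):\n    \"\"\"Map one (x1, y1, x2, y2) box from image coords to continuous token ids.\"\"\"\n    rel = corners - np.tile(center_pos, 2)                 # crop center at the origin\n    in_crop = rel * (SEARCH_SIZE / s_x) + SEARCH_SIZE / 2  # pixel coords inside the crop\n    normed = in_crop / SEARCH_SIZE                         # roughly in [0, 1]\n    seq = torch.from_numpy(normed).clamp(-MAGIC_NUM, 1 + MAGIC_NUM)\n    return (seq + MAGIC_NUM) * (BINS - 1)                  # in [0, RANGE * (BINS - 1)]\n\n\nif __name__ == \"__main__\":\n    ids = box_to_token_ids(np.array([90.0, 110.0, 150.0, 190.0]),\n                           center_pos=np.array([120.0, 150.0]), s_x=200.0)\n    print(ids)\n"
  },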
  {
    "path": "lib/train/actors/artrackv2.py",
    "content": "from . import BaseActor\nfrom copy import deepcopy\nfrom lib.utils.misc import NestedTensor\nfrom lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy\nimport torch\nimport math\nimport numpy as np\nfrom lib.utils.merge import merge_template_search\nfrom ...utils.heapmap_utils import generate_heatmap\nfrom ...utils.ce_utils import generate_mask_cond, adjust_keep_rate\n\n\ndef fp16_clamp(x, min=None, max=None):\n    if not x.is_cuda and x.dtype == torch.float16:\n        # clamp for cpu float16, tensor fp16 has no clamp implementation\n        return x.float().clamp(min, max).half()\n\n    return x.clamp(min, max)\n\n\ndef generate_sa_simdr(joints):\n    '''\n    :param joints:  [num_joints, 3]\n    :param joints_vis: [num_joints, 3]\n    :return: target, target_weight(1: visible, 0: invisible)\n    '''\n    num_joints = 48\n    image_size = [256, 256]\n    simdr_split_ratio = 1.5625\n    sigma = 6\n\n    target_x1 = np.zeros((num_joints,\n                          int(image_size[0] * simdr_split_ratio)),\n                         dtype=np.float32)\n    target_y1 = np.zeros((num_joints,\n                          int(image_size[1] * simdr_split_ratio)),\n                         dtype=np.float32)\n    target_x2 = np.zeros((num_joints,\n                          int(image_size[0] * simdr_split_ratio)),\n                         dtype=np.float32)\n    target_y2 = np.zeros((num_joints,\n                          int(image_size[1] * simdr_split_ratio)),\n                         dtype=np.float32)\n    zero_4_begin = np.zeros((num_joints, 1), dtype=np.float32)\n\n    tmp_size = sigma * 3\n\n    for joint_id in range(num_joints):\n        mu_x1 = joints[joint_id][0]\n        mu_y1 = joints[joint_id][1]\n        mu_x2 = joints[joint_id][2]\n        mu_y2 = joints[joint_id][3]\n\n        x1 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32)\n        y1 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32)\n        x2 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32)\n        y2 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32)\n\n        target_x1[joint_id] = (np.exp(- ((x1 - mu_x1) ** 2) / (2 * sigma ** 2))) / (\n                sigma * np.sqrt(np.pi * 2))\n        target_y1[joint_id] = (np.exp(- ((y1 - mu_y1) ** 2) / (2 * sigma ** 2))) / (\n                sigma * np.sqrt(np.pi * 2))\n        target_x2[joint_id] = (np.exp(- ((x2 - mu_x2) ** 2) / (2 * sigma ** 2))) / (\n                sigma * np.sqrt(np.pi * 2))\n        target_y2[joint_id] = (np.exp(- ((y2 - mu_y2) ** 2) / (2 * sigma ** 2))) / (\n                sigma * np.sqrt(np.pi * 2))\n    return target_x1, target_y1, target_x2, target_y2\n\n\n# angle cost\ndef SIoU_loss(test1, test2, theta=4):\n    eps = 1e-7\n    cx_pred = (test1[:, 0] + test1[:, 2]) / 2\n    cy_pred = (test1[:, 1] + test1[:, 3]) / 2\n    cx_gt = (test2[:, 0] + test2[:, 2]) / 2\n    cy_gt = (test2[:, 1] + test2[:, 3]) / 2\n\n    dist = ((cx_pred - cx_gt) ** 2 + (cy_pred - cy_gt) ** 2) ** 0.5\n    ch = torch.max(cy_gt, cy_pred) - torch.min(cy_gt, cy_pred)\n    x = ch / (dist + eps)\n\n    angle = 1 - 2 * torch.sin(torch.arcsin(x) - torch.pi / 4) ** 2\n    # distance cost\n    xmin = torch.min(test1[:, 0], test2[:, 0])\n    xmax = torch.max(test1[:, 2], test2[:, 2])\n    ymin = torch.min(test1[:, 1], test2[:, 1])\n    ymax = torch.max(test1[:, 3], test2[:, 3])\n    cw = xmax - xmin\n    ch = ymax - ymin\n    px = ((cx_gt - cx_pred) / (cw + eps)) ** 2\n    py = ((cy_gt - 
cy_pred) / (ch + eps)) ** 2\n    gama = 2 - angle\n    dis = (1 - torch.exp(-1 * gama * px)) + (1 - torch.exp(-1 * gama * py))\n\n    # shape cost\n    w_pred = test1[:, 2] - test1[:, 0]\n    h_pred = test1[:, 3] - test1[:, 1]\n    w_gt = test2[:, 2] - test2[:, 0]\n    h_gt = test2[:, 3] - test2[:, 1]\n    ww = torch.abs(w_pred - w_gt) / (torch.max(w_pred, w_gt) + eps)\n    wh = torch.abs(h_gt - h_pred) / (torch.max(h_gt, h_pred) + eps)\n    omega = (1 - torch.exp(-1 * wh)) ** theta + (1 - torch.exp(-1 * ww)) ** theta\n\n    # IoU loss\n    lt = torch.max(test1[..., :2], test2[..., :2])  # [B, rows, 2]\n    rb = torch.min(test1[..., 2:], test2[..., 2:])  # [B, rows, 2]\n\n    wh = fp16_clamp(rb - lt, min=0)\n    overlap = wh[..., 0] * wh[..., 1]\n    area1 = (test1[..., 2] - test1[..., 0]) * (\n            test1[..., 3] - test1[..., 1])\n    area2 = (test2[..., 2] - test2[..., 0]) * (\n            test2[..., 3] - test2[..., 1])\n    iou = overlap / (area1 + area2 - overlap)\n\n    SIoU = 1 - iou + (omega + dis) / 2\n    return SIoU, iou\n\n\ndef ciou(pred, target, eps=1e-7):\n    # overlap\n    lt = torch.max(pred[:, :2], target[:, :2])\n    rb = torch.min(pred[:, 2:], target[:, 2:])\n    wh = (rb - lt).clamp(min=0)\n    overlap = wh[:, 0] * wh[:, 1]\n\n    # union\n    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])\n    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])\n    union = ap + ag - overlap + eps\n\n    # IoU\n    ious = overlap / union\n\n    # enclose area\n    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])\n    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])\n    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)\n\n    cw = enclose_wh[:, 0]\n    ch = enclose_wh[:, 1]\n\n    c2 = cw ** 2 + ch ** 2 + eps\n\n    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]\n    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]\n    b2_x1, b2_y1 = target[:, 0], target[:, 1]\n    b2_x2, b2_y2 = target[:, 2], target[:, 3]\n\n    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\n    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\n\n    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4\n    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4\n    rho2 = left + right\n\n    factor = 4 / math.pi ** 2\n    v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\n\n    # CIoU\n    cious = ious - (rho2 / c2 + v ** 2 / (1 - ious + v))\n    return cious, ious\n\n\nclass ARTrackV2Actor(BaseActor):\n    \"\"\" Actor for training OSTrack models \"\"\"\n\n    def __init__(self, net, objective, loss_weight, settings, bins, search_size, cfg=None):\n        super().__init__(net, objective)\n        self.loss_weight = loss_weight\n        self.settings = settings\n        self.bs = self.settings.batchsize  # batch size\n        self.cfg = cfg\n        self.bins = bins\n        self.search_size = search_size\n        self.logsoftmax = torch.nn.LogSoftmax(dim=1)\n        self.focal = None\n        self.range = self.cfg.MODEL.RANGE\n        self.loss_weight['KL'] = 100\n        self.loss_weight['focal'] = 2\n        self.loss_weight['renew'] = 0.3\n\n    def __call__(self, data):\n        \"\"\"\n        args:\n            data - The input data, should contain the fields 'template', 'search', 'gt_bbox'.\n            template_images: (N_t, batch, 3, H, W)\n            search_images: (N_s, batch, 3, H, W)\n        returns:\n            loss    - the training loss\n            status  -  dict containing detailed losses\n        \"\"\"\n        # forward pass\n        out_dict = 
self.forward_pass(data)\n\n        # compute losses\n        loss, status = self.compute_losses(out_dict, data)\n\n        return loss, status\n\n    def forward_pass(self, data):\n        # currently only support 1 template and 1 search region\n        assert len(data['template_images']) == 2\n        assert len(data['search_images']) == 1\n        # print(data['dataset'])\n\n        template_list = []\n        for i in range(self.settings.num_template):\n            template_img_i = data['template_images'][i].view(-1,\n                                                             *data['template_images'].shape[2:])  # (batch, 3, 128, 128)\n            # template_att_i = data['template_att'][i].view(-1, *data['template_att'].shape[2:])  # (batch, 128, 128)\n            template_list.append(template_img_i)\n\n        search_img = data['search_images'][0].view(-1, *data['search_images'].shape[2:])  # (batch, 3, 320, 320)\n        target_in_search_img = data['target_in_search_images'][0].view(-1, *data['target_in_search_images'].shape[\n                                                                            2:])  # (batch, 3, 320, 320)\n        gt_bboxes = deepcopy(data['search_anno'])\n        # search_att = data['search_att'][0].view(-1, *data['search_att'].shape[2:])  # (batch, 320, 320)\n\n        box_mask_z = None\n        ce_keep_rate = None\n        if self.cfg.MODEL.BACKBONE.CE_LOC:\n            box_mask_z = generate_mask_cond(self.cfg, template_list[0].shape[0], template_list[0].device,\n                                            data['template_anno'][0])\n\n            ce_start_epoch = self.cfg.TRAIN.CE_START_EPOCH\n            ce_warm_epoch = self.cfg.TRAIN.CE_WARM_EPOCH\n            ce_keep_rate = adjust_keep_rate(data['epoch'], warmup_epochs=ce_start_epoch,\n                                            total_epochs=ce_start_epoch + ce_warm_epoch,\n                                            ITERS_PER_EPOCH=1,\n                                            base_keep_rate=self.cfg.MODEL.BACKBONE.CE_KEEP_RATIO[0])\n\n        if len(template_list) == 1:\n            template_list = template_list[0]\n        gt_bbox = data['search_anno'][-1]\n        x0 = self.bins * self.range\n        y0 = self.bins * self.range + 1\n        x1 = self.bins * self.range + 2\n        y1 = self.bins * self.range + 3\n        score = self.bins * self.range + 5\n        end = self.bins * self.range + 4\n        gt_bbox[:, 2] = gt_bbox[:, 0] + gt_bbox[:, 2]\n        gt_bbox[:, 3] = gt_bbox[:, 1] + gt_bbox[:, 3]\n        gt_bbox = gt_bbox.clamp(min=(-0.5 * self.range + 0.5), max=(0.5 + self.range * 0.5))\n        data['real_bbox'] = gt_bbox\n\n        seq_ori = (gt_bbox + (self.range * 0.5 - 0.5)) * (self.bins - 1)\n\n        seq_ori = seq_ori.int().to(search_img)\n        B = seq_ori.shape[0]\n        seq_ori_4_4 = seq_ori[:, 0:3]\n\n        seq_input = torch.cat([torch.ones((B, 1)).to(search_img) * x0, torch.ones((B, 1)).to(search_img) * y0,\n                               torch.ones((B, 1)).to(search_img) * x1, torch.ones((B, 1)).to(search_img) * y1,\n                               torch.ones((B, 1)).to(search_img) * score], dim=1)\n\n        seq_output = torch.cat([seq_ori], dim=1)\n        data['seq_input'] = seq_input\n        data['seq_output'] = seq_output\n        out_dict = self.net(template=template_list,\n                                  search=search_img,\n                                  ce_template_mask=box_mask_z,\n                                  ce_keep_rate=ce_keep_rate,\n           
                       return_last_attn=False,\n                                  seq_input=seq_input,\n                                  target_in_search_img=target_in_search_img,\n                                  gt_bboxes=gt_bboxes[-1])\n\n        return out_dict\n\n    def compute_losses(self, pred_dict, gt_dict, return_status=True):\n        bins = self.bins\n        gt_bbox = gt_dict['search_anno'][-1]  # (Ns, batch, 4) (x1,y1,w,h) -> (batch, 4)\n        real_bbox = gt_dict['real_bbox']\n        seq_output = gt_dict['seq_output']\n        pred_feat = pred_dict[\"feat\"]\n        if self.focal is None:\n            # down-weight the special (non-coordinate) tokens in the vocabulary\n            weight = torch.ones(bins * self.range + 6) * 1\n            weight[bins * self.range + 4] = 0.1\n            weight[bins * self.range + 3] = 0.1\n            weight[bins * self.range + 2] = 0.1\n            weight[bins * self.range + 1] = 0.1\n            weight[bins * self.range] = 0.1\n            weight = weight.to(pred_feat)  # Tensor.to is not in-place; keep the moved copy\n\n            self.focal = torch.nn.CrossEntropyLoss(weight=weight, reduction='mean').to(pred_feat)\n\n        pred = pred_feat.permute(1, 0, 2).reshape(-1, bins * self.range + 6)\n        target = seq_output.reshape(-1).to(torch.int64)\n\n        varifocal_loss = self.focal(pred, target)\n        beta = 1\n        pred = pred_feat[0:4, :, 0:bins * self.range] * beta\n        target = seq_output[:, 0:4].to(pred_feat)\n        target_box = seq_output[:, 0:4].cpu().numpy()\n\n        # soft-argmax: expected bin center under the predicted distribution\n        out = pred.softmax(-1).to(pred)\n        mul = torch.arange((self.range * 0.5 * -1 + 0.5) + 1 / (self.bins * self.range), (0.5 + self.range * 0.5), 2 / (self.bins * self.range)).to(pred)  # bin centers; replaces deprecated torch.range, arange's exclusive end yields the same values\n        ans = out * mul\n        ans = ans.sum(dim=-1)\n        ans = ans.permute(1, 0).to(pred)\n\n        target = target / (bins - 1) - (self.range * 0.5 - 0.5)\n        extra_seq = ans.to(pred)\n        cious, iou = SIoU_loss(extra_seq, target, 4)\n        cious = cious.mean()\n        giou_loss = cious\n        l1_loss = self.objective['l1'](extra_seq, target)\n        score = pred_dict[\"score\"]\n        score_loss = self.objective['l1'](score, iou)\n\n        loss = self.loss_weight['giou'] * giou_loss + self.loss_weight[\n            'focal'] * varifocal_loss + self.loss_weight['score'] * score_loss\n\n        if return_status:\n            # status for log\n            mean_iou = iou.detach().mean()\n            status = {\"Loss/total\": loss.item(),\n                      \"Loss/score\": score_loss.item(),\n                      \"Loss/giou\": giou_loss.item(),\n                      \"Loss/l1\": l1_loss.item(),\n                      \"Loss/location\": varifocal_loss.item(),\n                      \"IoU\": mean_iou.item()}\n            return loss, status\n        else:\n            return loss\n"
  },
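  {
    "path": "docs/examples/siou_sanity_check.py",
    "content": "\"\"\"Hypothetical sanity-check sketch, not shipped with the original repo.\n\nExercises the SIoU_loss and ciou helpers defined in\nlib/train/actors/artrackv2.py on toy corner boxes, assuming the repository\nroot is on PYTHONPATH. For identical boxes the IoU is 1 and the SIoU\npenalty collapses to ~0; any shifted box must cost strictly more.\n\"\"\"\nimport torch\n\nfrom lib.train.actors.artrackv2 import SIoU_loss, ciou\n\npred = torch.tensor([[0.10, 0.10, 0.50, 0.50]])\ngt_same = pred.clone()\ngt_shift = torch.tensor([[0.20, 0.20, 0.60, 0.60]])\n\nloss_same, iou_same = SIoU_loss(pred, gt_same)\nloss_shift, iou_shift = SIoU_loss(pred, gt_shift)\n\nassert torch.allclose(iou_same, torch.ones(1))   # perfect overlap -> IoU == 1\nassert loss_same.abs().item() < 1e-4             # angle/distance/shape costs vanish\nassert loss_shift.item() > loss_same.item()      # displacement is penalized\n\ncious, ious = ciou(pred, gt_shift)\nprint(f\"SIoU={loss_shift.item():.4f} IoU={iou_shift.item():.4f} CIoU={cious.item():.4f}\")\n"
  },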
  {
    "path": "lib/train/actors/artrackv2_seq.py",
    "content": "from . import BaseActor\nfrom lib.utils.misc import NestedTensor\nfrom lib.utils.box_ops import box_cxcywh_to_xyxy, box_xywh_to_xyxy\nimport torch\nimport math\nimport numpy as np\nimport numpy\nimport cv2\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as tvisf\nimport lib.train.data.bounding_box_utils as bbutils\nfrom lib.utils.merge import merge_template_search\nfrom torch.distributions.categorical import Categorical\nfrom ...utils.heapmap_utils import generate_heatmap\nfrom ...utils.ce_utils import generate_mask_cond, adjust_keep_rate\n\n\ndef IoU(rect1, rect2):\n    \"\"\" caculate interection over union\n    Args:\n        rect1: (x1, y1, x2, y2)\n        rect2: (x1, y1, x2, y2)\n    Returns:\n        iou\n    \"\"\"\n    # overlap\n    x1, y1, x2, y2 = rect1[0], rect1[1], rect1[2], rect1[3]\n    tx1, ty1, tx2, ty2 = rect2[0], rect2[1], rect2[2], rect2[3]\n\n    xx1 = np.maximum(tx1, x1)\n    yy1 = np.maximum(ty1, y1)\n    xx2 = np.minimum(tx2, x2)\n    yy2 = np.minimum(ty2, y2)\n\n    ww = np.maximum(0, xx2 - xx1)\n    hh = np.maximum(0, yy2 - yy1)\n\n    area = (x2 - x1) * (y2 - y1)\n    target_a = (tx2 - tx1) * (ty2 - ty1)\n    inter = ww * hh\n    iou = inter / (area + target_a - inter)\n    return iou\n\n\ndef fp16_clamp(x, min=None, max=None):\n    if not x.is_cuda and x.dtype == torch.float16:\n        # clamp for cpu float16, tensor fp16 has no clamp implementation\n        return x.float().clamp(min, max).half()\n\n    return x.clamp(min, max)\n\n\ndef generate_sa_simdr(joints):\n    '''\n    :param joints:  [num_joints, 3]\n    :param joints_vis: [num_joints, 3]\n    :return: target, target_weight(1: visible, 0: invisible)\n    '''\n    num_joints = 48\n    image_size = [256, 256]\n    simdr_split_ratio = 1.5625\n    sigma = 6\n\n    target_x1 = np.zeros((num_joints,\n                          int(image_size[0] * simdr_split_ratio)),\n                         dtype=np.float32)\n    target_y1 = np.zeros((num_joints,\n                          int(image_size[1] * simdr_split_ratio)),\n                         dtype=np.float32)\n    target_x2 = np.zeros((num_joints,\n                          int(image_size[0] * simdr_split_ratio)),\n                         dtype=np.float32)\n    target_y2 = np.zeros((num_joints,\n                          int(image_size[1] * simdr_split_ratio)),\n                         dtype=np.float32)\n    zero_4_begin = np.zeros((num_joints, 1), dtype=np.float32)\n\n    tmp_size = sigma * 3\n\n    for joint_id in range(num_joints):\n        mu_x1 = joints[joint_id][0]\n        mu_y1 = joints[joint_id][1]\n        mu_x2 = joints[joint_id][2]\n        mu_y2 = joints[joint_id][3]\n\n        x1 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32)\n        y1 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32)\n        x2 = np.arange(0, int(image_size[0] * simdr_split_ratio), 1, np.float32)\n        y2 = np.arange(0, int(image_size[1] * simdr_split_ratio), 1, np.float32)\n\n        target_x1[joint_id] = (np.exp(- ((x1 - mu_x1) ** 2) / (2 * sigma ** 2))) / (\n                sigma * np.sqrt(np.pi * 2))\n        target_y1[joint_id] = (np.exp(- ((y1 - mu_y1) ** 2) / (2 * sigma ** 2))) / (\n                sigma * np.sqrt(np.pi * 2))\n        target_x2[joint_id] = (np.exp(- ((x2 - mu_x2) ** 2) / (2 * sigma ** 2))) / (\n                sigma * np.sqrt(np.pi * 2))\n        target_y2[joint_id] = (np.exp(- ((y2 - mu_y2) ** 2) / (2 * sigma ** 2))) / (\n                sigma * np.sqrt(np.pi 
* 2))\n    return target_x1, target_y1, target_x2, target_y2\n\n\n# angle cost\ndef SIoU_loss(test1, test2, theta=4):\n    eps = 1e-7\n    cx_pred = (test1[:, 0] + test1[:, 2]) / 2\n    cy_pred = (test1[:, 1] + test1[:, 3]) / 2\n    cx_gt = (test2[:, 0] + test2[:, 2]) / 2\n    cy_gt = (test2[:, 1] + test2[:, 3]) / 2\n\n    dist = ((cx_pred - cx_gt) ** 2 + (cy_pred - cy_gt) ** 2) ** 0.5\n    ch = torch.max(cy_gt, cy_pred) - torch.min(cy_gt, cy_pred)\n    x = ch / (dist + eps)\n\n    angle = 1 - 2 * torch.sin(torch.arcsin(x) - torch.pi / 4) ** 2\n    # distance cost\n    xmin = torch.min(test1[:, 0], test2[:, 0])\n    xmax = torch.max(test1[:, 2], test2[:, 2])\n    ymin = torch.min(test1[:, 1], test2[:, 1])\n    ymax = torch.max(test1[:, 3], test2[:, 3])\n    cw = xmax - xmin\n    ch = ymax - ymin\n    px = ((cx_gt - cx_pred) / (cw + eps)) ** 2\n    py = ((cy_gt - cy_pred) / (ch + eps)) ** 2\n    gama = 2 - angle\n    dis = (1 - torch.exp(-1 * gama * px)) + (1 - torch.exp(-1 * gama * py))\n\n    # shape cost\n    w_pred = test1[:, 2] - test1[:, 0]\n    h_pred = test1[:, 3] - test1[:, 1]\n    w_gt = test2[:, 2] - test2[:, 0]\n    h_gt = test2[:, 3] - test2[:, 1]\n    ww = torch.abs(w_pred - w_gt) / (torch.max(w_pred, w_gt) + eps)\n    wh = torch.abs(h_gt - h_pred) / (torch.max(h_gt, h_pred) + eps)\n    omega = (1 - torch.exp(-1 * wh)) ** theta + (1 - torch.exp(-1 * ww)) ** theta\n\n    # IoU loss\n    lt = torch.max(test1[..., :2], test2[..., :2])  # [B, rows, 2]\n    rb = torch.min(test1[..., 2:], test2[..., 2:])  # [B, rows, 2]\n\n    wh = fp16_clamp(rb - lt, min=0)\n    overlap = wh[..., 0] * wh[..., 1]\n    area1 = (test1[..., 2] - test1[..., 0]) * (\n            test1[..., 3] - test1[..., 1])\n    area2 = (test2[..., 2] - test2[..., 0]) * (\n            test2[..., 3] - test2[..., 1])\n    iou = overlap / (area1 + area2 - overlap)\n\n    SIoU = 1 - iou + (omega + dis) / 2\n    return SIoU, iou\n\n\ndef ciou(pred, target, eps=1e-7):\n    # overlap\n    lt = torch.max(pred[:, :2], target[:, :2])\n    rb = torch.min(pred[:, 2:], target[:, 2:])\n    wh = (rb - lt).clamp(min=0)\n    overlap = wh[:, 0] * wh[:, 1]\n\n    # union\n    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])\n    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])\n    union = ap + ag - overlap + eps\n\n    # IoU\n    ious = overlap / union\n\n    # enclose area\n    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])\n    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])\n    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)\n\n    cw = enclose_wh[:, 0]\n    ch = enclose_wh[:, 1]\n\n    c2 = cw ** 2 + ch ** 2 + eps\n\n    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]\n    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]\n    b2_x1, b2_y1 = target[:, 0], target[:, 1]\n    b2_x2, b2_y2 = target[:, 2], target[:, 3]\n\n    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\n    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\n\n    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4\n    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4\n    rho2 = left + right\n\n    factor = 4 / math.pi ** 2\n    v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\n\n    # CIoU\n    cious = ious - (rho2 / c2 + v ** 2 / (1 - ious + v))\n    return cious, ious\n\n\nclass ARTrackV2SeqActor(BaseActor):\n    \"\"\" Actor for training OSTrack models \"\"\"\n\n    def __init__(self, net, objective, loss_weight, settings, bins, search_size, cfg=None):\n        super().__init__(net, objective)\n        self.loss_weight = 
loss_weight\n        self.settings = settings\n        self.bs = self.settings.batchsize  # batch size\n        self.cfg = cfg\n        self.bins = bins\n        self.search_size = search_size\n        self.logsoftmax = torch.nn.LogSoftmax(dim=1)\n        self.focal = None\n        self.range = cfg.MODEL.RANGE\n        self.loss_weight['KL'] = 0\n        self.loss_weight['focal'] = 0\n        self.pre_num = cfg.MODEL.PRENUM\n        self.pre_bbox = None\n        self.x_feat_rem = None\n\n    def __call__(self, data):\n        \"\"\"\n        args:\n            data - The input data, should contain the fields 'template', 'search', 'gt_bbox'.\n            template_images: (N_t, batch, 3, H, W)\n            search_images: (N_s, batch, 3, H, W)\n        returns:\n            loss    - the training loss\n            status  -  dict containing detailed losses\n        \"\"\"\n        # forward pass\n        out_dict = self.forward_pass(data)\n\n        # compute losses\n        loss, status = self.compute_losses(out_dict, data)\n\n        return loss, status\n\n    def _bbox_clip(self, cx, cy, width, height, boundary):\n        cx = max(0, min(cx, boundary[1]))\n        cy = max(0, min(cy, boundary[0]))\n        width = max(10, min(width, boundary[1]))\n        height = max(10, min(height, boundary[0]))\n        return cx, cy, width, height\n\n    def get_subwindow(self, im, pos, model_sz, original_sz, avg_chans):\n        \"\"\"\n        args:\n            im: bgr based image\n            pos: center position\n            model_sz: exemplar size\n            s_z: original size\n            avg_chans: channel average\n        \"\"\"\n        if isinstance(pos, float):\n            pos = [pos, pos]\n        sz = original_sz\n        im_sz = im.shape\n        c = (original_sz + 1) / 2\n        context_xmin = np.floor(pos[0] - c + 0.5)\n        context_xmax = context_xmin + sz - 1\n        context_ymin = np.floor(pos[1] - c + 0.5)\n        context_ymax = context_ymin + sz - 1\n        left_pad = int(max(0., -context_xmin))\n        top_pad = int(max(0., -context_ymin))\n        right_pad = int(max(0., context_xmax - im_sz[1] + 1))\n        bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))\n\n        context_xmin = context_xmin + left_pad\n        context_xmax = context_xmax + left_pad\n        context_ymin = context_ymin + top_pad\n        context_ymax = context_ymax + top_pad\n\n        r, c, k = im.shape\n        if any([top_pad, bottom_pad, left_pad, right_pad]):\n            size = (r + top_pad + bottom_pad, c + left_pad + right_pad, k)\n            te_im = np.zeros(size, np.uint8)\n            te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im\n            if top_pad:\n                te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans\n            if bottom_pad:\n                te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans\n            if left_pad:\n                te_im[:, 0:left_pad, :] = avg_chans\n            if right_pad:\n                te_im[:, c + left_pad:, :] = avg_chans\n            im_patch = te_im[int(context_ymin):int(context_ymax + 1),\n                       int(context_xmin):int(context_xmax + 1), :]\n        else:\n            im_patch = im[int(context_ymin):int(context_ymax + 1),\n                       int(context_xmin):int(context_xmax + 1), :]\n\n        if not np.array_equal(model_sz, original_sz):\n            try:\n                im_patch = cv2.resize(im_patch, (model_sz, model_sz))\n            except:\n                return None\n      
  im_patch = im_patch.transpose(2, 0, 1)\n        im_patch = im_patch[np.newaxis, :, :, :]\n        im_patch = im_patch.astype(np.float32)\n        im_patch = torch.from_numpy(im_patch)\n        im_patch = im_patch.cuda()\n        return im_patch\n\n    def batch_init(self, images, template_bbox, initial_bbox) -> dict:\n        self.frame_num = 1\n        self.device = 'cuda'\n        # Convert bbox (x1, y1, w, h) -> (cx, cy, w, h)\n        template_bbox_1 = template_bbox[:, 0]\n        template_bbox_2 = template_bbox[:, 1]\n\n        template_bbox_1 = bbutils.batch_xywh2center2(template_bbox_1)  # ndarray:(2*num_seq,4)\n        template_bbox_2 = bbutils.batch_xywh2center2(template_bbox_2)  # ndarray:(2*num_seq,4)\n\n        initial_bbox = bbutils.batch_xywh2center2(initial_bbox)  # ndarray:(2*num_seq,4)\n        self.center_pos = initial_bbox[:, :2]  # ndarray:(2*num_seq,2)\n        self.size = initial_bbox[:, 2:]  # ndarray:(2*num_seq,2)\n        self.pre_bbox = initial_bbox\n        for i in range(self.pre_num - 1):\n            self.pre_bbox = numpy.concatenate((self.pre_bbox, initial_bbox), axis=1)\n\n        template_factor = self.cfg.DATA.TEMPLATE.FACTOR\n        w_z_1 = template_bbox_1[:, 2] * template_factor  # ndarray:(2*num_seq)\n        h_z_1 = template_bbox_1[:, 3] * template_factor  # ndarray:(2*num_seq)\n        s_z_1 = np.ceil(np.sqrt(w_z_1 * h_z_1))  # ndarray:(2*num_seq)\n\n        w_z_2 = template_bbox_2[:, 2] * template_factor  # ndarray:(2*num_seq)\n        h_z_2 = template_bbox_2[:, 3] * template_factor  # ndarray:(2*num_seq)\n        s_z_2 = np.ceil(np.sqrt(w_z_2 * h_z_2))  # ndarray:(2*num_seq)\n\n        self.channel_average = []\n        for img in images:\n            self.channel_average.append(np.mean(img[0], axis=(0, 1)))\n            self.channel_average.append(np.mean(img[1], axis=(0, 1)))\n        self.channel_average = np.array(self.channel_average)  # ndarray:(2*num_seq,3)\n\n        # get crop\n        z_crop_list = []\n        z_1_list = []\n        z_2_list = []\n        for i in range(len(images)):\n            here_crop_1 = self.get_subwindow(images[i][0], template_bbox_1[i, :2],\n                                             self.cfg.DATA.TEMPLATE.SIZE, s_z_1[i], self.channel_average[2 * i])\n            here_crop_2 = self.get_subwindow(images[i][1], template_bbox_2[i, :2],\n                                             self.cfg.DATA.TEMPLATE.SIZE, s_z_2[i], self.channel_average[2 * i + 1])\n            z_crop_1 = here_crop_1.float().mul(1.0 / 255.0).clamp(0.0, 1.0)\n            z_crop_2 = here_crop_2.float().mul(1.0 / 255.0).clamp(0.0, 1.0)\n            self.mean = [0.485, 0.456, 0.406]\n            self.std = [0.229, 0.224, 0.225]\n            self.inplace = False\n            z_crop_1[0] = tvisf.normalize(z_crop_1[0], self.mean, self.std, self.inplace)\n            z_crop_2[0] = tvisf.normalize(z_crop_2[0], self.mean, self.std, self.inplace)\n            z_1_list.append(z_crop_1.unsqueeze(1).clone())\n            z_2_list.append(z_crop_2.unsqueeze(1).clone())\n            z_crop = torch.concat([z_crop_1.unsqueeze(1), z_crop_2.unsqueeze(1)], dim=1)\n            z_crop_list.append(z_crop.clone())\n        z_crop = torch.cat(z_crop_list, dim=0)  # Tensor(2*num_seq,3,128,128)\n        z_1_crop = torch.cat(z_1_list, dim=0)\n        z_2_crop = torch.cat(z_2_list, dim=0)\n        \n        model_to_access = getattr(self.net, 'module', self.net)\n\n        z_2_crop = z_2_crop.squeeze(1).to(model_to_access.backbone.word_embeddings.weight)\n        z_2_feat 
= model_to_access.backbone.patch_embed(z_2_crop)\n\n        out = {'template_images': z_crop, \"z_1\": z_1_crop, \"z_2\": z_2_crop, \"z_2_feat\": z_2_feat}\n        return out\n\n    def batch_track(self, img, gt_boxes, template, dz_feat, action_mode='max') -> dict:\n        search_factor = self.cfg.DATA.SEARCH.FACTOR\n        w_x = self.size[:, 0] * search_factor\n        h_x = self.size[:, 1] * search_factor\n        s_x = np.ceil(np.sqrt(w_x * h_x))\n\n        gt_boxes_corner = bbutils.batch_xywh2corner(gt_boxes)  # ndarray:(2*num_seq,4)\n        initial_bbox = bbutils.batch_xywh2center2(gt_boxes)\n\n        x_crop_list = []\n        gt_in_crop_list = []\n        pre_seq_list = []\n        pre_seq_in_list = []\n        x_feat_list = []\n        target_in_search_list = []\n        update_feat_list = []\n        for i in range(len(img)):\n\n            template_factor = self.cfg.DATA.TEMPLATE.FACTOR\n            w_z_1 = initial_bbox[:, 2] * template_factor  # ndarray:(2*num_seq)\n            h_z_1 = initial_bbox[:, 3] * template_factor  # ndarray:(2*num_seq)\n            s_z_1 = np.ceil(np.sqrt(w_z_1 * h_z_1))  # ndarray:(2*num_seq)\n\n            channel_avg = np.mean(img[i], axis=(0, 1))\n            target_in_search = self.get_subwindow(img[i], initial_bbox[i, :2], self.cfg.DATA.TEMPLATE.SIZE,\n                                                  round(s_z_1[i]), channel_avg)\n\n            x_crop = self.get_subwindow(img[i], self.center_pos[i], self.cfg.DATA.SEARCH.SIZE,\n                                        round(s_x[i]), channel_avg)\n            if x_crop == None:\n                return None\n            if target_in_search == None:\n                return None\n            for q in range(self.pre_num):\n                pre_seq_temp = bbutils.batch_center2corner(self.pre_bbox[:, 0 + 4 * q:4 + 4 * q])\n                if q == 0:\n                    pre_seq = pre_seq_temp\n                else:\n                    pre_seq = numpy.concatenate((pre_seq, pre_seq_temp), axis=1)\n\n            if gt_boxes_corner is not None and np.sum(np.abs(gt_boxes_corner[i] - np.zeros(4))) > 10:\n                pre_in = np.zeros(4 * self.pre_num)\n                for w in range(self.pre_num):\n                    pre_in[0 + w * 4:2 + w * 4] = pre_seq[i, 0 + w * 4:2 + w * 4] - self.center_pos[i]\n                    pre_in[2 + w * 4:4 + w * 4] = pre_seq[i, 2 + w * 4:4 + w * 4] - self.center_pos[i]\n                    pre_in[0 + w * 4:4 + w * 4] = pre_in[0 + w * 4:4 + w * 4] * (\n                                self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2\n                    pre_in[0 + w * 4:4 + w * 4] = pre_in[0 + w * 4:4 + w * 4] / self.cfg.DATA.SEARCH.SIZE\n\n                pre_seq_list.append(pre_in)\n                gt_in_crop = np.zeros(4)\n                gt_in_crop[:2] = gt_boxes_corner[i, :2] - self.center_pos[i]\n                gt_in_crop[2:] = gt_boxes_corner[i, 2:] - self.center_pos[i]\n                gt_in_crop = gt_in_crop * (self.cfg.DATA.SEARCH.SIZE / s_x[i]) + self.cfg.DATA.SEARCH.SIZE / 2\n                gt_in_crop[2:] = gt_in_crop[2:] - gt_in_crop[:2]  # (x1,y1,x2,y2) to (x1,y1,w,h)\n                gt_in_crop_list.append(gt_in_crop)\n            else:\n                pre_in = np.zeros(4 * self.pre_num)\n                pre_seq_list.append(pre_in)\n                gt_in_crop_list.append(np.zeros(4))\n            pre_seq_input = torch.from_numpy(pre_in).clamp(-0.5 * self.range + 0.5, 0.5 + self.range * 0.5)\n            pre_seq_input = (pre_seq_input + 
(0.5 * self.range - 0.5)) * (self.bins - 1)\n            pre_seq_in_list.append(pre_seq_input.clone())\n            x_crop = x_crop.float().mul(1.0 / 255.0).clamp(0.0, 1.0)\n            target_in_search = target_in_search.float().mul(1.0 / 255.0).clamp(0.0, 1.0)\n            rem_x = x_crop\n            x_crop[0] = tvisf.normalize(x_crop[0], self.mean, self.std, self.inplace)\n            target_in_search[0] = tvisf.normalize(target_in_search[0], self.mean, self.std, self.inplace)\n            x_crop_list.append(x_crop.clone())\n            target_in_search_list.append(target_in_search.clone())\n\n        x_crop = torch.cat(x_crop_list, dim=0)\n        target_in_search = torch.cat(target_in_search_list, dim=0)\n        pre_seq_output = torch.cat(pre_seq_in_list, dim=0).reshape(-1, 4 * self.pre_num)\n        pre = torch.zeros_like(pre_seq_output)\n\n        outputs = self.net(template, dz_feat.cuda(), x_crop, seq_input=pre_seq_output, head_type=None,\n                           stage=\"batch_track\",\n                           search_feature=self.x_feat_rem, target_in_search_img=target_in_search,\n                           gt_bboxes=gt_boxes[-1])\n\n        selected_indices = outputs['seqs'].detach()\n        x_feat = outputs['x_feat'].detach().cpu()\n        self.x_feat_rem = x_feat.clone()\n        x_feat_list.append(x_feat.clone())\n\n        update_feat = outputs['dz_feat'].detach().cpu()\n        update_feat_list.append(update_feat.clone())\n\n        pred_bbox = selected_indices[:, 0:4].data.cpu().numpy()\n        bbox = (pred_bbox / (self.bins - 1) - (self.range * 0.5 - 0.5)) * s_x.reshape(-1, 1)\n        cx = bbox[:, 0] + self.center_pos[:, 0] - s_x / 2\n        cy = bbox[:, 1] + self.center_pos[:, 1] - s_x / 2\n        width = bbox[:, 2] - bbox[:, 0]\n        height = bbox[:, 3] - bbox[:, 1]\n        cx = cx + width / 2\n        cy = cy + height / 2\n\n        for i in range(len(img)):\n            cx[i], cy[i], width[i], height[i] = self._bbox_clip(cx[i], cy[i], width[i],\n                                                                height[i], img[i].shape[:2])\n        self.center_pos = np.stack([cx, cy], 1)\n        self.size = np.stack([width, height], 1)\n        for e in range(self.pre_num):\n            if e != self.pre_num - 1:\n                self.pre_bbox[:, 0 + e * 4:4 + e * 4] = self.pre_bbox[:, 4 + e * 4:8 + e * 4]\n            else:\n                self.pre_bbox[:, 0 + e * 4:4 + e * 4] = numpy.stack([cx, cy, width, height], 1)\n\n\n        bbox = np.stack([cx - width / 2, cy - height / 2, width, height], 1)\n\n        out = {\n            'dz_feat': update_feat,\n            'search_images': x_crop,\n            'target_in_search': target_in_search,\n            'pred_bboxes': bbox,\n            'selected_indices': selected_indices.cpu(),\n            'gt_in_crop': torch.tensor(np.stack(gt_in_crop_list, axis=0), dtype=torch.float),\n            'pre_seq': torch.tensor(np.stack(pre_seq_list, axis=0), dtype=torch.float),\n            'x_feat': torch.tensor([item.cpu().detach().numpy() for item in x_feat_list], dtype=torch.float),\n        }\n        return out\n\n    def explore(self, data):\n        results = {}\n        search_images_list = []\n        search_anno_list = []\n        action_tensor_list = []\n        iou_list = []\n        # cover_list = []\n        pre_seq_list = []\n        x_feat_list = []\n        target_in_search_list = []\n        template_all_list = []\n        dz_feat_udpate_list = []\n\n        num_frames = data['num_frames']\n        
images = data['search_images']\n        gt_bbox = data['search_annos']\n        template = data['template_images']\n        template_bbox = data['template_annos']\n\n        template = template\n        template_bbox = template_bbox\n        template_bbox = np.array(template_bbox)\n        num_seq = len(num_frames)\n\n        for idx in range(np.max(num_frames)):\n            here_images = [img[idx] for img in images]  # S, N\n            here_gt_bbox = np.array([gt[idx] for gt in gt_bbox])\n\n            here_images = here_images\n            here_gt_bbox = np.concatenate([here_gt_bbox], 0)\n\n            if idx == 0:\n                outputs_template = self.batch_init(template, template_bbox, here_gt_bbox)\n                results['template_images'] = outputs_template['z_1']\n                self.template_temp = outputs_template['z_1'].clone()\n                z_all = [outputs_template['z_1'], outputs_template['z_2']]\n                results['z_all'] = z_all\n                self.dz_feat_update = outputs_template['z_2_feat']\n\n            else:\n                outputs = self.batch_track(here_images, here_gt_bbox, self.template_temp, self.dz_feat_update,\n                                           action_mode='half')\n                if outputs == None:\n                    return None\n                template_all_list.append(self.template_temp.clone())\n                dz_feat_udpate_list.append(self.dz_feat_update.clone().to(outputs['dz_feat']))\n\n                x_feat = outputs['x_feat']\n                self.dz_feat_update = outputs['dz_feat']\n\n                pred_bbox = outputs['pred_bboxes']\n                search_images_list.append(outputs['search_images'])\n                target_in_search_list.append(outputs['target_in_search'])\n                search_anno_list.append(outputs['gt_in_crop'])\n\n                if len(outputs['pre_seq']) != 8:\n                    print(outputs['pre_seq'])\n                    print(len(outputs['pre_seq']))\n                    print(idx)\n                    print(data['num_frames'])\n                    print(data['search_annos'])\n                    return None\n                pre_seq_list.append(outputs['pre_seq'])\n                pred_bbox_corner = bbutils.batch_xywh2corner(pred_bbox)\n                gt_bbox_corner = bbutils.batch_xywh2corner(here_gt_bbox)\n                here_iou = []\n                for i in range(num_seq):\n                    bbox_iou = IoU(pred_bbox_corner[i], gt_bbox_corner[i])\n                    here_iou.append(bbox_iou)\n                iou_list.append(here_iou)\n                x_feat_list.append(x_feat.clone())\n\n        search_images_reverse_list = []\n        search_anno_reverse_list = []\n        action_tensor_reverse_list = []\n        iou_reverse_list = []\n        pre_seq_reverse_list = []\n        x_feat_reverse_list = []\n        target_in_search_reverse_list = []\n        dz_feat_update_reverse_list = []\n        template_all_reverse_list = []\n        for idx in range(np.max(num_frames)):\n            real_idx = np.max(num_frames) - 1 - idx\n            here_images = [img[real_idx] for img in images]  # S, N\n            here_gt_bbox = np.array([gt[real_idx] for gt in gt_bbox])\n\n            here_images = here_images\n            here_gt_bbox = np.concatenate([here_gt_bbox], 0)\n\n            if idx == 0:\n                outputs_template = self.batch_init(template, template_bbox, here_gt_bbox)\n                results['template_images'] = outputs_template['z_1']\n                
self.template_temp = outputs_template['z_1'].clone()\n                z_all = [outputs_template['z_1'], outputs_template['z_2']]\n                results['z_all'] = z_all\n                self.dz_feat_update = outputs_template['z_2_feat'].clone()\n\n            else:\n                outputs = self.batch_track(here_images, here_gt_bbox, self.template_temp, self.dz_feat_update,\n                                           action_mode='half')\n                if outputs == None:\n                    return None\n                template_all_reverse_list.append(self.template_temp.clone())\n                dz_feat_update_reverse_list.append(self.dz_feat_update.clone().to(outputs['dz_feat']))\n\n                x_feat = outputs['x_feat']\n                self.dz_feat_update = outputs['dz_feat']\n                pred_bbox = outputs['pred_bboxes']\n                search_images_reverse_list.append(outputs['search_images'])\n                target_in_search_reverse_list.append(outputs['target_in_search'])\n                search_anno_reverse_list.append(outputs['gt_in_crop'])\n\n                if len(outputs['pre_seq']) != 8:\n                    print(outputs['pre_seq'])\n                    print(len(outputs['pre_seq']))\n                    print(idx)\n                    print(data['num_frames'])\n                    print(data['search_annos'])\n                    return None\n                pre_seq_reverse_list.append(outputs['pre_seq'])\n                pred_bbox_corner = bbutils.batch_xywh2corner(pred_bbox)\n                gt_bbox_corner = bbutils.batch_xywh2corner(here_gt_bbox)\n                here_iou = []\n                for i in range(num_seq):\n                    bbox_iou = IoU(pred_bbox_corner[i], gt_bbox_corner[i])\n                    here_iou.append(bbox_iou)\n                iou_reverse_list.append(here_iou)\n                x_feat_reverse_list.append(x_feat.clone())\n\n        results['x_feat'] = torch.cat([torch.stack(x_feat_list), torch.stack(x_feat_reverse_list)], dim=2)\n        results['search_images'] = torch.cat([torch.stack(search_images_list), torch.stack(search_images_reverse_list)],\n                                             dim=1)\n        results['template_images_z0'] = torch.cat(\n            [torch.stack(template_all_list), torch.stack(template_all_reverse_list)], dim=1)\n        results['dz_feat_update'] = torch.cat(\n            [torch.stack(dz_feat_udpate_list), torch.stack(dz_feat_update_reverse_list)], dim=1)\n        results['search_anno'] = torch.cat([torch.stack(search_anno_list), torch.stack(search_anno_reverse_list)],\n                                           dim=1)\n        results['pre_seq'] = torch.cat([torch.stack(pre_seq_list), torch.stack(pre_seq_reverse_list)], dim=1)\n        results['target_in_search'] = torch.cat(\n            [torch.stack(target_in_search_list), torch.stack(target_in_search_reverse_list)], dim=1)\n\n        iou_tensor = torch.tensor(iou_list, dtype=torch.float)\n        iou_tensor_reverse = torch.tensor(iou_reverse_list, dtype=torch.float)\n        results['baseline_iou'] = torch.cat([iou_tensor[:, :num_seq], iou_tensor_reverse[:, :num_seq]], dim=1)\n        # results['explore_iou'] = iou_tensor[:, num_seq:]\n        # results['action_tensor'] = torch.stack(action_tensor_list)\n\n        return results\n\n    def forward_pass(self, data):\n        # currently only support 1 template and 1 search region\n        assert len(data['template_images']) == 1\n        assert len(data['search_images']) == 1\n\n\n        
template_list = []\n        for i in range(self.settings.num_template):\n            template_img_i = data['template_images'][i].view(-1,\n                                                             *data['template_images'].shape[2:])  # (batch, 3, 128, 128)\n            template_list.append(template_img_i)\n\n        search_img = data['search_images'][0].view(-1, *data['search_images'].shape[2:])  # (batch, 3, 320, 320)\n\n        box_mask_z = None\n        ce_keep_rate = None\n        if self.cfg.MODEL.BACKBONE.CE_LOC:\n            box_mask_z = generate_mask_cond(self.cfg, template_list[0].shape[0], template_list[0].device,\n                                            data['template_anno'][0])\n\n            ce_start_epoch = self.cfg.TRAIN.CE_START_EPOCH\n            ce_warm_epoch = self.cfg.TRAIN.CE_WARM_EPOCH\n            ce_keep_rate = adjust_keep_rate(data['epoch'], warmup_epochs=ce_start_epoch,\n                                            total_epochs=ce_start_epoch + ce_warm_epoch,\n                                            ITERS_PER_EPOCH=1,\n                                            base_keep_rate=self.cfg.MODEL.BACKBONE.CE_KEEP_RATIO[0])\n\n        if len(template_list) == 1:\n            template_list = template_list[0]\n        gt_bbox = data['search_anno'][-1]\n        begin = self.bins\n        end = self.bins + 1\n        gt_bbox[:, 2] = gt_bbox[:, 0] + gt_bbox[:, 2]\n        gt_bbox[:, 3] = gt_bbox[:, 1] + gt_bbox[:, 3]\n        gt_bbox = gt_bbox.clamp(min=0.0, max=1.0)\n        data['real_bbox'] = gt_bbox\n        seq_ori = gt_bbox * (self.bins - 1)\n        seq_ori = seq_ori.int().to(search_img)\n        B = seq_ori.shape[0]\n        seq_ori_4_4 = seq_ori[:, 0:3]\n\n        seq_input = torch.cat([torch.ones((B, 1)).to(search_img) * begin, seq_ori], dim=1)\n\n        seq_output = torch.cat([seq_ori, torch.ones((B, 1)).to(search_img) * end], dim=1)\n        data['seq_input'] = seq_input\n        data['seq_output'] = seq_output\n        out_dict = self.net(template=template_list,\n                            search=search_img,\n                            ce_template_mask=box_mask_z,\n                            ce_keep_rate=ce_keep_rate,\n                            return_last_attn=False,\n                            seq_input=seq_input)\n\n        return out_dict\n\n    def compute_sequence_losses(self, data):\n        num_frames = data['search_images'].shape[0]\n        template_images_for = data['template_images_z0'].reshape(-1, *data['template_images_z0'].size()[2:])\n        dz_feat = data['dz_feat_update'].reshape(-1, *data['dz_feat_update'].size()[2:])\n        target_in_search = data['target_in_search'].reshape(-1, *data['target_in_search'].size()[2:])\n        search_images = data['search_images'].reshape(-1, *data['search_images'].size()[2:])\n        search_anno = data['search_anno'].reshape(-1, *data['search_anno'].size()[2:])\n\n        pre_seq = data['pre_seq'].reshape(-1, 4 * self.pre_num)\n        x_feat = data['x_feat'].reshape(-1, *data['x_feat'].size()[2:])\n\n        epoch = data['epoch']\n        if epoch < 11:\n            self.loss_weight['focal'] = 2\n            self.loss_weight['score_update'] = 1\n        elif epoch < 31:\n            self.loss_weight['focal'] = 0\n            self.loss_weight['score_update'] = 0.1\n        else:\n            self.loss_weight['focal'] = 0\n            self.loss_weight['score_update'] = 0.0\n\n        pre_seq = pre_seq.clamp(-0.5 * self.range + 0.5, 0.5 + self.range * 0.5)\n        pre_seq = (pre_seq + 
(self.range * 0.5 - 0.5)) * (self.bins - 1)\n\n        outputs = self.net(template_images_for, dz_feat, search_images, seq_input=pre_seq, stage=\"forward_pass\",\n                           search_feature=x_feat, target_in_search_img=target_in_search)\n\n        score = outputs['score']\n        renew_loss = outputs['renew_loss']\n        pred_feat = outputs[\"feat\"]\n\n        if self.focal is None:\n            # down-weight the special (non-coordinate) tokens in the vocabulary\n            weight = torch.ones(self.bins * self.range + 6) * 1\n            weight[self.bins * self.range + 4] = 0.1\n            weight[self.bins * self.range + 3] = 0.1\n            weight[self.bins * self.range + 2] = 0.1\n            weight[self.bins * self.range + 1] = 0.1\n            weight[self.bins * self.range] = 0.1\n            weight = weight.to(pred_feat)  # Tensor.to is not in-place; keep the moved copy\n            self.focal = torch.nn.CrossEntropyLoss(weight=weight, reduction='mean').to(pred_feat)\n\n        search_anno[:, 2] = search_anno[:, 2] + search_anno[:, 0]\n        search_anno[:, 3] = search_anno[:, 3] + search_anno[:, 1]\n        target = (search_anno / self.cfg.DATA.SEARCH.SIZE + (self.range * 0.5 - 0.5)) * (self.bins - 1)\n\n        target = target.clamp(min=0.0, max=(self.bins * self.range - 0.0001))\n\n        target_iou = target\n        target = target.reshape(-1).to(torch.int64)\n        pred = pred_feat.permute(1, 0, 2).reshape(-1, self.bins * self.range + 6)\n\n        varifocal_loss = self.focal(pred, target)\n        pred = pred_feat[0:4, :, 0:self.bins * self.range]\n        target = target_iou[:, 0:4].to(pred_feat) / (self.bins - 1) - (self.range * 0.5 - 0.5)\n\n        # soft-argmax: expected bin center under the predicted distribution\n        out = pred.softmax(-1).to(pred)\n        mul = torch.arange((-1 * self.range * 0.5 + 0.5) + 1 / (self.bins * self.range), (self.range * 0.5 + 0.5), 2 / (self.bins * self.range)).to(pred)  # bin centers; replaces deprecated torch.range, arange's exclusive end yields the same values\n\n        ans = out * mul\n        ans = ans.sum(dim=-1)\n        ans = ans.permute(1, 0).to(pred)\n        extra_seq = ans.to(pred)\n\n        cious, iou = SIoU_loss(extra_seq, target, 4)\n        cious = cious.mean()\n\n        score_loss = self.objective['l1'](score, iou)\n\n        giou_loss = cious\n        l1_loss = self.objective['l1'](extra_seq, target)\n\n        loss_bb = (self.loss_weight['giou'] * giou_loss + self.loss_weight['l1'] * l1_loss + self.loss_weight[\n            'focal'] * varifocal_loss)\n\n        total_losses = loss_bb + renew_loss * self.loss_weight['score_update'] + score_loss * self.loss_weight['score_update']\n\n        mean_iou = iou.detach().mean()\n        status = {\"Loss/total\": total_losses.item() / 2,\n                  \"Loss/score\": score_loss.item() / 2,\n                  \"Loss/giou\": giou_loss.item() / 2,\n                  \"Loss/l1\": l1_loss.item() / 2,\n                  \"Loss/location\": varifocal_loss.item() / 2,\n                  \"Loss/renew\": renew_loss.item() / 2,\n                  \"IoU\": mean_iou.item() / 2}\n\n        return total_losses, status\n\n"
  },
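  {
    "path": "docs/examples/coordinate_token_sketch.py",
    "content": "# Illustrative sketch, not a file from the original repo: the coordinate\n# quantization used by the sequence actor above. Normalized box coordinates\n# are scaled to integer bins for the autoregressive decoder, and the\n# decoder's per-bin softmax is turned back into continuous coordinates via\n# an expectation over bin centers. The bins/range values are assumptions.\nimport torch\n\nbins, rng = 400, 2  # assumed stand-ins for self.bins / self.range\n\n# encode: normalized (x1, y1, x2, y2) -> integer coordinate tokens\ngt = torch.tensor([[0.25, 0.30, 0.60, 0.75]])\ntokens = ((gt + (rng * 0.5 - 0.5)) * (bins - 1)).int()\n\n# decode: per-bin logits -> expected continuous coordinates (soft argmax)\nlogits = torch.randn(4, 1, bins * rng)  # (coords, batch, vocab)\nstep = 2.0 / (bins * rng)\ncenters = (-rng * 0.5 + 0.5) + step / 2 + step * torch.arange(bins * rng)\ncoords = (logits.softmax(-1) * centers).sum(-1).permute(1, 0)  # (batch, 4)\nprint(tokens, coords)\n"
  },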
  {
    "path": "lib/train/actors/base_actor.py",
    "content": "from lib.utils import TensorDict\n\n\nclass BaseActor:\n    \"\"\" Base class for actor. The actor class handles the passing of the data through the network\n    and calculation the loss\"\"\"\n    def __init__(self, net, objective):\n        \"\"\"\n        args:\n            net - The network to train\n            objective - The loss function\n        \"\"\"\n        self.net = net\n        self.objective = objective\n\n    def __call__(self, data: TensorDict):\n        \"\"\" Called in each training iteration. Should pass in input data through the network, calculate the loss, and\n        return the training stats for the input data\n        args:\n            data - A TensorDict containing all the necessary data blocks.\n\n        returns:\n            loss    - loss for the input data\n            stats   - a dict containing detailed losses\n        \"\"\"\n        raise NotImplementedError\n\n    def to(self, device):\n        \"\"\" Move the network to device\n        args:\n            device - device to use. 'cpu' or 'cuda'\n        \"\"\"\n        self.net.to(device)\n\n    def train(self, mode=True):\n        \"\"\" Set whether the network is in train mode.\n        args:\n            mode (True) - Bool specifying whether in training mode.\n        \"\"\"\n        self.net.train(mode)\n\n    def eval(self):\n        \"\"\" Set network to eval mode\"\"\"\n        self.train(False)"
  },
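  {
    "path": "docs/examples/base_actor_usage_sketch.py",
    "content": "# Illustrative sketch, not a file from the original repo: a minimal concrete\n# actor built on BaseActor above. The 'input'/'label' keys and the toy net\n# are hypothetical; real actors in this repo return (loss, stats) the same way.\nimport torch\nimport torch.nn as nn\nfrom lib.train.actors.base_actor import BaseActor\nfrom lib.utils import TensorDict\n\n\nclass L1Actor(BaseActor):\n    def __call__(self, data: TensorDict):\n        pred = self.net(data['input'])               # forward pass\n        loss = self.objective(pred, data['label'])   # scalar training loss\n        stats = {'Loss/total': loss.item()}          # logged every iteration\n        return loss, stats\n\n\nif __name__ == '__main__':\n    actor = L1Actor(net=nn.Linear(4, 4), objective=nn.L1Loss())\n    data = TensorDict({'input': torch.randn(2, 4), 'label': torch.randn(2, 4)})\n    loss, stats = actor(data)\n    print(stats)\n"
  },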
  {
    "path": "lib/train/admin/__init__.py",
    "content": "from .environment import env_settings, create_default_local_file_ITP_train\nfrom .stats import AverageMeter, StatValue\n#from .tensorboard import TensorboardWriter\n"
  },
  {
    "path": "lib/train/admin/environment.py",
    "content": "import importlib\nimport os\nfrom collections import OrderedDict\n\n\ndef create_default_local_file():\n    path = os.path.join(os.path.dirname(__file__), 'local.py')\n\n    empty_str = '\\'\\''\n    default_settings = OrderedDict({\n        'workspace_dir': empty_str,\n        'tensorboard_dir': 'self.workspace_dir + \\'/tensorboard/\\'',\n        'pretrained_networks': 'self.workspace_dir + \\'/pretrained_networks/\\'',\n        'lasot_dir': empty_str,\n        'got10k_dir': empty_str,\n        'trackingnet_dir': empty_str,\n        'coco_dir': empty_str,\n        'lvis_dir': empty_str,\n        'sbd_dir': empty_str,\n        'imagenet_dir': empty_str,\n        'imagenetdet_dir': empty_str,\n        'ecssd_dir': empty_str,\n        'hkuis_dir': empty_str,\n        'msra10k_dir': empty_str,\n        'davis_dir': empty_str,\n        'youtubevos_dir': empty_str})\n\n    comment = {'workspace_dir': 'Base directory for saving network checkpoints.',\n               'tensorboard_dir': 'Directory for tensorboard files.'}\n\n    with open(path, 'w') as f:\n        f.write('class EnvironmentSettings:\\n')\n        f.write('    def __init__(self):\\n')\n\n        for attr, attr_val in default_settings.items():\n            comment_str = None\n            if attr in comment:\n                comment_str = comment[attr]\n            if comment_str is None:\n                f.write('        self.{} = {}\\n'.format(attr, attr_val))\n            else:\n                f.write('        self.{} = {}    # {}\\n'.format(attr, attr_val, comment_str))\n\n\ndef create_default_local_file_ITP_train(workspace_dir, data_dir):\n    path = os.path.join(os.path.dirname(__file__), 'local.py')\n\n    empty_str = '\\'\\''\n    default_settings = OrderedDict({\n        'workspace_dir': workspace_dir,\n        'tensorboard_dir': os.path.join(workspace_dir, 'tensorboard'),    # Directory for tensorboard files.\n        'pretrained_networks': os.path.join(workspace_dir, 'pretrained_networks'),\n        'lasot_dir': os.path.join(data_dir, 'lasot'),\n        'got10k_dir': os.path.join(data_dir, 'got10k/train'),\n        'got10k_val_dir': os.path.join(data_dir, 'got10k/val'),\n        'lasot_lmdb_dir': os.path.join(data_dir, 'lasot_lmdb'),\n        'got10k_lmdb_dir': os.path.join(data_dir, 'got10k_lmdb'),\n        'trackingnet_dir': os.path.join(data_dir, 'trackingnet'),\n        'trackingnet_lmdb_dir': os.path.join(data_dir, 'trackingnet_lmdb'),\n        'coco_dir': os.path.join(data_dir, 'coco'),\n        'coco_lmdb_dir': os.path.join(data_dir, 'coco_lmdb'),\n        'lvis_dir': empty_str,\n        'sbd_dir': empty_str,\n        'imagenet_dir': os.path.join(data_dir, 'vid'),\n        'imagenet_lmdb_dir': os.path.join(data_dir, 'vid_lmdb'),\n        'imagenetdet_dir': empty_str,\n        'ecssd_dir': empty_str,\n        'hkuis_dir': empty_str,\n        'msra10k_dir': empty_str,\n        'davis_dir': empty_str,\n        'youtubevos_dir': empty_str})\n\n    comment = {'workspace_dir': 'Base directory for saving network checkpoints.',\n               'tensorboard_dir': 'Directory for tensorboard files.'}\n\n    with open(path, 'w') as f:\n        f.write('class EnvironmentSettings:\\n')\n        f.write('    def __init__(self):\\n')\n\n        for attr, attr_val in default_settings.items():\n            comment_str = None\n            if attr in comment:\n                comment_str = comment[attr]\n            if comment_str is None:\n                if attr_val == empty_str:\n                    f.write('       
 self.{} = {}\\n'.format(attr, attr_val))\n                else:\n                    f.write('        self.{} = \\'{}\\'\\n'.format(attr, attr_val))\n            else:\n                f.write('        self.{} = \\'{}\\'    # {}\\n'.format(attr, attr_val, comment_str))\n\n\ndef env_settings():\n    env_module_name = 'lib.train.admin.local'\n    try:\n        env_module = importlib.import_module(env_module_name)\n        return env_module.EnvironmentSettings()\n    except:\n        env_file = os.path.join(os.path.dirname(__file__), 'local.py')\n\n        create_default_local_file()\n        raise RuntimeError('YOU HAVE NOT SETUP YOUR local.py!!!\\n Go to \"{}\" and set all the paths you need. Then try to run again.'.format(env_file))\n"
  },
  {
    "path": "lib/train/admin/local.py",
    "content": "class EnvironmentSettings:\n    def __init__(self):\n        self.workspace_dir = '/home/baiyifan/code/2stage_update_intrain'    # Base directory for saving network checkpoints.\n        self.tensorboard_dir = '/home/baiyifan/code/2stage/tensorboard'    # Directory for tensorboard files.\n        self.pretrained_networks = '/home/baiyifan/code/2stage/pretrained_networks'\n        self.lasot_dir = '/home/baiyifan/LaSOT/LaSOTBenchmark'\n        self.got10k_dir = '/home/baiyifan/GOT-10k/train'\n        self.got10k_val_dir = '/home/baiyifan/GOT-10k/val'\n        self.lasot_lmdb_dir = '/home/baiyifan/code/2stage/data/lasot_lmdb'\n        self.got10k_lmdb_dir = '/home/baiyifan/code/2stage/data/got10k_lmdb'\n        self.trackingnet_dir = '/ssddata/TrackingNet/all_zip'\n        self.trackingnet_lmdb_dir = '/home/baiyifan/code/2stage/data/trackingnet_lmdb'\n        self.coco_dir = '/home/baiyifan/coco'\n        self.coco_lmdb_dir = '/home/baiyifan/code/2stage/data/coco_lmdb'\n        self.lvis_dir = ''\n        self.sbd_dir = ''\n        self.imagenet_dir = '/home/baiyifan/code/2stage/data/vid'\n        self.imagenet_lmdb_dir = '/home/baiyifan/code/2stage/data/vid_lmdb'\n        self.imagenetdet_dir = ''\n        self.ecssd_dir = ''\n        self.hkuis_dir = ''\n        self.msra10k_dir = ''\n        self.davis_dir = ''\n        self.youtubevos_dir = ''\n"
  },
  {
    "path": "lib/train/admin/multigpu.py",
    "content": "import torch.nn as nn\n# Here we use DistributedDataParallel(DDP) rather than DataParallel(DP) for multiple GPUs training\n\n\ndef is_multi_gpu(net):\n    return isinstance(net, (MultiGPU, nn.parallel.distributed.DistributedDataParallel))\n\n\nclass MultiGPU(nn.parallel.distributed.DistributedDataParallel):\n    def __getattr__(self, item):\n        try:\n            return super().__getattr__(item)\n        except:\n            pass\n        return getattr(self.module, item)\n"
  },
  {
    "path": "lib/train/admin/settings.py",
    "content": "from lib.train.admin.environment import env_settings\n\n\nclass Settings:\n    \"\"\" Training settings, e.g. the paths to datasets and networks.\"\"\"\n    def __init__(self):\n        self.set_default()\n\n    def set_default(self):\n        self.env = env_settings()\n        self.use_gpu = True\n\n\n"
  },
  {
    "path": "lib/train/admin/stats.py",
    "content": "\n\nclass StatValue:\n    def __init__(self):\n        self.clear()\n\n    def reset(self):\n        self.val = 0\n\n    def clear(self):\n        self.reset()\n        self.history = []\n\n    def update(self, val):\n        self.val = val\n        self.history.append(self.val)\n\n\nclass AverageMeter(object):\n    \"\"\"Computes and stores the average and current value\"\"\"\n    def __init__(self):\n        self.clear()\n        self.has_new_data = False\n\n    def reset(self):\n        self.avg = 0\n        self.val = 0\n        self.sum = 0\n        self.count = 0\n\n    def clear(self):\n        self.reset()\n        self.history = []\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n\n    def new_epoch(self):\n        if self.count > 0:\n            self.history.append(self.avg)\n            self.reset()\n            self.has_new_data = True\n        else:\n            self.has_new_data = False\n\n\ndef topk_accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    single_input = not isinstance(topk, (tuple, list))\n    if single_input:\n        topk = (topk,)\n\n    maxk = max(topk)\n    batch_size = target.size(0)\n\n    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)[0]\n        res.append(correct_k * 100.0 / batch_size)\n\n    if single_input:\n        return res[0]\n\n    return res\n"
  },
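  {
    "path": "docs/examples/stats_usage_sketch.py",
    "content": "# Illustrative sketch, not a file from the original repo: how a training loop\n# consumes AverageMeter from stats.py above. Loss values and batch sizes are\n# made up.\nfrom lib.train.admin.stats import AverageMeter\n\nmeter = AverageMeter()\nfor loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:\n    meter.update(loss, n=batch_size)  # running sum, weighted by batch size\n\nprint(meter.avg)   # (0.9*32 + 0.7*32 + 0.5*16) / 80 = 0.74\nmeter.new_epoch()  # push the epoch average into meter.history, then reset\nprint(meter.history)\n"
  },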
  {
    "path": "lib/train/admin/tensorboard.py",
    "content": "#import os\n#from collections import OrderedDict\n#try:\n#    from torch.utils.tensorboard import SummaryWriter\n#except:\n#    print('WARNING: You are using tensorboardX instead sis you have a too old pytorch version.')\n#    from tensorboardX import SummaryWriter\n\n\n#class TensorboardWriter:\n#    def __init__(self, directory, loader_names):\n#        self.directory = directory\n#        self.writer = OrderedDict({name: SummaryWriter(os.path.join(self.directory, name)) for name in loader_names})\n\n#    def write_info(self, script_name, description):\n#        tb_info_writer = SummaryWriter(os.path.join(self.directory, 'info'))\n#        tb_info_writer.add_text('Script_name', script_name)\n#        tb_info_writer.add_text('Description', description)\n#        tb_info_writer.close()\n\n#    def write_epoch(self, stats: OrderedDict, epoch: int, ind=-1):\n#        for loader_name, loader_stats in stats.items():\n#            if loader_stats is None:\n#                continue\n#            for var_name, val in loader_stats.items():\n#                if hasattr(val, 'history') and getattr(val, 'has_new_data', True):\n#                    self.writer[loader_name].add_scalar(var_name, val.history[ind], epoch)"
  },
  {
    "path": "lib/train/base_functions.py",
    "content": "import torch\nfrom torch.utils.data.distributed import DistributedSampler\n# datasets related\nfrom lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet\nfrom lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb\nfrom lib.train.data import sampler, opencv_loader, processing, LTRLoader\nimport lib.train.data.transforms as tfm\nfrom lib.utils.misc import is_main_process\n\n\ndef update_settings(settings, cfg):\n    settings.print_interval = cfg.TRAIN.PRINT_INTERVAL\n    settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR,\n                                   'search': cfg.DATA.SEARCH.FACTOR}\n    settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE,\n                          'search': cfg.DATA.SEARCH.SIZE}\n    settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER,\n                                     'search': cfg.DATA.SEARCH.CENTER_JITTER}\n    settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER,\n                                    'search': cfg.DATA.SEARCH.SCALE_JITTER}\n    settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM\n    settings.print_stats = None\n    settings.batchsize = cfg.TRAIN.BATCH_SIZE\n    settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE\n\n\ndef names2datasets(name_list: list, settings, image_loader):\n    assert isinstance(name_list, list)\n    datasets = []\n    #settings.use_lmdb = True\n    for name in name_list:\n        assert name in [\"LASOT\", \"GOT10K_vottrain\", \"GOT10K_votval\", \"GOT10K_train_full\", \"GOT10K_official_val\",\n                        \"COCO17\", \"VID\", \"TRACKINGNET\"]\n        if name == \"LASOT\":\n            if settings.use_lmdb:\n                print(\"Building lasot dataset from lmdb\")\n                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))\n            else:\n                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))\n        if name == \"GOT10K_vottrain\":\n            if settings.use_lmdb:\n                print(\"Building got10k from lmdb\")\n                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))\n            else:\n                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))\n        if name == \"GOT10K_train_full\":\n            if settings.use_lmdb:\n                print(\"Building got10k_train_full from lmdb\")\n                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader))\n            else:\n                datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader))\n        if name == \"GOT10K_votval\":\n            if settings.use_lmdb:\n                print(\"Building got10k from lmdb\")\n                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader))\n            else:\n                datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader))\n        if name == \"GOT10K_official_val\":\n            if settings.use_lmdb:\n                raise ValueError(\"Not implement\")\n            else:\n                datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader))\n        if name == \"COCO17\":\n            if 
settings.use_lmdb:\n                print(\"Building COCO2017 from lmdb\")\n                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version=\"2017\", image_loader=image_loader))\n            else:\n                datasets.append(MSCOCOSeq(settings.env.coco_dir, version=\"2017\", image_loader=image_loader))\n        if name == \"VID\":\n            if settings.use_lmdb:\n                print(\"Building VID from lmdb\")\n                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader))\n            else:\n                datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader))\n        if name == \"TRACKINGNET\":\n            if settings.use_lmdb:\n                print(\"Building TrackingNet from lmdb\")\n                datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader))\n            else:\n                # raise ValueError(\"NOW WE CAN ONLY USE TRACKINGNET FROM LMDB\")\n                datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader))\n    return datasets\n\n\ndef build_dataloaders(cfg, settings):\n    # Data transform\n    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05),\n                                    tfm.RandomHorizontalFlip(probability=0.5))\n\n    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),\n                                    tfm.RandomHorizontalFlip_Norm(probability=0.5),\n                                    tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))\n\n    transform_val = tfm.Transform(tfm.ToTensor(),\n                                  tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))\n\n    # The tracking pairs processing module\n    output_sz = settings.output_sz\n    search_area_factor = settings.search_area_factor\n\n    data_processing_train = processing.STARKProcessing(search_area_factor=search_area_factor,\n                                                       output_sz=output_sz,\n                                                       center_jitter_factor=settings.center_jitter_factor,\n                                                       scale_jitter_factor=settings.scale_jitter_factor,\n                                                       mode='sequence',\n                                                       transform=transform_train,\n                                                       joint_transform=transform_joint,\n                                                       settings=settings)\n\n    data_processing_val = processing.STARKProcessing(search_area_factor=search_area_factor,\n                                                     output_sz=output_sz,\n                                                     center_jitter_factor=settings.center_jitter_factor,\n                                                     scale_jitter_factor=settings.scale_jitter_factor,\n                                                     mode='sequence',\n                                                     transform=transform_val,\n                                                     joint_transform=transform_joint,\n                                                     settings=settings)\n\n    # Train sampler and loader\n    settings.num_template = getattr(cfg.DATA.TEMPLATE, \"NUMBER\", 1)\n    settings.num_search = getattr(cfg.DATA.SEARCH, \"NUMBER\", 1)\n    sampler_mode = getattr(cfg.DATA, \"SAMPLER_MODE\", \"causal\")\n    train_cls = getattr(cfg.TRAIN, 
\"TRAIN_CLS\", False)\n    print(\"sampler_mode\", sampler_mode)\n    dataset_train = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader),\n                                            p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO,\n                                            samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH,\n                                            max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL, num_search_frames=settings.num_search,\n                                            num_template_frames=settings.num_template, processing=data_processing_train,\n                                            frame_sample_mode=sampler_mode, train_cls=train_cls)\n\n    train_sampler = DistributedSampler(dataset_train) if settings.local_rank != -1 else None\n    shuffle = False if settings.local_rank != -1 else True\n\n    loader_train = LTRLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=shuffle,\n                             num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1, sampler=train_sampler)\n\n    # Validation samplers and loaders\n    dataset_val = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.VAL.DATASETS_NAME, settings, opencv_loader),\n                                          p_datasets=cfg.DATA.VAL.DATASETS_RATIO,\n                                          samples_per_epoch=cfg.DATA.VAL.SAMPLE_PER_EPOCH,\n                                          max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL, num_search_frames=settings.num_search,\n                                          num_template_frames=settings.num_template, processing=data_processing_val,\n                                          frame_sample_mode=sampler_mode, train_cls=train_cls)\n    val_sampler = DistributedSampler(dataset_val) if settings.local_rank != -1 else None\n    loader_val = LTRLoader('val', dataset_val, training=False, batch_size=cfg.TRAIN.BATCH_SIZE,\n                           num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1, sampler=val_sampler,\n                           epoch_interval=cfg.TRAIN.VAL_EPOCH_INTERVAL)\n\n    return loader_train, loader_val\n\n\ndef get_optimizer_scheduler(net, cfg):\n    train_cls = getattr(cfg.TRAIN, \"TRAIN_CLS\", False)\n    if train_cls:\n        print(\"Only training classification head. 
Learnable parameters are shown below.\")\n        param_dicts = [\n            {\"params\": [p for n, p in net.named_parameters() if \"cls\" in n and p.requires_grad]}\n        ]\n\n        for n, p in net.named_parameters():\n            if \"cls\" not in n:\n                p.requires_grad = False\n            else:\n                print(n)\n    else:\n        param_dicts = [\n            {\"params\": [p for n, p in net.named_parameters() if \"backbone\" not in n and p.requires_grad]},\n            {\n                \"params\": [p for n, p in net.named_parameters() if \"backbone\" in n and p.requires_grad],\n                \"lr\": cfg.TRAIN.LR * cfg.TRAIN.BACKBONE_MULTIPLIER,\n            },\n        ]\n        if is_main_process():\n            print(\"Learnable parameters are shown below.\")\n            for n, p in net.named_parameters():\n                if p.requires_grad:\n                    print(n)\n\n    if cfg.TRAIN.OPTIMIZER == \"ADAMW\":\n        optimizer = torch.optim.AdamW(param_dicts, lr=cfg.TRAIN.LR,\n                                      weight_decay=cfg.TRAIN.WEIGHT_DECAY)\n    else:\n        raise ValueError(\"Unsupported Optimizer\")\n    if cfg.TRAIN.SCHEDULER.TYPE == 'step':\n        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, cfg.TRAIN.LR_DROP_EPOCH)\n    elif cfg.TRAIN.SCHEDULER.TYPE == \"Mstep\":\n        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n                                                            milestones=cfg.TRAIN.SCHEDULER.MILESTONES,\n                                                            gamma=cfg.TRAIN.SCHEDULER.GAMMA)\n    else:\n        raise ValueError(\"Unsupported scheduler\")\n    return optimizer, lr_scheduler\n\ndef get_optimizer_scheduler_v2(net, cfg):\n    train_cls = getattr(cfg.TRAIN, \"TRAIN_CLS\", False)\n    if train_cls:\n        print(\"Only training classification head. 
Learnable parameters are shown below.\")\n        param_dicts = [\n            {\"params\": [p for n, p in net.named_parameters() if \"cls\" in n and p.requires_grad]}\n        ]\n\n        for n, p in net.named_parameters():\n            if \"cls\" not in n:\n                p.requires_grad = False\n            else:\n                print(n)\n    else:\n        param_dicts = [\n            {\"params\": [p for n, p in net.named_parameters() if \"backbone\" not in n and p.requires_grad ]},\n            {\n                \"params\": [p for n, p in net.named_parameters() if \"backbone\" in n and p.requires_grad and \"output_bias\" not in n and \"embeddings\" not in n and \"extension\" not in n],\n                \"lr\": cfg.TRAIN.LR * cfg.TRAIN.BACKBONE_MULTIPLIER,\n            },\n            {\n                \"params\": [p for n, p in net.named_parameters() if \"backbone\" in n and p.requires_grad and (\"output_bias\" in n or \"embeddings\" in n or \"extension\" in n)],\n                \"lr\": cfg.TRAIN.LR,\n            },\n        ]\n        if is_main_process():\n            print(\"Learnable parameters are shown below.\")\n            for n, p in net.named_parameters():\n                if p.requires_grad:\n                    print(n)\n\n    if cfg.TRAIN.OPTIMIZER == \"ADAMW\":\n        optimizer = torch.optim.AdamW(param_dicts, lr=cfg.TRAIN.LR,\n                                      weight_decay=cfg.TRAIN.WEIGHT_DECAY)\n    else:\n        raise ValueError(\"Unsupported Optimizer\")\n    if cfg.TRAIN.SCHEDULER.TYPE == 'step':\n        lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, cfg.TRAIN.LR_DROP_EPOCH)\n    elif cfg.TRAIN.SCHEDULER.TYPE == \"Mstep\":\n        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n                                                            milestones=cfg.TRAIN.SCHEDULER.MILESTONES,\n                                                            gamma=cfg.TRAIN.SCHEDULER.GAMMA)\n    else:\n        raise ValueError(\"Unsupported scheduler\")\n    return optimizer, lr_scheduler"
  },
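  {
    "path": "docs/examples/optimizer_groups_sketch.py",
    "content": "# Illustrative sketch, not a file from the original repo: the two-group\n# optimizer layout that get_optimizer_scheduler builds, reproduced on a toy\n# model. The lr / multiplier / milestone values are assumptions standing in\n# for cfg.TRAIN.LR, cfg.TRAIN.BACKBONE_MULTIPLIER and cfg.TRAIN.SCHEDULER.\nimport torch\nimport torch.nn as nn\n\nnet = nn.ModuleDict({'backbone': nn.Linear(8, 8), 'head': nn.Linear(8, 4)})\nlr, backbone_multiplier = 1e-4, 0.1\n\nparam_dicts = [\n    {'params': [p for n, p in net.named_parameters() if 'backbone' not in n]},\n    {'params': [p for n, p in net.named_parameters() if 'backbone' in n],\n     'lr': lr * backbone_multiplier},  # backbone trains 10x slower than the head\n]\noptimizer = torch.optim.AdamW(param_dicts, lr=lr, weight_decay=1e-4)\nscheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[40], gamma=0.1)\nprint([g['lr'] for g in optimizer.param_groups])  # [0.0001, 1e-05]\n"
  },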
  {
    "path": "lib/train/data/__init__.py",
    "content": "from .loader import LTRLoader\nfrom .image_loader import jpeg4py_loader, opencv_loader, jpeg4py_loader_w_failsafe, default_image_loader\n"
  },
  {
    "path": "lib/train/data/bounding_box_utils.py",
    "content": "import torch\nimport numpy as np\n\ndef batch_center2corner(boxes):\n    xmin = boxes[:, 0] - boxes[:, 2] * 0.5\n    ymin = boxes[:, 1] - boxes[:, 3] * 0.5\n    xmax = boxes[:, 0] + boxes[:, 2] * 0.5\n    ymax = boxes[:, 1] + boxes[:, 3] * 0.5\n\n    if isinstance(boxes, np.ndarray):\n        return np.stack([xmin, ymin, xmax, ymax], 1)\n    else:\n        return torch.stack([xmin, ymin, xmax, ymax], 1)\n\ndef batch_corner2center(boxes):\n    cx = (boxes[:, 0] + boxes[:, 2]) * 0.5\n    cy = (boxes[:, 1] + boxes[:, 3]) * 0.5\n    w = (boxes[:, 2] - boxes[:, 0])\n    h = (boxes[:, 3] - boxes[:, 1])\n\n    if isinstance(boxes, np.ndarray):\n        return np.stack([cx, cy, w, h], 1)\n    else:\n        return torch.stack([cx, cy, w, h], 1)\n\ndef batch_xywh2center(boxes):\n    cx = boxes[:, 0] + (boxes[:, 2] - 1) / 2\n    cy = boxes[:, 1] + (boxes[:, 3] - 1) / 2\n    w = boxes[:, 2]\n    h = boxes[:, 3]\n\n    if isinstance(boxes, np.ndarray):\n        return np.stack([cx, cy, w, h], 1)\n    else:\n        return torch.stack([cx, cy, w, h], 1)\n\ndef batch_xywh2center2(boxes):\n    cx = boxes[:, 0] + boxes[:, 2] / 2\n    cy = boxes[:, 1] + boxes[:, 3] / 2\n    w = boxes[:, 2]\n    h = boxes[:, 3]\n\n    if isinstance(boxes, np.ndarray):\n        return np.stack([cx, cy, w, h], 1)\n    else:\n        return torch.stack([cx, cy, w, h], 1)\n\n\ndef batch_xywh2corner(boxes):\n    xmin = boxes[:, 0]\n    ymin = boxes[:, 1]\n    xmax = boxes[:, 0] + boxes[:, 2]\n    ymax = boxes[:, 1] + boxes[:, 3]\n\n    if isinstance(boxes, np.ndarray):\n        return np.stack([xmin, ymin, xmax, ymax], 1)\n    else:\n        return torch.stack([xmin, ymin, xmax, ymax], 1)\n\ndef rect_to_rel(bb, sz_norm=None):\n    \"\"\"Convert standard rectangular parametrization of the bounding box [x, y, w, h]\n    to relative parametrization [cx/sw, cy/sh, log(w), log(h)], where [cx, cy] is the center coordinate.\n    args:\n        bb  -  N x 4 tensor of boxes.\n        sz_norm  -  [N] x 2 tensor of value of [sw, sh] (optional). sw=w and sh=h if not given.\n    \"\"\"\n\n    c = bb[...,:2] + 0.5 * bb[...,2:]\n    if sz_norm is None:\n        c_rel = c / bb[...,2:]\n    else:\n        c_rel = c / sz_norm\n    sz_rel = torch.log(bb[...,2:])\n    return torch.cat((c_rel, sz_rel), dim=-1)\n\n\ndef rel_to_rect(bb, sz_norm=None):\n    \"\"\"Inverts the effect of rect_to_rel. See above.\"\"\"\n\n    sz = torch.exp(bb[...,2:])\n    if sz_norm is None:\n        c = bb[...,:2] * sz\n    else:\n        c = bb[...,:2] * sz_norm\n    tl = c - 0.5 * sz\n    return torch.cat((tl, sz), dim=-1)\n\n\ndef masks_to_bboxes(mask, fmt='c'):\n\n    \"\"\" Convert a mask tensor to one or more bounding boxes.\n    Note: This function is a bit new, make sure it does what it says.  /Andreas\n    :param mask: Tensor of masks, shape = (..., H, W)\n    :param fmt: bbox layout. 
'c' => \"center + size\" or (x_center, y_center, width, height)\n                             't' => \"top left + size\" or (x_left, y_top, width, height)\n                             'v' => \"vertices\" or (x_left, y_top, x_right, y_bottom)\n    :return: tensor containing a batch of bounding boxes, shape = (..., 4)\n    \"\"\"\n    batch_shape = mask.shape[:-2]\n    mask = mask.reshape((-1, *mask.shape[-2:]))\n    bboxes = []\n\n    for m in mask:\n        mx = m.sum(dim=-2).nonzero()\n        my = m.sum(dim=-1).nonzero()\n        bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0]\n        bboxes.append(bb)\n\n    bboxes = torch.tensor(bboxes, dtype=torch.float32, device=mask.device)\n    bboxes = bboxes.reshape(batch_shape + (4,))\n\n    if fmt == 'v':\n        return bboxes\n\n    x1 = bboxes[..., :2]\n    s = bboxes[..., 2:] - x1 + 1\n\n    if fmt == 'c':\n        return torch.cat((x1 + 0.5 * s, s), dim=-1)\n    elif fmt == 't':\n        return torch.cat((x1, s), dim=-1)\n\n    raise ValueError(\"Undefined bounding box layout '%s'\" % fmt)\n\n\ndef masks_to_bboxes_multi(mask, ids, fmt='c'):\n    assert mask.dim() == 2\n    bboxes = []\n\n    for id in ids:\n        mx = (mask == id).sum(dim=-2).nonzero()\n        my = (mask == id).float().sum(dim=-1).nonzero()\n        bb = [mx.min(), my.min(), mx.max(), my.max()] if (len(mx) > 0 and len(my) > 0) else [0, 0, 0, 0]\n\n        bb = torch.tensor(bb, dtype=torch.float32, device=mask.device)\n\n        x1 = bb[:2]\n        s = bb[2:] - x1 + 1\n\n        if fmt == 'v':\n            pass\n        elif fmt == 'c':\n            bb = torch.cat((x1 + 0.5 * s, s), dim=-1)\n        elif fmt == 't':\n            bb = torch.cat((x1, s), dim=-1)\n        else:\n            raise ValueError(\"Undefined bounding box layout '%s'\" % fmt)\n        bboxes.append(bb)\n\n    return bboxes\n"
  },
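  {
    "path": "docs/examples/bbox_utils_sketch.py",
    "content": "# Illustrative sketch, not a file from the original repo: round-tripping a\n# box through rect_to_rel / rel_to_rect from bounding_box_utils.py above.\nimport torch\nfrom lib.train.data.bounding_box_utils import rect_to_rel, rel_to_rect\n\nbb = torch.tensor([[10.0, 20.0, 40.0, 30.0]])  # [x, y, w, h]\nrel = rect_to_rel(bb)    # [cx/w, cy/h, log(w), log(h)]\nback = rel_to_rect(rel)  # inverse mapping\nprint(torch.allclose(bb, back))  # True, up to float error\n"
  },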
  {
    "path": "lib/train/data/image_loader.py",
    "content": "import jpeg4py\nimport cv2 as cv\nfrom PIL import Image\nimport numpy as np\n\ndavis_palette = np.repeat(np.expand_dims(np.arange(0,256), 1), 3, 1).astype(np.uint8)\ndavis_palette[:22, :] = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],\n                         [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],\n                         [64, 0, 0], [191, 0, 0], [64, 128, 0], [191, 128, 0],\n                         [64, 0, 128], [191, 0, 128], [64, 128, 128], [191, 128, 128],\n                         [0, 64, 0], [128, 64, 0], [0, 191, 0], [128, 191, 0],\n                         [0, 64, 128], [128, 64, 128]]\n\n\ndef default_image_loader(path):\n    \"\"\"The default image loader, reads the image from the given path. It first tries to use the jpeg4py_loader,\n    but reverts to the opencv_loader if the former is not available.\"\"\"\n    if default_image_loader.use_jpeg4py is None:\n        # Try using jpeg4py\n        im = jpeg4py_loader(path)\n        if im is None:\n            default_image_loader.use_jpeg4py = False\n            print('Using opencv_loader instead.')\n        else:\n            default_image_loader.use_jpeg4py = True\n            return im\n    if default_image_loader.use_jpeg4py:\n        return jpeg4py_loader(path)\n    return opencv_loader(path)\n\ndefault_image_loader.use_jpeg4py = None\n\n\ndef jpeg4py_loader(path):\n    \"\"\" Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py\"\"\"\n    try:\n        return jpeg4py.JPEG(path).decode()\n    except Exception as e:\n        print('ERROR: Could not read image \"{}\"'.format(path))\n        print(e)\n        return None\n\n\ndef opencv_loader(path):\n    \"\"\" Read image using opencv's imread function and returns it in rgb format\"\"\"\n    try:\n        im = cv.imread(path, cv.IMREAD_COLOR)\n\n        # convert to rgb and return\n        return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n    except Exception as e:\n        print('ERROR: Could not read image \"{}\"'.format(path))\n        print(e)\n        return None\n\n\ndef jpeg4py_loader_w_failsafe(path):\n    \"\"\" Image reading using jpeg4py https://github.com/ajkxyz/jpeg4py\"\"\"\n    try:\n        return jpeg4py.JPEG(path).decode()\n    except:\n        try:\n            im = cv.imread(path, cv.IMREAD_COLOR)\n\n            # convert to rgb and return\n            return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n        except Exception as e:\n            print('ERROR: Could not read image \"{}\"'.format(path))\n            print(e)\n            return None\n\n\ndef opencv_seg_loader(path):\n    \"\"\" Read segmentation annotation using opencv's imread function\"\"\"\n    try:\n        return cv.imread(path)\n    except Exception as e:\n        print('ERROR: Could not read image \"{}\"'.format(path))\n        print(e)\n        return None\n\n\ndef imread_indexed(filename):\n    \"\"\" Load indexed image with given filename. Used to read segmentation annotations.\"\"\"\n\n    im = Image.open(filename)\n\n    annotation = np.atleast_3d(im)[...,0]\n    return annotation\n\n\ndef imwrite_indexed(filename, array, color_palette=None):\n    \"\"\" Save indexed image as png. Used to save segmentation annotation.\"\"\"\n\n    if color_palette is None:\n        color_palette = davis_palette\n\n    if np.atleast_3d(array).shape[2] != 1:\n        raise Exception(\"Saving indexed PNGs requires 2D array.\")\n\n    im = Image.fromarray(array)\n    im.putpalette(color_palette.ravel())\n    im.save(filename, format='PNG')"
  },
  {
    "path": "lib/train/data/loader.py",
    "content": "import torch\nimport torch.utils.data.dataloader\nimport importlib\nimport collections\nfrom torch._six import string_classes\nfrom lib.utils import TensorDict, TensorList\n\nif float(torch.__version__[:3]) >= 1.9 or len('.'.join((torch.__version__).split('.')[0:2])) > 3:\n    int_classes = int\nelse:\n    from torch._six import int_classes\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef _check_use_shared_memory():\n    if hasattr(torch.utils.data.dataloader, '_use_shared_memory'):\n        return getattr(torch.utils.data.dataloader, '_use_shared_memory')\n    collate_lib = importlib.import_module('torch.utils.data._utils.collate')\n    if hasattr(collate_lib, '_use_shared_memory'):\n        return getattr(collate_lib, '_use_shared_memory')\n    return torch.utils.data.get_worker_info() is not None\n\n\ndef ltr_collate(batch):\n    \"\"\"Puts each data field into a tensor with outer dimension batch size\"\"\"\n\n    error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n    elem_type = type(batch[0])\n    if isinstance(batch[0], torch.Tensor):\n        out = None\n        if _check_use_shared_memory():\n            # If we're in a background process, concatenate directly into a\n            # shared memory tensor to avoid an extra copy\n            numel = sum([x.numel() for x in batch])\n            storage = batch[0].storage()._new_shared(numel)\n            out = batch[0].new(storage)\n        return torch.stack(batch, 0, out=out)\n        # if batch[0].dim() < 4:\n        #     return torch.stack(batch, 0, out=out)\n        # return torch.cat(batch, 0, out=out)\n    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n            and elem_type.__name__ != 'string_':\n        elem = batch[0]\n        if elem_type.__name__ == 'ndarray':\n            # array of string classes and object\n            if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None:\n                raise TypeError(error_msg.format(elem.dtype))\n\n            return torch.stack([torch.from_numpy(b) for b in batch], 0)\n        if elem.shape == ():  # scalars\n            py_type = float if elem.dtype.name.startswith('float') else int\n            return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n    elif isinstance(batch[0], int_classes):\n        return torch.LongTensor(batch)\n    elif isinstance(batch[0], float):\n        return torch.DoubleTensor(batch)\n    elif isinstance(batch[0], string_classes):\n        return batch\n    elif isinstance(batch[0], TensorDict):\n        return TensorDict({key: ltr_collate([d[key] for d in batch]) for key in batch[0]})\n    elif isinstance(batch[0], collections.Mapping):\n        return {key: ltr_collate([d[key] for d in batch]) for key in batch[0]}\n    elif isinstance(batch[0], TensorList):\n        transposed = zip(*batch)\n        return TensorList([ltr_collate(samples) for samples in transposed])\n    elif isinstance(batch[0], collections.Sequence):\n        transposed = zip(*batch)\n        return [ltr_collate(samples) for samples in transposed]\n    elif batch[0] is None:\n        return batch\n\n    raise TypeError((error_msg.format(type(batch[0]))))\n\n\ndef ltr_collate_stack1(batch):\n    \"\"\"Puts each data field into a tensor. 
The tensors are stacked at dim=1 to form the batch\"\"\"\n\n    error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n    elem_type = type(batch[0])\n    if isinstance(batch[0], torch.Tensor):\n        out = None\n        if _check_use_shared_memory():\n            # If we're in a background process, concatenate directly into a\n            # shared memory tensor to avoid an extra copy\n            numel = sum([x.numel() for x in batch])\n            storage = batch[0].storage()._new_shared(numel)\n            out = batch[0].new(storage)\n        return torch.stack(batch, 1, out=out)\n        # if batch[0].dim() < 4:\n        #     return torch.stack(batch, 0, out=out)\n        # return torch.cat(batch, 0, out=out)\n    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n            and elem_type.__name__ != 'string_':\n        elem = batch[0]\n        if elem_type.__name__ == 'ndarray':\n            # array of string classes and object\n            if torch.utils.data.dataloader.re.search('[SaUO]', elem.dtype.str) is not None:\n                raise TypeError(error_msg.format(elem.dtype))\n\n            return torch.stack([torch.from_numpy(b) for b in batch], 1)\n        if elem.shape == ():  # scalars\n            py_type = float if elem.dtype.name.startswith('float') else int\n            return torch.utils.data.dataloader.numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n    elif isinstance(batch[0], int_classes):\n        return torch.LongTensor(batch)\n    elif isinstance(batch[0], float):\n        return torch.DoubleTensor(batch)\n    elif isinstance(batch[0], string_classes):\n        return batch\n    elif isinstance(batch[0], TensorDict):\n        return TensorDict({key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]})\n    elif isinstance(batch[0], collections.Mapping):\n        return {key: ltr_collate_stack1([d[key] for d in batch]) for key in batch[0]}\n    elif isinstance(batch[0], TensorList):\n        transposed = zip(*batch)\n        return TensorList([ltr_collate_stack1(samples) for samples in transposed])\n    elif isinstance(batch[0], collections.Sequence):\n        transposed = zip(*batch)\n        return [ltr_collate_stack1(samples) for samples in transposed]\n    elif batch[0] is None:\n        return batch\n\n    raise TypeError((error_msg.format(type(batch[0]))))\n\n\nclass LTRLoader(torch.utils.data.dataloader.DataLoader):\n    \"\"\"\n    Data loader. Combines a dataset and a sampler, and provides\n    single- or multi-process iterators over the dataset.\n\n    Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\n            select along which dimension the data should be stacked to form a batch.\n\n    Arguments:\n        dataset (Dataset): dataset from which to load the data.\n        batch_size (int, optional): how many samples per batch to load\n            (default: 1).\n        shuffle (bool, optional): set to ``True`` to have the data reshuffled\n            at every epoch (default: False).\n        sampler (Sampler, optional): defines the strategy to draw samples from\n            the dataset. If specified, ``shuffle`` must be False.\n        batch_sampler (Sampler, optional): like sampler, but returns a batch of\n            indices at a time. Mutually exclusive with batch_size, shuffle,\n            sampler, and drop_last.\n        num_workers (int, optional): how many subprocesses to use for data\n            loading. 
0 means that the data will be loaded in the main process.\n            (default: 0)\n        collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n        stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\n        pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n            into CUDA pinned memory before returning them.\n        drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n            if the dataset size is not divisible by the batch size. If ``False`` and\n            the size of dataset is not divisible by the batch size, then the last batch\n            will be smaller. (default: False)\n        timeout (numeric, optional): if positive, the timeout value for collecting a batch\n            from workers. Should always be non-negative. (default: 0)\n        worker_init_fn (callable, optional): If not None, this will be called on each\n            worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n            input, after seeding and before data loading. (default: None)\n\n    .. note:: By default, each worker will have its PyTorch seed set to\n              ``base_seed + worker_id``, where ``base_seed`` is a long generated\n              by main process using its RNG. However, seeds for other libraries\n              may be duplicated upon initializing workers (e.g., NumPy), causing\n              each worker to return identical random numbers. (See\n              :ref:`dataloader-workers-random-seed` section in FAQ.) You may\n              use ``torch.initial_seed()`` to access the PyTorch seed for each\n              worker in :attr:`worker_init_fn`, and use it to set other seeds\n              before data loading.\n\n    .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\n                 unpicklable object, e.g., a lambda function.\n    \"\"\"\n\n    __initialized = False\n\n    def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n                 num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\n                 timeout=0, worker_init_fn=None):\n        print(\"pin_memory is\", pin_memory)\n        if collate_fn is None:\n            if stack_dim == 0:\n                collate_fn = ltr_collate\n            elif stack_dim == 1:\n                collate_fn = ltr_collate_stack1\n            else:\n                raise ValueError('Stack dim not supported. Must be 0 or 1.')\n\n        super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n                 num_workers, collate_fn, pin_memory, drop_last,\n                 timeout, worker_init_fn)\n\n        self.name = name\n        self.training = training\n        self.epoch_interval = epoch_interval\n        self.stack_dim = stack_dim\n"
  },
  {
    "path": "lib/train/data/processing.py",
    "content": "import torch\nimport torchvision.transforms as transforms\nfrom lib.utils import TensorDict\nimport lib.train.data.processing_utils as prutils\nimport torch.nn.functional as F\n\n\ndef stack_tensors(x):\n    if isinstance(x, (list, tuple)) and isinstance(x[0], torch.Tensor):\n        return torch.stack(x)\n    return x\n\n\nclass BaseProcessing:\n    \"\"\" Base class for Processing. Processing class is used to process the data returned by a dataset, before passing it\n     through the network. For example, it can be used to crop a search region around the object, apply various data\n     augmentations, etc.\"\"\"\n    def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None, joint_transform=None):\n        \"\"\"\n        args:\n            transform       - The set of transformations to be applied on the images. Used only if template_transform or\n                                search_transform is None.\n            template_transform - The set of transformations to be applied on the template images. If None, the 'transform'\n                                argument is used instead.\n            search_transform  - The set of transformations to be applied on the search images. If None, the 'transform'\n                                argument is used instead.\n            joint_transform - The set of transformations to be applied 'jointly' on the template and search images.  For\n                                example, it can be used to convert both template and search images to grayscale.\n        \"\"\"\n        self.transform = {'template': transform if template_transform is None else template_transform,\n                          'search':  transform if search_transform is None else search_transform,\n                          'joint': joint_transform}\n\n    def __call__(self, data: TensorDict):\n        raise NotImplementedError\n\n\nclass STARKProcessing(BaseProcessing):\n    \"\"\" The processing class used for training LittleBoy. The images are processed in the following way.\n    First, the target bounding box is jittered by adding some noise. Next, a square region (called search region )\n    centered at the jittered target center, and of area search_area_factor^2 times the area of the jittered box is\n    cropped from the image. The reason for jittering the target box is to avoid learning the bias that the target is\n    always at the center of the search region. The search region is then resized to a fixed size given by the\n    argument output_sz.\n\n    \"\"\"\n\n    def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,\n                 mode='pair', settings=None, *args, **kwargs):\n        \"\"\"\n        args:\n            search_area_factor - The size of the search region  relative to the target size.\n            output_sz - An integer, denoting the size to which the search region is resized. The search region is always\n                        square.\n            center_jitter_factor - A dict containing the amount of jittering to be applied to the target center before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            scale_jitter_factor - A dict containing the amount of jittering to be applied to the target size before\n                                    extracting the search region. See _get_jittered_box for how the jittering is done.\n            mode - Either 'pair' or 'sequence'. 
If mode='sequence', then output has an extra dimension for frames\n        \"\"\"\n        super().__init__(*args, **kwargs)\n        self.search_area_factor = search_area_factor\n        self.output_sz = output_sz\n        self.center_jitter_factor = center_jitter_factor\n        self.scale_jitter_factor = scale_jitter_factor\n        self.mode = mode\n        self.settings = settings\n\n    def _get_jittered_box(self, box, mode):\n        \"\"\" Jitter the input box\n        args:\n            box - input bounding box\n            mode - string 'template' or 'search' indicating template or search data\n\n        returns:\n            torch.Tensor - jittered box\n        \"\"\"\n\n        jittered_size = box[2:4] * torch.exp(torch.randn(2) * self.scale_jitter_factor[mode])\n        max_offset = (jittered_size.prod().sqrt() * torch.tensor(self.center_jitter_factor[mode]).float())\n        jittered_center = box[0:2] + 0.5 * box[2:4] + max_offset * (torch.rand(2) - 0.5)\n\n        return torch.cat((jittered_center - 0.5 * jittered_size, jittered_size), dim=0)\n\n    def __call__(self, data: TensorDict):\n        \"\"\"\n        args:\n            data - The input data, should contain the following fields:\n                'template_images', search_images', 'template_anno', 'search_anno'\n        returns:\n            TensorDict - output data block with following fields:\n                'template_images', 'search_images', 'template_anno', 'search_anno', 'test_proposals', 'proposal_iou'\n        \"\"\"\n        # Apply joint transforms\n        if self.transform['joint'] is not None:\n            data['template_images'], data['template_anno'], data['template_masks'] = self.transform['joint'](\n                image=data['template_images'], bbox=data['template_anno'], mask=data['template_masks'])\n            data['search_images'], data['search_anno'], data['search_masks'] = self.transform['joint'](\n                image=data['search_images'], bbox=data['search_anno'], mask=data['search_masks'], new_roll=False)\n\n        data[\"target_in_search_images\"] = data[\"search_images\"]\n        data[\"target_in_search_anno\"] = data[\"search_anno\"]\n        data[\"target_in_search_masks\"] = data[\"search_masks\"]\n        self.scale_jitter_factor[\"target_in_search\"] = self.scale_jitter_factor[\"template\"]\n        self.center_jitter_factor[\"target_in_search\"] = self.center_jitter_factor[\"template\"]\n        self.search_area_factor[\"target_in_search\"] = self.search_area_factor[\"template\"]\n        self.output_sz[\"target_in_search\"] = self.output_sz[\"template\"]\n        self.transform[\"target_in_search\"] = self.transform[\"search\"]\n\n        for s in ['template', 'search', 'target_in_search']:\n            assert self.mode == 'sequence' or len(data[s + '_images']) == 1, \\\n                \"In pair mode, num train/test frames must be 1\"\n\n            # Add a uniform noise to the center pos\n            jittered_anno = [self._get_jittered_box(a, s) for a in data[s + '_anno']]\n\n            # 2021.1.9 Check whether data is valid. Avoid too small bounding boxes\n            w, h = torch.stack(jittered_anno, dim=0)[:, 2], torch.stack(jittered_anno, dim=0)[:, 3]\n\n            crop_sz = torch.ceil(torch.sqrt(w * h) * self.search_area_factor[s])\n            if (crop_sz < 1).any():\n                data['valid'] = False\n                # print(\"Too small box is found. 
Replace it with new data.\")\n                return data\n\n            # Crop image region centered at jittered_anno box and get the attention mask\n            crops, boxes, att_mask, mask_crops = prutils.jittered_center_crop(data[s + '_images'], jittered_anno,\n                                                                              data[s + '_anno'], self.search_area_factor[s],\n                                                                              self.output_sz[s], masks=data[s + '_masks'])\n            # Apply transforms\n            data[s + '_images'], data[s + '_anno'], data[s + '_att'], data[s + '_masks'] = self.transform[s](\n                image=crops, bbox=boxes, att=att_mask, mask=mask_crops, joint=False)\n\n\n            # 2021.1.9 Check whether elements in data[s + '_att'] is all 1\n            # Note that type of data[s + '_att'] is tuple, type of ele is torch.tensor\n            for ele in data[s + '_att']:\n                if (ele == 1).all():\n                    data['valid'] = False\n                    # print(\"Values of original attention mask are all one. Replace it with new data.\")\n                    return data\n            # 2021.1.10 more strict conditions: require the donwsampled masks not to be all 1\n            for ele in data[s + '_att']:\n                feat_size = self.output_sz[s] // 16  # 16 is the backbone stride\n                # (1,1,128,128) (1,1,256,256) --> (1,1,8,8) (1,1,16,16)\n                mask_down = F.interpolate(ele[None, None].float(), size=feat_size).to(torch.bool)[0]\n                if (mask_down == 1).all():\n                    data['valid'] = False\n                    # print(\"Values of down-sampled attention mask are all one. \"\n                    #       \"Replace it with new data.\")\n                    return data\n\n        data['valid'] = True\n        # if we use copy-and-paste augmentation\n        if data[\"template_masks\"] is None or data[\"search_masks\"] is None:\n            data[\"template_masks\"] = torch.zeros((1, self.output_sz[\"template\"], self.output_sz[\"template\"]))\n            data[\"search_masks\"] = torch.zeros((1, self.output_sz[\"search\"], self.output_sz[\"search\"]))\n        # Prepare output\n        if self.mode == 'sequence':\n            data = data.apply(stack_tensors)\n        else:\n            data = data.apply(lambda x: x[0] if isinstance(x, list) else x)\n\n        return data\n"
  },
  {
    "path": "lib/train/data/processing_utils.py",
    "content": "import torch\nimport math\nimport cv2 as cv\nimport torch.nn.functional as F\nimport numpy as np\n\n'''modified from the original test implementation\nReplace cv.BORDER_REPLICATE with cv.BORDER_CONSTANT\nAdd a variable called att_mask for computing attention and positional encoding later'''\n\n\ndef sample_target(im, target_bb, search_area_factor, output_sz=None, mask=None):\n    \"\"\" Extracts a square crop centered at target_bb box, of area search_area_factor^2 times target_bb area\n\n    args:\n        im - cv image\n        target_bb - target box [x, y, w, h]\n        search_area_factor - Ratio of crop size to target size\n        output_sz - (float) Size to which the extracted crop is resized (always square). If None, no resizing is done.\n\n    returns:\n        cv image - extracted crop\n        float - the factor by which the crop has been resized to make the crop size equal output_size\n    \"\"\"\n    if not isinstance(target_bb, list):\n        x, y, w, h = target_bb.tolist()\n    else:\n        x, y, w, h = target_bb\n    # Crop image\n    crop_sz = math.ceil(math.sqrt(w * h) * search_area_factor)\n\n    if crop_sz < 1:\n        raise Exception('Too small bounding box.')\n\n    x1 = round(x + 0.5 * w - crop_sz * 0.5)\n    x2 = x1 + crop_sz\n\n    y1 = round(y + 0.5 * h - crop_sz * 0.5)\n    y2 = y1 + crop_sz\n\n    x1_pad = max(0, -x1)\n    x2_pad = max(x2 - im.shape[1] + 1, 0)\n\n    y1_pad = max(0, -y1)\n    y2_pad = max(y2 - im.shape[0] + 1, 0)\n\n    # Crop target\n    im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]\n    if mask is not None:\n        mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]\n\n    # Pad\n    im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_CONSTANT)\n    # deal with attention mask\n    H, W, _ = im_crop_padded.shape\n    att_mask = np.ones((H,W))\n    end_x, end_y = -x2_pad, -y2_pad\n    if y2_pad == 0:\n        end_y = None\n    if x2_pad == 0:\n        end_x = None\n    att_mask[y1_pad:end_y, x1_pad:end_x] = 0\n    if mask is not None:\n        mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)\n\n    if output_sz is not None:\n        resize_factor = output_sz / crop_sz\n        im_crop_padded = cv.resize(im_crop_padded, (output_sz, output_sz))\n        att_mask = cv.resize(att_mask, (output_sz, output_sz)).astype(np.bool_)\n        if mask is None:\n            return im_crop_padded, resize_factor, att_mask\n        mask_crop_padded = \\\n        F.interpolate(mask_crop_padded[None, None], (output_sz, output_sz), mode='bilinear', align_corners=False)[0, 0]\n        return im_crop_padded, resize_factor, att_mask, mask_crop_padded\n\n    else:\n        if mask is None:\n            return im_crop_padded, att_mask.astype(np.bool_), 1.0\n        return im_crop_padded, 1.0, att_mask.astype(np.bool_), mask_crop_padded\n\n\ndef transform_image_to_crop(box_in: torch.Tensor, box_extract: torch.Tensor, resize_factor: float,\n                            crop_sz: torch.Tensor, normalize=False) -> torch.Tensor:\n    \"\"\" Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image\n    args:\n        box_in - the box for which the co-ordinates are to be transformed\n        box_extract - the box about which the image crop has been extracted.\n        resize_factor - the ratio between the original image scale and the scale of the image crop\n        crop_sz - size of the 
cropped image\n\n    returns:\n        torch.Tensor - transformed co-ordinates of box_in\n    \"\"\"\n    box_extract_center = box_extract[0:2] + 0.5 * box_extract[2:4]\n\n    box_in_center = box_in[0:2] + 0.5 * box_in[2:4]\n\n    box_out_center = (crop_sz - 1) / 2 + (box_in_center - box_extract_center) * resize_factor\n    box_out_wh = box_in[2:4] * resize_factor\n\n    box_out = torch.cat((box_out_center - 0.5 * box_out_wh, box_out_wh))\n    if normalize:\n        return box_out / crop_sz[0]\n    else:\n        return box_out\n\n\ndef jittered_center_crop(frames, box_extract, box_gt, search_area_factor, output_sz, masks=None):\n    \"\"\" For each frame in frames, extracts a square crop centered at box_extract, of area search_area_factor^2\n    times box_extract area. The extracted crops are then resized to output_sz. Further, the co-ordinates of the box\n    box_gt are transformed to the image crop co-ordinates\n\n    args:\n        frames - list of frames\n        box_extract - list of boxes of same length as frames. The crops are extracted using anno_extract\n        box_gt - list of boxes of same length as frames. The co-ordinates of these boxes are transformed from\n                    image co-ordinates to the crop co-ordinates\n        search_area_factor - The area of the extracted crop is search_area_factor^2 times box_extract area\n        output_sz - The size to which the extracted crops are resized\n\n    returns:\n        list - list of image crops\n        list - box_gt location in the crop co-ordinates\n        \"\"\"\n\n    if masks is None:\n        crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz)\n                                for f, a in zip(frames, box_extract)]\n        frames_crop, resize_factors, att_mask = zip(*crops_resize_factors)\n        masks_crop = None\n    else:\n        crops_resize_factors = [sample_target(f, a, search_area_factor, output_sz, m)\n                                for f, a, m in zip(frames, box_extract, masks)]\n        frames_crop, resize_factors, att_mask, masks_crop = zip(*crops_resize_factors)\n    # frames_crop: tuple of ndarray (128,128,3), att_mask: tuple of ndarray (128,128)\n    crop_sz = torch.Tensor([output_sz, output_sz])\n\n    # find the bb location in the crop\n    '''Note that here we use normalized coord'''\n    box_crop = [transform_image_to_crop(a_gt, a_ex, rf, crop_sz, normalize=True)\n                for a_gt, a_ex, rf in zip(box_gt, box_extract, resize_factors)]  # (x1,y1,w,h) list of tensors\n\n    return frames_crop, box_crop, att_mask, masks_crop\n\n\ndef transform_box_to_crop(box: torch.Tensor, crop_box: torch.Tensor, crop_sz: torch.Tensor, normalize=False) -> torch.Tensor:\n    \"\"\" Transform the box co-ordinates from the original image co-ordinates to the co-ordinates of the cropped image\n    args:\n        box - the box for which the co-ordinates are to be transformed\n        crop_box - bounding box defining the crop in the original image\n        crop_sz - size of the cropped image\n\n    returns:\n        torch.Tensor - transformed co-ordinates of box_in\n    \"\"\"\n\n    box_out = box.clone()\n    box_out[:2] -= crop_box[:2]\n\n    scale_factor = crop_sz / crop_box[2:]\n\n    box_out[:2] *= scale_factor\n    box_out[2:] *= scale_factor\n    if normalize:\n        return box_out / crop_sz[0]\n    else:\n        return box_out\n\n"
  },
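A minimal usage sketch for sample_target and transform_image_to_crop above. The import path is assumed to be lib.train.data.processing_utils (matching the neighboring files' paths); the frame size, box, and search_area_factor are made-up values for illustration.

import numpy as np
import torch
from lib.train.data.processing_utils import sample_target, transform_image_to_crop  # assumed path

im = np.zeros((240, 320, 3), dtype=np.uint8)   # dummy BGR frame
bb = [100.0, 80.0, 40.0, 30.0]                 # target box [x, y, w, h]
crop, resize_factor, att_mask = sample_target(im, bb, search_area_factor=2.0, output_sz=128)
print(crop.shape, resize_factor)               # (128, 128, 3), output_sz / crop_sz
print(att_mask.dtype)                          # bool; True marks padded pixels

# map the same box into normalized crop coordinates (it lands at the crop center here)
box_crop = transform_image_to_crop(torch.tensor(bb), torch.tensor(bb), resize_factor,
                                   torch.Tensor([128, 128]), normalize=True)
print(box_crop)                                # (x, y, w, h) in [0, 1] relative to the crop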
  {
    "path": "lib/train/data/sampler.py",
    "content": "import random\nimport torch.utils.data\nfrom lib.utils import TensorDict\nimport numpy as np\n\n\ndef no_processing(data):\n    return data\n\n\nclass TrackingSampler(torch.utils.data.Dataset):\n    \"\"\" Class responsible for sampling frames from training sequences to form batches. \n\n    The sampling is done in the following ways. First a dataset is selected at random. Next, a sequence is selected\n    from that dataset. A base frame is then sampled randomly from the sequence. Next, a set of 'train frames' and\n    'test frames' are sampled from the sequence from the range [base_frame_id - max_gap, base_frame_id]  and\n    (base_frame_id, base_frame_id + max_gap] respectively. Only the frames in which the target is visible are sampled.\n    If enough visible frames are not found, the 'max_gap' is increased gradually till enough frames are found.\n\n    The sampled frames are then passed through the input 'processing' function for the necessary processing-\n    \"\"\"\n\n    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n                 num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\n                 train_cls=False, pos_prob=0.5):\n        \"\"\"\n        args:\n            datasets - List of datasets to be used for training\n            p_datasets - List containing the probabilities by which each dataset will be sampled\n            samples_per_epoch - Number of training samples per epoch\n            max_gap - Maximum gap, in frame numbers, between the train frames and the test frames.\n            num_search_frames - Number of search frames to sample.\n            num_template_frames - Number of template frames to sample.\n            processing - An instance of Processing class which performs the necessary processing of the data.\n            frame_sample_mode - Either 'causal' or 'interval'. If 'causal', then the test frames are sampled in a causally,\n                                otherwise randomly within the interval.\n        \"\"\"\n        self.datasets = datasets\n        self.train_cls = train_cls  # whether we are training classification\n        self.pos_prob = pos_prob  # probability of sampling positive class when making classification\n\n        # If p not provided, sample uniformly from all videos\n        if p_datasets is None:\n            p_datasets = [len(d) for d in self.datasets]\n\n        # Normalize\n        p_total = sum(p_datasets)\n        self.p_datasets = [x / p_total for x in p_datasets]\n\n        self.samples_per_epoch = samples_per_epoch\n        self.max_gap = max_gap\n        self.num_search_frames = num_search_frames\n        self.num_template_frames = num_template_frames\n        self.processing = processing\n        self.frame_sample_mode = frame_sample_mode\n\n    def __len__(self):\n        return self.samples_per_epoch\n\n    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,\n                            allow_invisible=False, force_invisible=False):\n        \"\"\" Samples num_ids frames between min_id and max_id for which target is visible\n\n        args:\n            visible - 1d Tensor indicating whether target is visible for each frame\n            num_ids - number of frames to be samples\n            min_id - Minimum allowed frame number\n            max_id - Maximum allowed frame number\n\n        returns:\n            list - List of sampled frame numbers. 
None if not sufficient visible frames could be found.\n        \"\"\"\n        if num_ids == 0:\n            return []\n        if min_id is None or min_id < 0:\n            min_id = 0\n        if max_id is None or max_id > len(visible):\n            max_id = len(visible)\n        # get valid ids\n        if force_invisible:\n            valid_ids = [i for i in range(min_id, max_id) if not visible[i]]\n        else:\n            if allow_invisible:\n                valid_ids = [i for i in range(min_id, max_id)]\n            else:\n                valid_ids = [i for i in range(min_id, max_id) if visible[i]]\n\n        # No visible ids\n        if len(valid_ids) == 0:\n            return None\n\n        return random.choices(valid_ids, k=num_ids)\n\n    def __getitem__(self, index):\n        if self.train_cls:\n            return self.getitem_cls()\n        else:\n            return self.getitem()\n\n    def getitem(self):\n        \"\"\"\n        returns:\n            TensorDict - dict containing all the data blocks\n        \"\"\"\n        valid = False\n        while not valid:\n            # Select a dataset\n            dataset = random.choices(self.datasets, self.p_datasets)[0]\n\n            is_video_dataset = dataset.is_video_sequence()\n\n            # sample a sequence from the given dataset\n            seq_id, visible, seq_info_dict = self.sample_seq_from_dataset(dataset, is_video_dataset)\n\n            if is_video_dataset:\n                template_frame_ids = None\n                search_frame_ids = None\n                gap_increase = 0\n\n                if self.frame_sample_mode == 'causal':\n                    # Sample test and train frames in a causal manner, i.e. search_frame_ids > template_frame_ids\n                    while search_frame_ids is None:\n                        base_frame_id = self._sample_visible_ids(visible, num_ids=1, min_id=self.num_template_frames - 1,\n                                                                 max_id=len(visible) - self.num_search_frames)\n                        prev_frame_ids = self._sample_visible_ids(visible, num_ids=self.num_template_frames - 1,\n                                                                  min_id=base_frame_id[0] - self.max_gap - gap_increase,\n                                                                  max_id=base_frame_id[0])\n                        if prev_frame_ids is None:\n                            gap_increase += 5\n                            continue\n                        template_frame_ids = base_frame_id + prev_frame_ids\n                        search_frame_ids = self._sample_visible_ids(visible, min_id=template_frame_ids[0] + 1,\n                                                                  max_id=template_frame_ids[0] + self.max_gap + gap_increase,\n                                                                  num_ids=self.num_search_frames)\n                        # Increase gap until a frame is found\n                        gap_increase += 5\n\n                elif self.frame_sample_mode == \"trident\" or self.frame_sample_mode == \"trident_pro\":\n                    template_frame_ids, search_frame_ids = self.get_frame_ids_trident(visible)\n                elif self.frame_sample_mode == \"stark\":\n                    template_frame_ids, search_frame_ids = self.get_frame_ids_stark(visible, seq_info_dict[\"valid\"])\n                else:\n                    raise ValueError(\"Illegal frame sample mode\")\n            else:\n                # In case of image 
dataset, just repeat the image to generate synthetic video\n                template_frame_ids = [1] * self.num_template_frames\n                search_frame_ids = [1] * self.num_search_frames\n            try:\n                template_frames, template_anno, meta_obj_train = dataset.get_frames(seq_id, template_frame_ids, seq_info_dict)\n                search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)\n\n                H, W, _ = template_frames[0].shape\n                template_masks = template_anno['mask'] if 'mask' in template_anno else [torch.zeros((H, W))] * self.num_template_frames\n                search_masks = search_anno['mask'] if 'mask' in search_anno else [torch.zeros((H, W))] * self.num_search_frames\n\n                data = TensorDict({'template_images': template_frames,\n                                   'template_anno': template_anno['bbox'],\n                                   'template_masks': template_masks,\n                                   'search_images': search_frames,\n                                   'search_anno': search_anno['bbox'],\n                                   'search_masks': search_masks,\n                                   'dataset': dataset.get_name(),\n                                   'test_class': meta_obj_test.get('object_class_name')})\n                # make data augmentation\n                data = self.processing(data)\n\n                # check whether data is valid\n                valid = data['valid']\n            except:\n                valid = False\n\n        return data\n\n    def getitem_cls(self):\n        # get data for classification\n        \"\"\"\n        args:\n            index (int): Index (Ignored since we sample randomly)\n            aux (bool): whether the current data is for auxiliary use (e.g. 
copy-and-paste)\n\n        returns:\n            TensorDict - dict containing all the data blocks\n        \"\"\"\n        valid = False\n        label = None\n        while not valid:\n            # Select a dataset\n            dataset = random.choices(self.datasets, self.p_datasets)[0]\n\n            is_video_dataset = dataset.is_video_sequence()\n\n            # sample a sequence from the given dataset\n            seq_id, visible, seq_info_dict = self.sample_seq_from_dataset(dataset, is_video_dataset)\n            # sample template and search frame ids\n            if is_video_dataset:\n                if self.frame_sample_mode in [\"trident\", \"trident_pro\"]:\n                    template_frame_ids, search_frame_ids = self.get_frame_ids_trident(visible)\n                elif self.frame_sample_mode == \"stark\":\n                    template_frame_ids, search_frame_ids = self.get_frame_ids_stark(visible, seq_info_dict[\"valid\"])\n                else:\n                    raise ValueError(\"illegal frame sample mode\")\n            else:\n                # In case of image dataset, just repeat the image to generate synthetic video\n                template_frame_ids = [1] * self.num_template_frames\n                search_frame_ids = [1] * self.num_search_frames\n            try:\n                # \"try\" is used to handle trackingnet data failure\n                # get images and bounding boxes (for templates)\n                template_frames, template_anno, meta_obj_train = dataset.get_frames(seq_id, template_frame_ids,\n                                                                                    seq_info_dict)\n                H, W, _ = template_frames[0].shape\n                template_masks = template_anno['mask'] if 'mask' in template_anno else [torch.zeros(\n                    (H, W))] * self.num_template_frames\n                # get images and bounding boxes (for searches)\n                # positive samples\n                if random.random() < self.pos_prob:\n                    label = torch.ones(1,)\n                    search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)\n                    search_masks = search_anno['mask'] if 'mask' in search_anno else [torch.zeros(\n                        (H, W))] * self.num_search_frames\n                # negative samples\n                else:\n                    label = torch.zeros(1,)\n                    if is_video_dataset:\n                        search_frame_ids = self._sample_visible_ids(visible, num_ids=1, force_invisible=True)\n                        if search_frame_ids is None:\n                            search_frames, search_anno, meta_obj_test = self.get_one_search()\n                        else:\n                            search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids,\n                                                                                           seq_info_dict)\n                            search_anno[\"bbox\"] = [self.get_center_box(H, W)]\n                    else:\n                        search_frames, search_anno, meta_obj_test = self.get_one_search()\n                    H, W, _ = search_frames[0].shape\n                    search_masks = search_anno['mask'] if 'mask' in search_anno else [torch.zeros(\n                        (H, W))] * self.num_search_frames\n\n                data = TensorDict({'template_images': template_frames,\n                                   'template_anno': 
template_anno['bbox'],\n                                   'template_masks': template_masks,\n                                   'search_images': search_frames,\n                                   'search_anno': search_anno['bbox'],\n                                   'search_masks': search_masks,\n                                   'dataset': dataset.get_name(),\n                                   'test_class': meta_obj_test.get('object_class_name')})\n\n                # make data augmentation\n                data = self.processing(data)\n                # add classification label\n                data[\"label\"] = label\n                # check whether data is valid\n                valid = data['valid']\n            except:\n                valid = False\n\n        return data\n\n    def get_center_box(self, H, W, ratio=1/8):\n        cx, cy, w, h = W/2, H/2, W * ratio, H * ratio\n        return torch.tensor([int(cx-w/2), int(cy-h/2), int(w), int(h)])\n\n    def sample_seq_from_dataset(self, dataset, is_video_dataset):\n\n        # Sample a sequence with enough visible frames\n        enough_visible_frames = False\n        while not enough_visible_frames:\n            # Sample a sequence\n            seq_id = random.randint(0, dataset.get_num_sequences() - 1)\n\n            # Sample frames\n            seq_info_dict = dataset.get_sequence_info(seq_id)\n            visible = seq_info_dict['visible']\n\n            enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * (\n                    self.num_search_frames + self.num_template_frames) and len(visible) >= 20\n\n            enough_visible_frames = enough_visible_frames or not is_video_dataset\n        return seq_id, visible, seq_info_dict\n\n    def get_one_search(self):\n        # Select a dataset\n        dataset = random.choices(self.datasets, self.p_datasets)[0]\n\n        is_video_dataset = dataset.is_video_sequence()\n        # sample a sequence\n        seq_id, visible, seq_info_dict = self.sample_seq_from_dataset(dataset, is_video_dataset)\n        # sample a frame\n        if is_video_dataset:\n            if self.frame_sample_mode == \"stark\":\n                search_frame_ids = self._sample_visible_ids(seq_info_dict[\"valid\"], num_ids=1)\n            else:\n                search_frame_ids = self._sample_visible_ids(visible, num_ids=1, allow_invisible=True)\n        else:\n            search_frame_ids = [1]\n        # get the image, bounding box and other info\n        search_frames, search_anno, meta_obj_test = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)\n\n        return search_frames, search_anno, meta_obj_test\n\n    def get_frame_ids_trident(self, visible):\n        # get template and search ids in a 'trident' manner\n        template_frame_ids_extra = []\n        while None in template_frame_ids_extra or len(template_frame_ids_extra) == 0:\n            template_frame_ids_extra = []\n            # first randomly sample two frames from a video\n            template_frame_id1 = self._sample_visible_ids(visible, num_ids=1)  # the initial template id\n            search_frame_ids = self._sample_visible_ids(visible, num_ids=1)  # the search region id\n            # get the dynamic template id\n            for max_gap in self.max_gap:\n                if template_frame_id1[0] >= search_frame_ids[0]:\n                    min_id, max_id = search_frame_ids[0], search_frame_ids[0] + max_gap\n                else:\n                    min_id, max_id = search_frame_ids[0] - max_gap, 
search_frame_ids[0]\n                if self.frame_sample_mode == \"trident_pro\":\n                    f_id = self._sample_visible_ids(visible, num_ids=1, min_id=min_id, max_id=max_id,\n                                                    allow_invisible=True)\n                else:\n                    f_id = self._sample_visible_ids(visible, num_ids=1, min_id=min_id, max_id=max_id)\n                if f_id is None:\n                    template_frame_ids_extra += [None]\n                else:\n                    template_frame_ids_extra += f_id\n\n        template_frame_ids = template_frame_id1 + template_frame_ids_extra\n        return template_frame_ids, search_frame_ids\n\n    def get_frame_ids_stark(self, visible, valid):\n        # get template and search ids in a 'stark' manner\n        template_frame_ids_extra = []\n        while None in template_frame_ids_extra or len(template_frame_ids_extra) == 0:\n            template_frame_ids_extra = []\n            # first randomly sample two frames from a video\n            template_frame_id1 = self._sample_visible_ids(visible, num_ids=1)  # the initial template id\n            search_frame_ids = self._sample_visible_ids(visible, num_ids=1)  # the search region id\n            # get the dynamic template id\n            for max_gap in self.max_gap:\n                if template_frame_id1[0] >= search_frame_ids[0]:\n                    min_id, max_id = search_frame_ids[0], search_frame_ids[0] + max_gap\n                else:\n                    min_id, max_id = search_frame_ids[0] - max_gap, search_frame_ids[0]\n                \"\"\"we require the frame to be valid but not necessary visible\"\"\"\n                f_id = self._sample_visible_ids(valid, num_ids=1, min_id=min_id, max_id=max_id)\n                if f_id is None:\n                    template_frame_ids_extra += [None]\n                else:\n                    template_frame_ids_extra += f_id\n\n        template_frame_ids = template_frame_id1 + template_frame_ids_extra\n        return template_frame_ids, search_frame_ids"
  },
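A quick sketch of the _sample_visible_ids behavior that all of the sampling modes above rely on; it assumes the repo root is on PYTHONPATH, and the dummy datasets list exists only so the constructor can normalize p_datasets.

import random
from lib.train.data.sampler import TrackingSampler

random.seed(0)
sampler = TrackingSampler(datasets=[[0]], p_datasets=None, samples_per_epoch=1,
                          max_gap=30, num_search_frames=1, num_template_frames=2)
visible = [False, True, True, False, True, True, True, False]
# draws (with replacement) only indices where visible[i] is True
print(sampler._sample_visible_ids(visible, num_ids=2))
# a window with no visible frames yields None; callers respond by widening the gap
print(sampler._sample_visible_ids(visible, num_ids=1, min_id=3, max_id=4))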
  {
    "path": "lib/train/data/sequence_sampler.py",
    "content": "import random\nimport torch.utils.data\nimport numpy as np\nfrom lib.utils import TensorDict\n\n\nclass SequenceSampler(torch.utils.data.Dataset):\n    \"\"\"\n    Sample sequence for sequence-level training\n    \"\"\"\n\n    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n                 num_search_frames, num_template_frames=1, frame_sample_mode='sequential', max_interval=10, prob=0.7):\n        \"\"\"\n        args:\n            datasets - List of datasets to be used for training\n            p_datasets - List containing the probabilities by which each dataset will be sampled\n            samples_per_epoch - Number of training samples per epoch\n            max_gap - Maximum gap, in frame numbers, between the train frames and the search frames.\\\n            max_interval - Maximum interval between sampled frames\n            num_search_frames - Number of search frames to sample.\n            num_template_frames - Number of template frames to sample.\n            processing - An instance of Processing class which performs the necessary processing of the data.\n            frame_sample_mode - Either 'causal' or 'interval'. If 'causal', then the search frames are sampled in a causally,\n                                otherwise randomly within the interval.\n            prob - sequential sampling by prob / interval sampling by 1-prob\n        \"\"\"\n        self.datasets = datasets\n\n        # If p not provided, sample uniformly from all videos\n        if p_datasets is None:\n            p_datasets = [len(d) for d in self.datasets]\n\n        # Normalize\n        p_total = sum(p_datasets)\n        self.p_datasets = [x / p_total for x in p_datasets]\n\n        self.samples_per_epoch = samples_per_epoch\n        self.max_gap = max_gap\n        self.max_interval = max_interval\n        self.num_search_frames = num_search_frames\n        self.num_template_frames = num_template_frames\n        self.frame_sample_mode = frame_sample_mode\n        self.prob=prob\n        self.extra=1\n\n    def __len__(self):\n        return self.samples_per_epoch\n\n    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None):\n        \"\"\" Samples num_ids frames between min_id and max_id for which target is visible\n\n        args:\n            visible - 1d Tensor indicating whether target is visible for each frame\n            num_ids - number of frames to be samples\n            min_id - Minimum allowed frame number\n            max_id - Maximum allowed frame number\n\n        returns:\n            list - List of sampled frame numbers. 
None if not sufficient visible frames could be found.\n        \"\"\"\n        if num_ids == 0:\n            return []\n        if min_id is None or min_id < 0:\n            min_id = 0\n        if max_id is None or max_id > len(visible):\n            max_id = len(visible)\n\n        valid_ids = [i for i in range(min_id, max_id) if visible[i]]\n\n        # No visible ids\n        if len(valid_ids) == 0:\n            return None\n\n        return random.choices(valid_ids, k=num_ids)\n\n\n    def _sequential_sample(self, visible):\n        # Sample frames in sequential manner\n        template_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=0,\n                                                   max_id=len(visible) - self.num_search_frames)\n        \n        if self.max_gap == -1:\n            left = template_frame_ids[0]\n        else:\n            # template frame (1) ->(max_gap) -> search frame (num_search_frames)\n            left_max = min(len(visible) - self.num_search_frames, template_frame_ids[0] + self.max_gap)\n            left = self._sample_visible_ids(visible, num_ids=1, min_id=template_frame_ids[0],\n                                            max_id=left_max)[0]\n\n        valid_ids = [i for i in range(left, len(visible)) if visible[i]]\n        search_frame_ids = valid_ids[:self.num_search_frames]\n\n        # if length is not enough\n        last = search_frame_ids[-1]\n        while len(search_frame_ids) < self.num_search_frames:\n            if last >= len(visible) - 1:\n                search_frame_ids.append(last)\n            else:\n                last += 1\n                if visible[last]:\n                    search_frame_ids.append(last)\n\n        return template_frame_ids, search_frame_ids\n\n\n    def _random_interval_sample(self, visible):\n        # Get valid ids\n        valid_ids = [i for i in range(len(visible)) if visible[i]]\n\n        # Sample template frame\n        avg_interval = self.max_interval\n        while avg_interval * (self.num_search_frames - 1) > len(visible):\n            avg_interval = max(avg_interval - 1, 1)\n\n        while True:\n            template_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=0,\n                                                       max_id=len(visible) - avg_interval * (self.num_search_frames - 1))\n            if template_frame_ids == None:\n                avg_interval = avg_interval - 1\n            else:\n                break\n\n            if avg_interval == 0:\n                template_frame_ids = [valid_ids[0]]\n                break\n\n        # Sample first search frame\n        if self.max_gap == -1:\n            search_frame_ids = template_frame_ids\n        else:\n            avg_interval = self.max_interval\n            while avg_interval * (self.num_search_frames - 1) > len(visible):\n                avg_interval = max(avg_interval - 1, 1)\n\n            while True:\n                left_max = min(max(len(visible) - avg_interval * (self.num_search_frames - 1), template_frame_ids[0] + 1),\n                               template_frame_ids[0] + self.max_gap)\n                search_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=template_frame_ids[0],\n                                                          max_id=left_max)\n\n                if search_frame_ids == None:\n                    avg_interval = avg_interval - 1\n                else:\n                    break\n\n                if avg_interval == -1:\n                    search_frame_ids = 
template_frame_ids\n                    break\n\n        # Sample rest of the search frames with random interval\n        last = search_frame_ids[0]\n        while last <= len(visible) - 1 and len(search_frame_ids) < self.num_search_frames:\n            # sample id with interval\n            max_id = min(last + self.max_interval + 1, len(visible))\n            id = self._sample_visible_ids(visible, num_ids=1, min_id=last,\n                                          max_id=max_id)\n\n            if id is None:\n                # If not found in current range, find from previous range\n                last = last + self.max_interval\n            else:\n                search_frame_ids.append(id[0])\n                last = search_frame_ids[-1]\n\n        # if length is not enough, randomly sample new ids\n        if len(search_frame_ids) < self.num_search_frames:\n            valid_ids = [x for x in valid_ids if x > search_frame_ids[0] and x not in search_frame_ids]\n\n            if len(valid_ids) > 0:\n                new_ids = random.choices(valid_ids, k=min(len(valid_ids),\n                                                          self.num_search_frames - len(search_frame_ids)))\n                search_frame_ids = search_frame_ids + new_ids\n                search_frame_ids = sorted(search_frame_ids, key=int)\n\n        # if length is still not enough, duplicate last frame\n        while len(search_frame_ids) < self.num_search_frames:\n            search_frame_ids.append(search_frame_ids[-1])\n\n        for i in range(1, self.num_search_frames):\n            if search_frame_ids[i] - search_frame_ids[i - 1] > self.max_interval:\n                print(search_frame_ids[i] - search_frame_ids[i - 1])\n\n        return template_frame_ids, search_frame_ids\n\n\n    def __getitem__(self, index):\n        \"\"\"\n        args:\n            index (int): Index (Ignored since we sample randomly)\n\n        returns:\n            TensorDict - dict containing all the data blocks\n        \"\"\"\n\n        # Select a dataset\n        dataset = random.choices(self.datasets, self.p_datasets)[0]\n        if dataset.get_name() == 'got10k' :\n            max_gap = self.max_gap\n            max_interval = self.max_interval\n        else:\n            max_gap = self.max_gap\n            max_interval = self.max_interval\n            self.max_gap = max_gap * self.extra\n            self.max_interval = max_interval * self.extra\n            \n        is_video_dataset = dataset.is_video_sequence()\n\n        # Sample a sequence with enough visible frames\n        enough_visible_frames = False\n        while not enough_visible_frames:\n            # Sample a sequence\n            seq_id = random.randint(0, dataset.get_num_sequences() - 1)\n\n            # Sample frames\n            seq_info_dict = dataset.get_sequence_info(seq_id)\n            visible = seq_info_dict['visible']\n\n            enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * (\n                    self.num_search_frames + self.num_template_frames) and len(visible) >= (self.num_search_frames + self.num_template_frames)\n\n            enough_visible_frames = enough_visible_frames or not is_video_dataset\n\n        if is_video_dataset:\n            if self.frame_sample_mode == 'sequential':\n                template_frame_ids, search_frame_ids = self._sequential_sample(visible)\n\n            elif self.frame_sample_mode == 'random_interval':\n                if random.random() < self.prob:\n                    template_frame_ids, 
search_frame_ids = self._random_interval_sample(visible)\n                else:\n                    template_frame_ids, search_frame_ids = self._sequential_sample(visible)\n            else:\n                self.max_gap = max_gap\n                self.max_interval = max_interval\n                raise NotImplementedError\n        else:\n            # In case of image dataset, just repeat the image to generate synthetic video\n            template_frame_ids = [1] * self.num_template_frames\n            search_frame_ids = [1] * self.num_search_frames\n        #print(dataset.get_name(), search_frame_ids, self.max_gap, self.max_interval)\n        self.max_gap = max_gap\n        self.max_interval = max_interval\n        #print(self.max_gap, self.max_interval)\n        template_frames, template_anno, meta_obj_template = dataset.get_frames(seq_id, template_frame_ids, seq_info_dict)\n        search_frames, search_anno, meta_obj_search = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)\n        template_bbox = [bbox.numpy() for bbox in template_anno['bbox']] # tensor -> numpy array\n        search_bbox = [bbox.numpy() for bbox in search_anno['bbox']] # tensor -> numpy array\n        \n\n        return TensorDict({'template_images': np.array(template_frames).squeeze(),    # 1 template images\n                'template_annos': np.array(template_bbox).squeeze(),\n                'search_images': np.array(search_frames),      # (num_frames) search images\n                'search_annos': np.array(search_bbox),\n                'seq_id': seq_id,\n                'dataset': dataset.get_name(),\n                'search_class': meta_obj_search.get('object_class_name'),\n                'num_frames': len(search_frames)\n                })"
  },
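The two sampling modes above can be exercised directly on a toy visibility list; this is a minimal sketch under the same PYTHONPATH assumption, with arbitrary small values for max_gap, max_interval, and num_search_frames.

import random
from lib.train.data.sequence_sampler import SequenceSampler

random.seed(0)
s = SequenceSampler(datasets=[[0]], p_datasets=None, samples_per_epoch=1,
                    max_gap=5, num_search_frames=4, max_interval=3)
visible = [True] * 30
print(s._sequential_sample(visible))       # search ids are consecutive visible frames after the template
print(s._random_interval_sample(visible))  # consecutive search ids normally differ by at most max_interval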
  {
    "path": "lib/train/data/sequence_sampler_v2.py",
    "content": "import random\nimport torch.utils.data\nimport numpy as np\nfrom lib.utils import TensorDict\n\n\nclass SequenceSampler(torch.utils.data.Dataset):\n    \"\"\"\n    Sample sequence for sequence-level training\n    \"\"\"\n\n    def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n                 num_search_frames, num_template_frames=1, frame_sample_mode='sequential', max_interval=10, prob=0.7):\n        \"\"\"\n        args:\n            datasets - List of datasets to be used for training\n            p_datasets - List containing the probabilities by which each dataset will be sampled\n            samples_per_epoch - Number of training samples per epoch\n            max_gap - Maximum gap, in frame numbers, between the train frames and the search frames.\\\n            max_interval - Maximum interval between sampled frames\n            num_search_frames - Number of search frames to sample.\n            num_template_frames - Number of template frames to sample.\n            processing - An instance of Processing class which performs the necessary processing of the data.\n            frame_sample_mode - Either 'causal' or 'interval'. If 'causal', then the search frames are sampled in a causally,\n                                otherwise randomly within the interval.\n            prob - sequential sampling by prob / interval sampling by 1-prob\n        \"\"\"\n        self.datasets = datasets\n\n        # If p not provided, sample uniformly from all videos\n        if p_datasets is None:\n            p_datasets = [len(d) for d in self.datasets]\n\n        # Normalize\n        p_total = sum(p_datasets)\n        self.p_datasets = [x / p_total for x in p_datasets]\n\n        self.samples_per_epoch = samples_per_epoch\n        self.max_gap = max_gap\n        self.max_interval = max_interval\n        self.num_search_frames = num_search_frames\n        self.num_template_frames = num_template_frames\n        self.frame_sample_mode = frame_sample_mode\n        self.prob = prob\n        self.extra = 1\n\n    def __len__(self):\n        return self.samples_per_epoch\n\n    def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None):\n        \"\"\" Samples num_ids frames between min_id and max_id for which target is visible\n\n        args:\n            visible - 1d Tensor indicating whether target is visible for each frame\n            num_ids - number of frames to be samples\n            min_id - Minimum allowed frame number\n            max_id - Maximum allowed frame number\n\n        returns:\n            list - List of sampled frame numbers. 
None if not sufficient visible frames could be found.\n        \"\"\"\n        if num_ids == 0:\n            return []\n        if min_id is None or min_id < 0:\n            min_id = 0\n        if max_id is None or max_id > len(visible):\n            max_id = len(visible)\n\n        valid_ids = [i for i in range(min_id, max_id) if visible[i]]\n\n        # No visible ids\n        if len(valid_ids) == 0:\n            return None\n\n        return random.choices(valid_ids, k=num_ids)\n\n    def _sequential_sample(self, visible):\n        # Sample frames in sequential manner\n        template_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=0,\n                                                      max_id=len(visible) - self.num_search_frames)\n        template_another = self._sample_visible_ids(visible, num_ids=1, min_id=template_frame_ids[0],\n                                                    max_id=min(len(visible) - self.num_search_frames,\n                                                               template_frame_ids[0] + self.max_gap))\n        template_frame_ids.append(template_another[0])\n        template_frame_ids.sort()\n\n        if self.max_gap == -1:\n            left = template_frame_ids[1]\n        else:\n            # template frame (1) ->(max_gap) -> search frame (num_search_frames)\n            left_max = min(len(visible) - self.num_search_frames, template_frame_ids[1] + self.max_gap)\n            left = self._sample_visible_ids(visible, num_ids=1, min_id=template_frame_ids[1],\n                                            max_id=left_max)[0]\n\n        valid_ids = [i for i in range(left, len(visible)) if visible[i]]\n        search_frame_ids = valid_ids[:self.num_search_frames]\n\n        # if length is not enough\n        last = search_frame_ids[-1]\n        while len(search_frame_ids) < self.num_search_frames:\n            if last >= len(visible) - 1:\n                search_frame_ids.append(last)\n            else:\n                last += 1\n                if visible[last]:\n                    search_frame_ids.append(last)\n\n        return template_frame_ids, search_frame_ids\n\n    def _random_interval_sample(self, visible):\n        # Get valid ids\n        valid_ids = [i for i in range(len(visible)) if visible[i]]\n\n        # Sample template frame\n        avg_interval = self.max_interval\n        while avg_interval * (self.num_search_frames - 1) > len(visible):\n            avg_interval = max(avg_interval - 1, 1)\n\n        while True:\n            template_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=0,\n                                                          max_id=len(visible) - avg_interval * (\n                                                                      self.num_search_frames - 1))\n            if template_frame_ids == None:\n                avg_interval = avg_interval - 1\n            else:\n                break\n\n            if avg_interval == 0:\n                template_frame_ids = [valid_ids[0]]\n                break\n\n        # Sample first search frame\n        if self.max_gap == -1:\n            search_frame_ids = template_frame_ids\n        else:\n            avg_interval = self.max_interval\n            while avg_interval * (self.num_search_frames - 1) > len(visible):\n                avg_interval = max(avg_interval - 1, 1)\n\n            while True:\n                left_max = min(\n                    max(len(visible) - avg_interval * (self.num_search_frames - 1), template_frame_ids[0] + 1),\n 
                   template_frame_ids[0] + self.max_gap)\n                search_frame_ids = self._sample_visible_ids(visible, num_ids=1, min_id=template_frame_ids[0],\n                                                            max_id=left_max)\n\n                if search_frame_ids == None:\n                    avg_interval = avg_interval - 1\n                else:\n                    break\n\n                if avg_interval == -1:\n                    search_frame_ids = template_frame_ids\n                    break\n\n        # Sample rest of the search frames with random interval\n        last = search_frame_ids[0]\n        while last <= len(visible) - 1 and len(search_frame_ids) < self.num_search_frames:\n            # sample id with interval\n            max_id = min(last + self.max_interval + 1, len(visible))\n            id = self._sample_visible_ids(visible, num_ids=1, min_id=last,\n                                          max_id=max_id)\n\n            if id is None:\n                # If not found in current range, find from previous range\n                last = last + self.max_interval\n            else:\n                search_frame_ids.append(id[0])\n                last = search_frame_ids[-1]\n\n        # if length is not enough, randomly sample new ids\n        if len(search_frame_ids) < self.num_search_frames:\n            valid_ids = [x for x in valid_ids if x > search_frame_ids[0] and x not in search_frame_ids]\n\n            if len(valid_ids) > 0:\n                new_ids = random.choices(valid_ids, k=min(len(valid_ids),\n                                                          self.num_search_frames - len(search_frame_ids)))\n                search_frame_ids = search_frame_ids + new_ids\n                search_frame_ids = sorted(search_frame_ids, key=int)\n\n        # if length is still not enough, duplicate last frame\n        while len(search_frame_ids) < self.num_search_frames:\n            search_frame_ids.append(search_frame_ids[-1])\n\n        for i in range(1, self.num_search_frames):\n            if search_frame_ids[i] - search_frame_ids[i - 1] > self.max_interval:\n                print(search_frame_ids[i] - search_frame_ids[i - 1])\n\n        return template_frame_ids, search_frame_ids\n\n    def __getitem__(self, index):\n        \"\"\"\n        args:\n            index (int): Index (Ignored since we sample randomly)\n\n        returns:\n            TensorDict - dict containing all the data blocks\n        \"\"\"\n\n        # Select a dataset\n        dataset = random.choices(self.datasets, self.p_datasets)[0]\n        if dataset.get_name() == 'got10k':\n            max_gap = self.max_gap\n            max_interval = self.max_interval\n        else:\n            max_gap = self.max_gap\n            max_interval = self.max_interval\n            self.max_gap = max_gap * self.extra\n            self.max_interval = max_interval * self.extra\n\n        is_video_dataset = dataset.is_video_sequence()\n\n        # Sample a sequence with enough visible frames\n        enough_visible_frames = False\n        while not enough_visible_frames:\n            # Sample a sequence\n            seq_id = random.randint(0, dataset.get_num_sequences() - 1)\n\n            # Sample frames\n            seq_info_dict = dataset.get_sequence_info(seq_id)\n            visible = seq_info_dict['visible']\n\n            enough_visible_frames = visible.type(torch.int64).sum().item() > 2 * (\n                    self.num_search_frames + self.num_template_frames) and len(visible) >= (\n       
                                         self.num_search_frames + self.num_template_frames)\n\n            enough_visible_frames = enough_visible_frames or not is_video_dataset\n\n        if is_video_dataset:\n            if self.frame_sample_mode == 'sequential':\n                template_frame_ids, search_frame_ids = self._sequential_sample(visible)\n\n            elif self.frame_sample_mode == 'random_interval':\n                if random.random() < self.prob:\n                    template_frame_ids, search_frame_ids = self._random_interval_sample(visible)\n                else:\n                    template_frame_ids, search_frame_ids = self._sequential_sample(visible)\n            else:\n                self.max_gap = max_gap\n                self.max_interval = max_interval\n                raise NotImplementedError\n        else:\n            # In case of image dataset, just repeat the image to generate synthetic video\n            template_frame_ids = [1] * self.num_template_frames\n            search_frame_ids = [1] * self.num_search_frames\n\n        self.max_gap = max_gap\n        self.max_interval = max_interval\n\n        # print(\"this is template_frame_ids\", template_frame_ids)\n        # print(\"this is search_frame_ids\", search_frame_ids)\n        template_frames, template_anno, meta_obj_template = dataset.get_frames(seq_id, template_frame_ids,\n                                                                               seq_info_dict)\n        search_frames, search_anno, meta_obj_search = dataset.get_frames(seq_id, search_frame_ids, seq_info_dict)\n        # visible_ratio = search_anno['visible_ratio']\n        template_bbox = [bbox.numpy() for bbox in template_anno['bbox']]  # tensor -> numpy array\n        search_bbox = [bbox.numpy() for bbox in search_anno['bbox']]  # tensor -> numpy array\n\n        return TensorDict({'template_images': np.array(template_frames).squeeze(),  # 1 template images\n                           'template_annos': np.array(template_bbox).squeeze(),\n                           'search_images': np.array(search_frames),  # (num_frames) search images\n                           'search_annos': np.array(search_bbox),\n                           'seq_id': seq_id,\n                           'dataset': dataset.get_name(),\n                           'search_class': meta_obj_search.get('object_class_name'),\n                           'num_frames': len(search_frames),\n                           # 'visible_ratio': visible_ratio\n                           })"
  },
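sequence_sampler_v2.py differs from sequence_sampler.py mainly in _sequential_sample, which draws a second template frame within max_gap of the first; a sketch under the same assumptions as above:

import random
from lib.train.data.sequence_sampler_v2 import SequenceSampler

random.seed(0)
s = SequenceSampler(datasets=[[0]], p_datasets=None, samples_per_epoch=1,
                    max_gap=5, num_search_frames=4, max_interval=3)
t_ids, s_ids = s._sequential_sample([True] * 30)
print(t_ids)  # two sorted template ids instead of one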
  {
    "path": "lib/train/data/transforms.py",
    "content": "import random\nimport numpy as np\nimport math\nimport cv2 as cv\nimport torch\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as tvisf\n\n\nclass Transform:\n    \"\"\"A set of transformations, used for e.g. data augmentation.\n    Args of constructor:\n        transforms: An arbitrary number of transformations, derived from the TransformBase class.\n                    They are applied in the order they are given.\n\n    The Transform object can jointly transform images, bounding boxes and segmentation masks.\n    This is done by calling the object with the following key-word arguments (all are optional).\n\n    The following arguments are inputs to be transformed. They are either supplied as a single instance, or a list of instances.\n        image  -  Image\n        coords  -  2xN dimensional Tensor of 2D image coordinates [y, x]\n        bbox  -  Bounding box on the form [x, y, w, h]\n        mask  -  Segmentation mask with discrete classes\n\n    The following parameters can be supplied with calling the transform object:\n        joint [Bool]  -  If True then transform all images/coords/bbox/mask in the list jointly using the same transformation.\n                         Otherwise each tuple (images, coords, bbox, mask) will be transformed independently using\n                         different random rolls. Default: True.\n        new_roll [Bool]  -  If False, then no new random roll is performed, and the saved result from the previous roll\n                            is used instead. Default: True.\n\n    Check the DiMPProcessing class for examples.\n    \"\"\"\n\n    def __init__(self, *transforms):\n        if len(transforms) == 1 and isinstance(transforms[0], (list, tuple)):\n            transforms = transforms[0]\n        self.transforms = transforms\n        self._valid_inputs = ['image', 'coords', 'bbox', 'mask', 'att']\n        self._valid_args = ['joint', 'new_roll']\n        self._valid_all = self._valid_inputs + self._valid_args\n\n    def __call__(self, **inputs):\n        var_names = [k for k in inputs.keys() if k in self._valid_inputs]\n        for v in inputs.keys():\n            if v not in self._valid_all:\n                raise ValueError('Incorrect input \\\"{}\\\" to transform. 
Only supports inputs {} and arguments {}.'.format(v, self._valid_inputs, self._valid_args))\n\n        joint_mode = inputs.get('joint', True)\n        new_roll = inputs.get('new_roll', True)\n\n        if not joint_mode:\n            out = zip(*[self(**inp) for inp in self._split_inputs(inputs)])\n            return tuple(list(o) for o in out)\n\n        out = {k: v for k, v in inputs.items() if k in self._valid_inputs}\n\n        for t in self.transforms:\n            out = t(**out, joint=joint_mode, new_roll=new_roll)\n        if len(var_names) == 1:\n            return out[var_names[0]]\n        # Make sure order is correct\n        return tuple(out[v] for v in var_names)\n\n    def _split_inputs(self, inputs):\n        var_names = [k for k in inputs.keys() if k in self._valid_inputs]\n        split_inputs = [{k: v for k, v in zip(var_names, vals)} for vals in zip(*[inputs[vn] for vn in var_names])]\n        for arg_name, arg_val in filter(lambda it: it[0]!='joint' and it[0] in self._valid_args, inputs.items()):\n            if isinstance(arg_val, list):\n                for inp, av in zip(split_inputs, arg_val):\n                    inp[arg_name] = av\n            else:\n                for inp in split_inputs:\n                    inp[arg_name] = arg_val\n        return split_inputs\n\n    def __repr__(self):\n        format_string = self.__class__.__name__ + '('\n        for t in self.transforms:\n            format_string += '\\n'\n            format_string += '    {0}'.format(t)\n        format_string += '\\n)'\n        return format_string\n\n\nclass TransformBase:\n    \"\"\"Base class for transformation objects. See the Transform class for details.\"\"\"\n    def __init__(self):\n        \"\"\"2020.12.24 Add 'att' to valid inputs\"\"\"\n        self._valid_inputs = ['image', 'coords', 'bbox', 'mask', 'att']\n        self._valid_args = ['new_roll']\n        self._valid_all = self._valid_inputs + self._valid_args\n        self._rand_params = None\n\n    def __call__(self, **inputs):\n        # Split input\n        input_vars = {k: v for k, v in inputs.items() if k in self._valid_inputs}\n        input_args = {k: v for k, v in inputs.items() if k in self._valid_args}\n\n        # Roll random parameters for the transform\n        if input_args.get('new_roll', True):\n            rand_params = self.roll()\n            if rand_params is None:\n                rand_params = ()\n            elif not isinstance(rand_params, tuple):\n                rand_params = (rand_params,)\n            self._rand_params = rand_params\n\n        outputs = dict()\n        for var_name, var in input_vars.items():\n            if var is not None:\n                transform_func = getattr(self, 'transform_' + var_name)\n                if var_name in ['coords', 'bbox']:\n                    params = (self._get_image_size(input_vars),) + self._rand_params\n                else:\n                    params = self._rand_params\n                if isinstance(var, (list, tuple)):\n                    outputs[var_name] = [transform_func(x, *params) for x in var]\n                else:\n                    outputs[var_name] = transform_func(var, *params)\n        return outputs\n\n    def _get_image_size(self, inputs):\n        im = None\n        for var_name in ['image', 'mask']:\n            if inputs.get(var_name) is not None:\n                im = inputs[var_name]\n                break\n        if im is None:\n            return None\n        if isinstance(im, (list, tuple)):\n            im = im[0]\n       
 if isinstance(im, np.ndarray):\n            return im.shape[:2]\n        if torch.is_tensor(im):\n            return (im.shape[-2], im.shape[-1])\n        raise Exception('Unknown image type')\n\n    def roll(self):\n        return None\n\n    def transform_image(self, image, *rand_params):\n        \"\"\"Must be deterministic\"\"\"\n        return image\n\n    def transform_coords(self, coords, image_shape, *rand_params):\n        \"\"\"Must be deterministic\"\"\"\n        return coords\n\n    def transform_bbox(self, bbox, image_shape, *rand_params):\n        \"\"\"Assumes [x, y, w, h]\"\"\"\n        # Check if not overloaded\n        if self.transform_coords.__code__ == TransformBase.transform_coords.__code__:\n            return bbox\n\n        coord = bbox.clone().view(-1,2).t().flip(0)\n\n        x1 = coord[1, 0]\n        x2 = coord[1, 0] + coord[1, 1]\n\n        y1 = coord[0, 0]\n        y2 = coord[0, 0] + coord[0, 1]\n\n        coord_all = torch.tensor([[y1, y1, y2, y2], [x1, x2, x2, x1]])\n\n        coord_transf = self.transform_coords(coord_all, image_shape, *rand_params).flip(0)\n        tl = torch.min(coord_transf, dim=1)[0]\n        sz = torch.max(coord_transf, dim=1)[0] - tl\n        bbox_out = torch.cat((tl, sz), dim=-1).reshape(bbox.shape)\n        return bbox_out\n\n    def transform_mask(self, mask, *rand_params):\n        \"\"\"Must be deterministic\"\"\"\n        return mask\n\n    def transform_att(self, att, *rand_params):\n        \"\"\"2020.12.24 Added to deal with attention masks\"\"\"\n        return att\n\n\nclass ToTensor(TransformBase):\n    \"\"\"Convert to a Tensor\"\"\"\n\n    def transform_image(self, image):\n        # handle numpy array\n        if image.ndim == 2:\n            image = image[:, :, None]\n\n        image = torch.from_numpy(image.transpose((2, 0, 1)))\n        # backward compatibility\n        if isinstance(image, torch.ByteTensor):\n            return image.float().div(255)\n        else:\n            return image\n\n    def transfrom_mask(self, mask):\n        if isinstance(mask, np.ndarray):\n            return torch.from_numpy(mask)\n\n    def transform_att(self, att):\n        if isinstance(att, np.ndarray):\n            return torch.from_numpy(att).to(torch.bool)\n        elif isinstance(att, torch.Tensor):\n            return att.to(torch.bool)\n        else:\n            raise ValueError (\"dtype must be np.ndarray or torch.Tensor\")\n\n\nclass ToTensorAndJitter(TransformBase):\n    \"\"\"Convert to a Tensor and jitter brightness\"\"\"\n    def __init__(self, brightness_jitter=0.0, normalize=True):\n        super().__init__()\n        self.brightness_jitter = brightness_jitter\n        self.normalize = normalize\n\n    def roll(self):\n        return np.random.uniform(max(0, 1 - self.brightness_jitter), 1 + self.brightness_jitter)\n\n    def transform_image(self, image, brightness_factor):\n        # handle numpy array\n        image = torch.from_numpy(image.transpose((2, 0, 1)))\n\n        # backward compatibility\n        if self.normalize:\n            return image.float().mul(brightness_factor/255.0).clamp(0.0, 1.0)\n        else:\n            return image.float().mul(brightness_factor).clamp(0.0, 255.0)\n\n    def transform_mask(self, mask, brightness_factor):\n        if isinstance(mask, np.ndarray):\n            return torch.from_numpy(mask)\n        else:\n            return mask\n    def transform_att(self, att, brightness_factor):\n        if isinstance(att, np.ndarray):\n            return 
torch.from_numpy(att).to(torch.bool)\n        elif isinstance(att, torch.Tensor):\n            return att.to(torch.bool)\n        else:\n            raise ValueError (\"dtype must be np.ndarray or torch.Tensor\")\n\n\nclass Normalize(TransformBase):\n    \"\"\"Normalize image\"\"\"\n    def __init__(self, mean, std, inplace=False):\n        super().__init__()\n        self.mean = mean\n        self.std = std\n        self.inplace = inplace\n\n    def transform_image(self, image):\n        return tvisf.normalize(image, self.mean, self.std, self.inplace)\n\n\nclass ToGrayscale(TransformBase):\n    \"\"\"Converts image to grayscale with probability\"\"\"\n    def __init__(self, probability = 0.5):\n        super().__init__()\n        self.probability = probability\n        self.color_weights = np.array([0.2989, 0.5870, 0.1140], dtype=np.float32)\n\n    def roll(self):\n        return random.random() < self.probability\n\n    def transform_image(self, image, do_grayscale):\n        if do_grayscale:\n            if torch.is_tensor(image):\n                raise NotImplementedError('Implement torch variant.')\n            img_gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY)\n            return np.stack([img_gray, img_gray, img_gray], axis=2)\n            # return np.repeat(np.sum(img * self.color_weights, axis=2, keepdims=True).astype(np.uint8), 3, axis=2)\n        return image\n\n\nclass ToBGR(TransformBase):\n    \"\"\"Converts image to BGR\"\"\"\n    def transform_image(self, image):\n        if torch.is_tensor(image):\n            raise NotImplementedError('Implement torch variant.')\n        img_bgr = cv.cvtColor(image, cv.COLOR_RGB2BGR)\n        return img_bgr\n\n\nclass RandomHorizontalFlip(TransformBase):\n    \"\"\"Horizontally flip image randomly with a probability p.\"\"\"\n    def __init__(self, probability = 0.5):\n        super().__init__()\n        self.probability = probability\n\n    def roll(self):\n        return random.random() < self.probability\n\n    def transform_image(self, image, do_flip):\n        if do_flip:\n            if torch.is_tensor(image):\n                return image.flip((2,))\n            return np.fliplr(image).copy()\n        return image\n\n    def transform_coords(self, coords, image_shape, do_flip):\n        if do_flip:\n            coords_flip = coords.clone()\n            coords_flip[1,:] = (image_shape[1] - 1) - coords[1,:]\n            return coords_flip\n        return coords\n\n    def transform_mask(self, mask, do_flip):\n        if do_flip:\n            if torch.is_tensor(mask):\n                return mask.flip((-1,))\n            return np.fliplr(mask).copy()\n        return mask\n\n    def transform_att(self, att, do_flip):\n        if do_flip:\n            if torch.is_tensor(att):\n                return att.flip((-1,))\n            return np.fliplr(att).copy()\n        return att\n\n\nclass RandomHorizontalFlip_Norm(RandomHorizontalFlip):\n    \"\"\"Horizontally flip image randomly with a probability p.\n    The difference is that the coord is normalized to [0,1]\"\"\"\n    def __init__(self, probability = 0.5):\n        super().__init__()\n        self.probability = probability\n\n    def transform_coords(self, coords, image_shape, do_flip):\n        \"\"\"we should use 1 rather than image_shape\"\"\"\n        if do_flip:\n            coords_flip = coords.clone()\n            coords_flip[1,:] = 1 - coords[1,:]\n            return coords_flip\n        return coords\n"
  },
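A short sketch of composing the transforms above and applying them jointly to an image, a box, and an attention mask; the mean/std are the usual ImageNet statistics, used here purely as example values:

import numpy as np
import torch
import lib.train.data.transforms as tfm

transform = tfm.Transform(tfm.ToTensorAndJitter(brightness_jitter=0.2),
                          tfm.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))

image = np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)
bbox = torch.tensor([30.0, 40.0, 50.0, 60.0])  # [x, y, w, h]
att = np.zeros((128, 128))                     # 0 marks valid pixels, 1 marks padding

# joint=True (the default) applies the same random roll to every input
img_t, bbox_t, att_t = transform(image=image, bbox=bbox, att=att)
print(img_t.shape, img_t.dtype)  # torch.Size([3, 128, 128]) torch.float32
print(att_t.dtype)               # torch.bool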
  {
    "path": "lib/train/data/wandb_logger.py",
    "content": "from collections import OrderedDict\r\n\r\ntry:\r\n    import wandb\r\nexcept ImportError:\r\n    raise ImportError(\r\n        'Please run \"pip install wandb\" to install wandb')\r\n\r\n\r\nclass WandbWriter:\r\n    def __init__(self, exp_name, cfg, output_dir, cur_step=0, step_interval=0):\r\n        self.wandb = wandb\r\n        self.step = cur_step\r\n        self.interval = step_interval\r\n        wandb.init(project=\"tracking\", name=exp_name, config=cfg, dir=output_dir)\r\n\r\n    def write_log(self, stats: OrderedDict, epoch=-1):\r\n        self.step += 1\r\n        for loader_name, loader_stats in stats.items():\r\n            if loader_stats is None:\r\n                continue\r\n\r\n            log_dict = {}\r\n            for var_name, val in loader_stats.items():\r\n                if hasattr(val, 'avg'):\r\n                    log_dict.update({loader_name + '/' + var_name: val.avg})\r\n                else:\r\n                    log_dict.update({loader_name + '/' + var_name: val.val})\r\n\r\n                if epoch >= 0:\r\n                    log_dict.update({loader_name + '/epoch': epoch})\r\n\r\n            self.wandb.log(log_dict, step=self.step*self.interval)\r\n"
  },
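A minimal sketch of driving WandbWriter by hand; WANDB_MODE=offline keeps the run local, and the Stat class below is a stand-in for the AverageMeter-style objects (anything with an .avg attribute) that the trainer normally passes in:

import os
from collections import OrderedDict

os.environ["WANDB_MODE"] = "offline"  # log locally instead of syncing to the wandb servers
from lib.train.data.wandb_logger import WandbWriter

class Stat:
    def __init__(self, avg):
        self.avg = avg

writer = WandbWriter(exp_name="demo", cfg={}, output_dir=".", step_interval=1)
# logs {"train/Loss": 0.42, "train/epoch": 0} at step = (calls so far) * step_interval
writer.write_log(OrderedDict(train=OrderedDict(Loss=Stat(0.42))), epoch=0)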
  {
    "path": "lib/train/data_specs/README.md",
    "content": "# README\n\n## Description for different text files\nGOT10K\n- got10k_train_full_split.txt: the complete GOT-10K training set. (9335 videos)\n- got10k_train_split.txt: part of videos from the GOT-10K training set\n- got10k_val_split.txt: another part of videos from the GOT-10K training set\n- got10k_vot_exclude.txt: 1k videos that are forbidden from \"using to train models then testing on VOT\" (as required by [VOT Challenge](https://www.votchallenge.net/vot2020/participation.html))\n- got10k_vot_train_split.txt: part of videos from the \"VOT-permitted\" GOT-10K training set\n- got10k_vot_val_split.txt: another part of videos from the \"VOT-permitted\" GOT-10K training set\n\nLaSOT\n- lasot_train_split.txt: the complete LaSOT training set\n\nTrackingNnet\n- trackingnet_classmap.txt: The map from the sequence name to the target class for the TrackingNet"
  },
  {
    "path": "lib/train/data_specs/got10k_train_full_split.txt",
    "content": "3784\n8998\n3906\n1631\n8277\n8358\n2338\n7938\n2988\n8302\n2662\n2663\n2825\n7447\n4781\n2218\n6348\n5860\n4517\n2819\n8075\n5391\n116\n3606\n7976\n7941\n1024\n4519\n1970\n557\n8579\n6908\n993\n7204\n1991\n3674\n8781\n6840\n5\n3225\n3763\n8688\n6778\n5777\n4794\n2744\n8126\n3864\n1733\n2923\n6829\n701\n683\n2081\n1831\n2404\n1459\n2741\n5972\n3618\n7462\n2654\n103\n2174\n6224\n2989\n2506\n2766\n5912\n2699\n3295\n3986\n609\n4895\n6673\n801\n1098\n1602\n2490\n3129\n8476\n3186\n7355\n4784\n4270\n1812\n4226\n2267\n8873\n6544\n6112\n2381\n4752\n753\n3776\n6511\n6016\n731\n2559\n7369\n5866\n563\n7731\n1105\n5603\n50\n4238\n2208\n8725\n4994\n4719\n1444\n8807\n7298\n8139\n8760\n8173\n2332\n4131\n5207\n1065\n8562\n3992\n4024\n2188\n9095\n6765\n1707\n6105\n6922\n5362\n1486\n7898\n4135\n6574\n1551\n998\n6565\n8127\n8927\n2544\n4365\n510\n768\n3535\n3875\n6808\n2931\n487\n1088\n4451\n368\n2470\n8111\n3493\n7338\n8281\n6390\n1271\n4373\n3667\n3494\n3757\n2966\n3756\n7840\n6315\n7827\n3300\n6261\n4163\n2217\n6549\n94\n7236\n9136\n1857\n6691\n3470\n6271\n807\n516\n9311\n6098\n3144\n8420\n5425\n5694\n2643\n6696\n6072\n7285\n3781\n903\n8522\n6092\n5979\n2622\n2529\n855\n3420\n3261\n8953\n7866\n2492\n3157\n359\n1520\n2642\n7452\n759\n36\n8931\n1744\n4350\n1089\n9199\n4295\n1889\n1908\n4868\n4498\n1968\n9103\n3273\n8723\n7413\n4114\n5584\n4874\n1427\n5211\n7618\n1542\n1353\n8158\n4168\n3200\n6345\n8560\n5619\n5953\n3158\n8849\n5831\n1411\n7294\n8103\n6539\n7397\n1006\n5450\n3119\n4274\n5352\n4571\n2319\n4217\n4976\n902\n1814\n2651\n3299\n3398\n982\n2428\n5793\n1346\n7057\n3737\n7329\n4449\n2110\n7405\n1773\n958\n3901\n4127\n8234\n2994\n7066\n1289\n2995\n5871\n3556\n9085\n846\n2366\n585\n7032\n5516\n5230\n3481\n2732\n6658\n7423\n1855\n6384\n3554\n5823\n4948\n7058\n4667\n5377\n2503\n7694\n9191\n9144\n655\n3409\n62\n8019\n8970\n5523\n7403\n3379\n2323\n4833\n5750\n3178\n6548\n8891\n7501\n3280\n7404\n343\n2171\n8397\n1367\n8611\n6118\n6603\n3729\n7182\n9048\n7733\n5642\n7141\n3335\n4845\n5449\n3467\n6250\n163\n5168\n2040\n5339\n3609\n8352\n3426\n8567\n769\n187\n6151\n6437\n7028\n8507\n3970\n9146\n2068\n5028\n7492\n1661\n2815\n2469\n2563\n3814\n8430\n4305\n3479\n5678\n9115\n4132\n1211\n5459\n4814\n545\n4556\n238\n4296\n2724\n1260\n2581\n6087\n4632\n4313\n380\n1209\n5447\n3032\n7942\n8943\n806\n2432\n6130\n4314\n2131\n9045\n6531\n5706\n6747\n7724\n2017\n3292\n5469\n2743\n424\n4233\n7643\n8619\n5192\n4516\n9324\n3537\n9152\n8058\n7526\n8711\n1949\n5982\n1732\n6702\n7027\n6388\n7012\n328\n2130\n452\n306\n7669\n3134\n5761\n3703\n44\n4189\n695\n7672\n5224\n9215\n5644\n3143\n3704\n5443\n2348\n7177\n2328\n4725\n354\n1418\n7810\n7746\n9002\n5759\n7226\n4535\n9160\n4385\n5397\n7249\n2936\n3204\n6287\n385\n2371\n2738\n3636\n9033\n2246\n2680\n6940\n4310\n2054\n9250\n9080\n4568\n5586\n4469\n2038\n3410\n7900\n4332\n6108\n678\n3319\n9079\n1054\n4048\n4751\n1320\n6890\n7931\n1398\n4349\n5299\n5025\n7932\n5738\n7787\n4590\n4020\n1274\n2488\n8497\n3372\n8965\n3219\n799\n3664\n6500\n7093\n4362\n6205\n4244\n4652\n1964\n5945\n6434\n2031\n2684\n6632\n4588\n8271\n3232\n5782\n2904\n6789\n5636\n7200\n3632\n5435\n8203\n3480\n4786\n7579\n3351\n1921\n798\n3646\n3094\n4359\n1654\n5975\n376\n5965\n780\n7821\n9224\n6738\n3185\n2133\n6248\n5996\n2834\n531\n5688\n2448\n7925\n7974\n5924\n6401\n5778\n6594\n5442\n8336\n4522\n3770\n6340\n6328\n4946\n4161\n2954\n2588\n8465\n2885\n1606\n5787\n3407\n3121\n7310\n1413\n1932\n4787\n2579\n3325\n508\n5610\n6480\n4290\n479\n3792\n6628\n2545\n6717\n6972\n2665\n6730\n3547\n6845\n5929\n3540\n435
6\n8993\n1052\n2235\n8356\n3403\n8818\n8260\n572\n4159\n1180\n5348\n941\n7948\n2676\n3539\n4866\n6422\n8365\n3217\n1310\n2059\n9177\n1419\n2283\n8892\n8162\n1212\n6277\n3725\n7806\n6149\n7874\n718\n6888\n7118\n277\n656\n8763\n8289\n4759\n5854\n8659\n7710\n3145\n5981\n1881\n5799\n6947\n1609\n6396\n2631\n2887\n318\n2550\n6132\n1736\n2907\n7816\n48\n4304\n8133\n6698\n2760\n7779\n7732\n7642\n1154\n7242\n711\n9262\n539\n8033\n7440\n1913\n5480\n5570\n8594\n8772\n4654\n8974\n6128\n6183\n1071\n8449\n2142\n2298\n524\n1695\n820\n4053\n8241\n1856\n8641\n3981\n217\n1063\n9286\n3152\n221\n5461\n1270\n2006\n7164\n1199\n6951\n5604\n5400\n5309\n3498\n6407\n6661\n7097\n8165\n5169\n3852\n7070\n5702\n4344\n6648\n6904\n3272\n7119\n5795\n2365\n2659\n353\n5444\n6968\n2755\n1924\n2098\n2972\n6006\n5865\n8740\n2418\n3401\n7856\n5841\n598\n836\n1147\n931\n8897\n0\n6049\n1837\n865\n1871\n6116\n6831\n5773\n3587\n303\n1883\n2163\n3070\n1308\n7953\n6300\n6909\n853\n7301\n3279\n123\n7186\n3194\n5553\n5133\n1931\n4622\n6075\n4891\n5722\n5693\n8\n2339\n6596\n71\n379\n4506\n4370\n1238\n2707\n3344\n4254\n8767\n1726\n325\n4148\n5438\n5357\n548\n1332\n6824\n2290\n2335\n3146\n2594\n2315\n3389\n3885\n2621\n4116\n5389\n7412\n7222\n4894\n8595\n2000\n4978\n4721\n6444\n3796\n9321\n2236\n6409\n1523\n1468\n9249\n8270\n2341\n2874\n174\n4757\n4502\n4703\n9034\n9108\n5451\n2619\n5022\n9158\n490\n6540\n1466\n2962\n8771\n3036\n2712\n4539\n1581\n5638\n9246\n4308\n4363\n4647\n4470\n1636\n2511\n1311\n6560\n7519\n8027\n9217\n6464\n6364\n3779\n4822\n3563\n3982\n5896\n5510\n6655\n1524\n2846\n3137\n621\n141\n1887\n6567\n8921\n4671\n6052\n8445\n8699\n7349\n3553\n2117\n7651\n5034\n5383\n649\n3818\n9022\n8414\n1012\n8159\n5081\n8571\n4765\n9135\n4361\n4073\n9142\n727\n2835\n8229\n3989\n4490\n4923\n5477\n1638\n3643\n712\n9044\n2230\n499\n7166\n96\n3172\n8431\n8401\n1470\n6356\n8817\n927\n4212\n2152\n1795\n3812\n4949\n1219\n1538\n3029\n6481\n9042\n7775\n7742\n423\n2085\n7715\n4541\n9061\n5916\n3950\n7420\n4878\n7406\n7046\n7808\n4911\n8804\n6927\n8820\n3264\n300\n8670\n2979\n252\n4407\n3383\n4688\n8504\n6723\n26\n3837\n2489\n4137\n8209\n229\n6490\n2364\n9016\n1763\n1728\n338\n8335\n9063\n5280\n2791\n641\n5454\n4581\n5420\n4548\n2840\n8508\n3463\n7231\n7619\n2560\n1755\n6201\n165\n1471\n6279\n5806\n6867\n5890\n2396\n3416\n1981\n6073\n5872\n3045\n4182\n7607\n3318\n4414\n2998\n6553\n7139\n5624\n2123\n3666\n723\n5110\n6932\n8200\n2222\n8399\n1041\n4138\n1594\n3569\n9253\n393\n7940\n8004\n1475\n6759\n5393\n1107\n2597\n878\n9309\n7576\n5250\n1759\n3142\n2015\n571\n3921\n1255\n7080\n893\n2160\n1355\n82\n1562\n9153\n8583\n4085\n4644\n7196\n9165\n3558\n4550\n6374\n7826\n8602\n4146\n9257\n6083\n874\n8383\n3731\n3374\n3653\n8222\n7344\n470\n1813\n4478\n6871\n7245\n6866\n3998\n7433\n276\n1915\n1988\n8168\n2518\n2686\n831\n6143\n5205\n8718\n1703\n7729\n2077\n7983\n8450\n1195\n9232\n507\n7989\n6974\n4054\n5828\n8655\n6679\n5245\n7783\n5886\n9098\n6491\n8782\n3525\n6542\n131\n8110\n9186\n9074\n4933\n9035\n2607\n4\n2057\n6273\n2711\n5829\n3382\n2696\n3043\n2048\n619\n2499\n5295\n1162\n7807\n3694\n2194\n3149\n1940\n7934\n840\n3592\n8237\n4731\n1324\n8486\n8726\n8573\n2928\n9078\n2272\n2564\n1370\n5911\n7434\n8026\n407\n7546\n2004\n5849\n3034\n7887\n3425\n1118\n926\n3430\n1544\n5902\n2282\n1124\n2334\n129\n1372\n4842\n6473\n4382\n1028\n415\n8269\n8073\n6910\n2796\n3038\n5735\n5080\n2852\n6306\n8842\n9188\n3637\n1066\n532\n928\n5485\n2838\n6753\n9008\n7984\n2816\n8819\n7103\n5977\n5044\n2064\n2599\n4973\n382\n3249\n6446\n6638\n852\n1724\n3368\n892\n3250\n8258\n7962\n43
00\n1616\n167\n8855\n2090\n4424\n879\n5136\n5350\n2635\n7828\n8506\n63\n3004\n3847\n3676\n1184\n1705\n6745\n1263\n5020\n746\n1888\n7036\n1033\n3914\n5433\n3905\n4641\n8909\n228\n4801\n3766\n8085\n643\n6914\n9280\n3013\n5657\n3696\n1590\n2920\n8282\n2403\n416\n911\n3849\n4215\n1120\n5490\n296\n2306\n3140\n3742\n4819\n6153\n6414\n760\n3000\n7498\n7108\n6429\n3031\n5314\n751\n3357\n5808\n7505\n98\n7652\n4027\n6257\n3943\n1799\n8577\n5577\n4969\n9163\n2025\n6061\n4026\n5732\n588\n7017\n1415\n4961\n4940\n7152\n538\n706\n2802\n8983\n3375\n1246\n6593\n5837\n1789\n7939\n4997\n5939\n2411\n6133\n199\n7593\n1702\n5406\n6082\n2359\n2912\n6109\n100\n8149\n5470\n2807\n3384\n6413\n3362\n5621\n6019\n9241\n9268\n7703\n4111\n7967\n5458\n7181\n5492\n1112\n6729\n4577\n106\n8853\n3774\n979\n7082\n4610\n1853\n9003\n9292\n2867\n6262\n2245\n3460\n1557\n767\n4796\n8147\n2658\n5769\n6985\n7065\n421\n7990\n3289\n1540\n9316\n2251\n6896\n5947\n4965\n2652\n4480\n963\n9047\n7168\n7824\n3976\n6210\n7018\n7179\n5016\n7789\n6102\n6828\n7659\n9109\n9071\n8115\n7628\n7110\n16\n7513\n835\n939\n4078\n2351\n2322\n3881\n4945\n560\n6837\n6094\n6475\n7901\n3\n771\n8029\n3135\n8044\n7127\n3741\n5156\n7030\n4906\n113\n3747\n7042\n5232\n5225\n3002\n4747\n6879\n5379\n4886\n7192\n4184\n1896\n1834\n8689\n3665\n2957\n6913\n8009\n4851\n6420\n7987\n828\n3003\n8884\n8815\n3198\n8008\n194\n6251\n3303\n3934\n395\n1285\n4169\n1648\n1347\n3600\n4631\n509\n211\n6230\n7241\n8250\n2219\n2582\n8353\n7790\n7583\n4462\n3904\n9004\n6942\n1704\n5686\n8051\n2981\n5511\n6182\n7088\n1699\n1222\n3455\n6189\n1528\n5197\n6221\n7893\n3283\n2837\n7773\n8766\n2942\n8021\n614\n4102\n7362\n1786\n400\n133\n556\n3127\n5237\n3727\n1440\n3873\n6322\n8448\n6285\n8696\n8800\n4009\n3386\n454\n4847\n5685\n9093\n246\n1314\n5895\n6863\n4302\n4260\n8405\n8417\n7116\n255\n3223\n4737\n7852\n6337\n814\n710\n1094\n6103\n5809\n5882\n6336\n4974\n1499\n2806\n3744\n2664\n2436\n4482\n8665\n8918\n1076\n8676\n5725\n9248\n4755\n1447\n9328\n5500\n78\n2653\n792\n6854\n6093\n6172\n3378\n4492\n5529\n5476\n3846\n1391\n383\n4289\n3883\n2648\n3265\n2525\n5402\n4599\n6870\n6877\n4413\n2464\n8519\n2521\n1839\n5822\n5664\n7257\n5375\n6852\n6764\n5182\n8914\n3015\n8509\n3080\n4562\n8979\n6215\n6643\n8601\n6096\n4812\n5246\n7862\n527\n7849\n6737\n12\n2468\n7961\n275\n27\n5932\n3840\n7341\n4996\n8564\n2154\n3788\n6138\n7831\n4442\n757\n4464\n1170\n2568\n19\n323\n6584\n7675\n3441\n2067\n9027\n2486\n4379\n4744\n1737\n7563\n301\n3907\n4742\n6857\n1221\n9284\n8458\n8236\n2897\n4004\n1526\n5345\n4423\n6246\n8578\n1057\n3711\n4986\n4785\n3997\n7311\n4788\n107\n8387\n2041\n2608\n8628\n5830\n6031\n783\n6817\n3293\n541\n773\n8473\n2501\n7247\n5667\n804\n483\n1639\n696\n6060\n5429\n5762\n1527\n7342\n1329\n6225\n7895\n381\n8030\n8520\n8362\n4734\n3526\n9273\n2039\n4142\n5084\n875\n6905\n8968\n5275\n3052\n650\n7509\n232\n2595\n3631\n1810\n4355\n8315\n8908\n1777\n4834\n3164\n2336\n1543\n6212\n8346\n3024\n3719\n1242\n6265\n8101\n3133\n6150\n6358\n3316\n4089\n1647\n4629\n7117\n2596\n5366\n1225\n6371\n624\n2209\n1428\n1158\n7648\n466\n8765\n802\n153\n4639\n3657\n6482\n9320\n2693\n6591\n3294\n2617\n5052\n6305\n3227\n8784\n7170\n93\n5868\n6716\n1671\n178\n2703\n954\n3254\n2262\n5046\n5743\n8647\n6393\n7706\n6604\n3728\n6978\n7489\n7474\n8754\n2740\n2233\n6038\n1491\n8814\n2080\n2358\n5944\n5653\n1164\n9259\n4518\n7343\n5748\n3897\n923\n5967\n2677\n3503\n1202\n4966\n1836\n1863\n6634\n1962\n9096\n9064\n977\n4049\n1464\n658\n536\n3402\n8064\n1309\n259\n7999\n8122\n910\n224\n6152\n7142\n6070\n7523\n8411\n2408\n6766\n9214
\n9312\n8325\n6192\n626\n6025\n6240\n8708\n4630\n6777\n1075\n8906\n408\n9269\n6236\n9067\n2514\n8568\n2324\n156\n3136\n3530\n7878\n7308\n4335\n2065\n3845\n4453\n3356\n1450\n371\n7219\n5171\n201\n8642\n2099\n477\n1603\n8339\n7430\n3061\n235\n8291\n1133\n8474\n7035\n8653\n989\n4569\n9092\n8347\n3102\n1743\n9086\n5140\n7438\n1530\n4342\n2460\n7646\n5047\n5071\n5430\n6944\n610\n2803\n1448\n4696\n6156\n4386\n4248\n4256\n994\n2112\n805\n8011\n8276\n8999\n4956\n1712\n2795\n7553\n6436\n2158\n9083\n3184\n5784\n4428\n612\n5288\n6222\n1365\n5074\n6848\n575\n5213\n2175\n4240\n351\n2086\n2656\n5150\n9255\n8189\n7735\n1261\n1344\n4097\n8674\n2984\n4235\n5998\n6488\n537\n1267\n7486\n7124\n6245\n7955\n7337\n5436\n1194\n8226\n209\n1710\n7906\n4357\n4139\n5679\n2584\n2854\n1004\n8246\n8586\n5087\n1878\n4926\n6637\n3197\n7757\n8249\n4055\n6502\n1248\n990\n3928\n2770\n2751\n1020\n6426\n4190\n6839\n2671\n884\n3871\n9212\n4179\n3394\n10\n5861\n5316\n6869\n2985\n8905\n8559\n4457\n2480\n2313\n4100\n4395\n6835\n7799\n7890\n2785\n5468\n7302\n5862\n1803\n6376\n3171\n8591\n717\n7053\n1655\n4489\n2522\n2921\n8555\n1984\n895\n8949\n1305\n738\n7606\n112\n3042\n1325\n437\n3167\n3340\n511\n3689\n5813\n8982\n69\n4421\n7150\n550\n8829\n8685\n3147\n8956\n3166\n7023\n8633\n3308\n2014\n3573\n3880\n4045\n2069\n6051\n4950\n702\n6664\n8418\n2454\n6181\n4853\n4166\n7022\n7418\n3605\n9181\n7172\n5031\n4589\n7858\n6586\n6351\n8334\n7504\n634\n3759\n1890\n890\n6959\n5085\n4919\n2161\n1191\n256\n3610\n7079\n3427\n4071\n7323\n2982\n7263\n7444\n4251\n5846\n4864\n3649\n4311\n7461\n8120\n4582\n6373\n2805\n4872\n4869\n5493\n5867\n2670\n7099\n30\n8933\n930\n7919\n501\n7261\n5289\n7449\n7772\n3613\n7848\n3196\n474\n205\n841\n2611\n6185\n3088\n409\n7239\n5938\n7871\n1343\n6705\n1027\n5596\n2199\n9113\n5471\n6134\n838\n2345\n8359\n4061\n1474\n3229\n270\n4245\n1979\n5995\n1517\n8652\n4006\n4880\n6137\n4693\n2528\n6996\n2926\n5798\n2477\n2549\n1128\n3341\n6014\n4479\n2861\n4208\n5175\n5174\n5118\n3736\n5463\n1588\n2327\n8380\n7982\n1514\n1058\n4586\n6608\n7985\n3044\n1822\n3628\n6851\n549\n1811\n2184\n2601\n4608\n8922\n2540\n6659\n3859\n307\n3650\n3767\n8167\n505\n4366\n4824\n5520\n461\n1933\n2401\n8106\n2055\n7844\n8544\n8838\n4797\n7419\n6686\n7670\n6039\n5672\n5141\n6543\n206\n5252\n4718\n888\n1601\n3218\n5114\n713\n4022\n4419\n6708\n397\n425\n6612\n5057\n1729\n6573\n4729\n4080\n1034\n2961\n534\n8194\n5598\n9218\n2424\n329\n4154\n1597\n922\n109\n8823\n3578\n9038\n8437\n3307\n128\n8032\n1412\n7333\n8762\n8851\n8865\n3056\n468\n3808\n3064\n8798\n7052\n7767\n9231\n1086\n2162\n6566\n2109\n3439\n6122\n3642\n7696\n8610\n5279\n1808\n8687\n8377\n817\n8714\n6066\n4008\n3640\n6015\n1021\n7601\n4855\n6017\n87\n7071\n2730\n7268\n3614\n6084\n6117\n6924\n9102\n2829\n375\n8724\n2095\n22\n1541\n2970\n633\n139\n451\n4521\n179\n1396\n3876\n5824\n8020\n426\n4982\n4172\n1157\n190\n4859\n1455\n3110\n3323\n9104\n858\n6719\n6428\n4495\n8551\n2141\n3984\n3066\n67\n4299\n5821\n8444\n6581\n6097\n7090\n7781\n8944\n3085\n8606\n2114\n5355\n8901\n1461\n3301\n422\n7000\n4820\n5790\n1379\n7536\n4199\n8736\n8991\n5241\n1698\n1294\n1753\n196\n2987\n8680\n4658\n4144\n8639\n6441\n8255\n8156\n3677\n6385\n6520\n7700\n3760\n6001\n1144\n5478\n7394\n8057\n5018\n4232\n5235\n6844\n3111\n8802\n867\n949\n7843\n573\n2278\n6801\n7629\n2714\n5105\n6946\n2697\n5315\n1571\n8677\n2537\n4374\n3833\n7820\n3750\n2033\n6526\n3884\n8706\n7195\n417\n3603\n3001\n6284\n5873\n5718\n8576\n8457\n3589\n5839\n459\n3626\n6342\n8729\n6933\n607\n6053\n8228\n3773\n1805\n6365\n5142\n6069\n1389\n9026\n570\n4614\
n5712\n5533\n9222\n2821\n1897\n819\n766\n4060\n4902\n5905\n6842\n5446\n1277\n4303\n2836\n934\n1014\n7822\n7494\n3466\n665\n1047\n5881\n3328\n4664\n315\n1315\n1462\n8616\n7725\n2756\n5749\n1730\n8184\n4567\n5065\n7499\n8867\n1304\n3669\n9192\n410\n8177\n6710\n1210\n2329\n8443\n3911\n1899\n7686\n3315\n7190\n6180\n3116\n5341\n4394\n8337\n9182\n6969\n5715\n2172\n1742\n2782\n3715\n9195\n7960\n2517\n4890\n8294\n2337\n8014\n3353\n7475\n2193\n4843\n8831\n4200\n4653\n6196\n6957\n3063\n2996\n8959\n8973\n6529\n3457\n5274\n8002\n6823\n6154\n5561\n1780\n9318\n7657\n1758\n6503\n7678\n3274\n1625\n4327\n3236\n8575\n3155\n4707\n4331\n1494\n8756\n3174\n1074\n8116\n8295\n8311\n3048\n3752\n6050\n6483\n8003\n9175\n4674\n1642\n2556\n6166\n7165\n8441\n5413\n3990\n1640\n1778\n7500\n8304\n1395\n4315\n5949\n3364\n242\n5763\n1036\n249\n2430\n7426\n8131\n411\n6267\n2045\n6606\n899\n8065\n9052\n7507\n5779\n5616\n2107\n5408\n2980\n6310\n5776\n4328\n821\n3251\n2354\n7076\n1700\n5313\n6736\n79\n8212\n3959\n5677\n7545\n160\n6790\n6859\n3659\n6770\n1106\n8846\n956\n7472\n2050\n8099\n4795\n8053\n9293\n7037\n1646\n9307\n1069\n5322\n5332\n2708\n8977\n917\n2419\n184\n2105\n1578\n3923\n5780\n1903\n2512\n429\n5582\n493\n4972\n445\n8286\n555\n320\n8300\n322\n617\n3413\n4459\n525\n5631\n6314\n5157\n5300\n8545\n182\n1031\n4429\n2495\n7586\n1534\n3099\n3916\n3738\n1919\n535\n2119\n1299\n177\n1838\n2159\n4099\n8285\n5172\n8540\n6020\n7683\n3073\n3115\n1673\n3087\n3488\n2416\n1894\n5942\n3597\n5834\n2007\n43\n1779\n4174\n2023\n2546\n2429\n9006\n436\n4214\n4536\n3693\n5426\n6767\n5903\n4368\n2170\n5051\n7490\n7882\n2859\n5035\n7835\n5372\n7122\n925\n3253\n6338\n8393\n4093\n5848\n7588\n2683\n8049\n5403\n5894\n8745\n8550\n2941\n3484\n9029\n4461\n8022\n725\n2355\n1619\n3030\n1975\n5623\n2415\n1957\n6141\n9278\n3226\n3062\n5670\n7326\n8759\n8496\n6619\n8187\n8262\n6199\n951\n7183\n668\n2388\n4698\n5681\n8240\n2851\n871\n4988\n9084\n9089\n3162\n1167\n8244\n5227\n6461\n2831\n776\n5010\n5770\n5282\n3574\n5102\n1278\n2281\n5455\n305\n4628\n4663\n9119\n7487\n8746\n4889\n6569\n1175\n102\n2386\n8940\n2479\n5566\n53\n8833\n1918\n8001\n321\n6786\n6861\n4358\n2771\n7467\n975\n4777\n605\n3543\n2600\n7584\n9299\n4530\n6477\n7364\n7328\n183\n4761\n7543\n304\n1196\n4623\n7839\n2139\n5519\n1953\n533\n5989\n7590\n7428\n6346\n6162\n1091\n1946\n6260\n4405\n5676\n8924\n7171\n8409\n1866\n6379\n3411\n2387\n3051\n7398\n154\n1185\n6442\n6004\n1611\n2165\n9018\n8323\n616\n3995\n8952\n1533\n7853\n4194\n213\n789\n4991\n3675\n7456\n5752\n175\n7556\n4195\n907\n2248\n9057\n8467\n4594\n1017\n7968\n880\n7446\n3304\n1666\n4942\n3867\n4802\n9156\n6357\n4621\n887\n6213\n5261\n1336\n521\n8928\n1818\n7864\n4792\n6742\n157\n1593\n823\n7235\n5303\n5633\n1100\n1692\n8047\n5993\n1460\n6714\n1630\n6440\n6307\n3608\n292\n212\n401\n5974\n7107\n8301\n8342\n2720\n4583\n2757\n7315\n833\n4466\n4236\n1282\n5273\n2149\n287\n8484\n2380\n8119\n7167\n737\n5076\n6598\n3596\n5382\n2650\n8980\n3421\n1356\n1954\n7823\n1172\n2226\n1941\n6136\n7274\n2256\n4928\n324\n1407\n4410\n4579\n1061\n7113\n486\n862\n3435\n6956\n2873\n1465\n6113\n8225\n8512\n6806\n272\n6008\n1241\n88\n5662\n3555\n689\n8733\n2812\n7453\n6282\n420\n2471\n4477\n7495\n1445\n594\n6939\n1564\n8704\n8590\n7992\n7374\n5796\n9298\n4213\n5713\n5864\n326\n5513\n402\n464\n608\n1951\n8640\n8180\n3347\n3459\n4162\n2690\n7478\n5856\n5240\n2389\n3022\n602\n5547\n1798\n1345\n9276\n599\n3673\n3277\n1635\n8625\n1567\n5928\n636\n5671\n2896\n3477\n412\n7575\n4201\n685\n4760\n1229\n4275\n8960\n3123\n4471\n5941\n3355\n3999\n7157\n6354\n7741\n6850\n
8783\n1943\n6769\n7330\n8721\n8477\n1381\n848\n778\n6408\n2644\n5817\n1441\n1723\n2144\n2776\n2368\n120\n367\n8839\n8749\n5353\n4158\n3148\n9114\n1233\n9228\n8857\n2895\n1286\n200\n6755\n5125\n5857\n1657\n7658\n5097\n5000\n942\n7020\n586\n784\n7078\n6194\n8658\n8957\n9325\n1851\n8911\n4862\n7004\n1186\n8824\n1651\n2999\n561\n7639\n4316\n5086\n3187\n7912\n2624\n9183\n8487\n5089\n8475\n7554\n4031\n6297\n6059\n5329\n115\n2058\n7650\n7634\n7121\n2485\n7805\n2241\n7713\n4352\n2409\n1026\n2745\n4549\n6474\n5124\n5201\n6556\n6617\n9091\n3945\n8402\n5648\n5257\n2192\n4901\n7750\n6131\n6027\n6352\n4625\n1254\n5498\n3720\n8261\n3939\n5576\n3685\n6713\n8472\n991\n8354\n8068\n5655\n5997\n1029\n7506\n6740\n2575\n2990\n4898\n583\n7402\n3290\n5388\n6715\n8235\n5361\n4970\n1363\n3338\n5731\n9014\n5358\n2216\n2856\n635\n1193\n3705\n6334\n7666\n5270\n1384\n6368\n8604\n3564\n1937\n2481\n1341\n721\n2100\n3958\n6551\n3813\n2592\n7980\n5385\n319\n2357\n8761\n8910\n8693\n1204\n489\n4827\n8024\n7832\n6427\n3895\n89\n9068\n8067\n1708\n1111\n8963\n1902\n9251\n5719\n9143\n5537\n9169\n77\n5365\n1840\n485\n4456\n2841\n1169\n3271\n7144\n6886\n9140\n7173\n6003\n1659\n1807\n8371\n2439\n274\n4660\n3448\n6623\n347\n2103\n3400\n2106\n9073\n8169\n3687\n3305\n4416\n8454\n6635\n332\n2433\n2909\n3839\n4063\n1944\n6509\n1296\n7770\n1880\n6610\n4075\n9331\n4484\n302\n418\n4219\n1333\n2350\n6498\n8424\n4694\n4883\n5269\n6580\n5007\n6722\n1669\n8470\n2571\n513\n3810\n7049\n6332\n7363\n3532\n8456\n2097\n297\n8841\n7180\n714\n1587\n5234\n4268\n2320\n7372\n660\n8503\n1668\n8847\n1101\n7275\n3336\n6460\n722\n7782\n3947\n502\n4258\n2132\n1835\n181\n3841\n427\n3446\n2551\n8324\n6963\n4284\n7297\n7577\n3399\n9148\n8213\n5656\n8440\n851\n657\n2446\n4292\n6992\n976\n1108\n2681\n3237\n8582\n377\n5969\n5287\n9209\n8523\n7178\n7833\n6175\n2126\n3023\n5090\n7491\n6640\n6077\n2221\n2780\n1694\n4094\n144\n6161\n3203\n7123\n749\n3625\n3848\n980\n2270\n7819\n3672\n7689\n7203\n2718\n1714\n2884\n3474\n3802\n3851\n4224\n7237\n5415\n7998\n7207\n4106\n9036\n1046\n8731\n5070\n6818\n4592\n6056\n693\n1328\n3309\n5791\n2629\n2736\n202\n388\n7886\n4417\n8786\n8822\n4035\n7718\n8492\n5505\n1192\n4388\n8941\n5019\n7538\n6732\n7296\n6389\n5923\n1405\n3278\n3917\n1688\n8374\n443\n4037\n9099\n5190\n6402\n4177\n9310\n7747\n4348\n7197\n4844\n4998\n5609\n4345\n29\n3332\n8648\n4107\n346\n2577\n3941\n1215\n3782\n8252\n4706\n2675\n3790\n7459\n6164\n7316\n1149\n6687\n582\n3139\n5040\n7645\n3882\n7322\n4034\n1861\n4701\n8757\n3208\n8801\n6349\n8907\n1823\n4528\n4789\n143\n4746\n9234\n3866\n9245\n1911\n1366\n4393\n2061\n859\n1959\n6967\n3138\n7382\n9031\n6237\n845\n80\n6911\n7163\n5229\n4736\n8738\n33\n8543\n357\n3193\n7262\n4448\n6796\n6793\n3321\n7569\n6411\n7692\n7340\n1417\n5847\n3836\n2678\n1188\n8727\n223\n8615\n7417\n5771\n3170\n8061\n2935\n8263\n8257\n6883\n1276\n1239\n812\n6258\n3922\n7525\n8117\n3039\n603\n8554\n7573\n2787\n3445\n5115\n3478\n962\n3961\n6570\n7722\n216\n2797\n5154\n2530\n4904\n2405\n7542\n4021\n3252\n5370\n9302\n236\n4532\n1361\n3373\n1716\n2183\n1583\n3783\n868\n1687\n8925\n1433\n6198\n8208\n6367\n7603\n882\n3469\n1645\n7654\n1176\n4231\n150\n7997\n5456\n7031\n4375\n8840\n5634\n6945\n705\n3442\n4774\n3822\n7148\n1922\n8459\n6249\n8713\n6197\n8599\n6071\n6756\n1634\n950\n5640\n7749\n5920\n6622\n4783\n7837\n7479\n7229\n3919\n1797\n5272\n8945\n4908\n5439\n6903\n5833\n6930\n8197\n9261\n1711\n5483\n6046\n4285\n8852\n7409\n8971\n8278\n7534\n7792\n2444\n7496\n8063\n1665\n248\n3894\n4585\n1982\n66\n6651\n4850\n1240\n7511\n7524\n9258\n2075\n3979\n4714\n75
92\n965\n2919\n8239\n1842\n8013\n4750\n2344\n6155\n3468\n31\n2087\n1599\n1573\n5883\n7613\n195\n3749\n644\n2189\n8779\n8743\n9005\n8081\n1040\n7785\n5820\n8830\n5495\n4867\n2710\n3843\n491\n7153\n6217\n1148\n4741\n1761\n5484\n3423\n5474\n6916\n5876\n7252\n1739\n8930\n6647\n5198\n4903\n8488\n7366\n2774\n2726\n2385\n7625\n3179\n2211\n8845\n6600\n399\n6810\n3447\n6684\n4915\n8368\n1867\n2325\n2101\n1335\n7734\n3722\n7437\n3716\n7025\n4000\n6897\n1408\n7154\n5013\n2204\n9233\n4225\n3817\n1877\n9161\n2197\n6991\n3390\n280\n1892\n1612\n7753\n2801\n7246\n7909\n6229\n9314\n8407\n1436\n3879\n6432\n6849\n5326\n5327\n8535\n7910\n7745\n5545\n7916\n207\n1783\n6158\n8517\n7361\n8070\n6430\n119\n6146\n4183\n1083\n7385\n4497\n9133\n1686\n3765\n5099\n595\n8046\n4418\n4043\n2361\n7915\n9149\n1717\n1141\n6375\n1018\n5602\n1262\n7485\n9178\n6629\n3339\n8934\n4648\n7988\n6252\n3440\n864\n5418\n3874\n7280\n6191\n8388\n4323\n6792\n4324\n2232\n7228\n8684\n7813\n6187\n6678\n3177\n3534\n4953\n4402\n7739\n6319\n2414\n8700\n5946\n8238\n4533\n6917\n4167\n4618\n2115\n2268\n3081\n1247\n4001\n8580\n7636\n3101\n2195\n1559\n3714\n2484\n7188\n6028\n7530\n2828\n1977\n3238\n6496\n2340\n110\n3247\n7532\n7541\n924\n1632\n484\n4487\n4439\n6447\n1319\n4944\n6347\n1791\n2285\n8087\n5452\n91\n1166\n162\n5185\n7933\n4743\n1627\n7259\n8620\n8525\n8207\n5845\n9011\n5525\n4269\n4700\n1824\n8186\n8872\n8299\n3957\n8242\n4558\n6439\n2666\n5943\n6958\n8112\n5121\n8806\n6170\n7688\n3486\n2082\n7436\n2778\n1096\n786\n2206\n5170\n1443\n6030\n3312\n9151\n8485\n6404\n8498\n2883\n8961\n2280\n8341\n9137\n4337\n2809\n2445\n809\n8298\n8643\n8316\n4951\n6853\n1572\n3215\n3938\n2249\n6515\n1337\n8328\n7712\n1429\n4117\n5441\n3230\n4152\n7225\n3513\n6953\n1507\n348\n3639\n5739\n2673\n1550\n6301\n1652\n8453\n204\n6833\n8056\n2200\n5217\n1854\n4711\n7368\n4572\n4032\n7531\n1013\n3634\n2875\n6058\n8307\n7609\n1766\n904\n667\n5410\n6578\n3601\n1664\n3233\n7390\n8178\n4486\n4952\n4427\n4876\n9166\n3107\n2772\n6295\n5001\n5296\n3371\n6518\n6327\n854\n1615\n8288\n1912\n5927\n6202\n5814\n9032\n1059\n3214\n6547\n7038\n5781\n6926\n4390\n6114\n1622\n4318\n5803\n5984\n736\n3561\n6554\n5045\n4277\n7386\n9081\n8462\n2034\n4955\n2701\n932\n1298\n7758\n7176\n9205\n2276\n3077\n3803\n3562\n8054\n7946\n295\n1843\n7728\n1629\n7768\n3663\n6363\n2971\n431\n9285\n2513\n1116\n3656\n4529\n6366\n5758\n6339\n8398\n816\n4153\n648\n2536\n1826\n7870\n8113\n7730\n7101\n6555\n9256\n6774\n1072\n4578\n2598\n3604\n5880\n861\n8273\n3350\n3117\n4685\n9219\n4334\n5165\n2035\n7224\n4066\n4253\n4447\n3815\n5038\n253\n3658\n2252\n330\n3967\n6443\n2143\n7336\n6135\n593\n2734\n8390\n4655\n7800\n1399\n1173\n5618\n2822\n7905\n7503\n4431\n2443\n1568\n3909\n1974\n2496\n4772\n5164\n4105\n2138\n2864\n3799\n3924\n4882\n8245\n1585\n5528\n5692\n5730\n5832\n137\n3175\n2894\n2062\n3899\n2752\n4028\n2113\n5411\n293\n2647\n730\n3758\n1667\n8879\n9303\n6653\n3698\n3968\n3053\n503\n2150\n4645\n2257\n4627\n8303\n7966\n8742\n4692\n5901\n8547\n2277\n5546\n986\n370\n4697\n8712\n4804\n4881\n1182\n6650\n7290\n3487\n2814\n5668\n7567\n5333\n3724\n4164\n3084\n8896\n3888\n6537\n17\n6882\n3531\n704\n1037\n8866\n5263\n6758\n3762\n1393\n3824\n5575\n5112\n214\n1439\n5700\n8932\n1306\n5011\n6928\n5173\n4098\n1132\n7352\n4778\n7723\n1368\n2390\n670\n2685\n5855\n1772\n6380\n3853\n940\n5424\n6091\n1748\n6193\n5297\n6572\n8877\n6874\n430\n5041\n5267\n1145\n7448\n620\n9112\n4294\n1432\n72\n130\n2393\n7920\n4597\n6614\n8889\n3697\n1895\n3462\n2616\n3978\n4791\n7846\n7780\n8372\n428\n6559\n8326\n9211\n2363\n1525\n5980\n7888\n3331
\n8118\n7899\n615\n7377\n791\n5930\n6627\n8322\n1138\n770\n8460\n5100\n8274\n8350\n6316\n2893\n7594\n9236\n5082\n8150\n1986\n1909\n8902\n2145\n3617\n3501\n7\n2426\n5056\n8016\n2702\n5360\n8135\n8385\n8378\n8018\n8574\n720\n8893\n3021\n1978\n4782\n1816\n2083\n4051\n1446\n5870\n971\n9097\n8006\n4222\n8287\n686\n1377\n611\n8153\n4920\n4808\n1536\n679\n4096\n3891\n4884\n432\n4615\n8988\n5560\n3451\n5589\n3514\n6169\n1414\n3244\n1490\n7100\n3588\n690\n7317\n4171\n2266\n6800\n108\n2793\n5151\n6977\n2587\n8188\n8752\n6318\n5815\n5116\n263\n3311\n5191\n5689\n289\n3392\n5755\n1022\n5548\n9319\n8937\n6011\n7632\n5328\n4993\n4141\n5407\n1865\n520\n7305\n7208\n526\n3645\n1859\n2520\n3523\n8629\n7304\n8881\n3076\n4005\n8329\n2205\n2214\n6925\n8691\n4136\n8883\n974\n7873\n7952\n3965\n5887\n7964\n7189\n2406\n2783\n8086\n405\n6568\n5147\n2021\n4727\n4826\n7674\n1600\n5078\n2949\n6624\n6541\n8986\n5740\n4679\n8500\n3591\n4434\n398\n983\n7544\n1478\n4570\n6012\n465\n9330\n7206\n808\n8737\n2356\n4959\n8812\n6955\n3599\n2168\n1420\n1721\n1794\n5897\n8422\n2\n4023\n2739\n3619\n8797\n5496\n8951\n8181\n6893\n9254\n1809\n5682\n4309\n6929\n2742\n5988\n3363\n4493\n8434\n4210\n1503\n1876\n5094\n4600\n4936\n4798\n3933\n5216\n646\n7660\n3098\n8773\n4076\n1576\n5335\n3746\n3327\n47\n4602\n8636\n4129\n363\n6417\n7416\n9025\n4377\n4766\n2779\n4151\n9046\n7860\n3154\n3476\n7620\n966\n2052\n8344\n1752\n7199\n4412\n8895\n8882\n2463\n339\n56\n5390\n4821\n7555\n6558\n1905\n5258\n8880\n4205\n3580\n6735\n1023\n4511\n3850\n161\n7395\n2532\n3349\n7055\n7387\n758\n1907\n872\n3006\n659\n815\n1961\n6902\n7668\n4708\n1904\n4433\n5159\n6816\n8664\n6918\n1016\n6513\n7314\n5364\n7480\n9313\n716\n3395\n6843\n2292\n918\n4329\n1035\n6344\n8593\n3404\n5212\n837\n480\n8524\n1342\n3690\n6797\n7414\n288\n8863\n3352\n1628\n24\n135\n3314\n2181\n8650\n5915\n8078\n6812\n1375\n6040\n906\n5635\n7126\n1387\n7458\n6119\n5591\n3795\n1531\n95\n1960\n7522\n3033\n898\n4607\n4921\n3913\n2623\n4430\n6268\n7063\n1326\n9075\n2505\n7400\n1284\n2951\n747\n6466\n1357\n6493\n7320\n5892\n576\n5107\n5559\n97\n2583\n6361\n8843\n3509\n7892\n6086\n1476\n4612\n7427\n4267\n9094\n7050\n6048\n8455\n8382\n2227\n284\n2898\n3221\n2353\n2157\n5990\n5810\n3581\n7279\n6188\n7859\n3549\n5539\n7918\n2022\n9066\n630\n2500\n5111\n6561\n5127\n8095\n5569\n6123\n1338\n8605\n3491\n4187\n8220\n7334\n9213\n3067\n6997\n2853\n4735\n4372\n1489\n5954\n6662\n2207\n973\n3361\n960\n6350\n4170\n7431\n8076\n1129\n750\n7559\n7194\n2261\n2300\n6590\n5893\n6889\n3125\n8788\n334\n7286\n3472\n8164\n7693\n1469\n1181\n669\n7515\n5563\n4773\n3210\n6324\n3113\n9070\n3638\n7551\n2541\n3506\n5138\n4069\n7198\n7560\n3306\n6100\n2932\n4473\n1741\n14\n4672\n7564\n8748\n8874\n3804\n3678\n2240\n2610\n2862\n1358\n5716\n42\n5176\n9326\n8464\n1038\n2993\n3017\n9072\n32\n4809\n4364\n2808\n4125\n448\n152\n7299\n5431\n6178\n793\n3444\n9120\n8410\n4963\n772\n5457\n6954\n3014\n6881\n286\n553\n1948\n6398\n6255\n3057\n8646\n6176\n2700\n7106\n5663\n6683\n1281\n6013\n8799\n7635\n9289\n1885\n442\n2225\n6294\n5054\n2674\n7884\n8730\n8216\n4203\n1488\n7111\n4013\n3623\n7950\n1971\n1966\n3248\n2900\n1553\n472\n3865\n7796\n6937\n4591\n8098\n5208\n294\n5627\n5691\n5687\n7149\n4879\n3624\n7005\n2773\n3112\n9185\n1633\n7830\n5101\n8707\n8469\n4678\n4860\n700\n5527\n9194\n2794\n5068\n2639\n1177\n4282\n6492\n8128\n5859\n5029\n5123\n2877\n522\n5048\n7230\n2104\n6642\n6731\n2717\n5149\n2043\n9059\n5277\n844\n1394\n3262\n5515\n6706\n3651\n9105\n7671\n2880\n3607\n6410\n2508\n8463\n2394\n1916\n1125\n5343\n3322\n5307\n4547\n1589\n8478\n8899
\n2955\n8028\n7293\n4619\n4058\n2781\n8715\n1272\n5734\n4474\n4863\n4367\n49\n8844\n5605\n8671\n6743\n4281\n7077\n1874\n2626\n2516\n258\n5249\n6186\n7958\n5432\n3801\n6288\n4732\n9121\n7558\n2527\n4661\n6819\n3835\n7508\n584\n215\n5036\n4261\n8978\n5228\n647\n4657\n2591\n5931\n5088\n9204\n929\n4381\n5421\n2965\n5050\n6495\n5033\n4799\n959\n6115\n3520\n1232\n5811\n317\n8976\n7705\n3842\n2178\n7187\n1373\n7112\n2694\n8627\n8493\n3991\n7441\n6308\n2589\n6462\n3406\n7673\n8660\n2902\n752\n1025\n849\n7682\n6982\n6652\n3612\n298\n5148\n4873\n3414\n1693\n1458\n327\n2016\n5002\n6768\n7016\n5583\n3270\n857\n8232\n7158\n7981\n4676\n4675\n2164\n8360\n6709\n8143\n365\n4062\n4527\n7928\n9009\n6228\n5818\n2533\n9305\n8887\n55\n2507\n8870\n6649\n5158\n76\n5595\n6693\n5306\n8666\n3020\n7527\n3082\n6304\n1591\n6145\n6868\n7205\n9107\n1165\n6773\n172\n1993\n4176\n8400\n4611\n7589\n8702\n5386\n6095\n6335\n1561\n8805\n5963\n7393\n3681\n2037\n4968\n7451\n3360\n7466\n8361\n4455\n4064\n5422\n1689\n3977\n7269\n362\n4178\n4145\n6127\n5162\n2399\n9225\n7068\n1650\n794\n3007\n1348\n7736\n444\n6081\n5298\n2026\n2543\n9087\n3593\n7425\n3730\n8468\n2641\n7529\n1720\n6377\n8732\n5851\n7956\n3150\n3785\n6485\n3611\n2869\n8510\n4775\n4463\n1251\n9124\n6873\n3391\n6505\n4118\n1617\n8837\n7051\n3213\n3668\n5347\n8452\n6289\n5840\n478\n3522\n453\n3376\n6190\n3342\n2237\n2870\n5178\n5567\n5952\n6919\n3005\n134\n3397\n7443\n8539\n6822\n5264\n3288\n5962\n8421\n6744\n8608\n4656\n1802\n2073\n4271\n1043\n2922\n8211\n2196\n5260\n3789\n7211\n7571\n7834\n5680\n2047\n5502\n3369\n3437\n3286\n5517\n3912\n8386\n1442\n6961\n2191\n2417\n9088\n5155\n6813\n4520\n7375\n1224\n811\n1891\n3748\n4123\n2789\n5305\n8419\n7248\n9237\n992\n4038\n4499\n2060\n5538\n850\n2669\n7612\n104\n9290\n2526\n1287\n4160\n4633\n7125\n742\n744\n4534\n2407\n7714\n4555\n8764\n7661\n4722\n7721\n3205\n6657\n1214\n3754\n6080\n4593\n3018\n8792\n2294\n4450\n7701\n9301\n127\n7069\n4513\n6243\n8025\n4010\n8632\n4715\n5284\n4574\n726\n4252\n4561\n7354\n299\n6088\n1090\n5012\n5684\n3489\n5639\n4888\n1584\n1969\n4846\n2915\n6804\n2775\n7306\n6506\n9306\n5231\n7740\n4283\n953\n6725\n458\n8290\n1504\n1539\n8885\n138\n3764\n1256\n257\n335\n1011\n7060\n5986\n9323\n4740\n8994\n4140\n6807\n8254\n3963\n9297\n2102\n2964\n9207\n4910\n8709\n4411\n1672\n457\n5852\n8037\n4932\n3679\n8794\n2362\n8592\n495\n8432\n1608\n2155\n7411\n2881\n9244\n37\n6535\n8219\n4505\n8635\n1928\n8384\n2570\n8996\n7610\n2128\n8728\n6656\n8935\n6681\n2070\n176\n9062\n972\n514\n1796\n4039\n6838\n2462\n230\n569\n5521\n4637\n4939\n4420\n2863\n672\n4995\n3807\n447\n1656\n2005\n5113\n3297\n8858\n2118\n6309\n1926\n481\n1156\n1509\n1228\n1787\n5978\n8678\n3951\n2929\n4980\n5039\n4713\n7002\n151\n5536\n8148\n3823\n4709\n2299\n142\n7067\n2372\n3761\n9\n2265\n5747\n2764\n724\n2913\n3151\n4525\n6370\n4247\n9329\n5494\n3721\n629\n3621\n7371\n59\n1999\n6704\n3734\n2698\n4691\n6938\n9117\n8415\n6353\n6750\n9077\n2679\n7623\n2478\n7321\n6611\n4007\n2076\n5772\n6416\n2264\n8348\n2672\n6546\n754\n6934\n7908\n8546\n4404\n592\n4748\n6625\n2129\n7944\n2377\n6\n8929\n8275\n3515\n4524\n3660\n8710\n419\n6878\n170\n8313\n7460\n8753\n2917\n6891\n6663\n4918\n7129\n396\n7256\n3500\n631\n5585\n8343\n2695\n6168\n6292\n3176\n5092\n5160\n3701\n9021\n7221\n7825\n1216\n1438\n3471\n2318\n8923\n6223\n2182\n7621\n8514\n9010\n8987\n1252\n1972\n1872\n1715\n8205\n6463\n8138\n8989\n5661\n2890\n565\n2427\n8946\n1303\n3718\n6000\n3620\n1560\n5276\n8089\n9260\n1467\n6173\n7641\n7520\n5061\n4677\n5757\n4400\n2620\n2719\n8995\n2079\n6644\n1683\n8141\n7754\n
5744\n2952\n7568\n654\n7457\n5368\n3310\n1510\n4440\n1513\n3072\n8034\n1456\n9164\n3163\n3035\n6111\n5042\n7161\n1401\n1084\n8000\n6672\n8531\n5404\n6550\n8379\n9141\n8681\n7752\n6394\n7011\n3739\n8253\n978\n4771\n6024\n4828\n7959\n1649\n1727\n7073\n8349\n6952\n661\n7283\n3159\n2590\n3496\n8741\n3969\n2956\n4565\n920\n1830\n8558\n1930\n6677\n6825\n8256\n7454\n7521\n4710\n1768\n3753\n6459\n5606\n5292\n1397\n240\n2733\n946\n6711\n3242\n2627\n4929\n5006\n3202\n132\n2295\n2746\n1293\n2124\n5405\n4065\n818\n7464\n1820\n4398\n1312\n6994\n6920\n261\n987\n6120\n3109\n331\n2986\n4338\n7774\n5122\n8396\n1364\n8969\n6712\n8161\n7083\n7595\n5940\n1566\n6419\n8634\n4432\n6047\n4749\n6076\n1161\n8217\n674\n8494\n3688\n2447\n4704\n969\n7477\n1160\n3243\n3173\n4979\n9288\n6860\n1662\n6171\n225\n5143\n313\n8327\n3275\n3385\n7626\n3103\n4401\n6794\n5600\n5043\n7664\n933\n6830\n4452\n3980\n1604\n5875\n6633\n4635\n5756\n3329\n1751\n8108\n4817\n1989\n1237\n1893\n2848\n9334\n51\n8875\n4981\n5417\n4134\n877\n6688\n3545\n4943\n5615\n2476\n1684\n3652\n7396\n1769\n1171\n6563\n3415\n3644\n340\n6630\n8284\n3256\n7240\n5371\n3405\n2108\n6360\n1734\n5612\n8638\n2343\n1103\n7803\n6809\n3055\n188\n8031\n3124\n3683\n4537\n988\n2297\n4893\n6499\n3396\n839\n4467\n5195\n4041\n6457\n4441\n6378\n6472\n6195\n4912\n6884\n5922\n7014\n1660\n38\n1595\n6752\n4554\n1292\n2709\n3800\n6057\n1980\n8775\n6587\n6392\n6263\n7214\n5219\n282\n309\n6685\n2253\n6311\n4092\n18\n7570\n5543\n4081\n2515\n6278\n8690\n5294\n6184\n5215\n9130\n6720\n250\n7250\n4983\n639\n3567\n7841\n2636\n4067\n8446\n5703\n8609\n2586\n7695\n1253\n6701\n7930\n6317\n5921\n7719\n8501\n7312\n4110\n6219\n4552\n5059\n4088\n7975\n9132\n6054\n692\n3412\n4079\n6754\n6950\n5281\n3028\n8321\n3877\n7614\n8939\n4188\n2223\n239\n4745\n6875\n7096\n5571\n4403\n2640\n5556\n1845\n6690\n1825\n4157\n314\n4682\n8825\n1003\n6206\n8093\n7215\n6465\n99\n8077\n6631\n4206\n2523\n366\n1208\n6043\n4640\n1457\n5475\n4985\n1351\n3090\n5625\n7307\n8466\n2003\n8854\n218\n1500\n4476\n2293\n1847\n5032\n2147\n866\n3710\n2552\n1749\n6692\n3926\n4112\n6458\n735\n9171\n60\n9304\n6726\n2630\n2882\n1178\n1151\n4922\n4662\n173\n7233\n1776\n6533\n4113\n2423\n2425\n4343\n5800\n970\n6372\n1009\n6607\n3068\n8435\n6423\n3126\n4813\n1709\n1201\n7104\n5620\n3932\n5701\n5724\n3366\n8050\n4984\n5023\n9203\n5079\n627\n290\n779\n5572\n5233\n1392\n4975\n8534\n8210\n2269\n1143\n2475\n2562\n905\n4546\n267\n3536\n8538\n449\n101\n7367\n2722\n4605\n7356\n6781\n8537\n8697\n6820\n8340\n8926\n3821\n2349\n2259\n6545\n8100\n8395\n2258\n2911\n5108\n3946\n1406\n8683\n8296\n5579\n2177\n8264\n1425\n3940\n957\n3647\n515\n5342\n8363\n2449\n3108\n1001\n2937\n3452\n5574\n4319\n9184\n8381\n945\n6876\n600\n5714\n4871\n8532\n1852\n8856\n392\n2018\n8878\n369\n5711\n9230\n5304\n7266\n1681\n7829\n2309\n4683\n8938\n2255\n6159\n3207\n4651\n2029\n4341\n5106\n5794\n9024\n4712\n2434\n7151\n7359\n6431\n1290\n5918\n8705\n3438\n5554\n8876\n7415\n6290\n5373\n3805\n2950\n2331\n6772\n8997\n6576\n2307\n8515\n4033\n3428\n6487\n6595\n45\n5792\n333\n762\n2383\n3388\n666\n2166\n460\n943\n364\n6980\n8223\n8221\n637\n6218\n4108\n5381\n4649\n5096\n1614\n8768\n5095\n3809\n5030\n984\n3538\n5120\n2498\n5222\n5613\n5486\n5119\n241\n5707\n9227\n544\n4109\n7771\n728\n3671\n9327\n1230\n9270\n1070\n8565\n4769\n7056\n5654\n7965\n1793\n5956\n7883\n1362\n5479\n8769\n8821\n8320\n1901\n1994\n2461\n5552\n389\n2839\n6467\n2762\n4763\n3499\n1487\n7599\n4488\n3241\n8272\n1131\n4496\n7006\n7265\n4897\n2747\n6618\n5291\n4563\n5146\n1939\n6369\n8548\n6163\n5526\n4068\n9030\n5349\n84
33\n748\n1477\n4265\n9200\n3878\n462\n6846\n9040\n4806\n3519\n6798\n5464\n5179\n546\n6044\n8114\n7216\n6276\n1495\n494\n8146\n5434\n856\n8403\n8071\n3972\n5544\n3337\n6855\n1546\n2824\n1718\n6009\n2042\n251\n9076\n3330\n5004\n192\n4717\n3797\n1146\n394\n7814\n7699\n4659\n4689\n4156\n7903\n9054\n7332\n7811\n1119\n5531\n6782\n5210\n8412\n2633\n7924\n4624\n8314\n5666\n3240\n2310\n4262\n8160\n4553\n8196\n2661\n7213\n7455\n7399\n870\n6126\n1227\n1226\n781\n937\n6343\n2578\n2892\n4124\n2792\n5696\n6865\n6455\n8312\n5193\n6026\n5251\n3787\n4460\n4687\n7923\n1140\n9106\n796\n2482\n9170\n8695\n2749\n6734\n4825\n114\n8319\n827\n4175\n390\n7611\n7484\n1249\n7727\n955\n579\n3629\n8915\n2958\n885\n7227\n1424\n4810\n4604\n1535\n774\n7518\n5428\n1955\n8233\n2645\n2167\n6484\n3855\n1502\n4861\n2333\n2973\n4829\n1906\n3966\n476\n9023\n6960\n3483\n2748\n5891\n8174\n7702\n8948\n5324\n4396\n1605\n2823\n7348\n7347\n5933\n310\n9082\n916\n4255\n203\n4239\n5976\n6200\n6435\n4425\n787\n1121\n6034\n13\n39\n3104\n5961\n5507\n5785\n1463\n7339\n1575\n7801\n5445\n8283\n5951\n6995\n999\n5163\n6023\n3786\n6536\n5850\n3524\n3528\n4508\n6674\n2939\n8227\n4598\n7550\n8495\n8622\n1152\n4538\n4003\n1318\n739\n3296\n8202\n1552\n6204\n5236\n3576\n4699\n9238\n1879\n488\n2274\n433\n5587\n1678\n9282\n7914\n8552\n6445\n7971\n8331\n6880\n7476\n7282\n1570\n7271\n3827\n6489\n8091\n9287\n7351\n1765\n5286\n6921\n542\n1762\n8553\n4987\n894\n3622\n7855\n92\n3131\n4811\n3590\n6517\n4510\n733\n4954\n1360\n5669\n2842\n8107\n5646\n5968\n1618\n1827\n7709\n8521\n5807\n5321\n9239\n5501\n3745\n4437\n1586\n7273\n5265\n6605\n7917\n1607\n6074\n4668\n7061\n1580\n8694\n8461\n4573\n618\n9173\n5243\n435\n8770\n2421\n7450\n3870\n8308\n2605\n2934\n9240\n6887\n4512\n1198\n7585\n7691\n7738\n2843\n8423\n7929\n6971\n7854\n86\n9128\n4298\n622\n790\n9155\n6579\n2203\n7716\n1265\n8645\n3834\n1174\n7380\n623\n8936\n4306\n8082\n4312\n8661\n5753\n7243\n2768\n8155\n85\n4143\n3047\n8479\n7809\n2833\n5555\n7578\n1637\n1936\n8130\n5549\n8062\n7143\n5522\n8966\n5614\n8105\n8719\n7655\n7502\n8268\n5760\n6695\n5565\n7615\n9226\n4870\n4507\n3160\n4835\n1598\n2465\n4422\n5248\n7867\n1078\n5015\n6660\n1676\n5354\n6391\n5351\n7184\n6280\n5936\n6124\n1327\n2906\n269\n8292\n2466\n8809\n5167\n8142\n8204\n2713\n1910\n2930\n2494\n5592\n7384\n7726\n5727\n625\n1735\n5710\n5518\n2491\n1410\n4989\n5183\n8777\n6562\n4947\n3692\n6129\n384\n1097\n2084\n5209\n3723\n7272\n6895\n2459\n543\n8621\n5394\n6211\n2074\n1511\n2524\n7776\n5055\n7191\n6207\n7922\n281\n8436\n2918\n3141\n4800\n6323\n7631\n8903\n2716\n3735\n3012\n5301\n3975\n2800\n7963\n105\n1920\n7391\n4909\n1754\n4816\n5488\n5145\n5098\n5139\n5268\n9317\n8631\n4346\n7318\n136\n3993\n1220\n2151\n308\n7483\n7582\n3071\n1339\n3777\n8191\n5378\n7087\n1056\n7465\n5608\n6564\n512\n2754\n2687\n1596\n5376\n1512\n566\n6382\n7360\n1757\n8035\n2296\n4264\n3551\n1053\n4716\n1537\n8518\n254\n6253\n7132\n8557\n3490\n9267\n5473\n2412\n7539\n7136\n6670\n3974\n891\n1323\n5958\n1217\n2879\n9118\n1259\n2317\n7033\n2467\n6665\n6244\n2180\n2140\n7098\n5126\n6395\n4150\n547\n4120\n4307\n1725\n2737\n8549\n8195\n1245\n6286\n935\n1756\n1701\n1626\n7379\n3492\n3717\n5802\n2817\n1234\n1005\n4101\n21\n2576\n4650\n3381\n1030\n2844\n1641\n936\n2729\n6469\n8913\n8369\n5994\n341\n81\n4083\n1685\n5152\n3380\n8739\n6615\n3829\n164\n7927\n4779\n829\n4216\n8528\n3641\n4606\n2769\n6970\n1545\n8850\n4971\n5489\n2008\n4564\n8682\n7784\n5768\n9252\n901\n438\n3577\n2765\n5904\n664\n3348\n6298\n3602\n2502\n8617\n7684\n4293\n5166\n5805\n4126\n2451\n6906\n7234\n9243\n3778\n2940
\n1087\n9053\n5026\n2504\n5283\n2820\n4242\n797\n3925\n1383\n8750\n7861\n1403\n6973\n7617\n968\n3065\n5395\n4347\n8144\n2688\n6527\n8597\n8673\n7327\n6331\n1422\n7115\n244\n7013\n2092\n54\n7970\n5742\n3464\n4823\n8588\n2938\n3060\n6406\n4149\n2375\n6616\n8803\n1555\n4369\n1380\n3011\n6144\n3367\n4990\n7370\n7131\n1995\n2602\n985\n8785\n8480\n9125\n1927\n3269\n3771\n1032\n7378\n6900\n5726\n2731\n2020\n4503\n3313\n6727\n8793\n2304\n523\n6036\n58\n7993\n5512\n5049\n2721\n8482\n673\n7937\n1168\n4472\n8247\n7287\n9017\n6421\n9190\n3584\n1819\n1792\n2810\n6033\n638\n6749\n7677\n981\n7160\n4726\n1886\n7845\n7911\n6975\n568\n7422\n4613\n4501\n2569\n4263\n3206\n4133\n2420\n3706\n8894\n2263\n5774\n4925\n9180\n8888\n2945\n2091\n1873\n6303\n729\n6728\n2156\n3267\n1860\n6597\n1374\n4930\n5253\n938\n580\n5825\n4839\n166\n8198\n6892\n8701\n74\n7094\n7284\n8954\n3156\n6140\n4279\n5594\n2229\n7535\n5466\n8413\n7105\n8192\n2632\n7638\n9308\n8530\n832\n4643\n2201\n3268\n4322\n6510\n2967\n262\n403\n7973\n1258\n8828\n4036\n5838\n9263\n8529\n2788\n4202\n237\n3838\n1291\n2305\n4056\n5628\n7281\n1430\n6476\n7935\n2850\n6041\n2013\n4016\n4576\n5312\n6827\n6321\n8669\n8439\n830\n1942\n1519\n2750\n6106\n6993\n6235\n5899\n7313\n5331\n4371\n7086\n4399\n8600\n2660\n5409\n3465\n5499\n6231\n5745\n1801\n5337\n4468\n1451\n4192\n1275\n8230\n2302\n1114\n4960\n8860\n3900\n6468\n5058\n1505\n8868\n5588\n3858\n1947\n2565\n1472\n8499\n243\n8442\n6583\n7085\n5374\n2250\n4291\n4426\n492\n2311\n8305\n3662\n5338\n8780\n7488\n3890\n5005\n2442\n4680\n7358\n9116\n4397\n5999\n587\n7902\n83\n3566\n2134\n8942\n4767\n6601\n2456\n1745\n5736\n5254\n8017\n4015\n7690\n3798\n8947\n1067\n2116\n7945\n590\n2547\n2535\n64\n2053\n5359\n2493\n6669\n4351\n6412\n7473\n6147\n7175\n6983\n5196\n745\n2657\n3497\n697\n3161\n7528\n2239\n5991\n3201\n7681\n2440\n5189\n2959\n2044\n8917\n2046\n6313\n6333\n5318\n2763\n4301\n2555\n2213\n2933\n4121\n1340\n3903\n4392\n7889\n5323\n1055\n707\n3857\n518\n6078\n5134\n6645\n9138\n1592\n680\n4446\n7943\n3461\n3887\n5601\n2321\n6621\n558\n4914\n913\n5637\n6453\n8511\n4531\n1218\n5508\n2603\n6802\n8426\n8297\n2947\n5971\n6552\n5262\n5935\n782\n7435\n8357\n6139\n1136\n1473\n5008\n3585\n3627\n2914\n5356\n2997\n2347\n881\n5652\n4849\n8808\n8351\n4017\n2010\n6836\n7616\n4391\n3630\n3712\n6099\n2969\n5238\n4333\n2301\n4406\n1236\n1050\n1864\n1104\n8408\n8251\n8795\n5879\n3365\n7481\n8206\n2452\n1767\n8859\n124\n3948\n4444\n8962\n4438\n5003\n1740\n8428\n3105\n5117\n1095\n1480\n8755\n7881\n3097\n4877\n155\n1917\n2455\n6042\n337\n6724\n6045\n8483\n7135\n2242\n4566\n1679\n834\n1746\n795\n3548\n2314\n2036\n4046\n9129\n6979\n7084\n5091\n2413\n8170\n5775\n1817\n529\n7220\n813\n2916\n5130\n8972\n126\n1243\n2370\n4831\n9122\n3010\n5104\n2613\n6761\n7482\n909\n2146\n4595\n5340\n3512\n6283\n2346\n653\n6121\n2615\n7421\n1869\n1002\n8834\n2991\n8992\n632\n1093\n4543\n645\n2352\n4115\n373\n1483\n6966\n8598\n3896\n3434\n5987\n8318\n1815\n1223\n1548\n6885\n5073\n6330\n2573\n1369\n4095\n1431\n2185\n5766\n1301\n7258\n8048\n7598\n2847\n1996\n2378\n8561\n743\n6381\n271\n1956\n7439\n7596\n7134\n6636\n5804\n1858\n6214\n4730\n8536\n1203\n3118\n9202\n1875\n5885\n8975\n168\n5898\n4014\n4186\n3346\n3041\n5558\n9296\n8157\n4339\n3234\n1738\n2604\n6803\n5387\n5590\n125\n2173\n8012\n8005\n4858\n3069\n651\n372\n378\n8366\n6299\n1449\n7793\n8541\n3235\n8043\n3086\n3983\n6949\n4690\n2176\n6494\n7637\n8406\n3856\n7408\n350\n7021\n8224\n7044\n7662\n6697\n7679\n169\n528\n7029\n2790\n7138\n7432\n7602\n8333\n1582\n1378\n519\n482\n9279\n8015\n6592\n4514\n3542\n2612\n6
28\n5053\n6699\n6227\n2094\n1621\n847\n3598\n2728\n8490\n7276\n6620\n8345\n9216\n4278\n4059\n9058\n5063\n5816\n4173\n8134\n1997\n3182\n3224\n8129\n5109\n4494\n189\n7640\n8243\n180\n2963\n1123\n5593\n3263\n4185\n7140\n8990\n6320\n9275\n4601\n4854\n5907\n1135\n8083\n5964\n7788\n1992\n8069\n9174\n6160\n35\n8572\n2865\n46\n3952\n6418\n2510\n5783\n20\n3816\n2715\n3930\n2548\n5204\n4122\n4103\n708\n7756\n3825\n777\n3550\n8502\n3929\n5440\n6751\n7764\n4070\n7331\n3743\n9131\n9206\n3828\n23\n41\n4197\n234\n5723\n7622\n8832\n4626\n2169\n5599\n2976\n5266\n1967\n1150\n5334\n90\n822\n2538\n3169\n6771\n7442\n498\n4967\n5580\n7581\n7680\n4728\n1115\n4040\n1064\n3106\n6266\n4415\n9294\n5597\n7059\n197\n7218\n6948\n5690\n4234\n1653\n4485\n4019\n3370\n919\n1330\n6085\n2078\n3768\n5427\n4545\n2435\n8862\n3633\n8145\n5221\n1388\n5913\n8140\n7471\n7156\n6989\n1190\n6832\n2830\n4387\n3454\n7469\n2910\n4526\n5187\n2410\n9223\n6247\n6912\n4681\n1300\n7407\n8612\n6523\n3616\n6894\n7253\n4515\n5874\n5448\n7137\n7957\n1130\n3092\n7054\n3516\n5797\n1000\n2727\n4336\n9090\n6403\n7255\n8919\n6522\n6760\n8898\n4803\n1938\n374\n8686\n9150\n3985\n7045\n3475\n6065\n7991\n1409\n7851\n6671\n6090\n5826\n7857\n1155\n8964\n1117\n7072\n6064\n2497\n4899\n2397\n3189\n2369\n15\n5027\n5754\n8950\n5617\n8391\n914\n6264\n279\n6174\n5184\n3733\n7392\n5278\n2924\n567\n7994\n352\n8084\n2148\n2723\n3359\n70\n1870\n7708\n220\n3994\n9013\n3191\n9220\n4155\n5717\n1110\n2198\n9179\n785\n5325\n4770\n4250\n52\n4634\n5072\n9037\n601\n8036\n7996\n2483\n7232\n8675\n8836\n1279\n5346\n7676\n6104\n1515\n4603\n5607\n7894\n5144\n2628\n68\n440\n3586\n3083\n4830\n4378\n7762\n1134\n4542\n7850\n6296\n2866\n4011\n8751\n4776\n7954\n7102\n5697\n2032\n5729\n5017\n6962\n2051\n1092\n764\n9019\n2759\n8581\n1484\n8618\n912\n2382\n4892\n8447\n8176\n5491\n5695\n5504\n1060\n7064\n709\n578\n4320\n2379\n7649\n8416\n1613\n5344\n7512\n7865\n3037\n6689\n6557\n1569\n5955\n3707\n9168\n8566\n1775\n5950\n6943\n7804\n434\n6179\n9300\n1142\n7947\n6456\n6291\n5789\n6538\n9134\n3049\n5075\n5399\n5161\n1623\n948\n6302\n6063\n7516\n117\n506\n3302\n7146\n355\n3854\n1081\n2827\n1496\n2574\n6167\n3183\n4287\n5482\n1722\n7319\n7277\n3860\n3443\n3298\n8364\n3826\n7254\n2360\n5093\n7039\n6325\n4230\n2567\n6241\n4443\n559\n2625\n4228\n8967\n6405\n1674\n3936\n4475\n8556\n8585\n896\n3713\n6259\n4297\n6718\n2392\n2279\n4927\n1283\n2374\n2860\n7665\n663\n596\n6293\n6805\n2811\n7383\n8306\n8330\n3153\n2153\n2618\n2441\n3615\n8092\n552\n5285\n5255\n8124\n9247\n5530\n8175\n6242\n5660\n3433\n1610\n1832\n3892\n3862\n640\n2127\n2474\n4196\n3495\n7217\n5206\n4836\n7759\n4376\n800\n4227\n3699\n9055\n5665\n6826\n7463\n9065\n4720\n5069\n3245\n3453\n3358\n6532\n5970\n7921\n4087\n1547\n3424\n8040\n7995\n6787\n9069\n8716\n2561\n8199\n1479\n2767\n7818\n7145\n604\n7597\n4896\n9281\n4666\n185\n8171\n7978\n3059\n9196\n9221\n2135\n1800\n2974\n1529\n5948\n446\n4436\n8672\n3508\n6208\n5673\n6998\n5203\n278\n7041\n9110\n5853\n8121\n1764\n3046\n2400\n6575\n4738\n2228\n7761\n9322\n7019\n6931\n6383\n6762\n283\n3935\n2534\n7717\n6785\n471\n8214\n231\n4241\n5310\n3844\n5746\n2011\n7209\n336\n6433\n756\n9167\n6741\n3345\n7685\n4018\n6682\n9147\n4790\n5836\n5906\n8747\n676\n3964\n6362\n3510\n7510\n2308\n1806\n5917\n1189\n4012\n3387\n1331\n5319\n5423\n8900\n147\n3780\n1696\n9111\n6783\n6497\n4104\n1898\n3987\n260\n4616\n2121\n9283\n1400\n2437\n4670\n2735\n1163\n2096\n6521\n1423\n4523\n2243\n6667\n6990\n3944\n6915\n6763\n5611\n404\n2691\n1015\n7092\n7562\n8624\n2291\n4193\n5934\n5503\n2326\n4408\n2960\n842\n1963\n3354\n55
68\n9050\n3806\n439\n9154\n6055\n6451\n2190\n7633\n688\n4354\n8890\n2813\n2872\n8102\n8317\n6609\n1497\n8389\n6449\n1682\n3594\n5103\n5812\n863\n268\n3054\n8079\n2260\n2027\n3091\n7687\n6703\n3557\n2019\n8427\n2799\n8182\n6641\n3168\n2284\n1934\n4865\n1077\n6507\n1658\n3811\n1774\n7897\n2238\n2943\n191\n3869\n3246\n4057\n3188\n414\n8072\n7838\n1382\n4962\n6010\n5363\n4042\n1983\n4077\n7429\n1833\n3583\n4044\n1109\n1295\n386\n5481\n3927\n311\n1349\n5651\n5878\n562\n2202\n8904\n765\n1501\n8654\n2975\n2689\n3680\n5180\n1900\n7707\n4723\n8912\n4029\n3579\n869\n2888\n8657\n6599\n741\n4288\n2244\n7357\n5704\n8791\n208\n8587\n7969\n4805\n8526\n4887\n8871\n7468\n3343\n886\n7794\n5764\n2646\n6454\n6101\n7885\n7744\n1297\n4119\n4856\n122\n2286\n2925\n5131\n3570\n5843\n3027\n5320\n5626\n540\n1862\n5401\n7335\n699\n7760\n9198\n3259\n7345\n8698\n1280\n6479\n3100\n3988\n1322\n5737\n1268\n3257\n6791\n3326\n4815\n7644\n1082\n2826\n6821\n8984\n2553\n5290\n5909\n4762\n9242\n8096\n8066\n4325\n6666\n7193\n7114\n8060\n2376\n7872\n6788\n3544\n5460\n3507\n2509\n6626\n3429\n5542\n4220\n2968\n5271\n4249\n3863\n1868\n5581\n2012\n6270\n8038\n4050\n121\n2845\n1565\n1998\n2275\n5524\n6068\n7624\n4913\n9277\n1506\n803\n8848\n5925\n2450\n2072\n8190\n4753\n9162\n1923\n825\n7303\n9028\n2088\n8516\n1556\n5937\n7847\n2367\n7549\n1049\n1521\n4739\n3931\n8958\n4130\n7877\n7876\n897\n5985\n7346\n7537\n111\n3700\n1126\n7896\n1288\n3419\n4673\n1051\n5720\n1068\n3458\n146\n291\n6256\n5514\n2857\n4580\n6239\n6525\n8717\n391\n4841\n6676\n4360\n1453\n4211\n73\n1675\n1987\n4025\n1321\n662\n8265\n6424\n2758\n7765\n7656\n3209\n7497\n7600\n9039\n7697\n5177\n2983\n5622\n9295\n1200\n3284\n964\n2024\n1269\n4551\n8088\n5659\n2212\n5199\n5551\n8607\n5573\n2247\n5200\n6341\n7951\n8429\n7720\n5919\n1273\n3529\n6707\n9176\n7552\n3255\n5649\n6110\n9235\n1137\n9272\n775\n788\n5786\n5186\n6746\n2667\n9145\n7630\n3953\n1828\n8827\n6471\n4702\n7815\n467\n6387\n3195\n6238\n6508\n2373\n5983\n4931\n2948\n921\n2438\n517\n3949\n2137\n3216\n5683\n3695\n1719\n4837\n9159\n6981\n860\n7410\n5497\n1770\n5557\n8810\n5194\n4857\n9100\n6329\n2609\n1925\n3686\n9041\n4924\n349\n9187\n3393\n3661\n7120\n6858\n4587\n3831\n3130\n5396\n5060\n6486\n3937\n8023\n824\n5398\n1354\n8861\n5534\n7292\n4389\n6029\n6226\n3505\n4326\n7445\n581\n6089\n3450\n7324\n6516\n6775\n1207\n4575\n5135\n9265\n3918\n9020\n3473\n3898\n7812\n6571\n6757\n6639\n2557\n1206\n6148\n7325\n8790\n4938\n7026\n4383\n8041\n1250\n7267\n1952\n7561\n8811\n4941\n8373\n4848\n6602\n8355\n8104\n5214\n6654\n4330\n995\n3181\n3422\n456\n1782\n3408\n6530\n719\n7587\n5910\n3058\n740\n2009\n4207\n5336\n2798\n9229\n8668\n2473\n4221\n1493\n3281\n171\n9157\n9139\n7766\n6220\n9127\n3324\n5308\n3708\n2431\n8080\n2093\n2585\n406\n7040\n5064\n5247\n4758\n6512\n2953\n4257\n4935\n2705\n2572\n3436\n8513\n5884\n1385\n4852\n2637\n7091\n2761\n6007\n8332\n6694\n2422\n4917\n2186\n6898\n1390\n6965\n3132\n7698\n475\n2002\n2692\n5024\n7365\n7373\n4091\n1731\n947\n3962\n8692\n1788\n8734\n8656\n6862\n6856\n1950\n1914\n5658\n3635\n1620\n4780\n2580\n1454\n2786\n687\n7238\n3648\n6452\n1197\n3190\n5900\n9043\n4958\n1935\n1821\n1187\n1153\n7737\n7223\n3820\n7169\n7350\n5674\n6254\n3025\n6680\n1690\n2899\n3893\n1577\n5728\n9189\n5077\n34\n3560\n2179\n5462\n1402\n3654\n1376\n7936\n4246\n5506\n1179\n5647\n4686\n8644\n1352\n2855\n6079\n2254\n2668\n2287\n2457\n3418\n7264\n677\n3074\n2655\n1042\n2210\n4504\n7089\n8309\n4209\n4280\n3258\n2977\n84\n4705\n1244\n3511\n6355\n8813\n3228\n9266\n1122\n613\n732\n5202\n8425\n2638\n6470\n2886\n3541\n8132\n2063\
n8201\n5129\n2818\n7949\n6936\n8090\n4465\n7295\n5239\n7009\n9271\n8563\n2832\n952\n8136\n6776\n3565\n5188\n7288\n6999\n285\n5487\n7763\n7608\n8584\n2071\n7868\n2804\n3655\n7048\n6847\n3276\n4082\n4272\n3910\n3709\n1574\n4559\n7580\n7081\n5014\n7769\n8183\n6386\n7574\n356\n4937\n2487\n9315\n7572\n3040\n671\n2682\n8626\n3868\n8623\n387\n8679\n4074\n1481\n3527\n3595\n4754\n2453\n1579\n4638\n9123\n1829\n316\n3009\n3691\n763\n4875\n3572\n4642\n3128\n4273\n2777\n6032\n4793\n233\n7147\n996\n3199\n8835\n3517\n7210\n6125\n6037\n3684\n8589\n3915\n3095\n8310\n3180\n7043\n4458\n2889\n57\n4483\n7667\n8375\n1434\n7493\n6986\n4733\n8471\n5827\n2111\n1313\n7986\n3075\n2614\n7547\n4977\n8527\n3212\n7300\n5842\n5244\n3291\n597\n1007\n2030\n227\n3830\n5540\n247\n5643\n9333\n1958\n3096\n1371\n5220\n7926\n2927\n1516\n7130\n193\n1522\n6165\n6923\n3794\n4223\n5535\n2472\n8630\n3971\n9101\n2946\n222\n4609\n7291\n8542\n6501\n7548\n4557\n6274\n1010\n5226\n7309\n1317\n9056\n6275\n1624\n1099\n4191\n4030\n7270\n5392\n2316\n3819\n1670\n8154\n8045\n4807\n8864\n2391\n5908\n8338\n8218\n6400\n9193\n3165\n843\n6613\n6941\n4380\n9332\n5629\n7557\n4321\n3702\n681\n734\n1159\n4665\n5959\n1697\n5509\n8774\n7389\n3832\n3751\n8637\n3079\n1680\n6841\n703\n684\n8293\n3682\n5733\n4818\n3231\n3078\n5562\n9001\n3889\n7024\n2519\n1713\n3287\n219\n6021\n8776\n2289\n7212\n4832\n4684\n4617\n4237\n2649\n8185\n6326\n3568\n551\n1426\n4181\n8869\n312\n2905\n4165\n8248\n2558\n900\n1044\n8613\n7743\n5437\n7604\n3122\n5708\n8649\n2878\n4695\n4491\n1929\n7533\n5223\n7711\n915\n1844\n5751\n3008\n8055\n961\n6142\n4636\n61\n198\n2271\n5698\n4596\n4500\n5709\n5819\n7972\n2992\n1643\n1048\n6281\n8886\n360\n4198\n1841\n6814\n3960\n2606\n7001\n5888\n450\n7133\n7015\n7034\n5153\n8920\n5066\n469\n1302\n8816\n463\n8651\n5869\n8193\n6582\n5578\n1231\n9274\n7260\n7751\n8052\n6799\n2089\n2342\n8451\n3260\n5550\n7795\n2288\n1205\n40\n496\n8367\n7836\n5973\n3908\n5242\n5062\n2706\n997\n6514\n5419\n9201\n1965\n6062\n3050\n5302\n8735\n358\n2398\n7470\n1644\n8179\n7047\n1549\n5414\n2539\n7381\n589\n8166\n8505\n6035\n3956\n4540\n6721\n8074\n1062\n2384\n2531\n7159\n3502\n3902\n4584\n2554\n264\n8720\n2849\n4916\n5218\n7202\n883\n4560\n1677\n4317\n7863\n4509\n6577\n2903\n1452\n1416\n5369\n473\n6233\n6359\n5992\n4934\n8059\n6834\n4907\n3320\n8267\n8280\n2066\n2402\n1485\n3772\n3732\n4764\n9126\n3575\n5564\n4768\n5641\n1884\n2330\n1804\n344\n698\n3089\n1532\n4454\n761\n7289\n8094\n3432\n1747\n6811\n8722\n8826\n4646\n3222\n8614\n2901\n7003\n652\n8663\n4266\n413\n810\n75\n3334\n4905\n6438\n4756\n5137\n6528\n6534\n6988\n6177\n8533\n889\n5384\n7201\n5132\n7802\n6864\n3973\n873\n4840\n1482\n8376\n3769\n5858\n6675\n4286\n2593\n5863\n4353\n7817\n7540\n4999\n4838\n2303\n6002\n7913\n1508\n5317\n7755\n2784\n4964\n3431\n6209\n3755\n6022\n6399\n6232\n3954\n455\n5416\n6448\n1558\n7591\n245\n140\n9210\n6585\n4084\n967\n7798\n6795\n7095\n6733\n3861\n9264\n361\n1045\n755\n8042\n7074\n7778\n6415\n4724\n6450\n2049\n1563\n1307\n3485\n1790\n7869\n3282\n6907\n3920\n2868\n5801\n5632\n1079\n5009\n3955\n7517\n5128\n3417\n3019\n2725\n1784\n2312\n2753\n6976\n342\n8266\n1849\n2273\n5037\n7880\n3793\n7401\n5412\n8279\n1257\n3670\n9049\n3266\n8955\n6519\n8916\n2858\n694\n5650\n1019\n4669\n1785\n3533\n5877\n2704\n8603\n3726\n6668\n497\n1085\n6815\n6157\n6646\n6964\n186\n8097\n5645\n8481\n8215\n3775\n2542\n7514\n5699\n4072\n3518\n5767\n3239\n3740\n1404\n8981\n4086\n6397\n6984\n4204\n6899\n682\n6589\n3317\n2944\n3456\n4340\n7424\n9208\n6504\n4409\n1\n145\n1882\n4620\n2634\n4992\n5453\n4481\n3377\n266\
n7875\n530\n1235\n7605\n504\n1771\n8489\n345\n7353\n7797\n7174\n5914\n2871\n5721\n6067\n3582\n7653\n5467\n6234\n691\n8758\n2122\n1213\n2908\n1492\n1437\n2187\n1266\n2395\n7278\n8491\n5256\n1554\n8163\n5966\n7128\n7904\n1691\n6272\n1264\n3996\n1706\n1334\n1316\n6478\n6935\n1518\n6700\n8703\n8744\n8152\n8778\n5367\n4218\n9007\n6312\n606\n7565\n5293\n2891\n675\n2125\n2120\n826\n7008\n5705\n7748\n8010\n1498\n5330\n5472\n2215\n7627\n3016\n6588\n1850\n4128\n8569\n6987\n7566\n148\n8151\n8789\n7907\n8596\n715\n6018\n9060\n3872\n1750\n5889\n4047\n5960\n3120\n3449\n1421\n1102\n3333\n9197\n8796\n8123\n8007\n2028\n8404\n1945\n1985\n8109\n5380\n8438\n3504\n6739\n4180\n5835\n4243\n25\n4002\n1976\n3482\n8392\n158\n5181\n4885\n8985\n11\n6872\n6425\n5926\n7062\n5083\n8394\n4259\n5844\n1990\n3942\n5532\n2220\n28\n5957\n149\n6748\n1663\n3559\n7647\n2566\n1359\n8787\n5259\n7010\n554\n8231\n4229\n6005\n8172\n8125\n1350\n3571\n9051\n1973\n1386\n1781\n5788\n159\n7007\n3220\n1846\n3093\n4445\n2056\n8370\n3211\n1113\n4384\n2231\n273\n4276\n642\n7663\n5311\n265\n226\n9012\n7879\n118\n7109\n7251\n1760\n8667\n2876\n7162\n3552\n6901\n6779\n5021\n6524\n4957\n3114\n4544\n441\n1848\n2136\n2458\n8662\n1127\n5541\n3026\n1080\n6780\n2224\n8259\n1073\n9000\n7244\n7977\n500\n4435\n7376\n7979\n1435\n9291\n7704\n3791\n3521\n210\n7388\n1039\n6269\n4052\n8570\n3285\n564\n8039\n3546\n6203\n1183\n6107\n4147\n6216\n2234\n7185\n3192\n7155\n2001\n7777\n876\n944\n908\n7791\n5465\n6784\n65\n9172\n5675\n7075\n3886\n7891\n2978\n1008\n5630\n591\n5067\n1139\n577\n9015\n574\n8137\n7786\n5765\n4900\n4090\n7842\n5741\n"
  },
  {
    "path": "lib/train/data_specs/got10k_train_split.txt",
    "content": "3784\n8998\n3906\n1631\n8277\n8358\n2338\n7938\n2988\n8302\n2662\n2663\n2825\n7447\n4781\n2218\n6348\n5860\n4517\n2819\n8075\n5391\n116\n3606\n7976\n7941\n1024\n4519\n1970\n557\n8579\n6908\n993\n7204\n1991\n3674\n8781\n6840\n5\n3225\n3763\n8688\n6778\n5777\n4794\n2744\n8126\n3864\n1733\n2923\n6829\n701\n683\n2081\n1831\n2404\n1459\n2741\n5972\n3618\n7462\n2654\n103\n2174\n6224\n2989\n2506\n2766\n5912\n2699\n3295\n3986\n609\n4895\n6673\n801\n1098\n1602\n2490\n3129\n8476\n3186\n7355\n4784\n4270\n1812\n4226\n2267\n8873\n6544\n6112\n2381\n4752\n753\n3776\n6511\n6016\n731\n2559\n7369\n5866\n563\n7731\n1105\n5603\n50\n4238\n2208\n8725\n4994\n4719\n1444\n8807\n7298\n8139\n8760\n8173\n2332\n4131\n5207\n1065\n8562\n3992\n4024\n2188\n9095\n6765\n1707\n6105\n6922\n5362\n1486\n7898\n4135\n6574\n1551\n998\n6565\n8127\n8927\n2544\n4365\n510\n768\n3535\n3875\n6808\n2931\n487\n1088\n4451\n368\n2470\n8111\n3493\n7338\n8281\n6390\n1271\n4373\n3667\n3494\n3757\n2966\n3756\n7840\n6315\n7827\n3300\n6261\n4163\n2217\n6549\n94\n7236\n9136\n1857\n6691\n3470\n6271\n807\n516\n9311\n6098\n3144\n8420\n5425\n5694\n2643\n6696\n6072\n7285\n3781\n903\n8522\n6092\n5979\n2622\n2529\n855\n3420\n3261\n8953\n7866\n2492\n3157\n359\n1520\n2642\n7452\n759\n36\n8931\n1744\n4350\n1089\n9199\n4295\n1889\n1908\n4868\n4498\n1968\n9103\n3273\n8723\n7413\n4114\n5584\n4874\n1427\n5211\n7618\n1542\n1353\n8158\n4168\n3200\n6345\n8560\n5619\n5953\n3158\n8849\n5831\n1411\n7294\n8103\n6539\n7397\n1006\n5450\n3119\n4274\n5352\n4571\n2319\n4217\n4976\n902\n1814\n2651\n3299\n3398\n982\n2428\n5793\n1346\n7057\n3737\n7329\n4449\n2110\n7405\n1773\n958\n3901\n4127\n8234\n2994\n7066\n1289\n2995\n5871\n3556\n9085\n846\n2366\n585\n7032\n5516\n5230\n3481\n2732\n6658\n7423\n1855\n6384\n3554\n5823\n4948\n7058\n4667\n5377\n2503\n7694\n9191\n9144\n655\n3409\n62\n8019\n8970\n5523\n7403\n3379\n2323\n4833\n5750\n3178\n6548\n8891\n7501\n3280\n7404\n343\n2171\n8397\n1367\n8611\n6118\n6603\n3729\n7182\n9048\n7733\n5642\n7141\n3335\n4845\n5449\n3467\n6250\n163\n5168\n2040\n5339\n3609\n8352\n3426\n8567\n769\n187\n6151\n6437\n7028\n8507\n3970\n9146\n2068\n5028\n7492\n1661\n2815\n2469\n2563\n3814\n8430\n4305\n3479\n5678\n9115\n4132\n1211\n5459\n4814\n545\n4556\n238\n4296\n2724\n1260\n2581\n6087\n4632\n4313\n380\n1209\n5447\n3032\n7942\n8943\n806\n2432\n6130\n4314\n2131\n9045\n6531\n5706\n6747\n7724\n2017\n3292\n5469\n2743\n424\n4233\n7643\n8619\n5192\n4516\n9324\n3537\n9152\n8058\n7526\n8711\n1949\n5982\n1732\n6702\n7027\n6388\n7012\n328\n2130\n452\n306\n7669\n3134\n5761\n3703\n44\n4189\n695\n7672\n5224\n9215\n5644\n3143\n3704\n5443\n2348\n7177\n2328\n4725\n354\n1418\n7810\n7746\n9002\n5759\n7226\n4535\n9160\n4385\n5397\n7249\n2936\n3204\n6287\n385\n2371\n2738\n3636\n9033\n2246\n2680\n6940\n4310\n2054\n9250\n9080\n4568\n5586\n4469\n2038\n3410\n7900\n4332\n6108\n678\n3319\n9079\n1054\n4048\n4751\n1320\n6890\n7931\n1398\n4349\n5299\n5025\n7932\n5738\n7787\n4590\n4020\n1274\n2488\n8497\n3372\n8965\n3219\n799\n3664\n6500\n7093\n4362\n6205\n4244\n4652\n1964\n5945\n6434\n2031\n2684\n6632\n4588\n8271\n3232\n5782\n2904\n6789\n5636\n7200\n3632\n5435\n8203\n3480\n4786\n7579\n3351\n1921\n798\n3646\n3094\n4359\n1654\n5975\n376\n5965\n780\n7821\n9224\n6738\n3185\n2133\n6248\n5996\n2834\n531\n5688\n2448\n7925\n7974\n5924\n6401\n5778\n6594\n5442\n8336\n4522\n3770\n6340\n6328\n4946\n4161\n2954\n2588\n8465\n2885\n1606\n5787\n3407\n3121\n7310\n1413\n1932\n4787\n2579\n3325\n508\n5610\n6480\n4290\n479\n3792\n6628\n2545\n6717\n6972\n2665\n6730\n3547\n6845\n5929\n3540\n435
6\n8993\n1052\n2235\n8356\n3403\n8818\n8260\n572\n4159\n1180\n5348\n941\n7948\n2676\n3539\n4866\n6422\n8365\n3217\n1310\n2059\n9177\n1419\n2283\n8892\n8162\n1212\n6277\n3725\n7806\n6149\n7874\n718\n6888\n7118\n277\n656\n8763\n8289\n4759\n5854\n8659\n7710\n3145\n5981\n1881\n5799\n6947\n1609\n6396\n2631\n2887\n318\n2550\n6132\n1736\n2907\n7816\n48\n4304\n8133\n6698\n2760\n7779\n7732\n7642\n1154\n7242\n711\n9262\n539\n8033\n7440\n1913\n5480\n5570\n8594\n8772\n4654\n8974\n6128\n6183\n1071\n8449\n2142\n2298\n524\n1695\n820\n4053\n8241\n1856\n8641\n3981\n217\n1063\n9286\n3152\n221\n5461\n1270\n2006\n7164\n1199\n6951\n5604\n5400\n5309\n3498\n6407\n6661\n7097\n8165\n5169\n3852\n7070\n5702\n4344\n6648\n6904\n3272\n7119\n5795\n2365\n2659\n353\n5444\n6968\n2755\n1924\n2098\n2972\n6006\n5865\n8740\n2418\n3401\n7856\n5841\n598\n836\n1147\n931\n8897\n0\n6049\n1837\n865\n1871\n6116\n6831\n5773\n3587\n303\n1883\n2163\n3070\n1308\n7953\n6300\n6909\n853\n7301\n3279\n123\n7186\n3194\n5553\n5133\n1931\n4622\n6075\n4891\n5722\n5693\n8\n2339\n6596\n71\n379\n4506\n4370\n1238\n2707\n3344\n4254\n8767\n1726\n325\n4148\n5438\n5357\n548\n1332\n6824\n2290\n2335\n3146\n2594\n2315\n3389\n3885\n2621\n4116\n5389\n7412\n7222\n4894\n8595\n2000\n4978\n4721\n6444\n3796\n9321\n2236\n6409\n1523\n1468\n9249\n8270\n2341\n2874\n174\n4757\n4502\n4703\n9034\n9108\n5451\n2619\n5022\n9158\n490\n6540\n1466\n2962\n8771\n3036\n2712\n4539\n1581\n5638\n9246\n4308\n4363\n4647\n4470\n1636\n2511\n1311\n6560\n7519\n8027\n9217\n6464\n6364\n3779\n4822\n3563\n3982\n5896\n5510\n6655\n1524\n2846\n3137\n621\n141\n1887\n6567\n8921\n4671\n6052\n8445\n8699\n7349\n3553\n2117\n7651\n5034\n5383\n649\n3818\n9022\n8414\n1012\n8159\n5081\n8571\n4765\n9135\n4361\n4073\n9142\n727\n2835\n8229\n3989\n4490\n4923\n5477\n1638\n3643\n712\n9044\n2230\n499\n7166\n96\n3172\n8431\n8401\n1470\n6356\n8817\n927\n4212\n2152\n1795\n3812\n4949\n1219\n1538\n3029\n6481\n9042\n7775\n7742\n423\n2085\n7715\n4541\n9061\n5916\n3950\n7420\n4878\n7406\n7046\n7808\n4911\n8804\n6927\n8820\n3264\n300\n8670\n2979\n252\n4407\n3383\n4688\n8504\n6723\n26\n3837\n2489\n4137\n8209\n229\n6490\n2364\n9016\n1763\n1728\n338\n8335\n9063\n5280\n2791\n641\n5454\n4581\n5420\n4548\n2840\n8508\n3463\n7231\n7619\n2560\n1755\n6201\n165\n1471\n6279\n5806\n6867\n5890\n2396\n3416\n1981\n6073\n5872\n3045\n4182\n7607\n3318\n4414\n2998\n6553\n7139\n5624\n2123\n3666\n723\n5110\n6932\n8200\n2222\n8399\n1041\n4138\n1594\n3569\n9253\n393\n7940\n8004\n1475\n6759\n5393\n1107\n2597\n878\n9309\n7576\n5250\n1759\n3142\n2015\n571\n3921\n1255\n7080\n893\n2160\n1355\n82\n1562\n9153\n8583\n4085\n4644\n7196\n9165\n3558\n4550\n6374\n7826\n8602\n4146\n9257\n6083\n874\n8383\n3731\n3374\n3653\n8222\n7344\n470\n1813\n4478\n6871\n7245\n6866\n3998\n7433\n276\n1915\n1988\n8168\n2518\n2686\n831\n6143\n5205\n8718\n1703\n7729\n2077\n7983\n8450\n1195\n9232\n507\n7989\n6974\n4054\n5828\n8655\n6679\n5245\n7783\n5886\n9098\n6491\n8782\n3525\n6542\n131\n8110\n9186\n9074\n4933\n9035\n2607\n4\n2057\n6273\n2711\n5829\n3382\n2696\n3043\n2048\n619\n2499\n5295\n1162\n7807\n3694\n2194\n3149\n1940\n7934\n840\n3592\n8237\n4731\n1324\n8486\n8726\n8573\n2928\n9078\n2272\n2564\n1370\n5911\n7434\n8026\n407\n7546\n2004\n5849\n3034\n7887\n3425\n1118\n926\n3430\n1544\n5902\n2282\n1124\n2334\n129\n1372\n4842\n6473\n4382\n1028\n415\n8269\n8073\n6910\n2796\n3038\n5735\n5080\n2852\n6306\n8842\n9188\n3637\n1066\n532\n928\n5485\n2838\n6753\n9008\n7984\n2816\n8819\n7103\n5977\n5044\n2064\n2599\n4973\n382\n3249\n6446\n6638\n852\n1724\n3368\n892\n3250\n8258\n7962\n43
00\n1616\n167\n8855\n2090\n4424\n879\n5136\n5350\n2635\n7828\n8506\n63\n3004\n3847\n3676\n1184\n1705\n6745\n1263\n5020\n746\n1888\n7036\n1033\n3914\n5433\n3905\n4641\n8909\n228\n4801\n3766\n8085\n643\n6914\n9280\n3013\n5657\n3696\n1590\n2920\n8282\n2403\n416\n911\n3849\n4215\n1120\n5490\n296\n2306\n3140\n3742\n4819\n6153\n6414\n760\n3000\n7498\n7108\n6429\n3031\n5314\n751\n3357\n5808\n7505\n98\n7652\n4027\n6257\n3943\n1799\n8577\n5577\n4969\n9163\n2025\n6061\n4026\n5732\n588\n7017\n1415\n4961\n4940\n7152\n538\n706\n2802\n8983\n3375\n1246\n6593\n5837\n1789\n7939\n4997\n5939\n2411\n6133\n199\n7593\n1702\n5406\n6082\n2359\n2912\n6109\n100\n8149\n5470\n2807\n3384\n6413\n3362\n5621\n6019\n9241\n9268\n7703\n4111\n7967\n5458\n7181\n5492\n1112\n6729\n4577\n106\n8853\n3774\n979\n7082\n4610\n1853\n9003\n9292\n2867\n6262\n2245\n3460\n1557\n767\n4796\n8147\n2658\n5769\n6985\n7065\n421\n7990\n3289\n1540\n9316\n2251\n6896\n5947\n4965\n2652\n4480\n963\n9047\n7168\n7824\n3976\n6210\n7018\n7179\n5016\n7789\n6102\n6828\n7659\n9109\n9071\n8115\n7628\n7110\n16\n7513\n835\n939\n4078\n2351\n2322\n3881\n4945\n560\n6837\n6094\n6475\n7901\n3\n771\n8029\n3135\n8044\n7127\n3741\n5156\n7030\n4906\n113\n3747\n7042\n5232\n5225\n3002\n4747\n6879\n5379\n4886\n7192\n4184\n1896\n1834\n8689\n3665\n2957\n6913\n8009\n4851\n6420\n7987\n828\n3003\n8884\n8815\n3198\n8008\n194\n6251\n3303\n3934\n395\n1285\n4169\n1648\n1347\n3600\n4631\n509\n211\n6230\n7241\n8250\n2219\n2582\n8353\n7790\n7583\n4462\n3904\n9004\n6942\n1704\n5686\n8051\n2981\n5511\n6182\n7088\n1699\n1222\n3455\n6189\n1528\n5197\n6221\n7893\n3283\n2837\n7773\n8766\n2942\n8021\n614\n4102\n7362\n1786\n400\n133\n556\n3127\n5237\n3727\n1440\n3873\n6322\n8448\n6285\n8696\n8800\n4009\n3386\n454\n4847\n5685\n9093\n246\n1314\n5895\n6863\n4302\n4260\n8405\n8417\n7116\n255\n3223\n4737\n7852\n6337\n814\n710\n1094\n6103\n5809\n5882\n6336\n4974\n1499\n2806\n3744\n2664\n2436\n4482\n8665\n8918\n1076\n8676\n5725\n9248\n4755\n1447\n9328\n5500\n78\n2653\n792\n6854\n6093\n6172\n3378\n4492\n5529\n5476\n3846\n1391\n383\n4289\n3883\n2648\n3265\n2525\n5402\n4599\n6870\n6877\n4413\n2464\n8519\n2521\n1839\n5822\n5664\n7257\n5375\n6852\n6764\n5182\n8914\n3015\n8509\n3080\n4562\n8979\n6215\n6643\n8601\n6096\n4812\n5246\n7862\n527\n7849\n6737\n12\n2468\n7961\n275\n27\n5932\n3840\n7341\n4996\n8564\n2154\n3788\n6138\n7831\n4442\n757\n4464\n1170\n2568\n19\n323\n6584\n7675\n3441\n2067\n9027\n2486\n4379\n4744\n1737\n7563\n301\n3907\n4742\n6857\n1221\n9284\n8458\n8236\n2897\n4004\n1526\n5345\n4423\n6246\n8578\n1057\n3711\n4986\n4785\n3997\n7311\n4788\n107\n8387\n2041\n2608\n8628\n5830\n6031\n783\n6817\n3293\n541\n773\n8473\n2501\n7247\n5667\n804\n483\n1639\n696\n6060\n5429\n5762\n1527\n7342\n1329\n6225\n7895\n381\n8030\n8520\n8362\n4734\n3526\n9273\n2039\n4142\n5084\n875\n6905\n8968\n5275\n3052\n650\n7509\n232\n2595\n3631\n1810\n4355\n8315\n8908\n1777\n4834\n3164\n2336\n1543\n6212\n8346\n3024\n3719\n1242\n6265\n8101\n3133\n6150\n6358\n3316\n4089\n1647\n4629\n7117\n2596\n5366\n1225\n6371\n624\n2209\n1428\n1158\n7648\n466\n8765\n802\n153\n4639\n3657\n6482\n9320\n2693\n6591\n3294\n2617\n5052\n6305\n3227\n8784\n7170\n93\n5868\n6716\n1671\n178\n2703\n954\n3254\n2262\n5046\n5743\n8647\n6393\n7706\n6604\n3728\n6978\n7489\n7474\n8754\n2740\n2233\n6038\n1491\n8814\n2080\n2358\n5944\n5653\n1164\n9259\n4518\n7343\n5748\n3897\n923\n5967\n2677\n3503\n1202\n4966\n1836\n1863\n6634\n1962\n9096\n9064\n977\n4049\n1464\n658\n536\n3402\n8064\n1309\n259\n7999\n8122\n910\n224\n6152\n7142\n6070\n7523\n8411\n2408\n6766\n9214
\n9312\n8325\n6192\n626\n6025\n6240\n8708\n4630\n6777\n1075\n8906\n408\n9269\n6236\n9067\n2514\n8568\n2324\n156\n3136\n3530\n7878\n7308\n4335\n2065\n3845\n4453\n3356\n1450\n371\n7219\n5171\n201\n8642\n2099\n477\n1603\n8339\n7430\n3061\n235\n8291\n1133\n8474\n7035\n8653\n989\n4569\n9092\n8347\n3102\n1743\n9086\n5140\n7438\n1530\n4342\n2460\n7646\n5047\n5071\n5430\n6944\n610\n2803\n1448\n4696\n6156\n4386\n4248\n4256\n994\n2112\n805\n8011\n8276\n8999\n4956\n1712\n2795\n7553\n6436\n2158\n9083\n3184\n5784\n4428\n612\n5288\n6222\n1365\n5074\n6848\n575\n5213\n2175\n4240\n351\n2086\n2656\n5150\n9255\n8189\n7735\n1261\n1344\n4097\n8674\n2984\n4235\n5998\n6488\n537\n1267\n7486\n7124\n6245\n7955\n7337\n5436\n1194\n8226\n209\n1710\n7906\n4357\n4139\n5679\n2584\n2854\n1004\n8246\n8586\n5087\n1878\n4926\n6637\n3197\n7757\n8249\n4055\n6502\n1248\n990\n3928\n2770\n2751\n1020\n6426\n4190\n6839\n2671\n884\n3871\n9212\n4179\n3394\n10\n5861\n5316\n6869\n2985\n8905\n8559\n4457\n2480\n2313\n4100\n4395\n6835\n7799\n7890\n2785\n5468\n7302\n5862\n1803\n6376\n3171\n8591\n717\n7053\n1655\n4489\n2522\n2921\n8555\n1984\n895\n8949\n1305\n738\n7606\n112\n3042\n1325\n437\n3167\n3340\n511\n3689\n5813\n8982\n69\n4421\n7150\n550\n8829\n8685\n3147\n8956\n3166\n7023\n8633\n3308\n2014\n3573\n3880\n4045\n2069\n6051\n4950\n702\n6664\n8418\n2454\n6181\n4853\n4166\n7022\n7418\n3605\n9181\n7172\n5031\n4589\n7858\n6586\n6351\n8334\n7504\n634\n3759\n1890\n890\n6959\n5085\n4919\n2161\n1191\n256\n3610\n7079\n3427\n4071\n7323\n2982\n7263\n7444\n4251\n5846\n4864\n3649\n4311\n7461\n8120\n4582\n6373\n2805\n4872\n4869\n5493\n5867\n2670\n7099\n30\n8933\n930\n7919\n501\n7261\n5289\n7449\n7772\n3613\n7848\n3196\n474\n205\n841\n2611\n6185\n3088\n409\n7239\n5938\n7871\n1343\n6705\n1027\n5596\n2199\n9113\n5471\n6134\n838\n2345\n8359\n4061\n1474\n3229\n270\n4245\n1979\n5995\n1517\n8652\n4006\n4880\n6137\n4693\n2528\n6996\n2926\n5798\n2477\n2549\n1128\n3341\n6014\n4479\n2861\n4208\n5175\n5174\n5118\n3736\n5463\n1588\n2327\n8380\n7982\n1514\n1058\n4586\n6608\n7985\n3044\n1822\n3628\n6851\n549\n1811\n2184\n2601\n4608\n8922\n2540\n6659\n3859\n307\n3650\n3767\n8167\n505\n4366\n4824\n5520\n461\n1933\n2401\n8106\n2055\n7844\n8544\n8838\n4797\n7419\n6686\n7670\n6039\n5672\n5141\n6543\n206\n5252\n4718\n888\n1601\n3218\n5114\n713\n4022\n4419\n6708\n397\n425\n6612\n5057\n1729\n6573\n4729\n4080\n1034\n2961\n534\n8194\n5598\n9218\n2424\n329\n4154\n1597\n922\n109\n8823\n3578\n9038\n8437\n3307\n128\n8032\n1412\n7333\n8762\n8851\n8865\n3056\n468\n3808\n3064\n8798\n7052\n7767\n9231\n1086\n2162\n6566\n2109\n3439\n6122\n3642\n7696\n8610\n5279\n1808\n8687\n8377\n817\n8714\n6066\n4008\n3640\n6015\n1021\n7601\n4855\n6017\n87\n7071\n2730\n7268\n3614\n6084\n6117\n6924\n9102\n2829\n375\n8724\n2095\n22\n1541\n2970\n633\n139\n451\n4521\n179\n1396\n3876\n5824\n8020\n426\n4982\n4172\n1157\n190\n4859\n1455\n3110\n3323\n9104\n858\n6719\n6428\n4495\n8551\n2141\n3984\n3066\n67\n4299\n5821\n8444\n6581\n6097\n7090\n7781\n8944\n3085\n8606\n2114\n5355\n8901\n1461\n3301\n422\n7000\n4820\n5790\n1379\n7536\n4199\n8736\n8991\n5241\n1698\n1294\n1753\n196\n2987\n8680\n4658\n4144\n8639\n6441\n8255\n8156\n3677\n6385\n6520\n7700\n3760\n6001\n1144\n5478\n7394\n8057\n5018\n4232\n5235\n6844\n3111\n8802\n867\n949\n7843\n573\n2278\n6801\n7629\n2714\n5105\n6946\n2697\n5315\n1571\n8677\n2537\n4374\n3833\n7820\n3750\n2033\n6526\n3884\n8706\n7195\n417\n3603\n3001\n6284\n5873\n5718\n8576\n8457\n3589\n5839\n459\n3626\n6342\n8729\n6933\n607\n6053\n8228\n3773\n1805\n6365\n5142\n6069\n1389\n9026\n570\n4614\
n5712\n5533\n9222\n2821\n1897\n819\n766\n4060\n4902\n5905\n6842\n5446\n1277\n4303\n2836\n934\n1014\n7822\n7494\n3466\n665\n1047\n5881\n3328\n4664\n315\n1315\n1462\n8616\n7725\n2756\n5749\n1730\n8184\n4567\n5065\n7499\n8867\n1304\n3669\n9192\n410\n8177\n6710\n1210\n2329\n8443\n3911\n1899\n7686\n3315\n7190\n6180\n3116\n5341\n4394\n8337\n9182\n6969\n5715\n2172\n1742\n2782\n3715\n9195\n7960\n2517\n4890\n8294\n2337\n8014\n3353\n7475\n2193\n4843\n8831\n4200\n4653\n6196\n6957\n3063\n2996\n8959\n8973\n6529\n3457\n5274\n8002\n6823\n6154\n5561\n1780\n9318\n7657\n1758\n6503\n7678\n3274\n1625\n4327\n3236\n8575\n3155\n4707\n4331\n1494\n8756\n3174\n1074\n8116\n8295\n8311\n3048\n3752\n6050\n6483\n8003\n9175\n4674\n1642\n2556\n6166\n7165\n8441\n5413\n3990\n1640\n1778\n7500\n8304\n1395\n4315\n5949\n3364\n242\n5763\n1036\n249\n2430\n7426\n8131\n411\n6267\n2045\n6606\n899\n8065\n9052\n7507\n5779\n5616\n2107\n5408\n2980\n6310\n5776\n4328\n821\n3251\n2354\n7076\n1700\n5313\n6736\n79\n8212\n3959\n5677\n7545\n160\n6790\n6859\n3659\n6770\n1106\n8846\n956\n7472\n2050\n8099\n4795\n8053\n9293\n7037\n1646\n9307\n1069\n5322\n5332\n2708\n8977\n917\n2419\n184\n2105\n1578\n3923\n5780\n1903\n2512\n429\n5582\n493\n4972\n445\n8286\n555\n320\n8300\n322\n617\n3413\n4459\n525\n5631\n6314\n5157\n5300\n8545\n182\n1031\n4429\n2495\n7586\n1534\n3099\n3916\n3738\n1919\n535\n2119\n1299\n177\n1838\n2159\n4099\n8285\n5172\n8540\n6020\n7683\n3073\n3115\n1673\n3087\n3488\n2416\n1894\n5942\n3597\n5834\n2007\n43\n1779\n4174\n2023\n2546\n2429\n9006\n436\n4214\n4536\n3693\n5426\n6767\n5903\n4368\n2170\n5051\n7490\n7882\n2859\n5035\n7835\n5372\n7122\n925\n3253\n6338\n8393\n4093\n5848\n7588\n2683\n8049\n5403\n5894\n8745\n8550\n2941\n3484\n9029\n4461\n8022\n725\n2355\n1619\n3030\n1975\n5623\n2415\n1957\n6141\n9278\n3226\n3062\n5670\n7326\n8759\n8496\n6619\n8187\n8262\n6199\n951\n7183\n668\n2388\n4698\n5681\n8240\n2851\n871\n4988\n9084\n9089\n3162\n1167\n8244\n5227\n6461\n2831\n776\n5010\n5770\n5282\n3574\n5102\n1278\n2281\n5455\n305\n4628\n4663\n9119\n7487\n8746\n4889\n6569\n1175\n102\n2386\n8940\n2479\n5566\n53\n8833\n1918\n8001\n321\n6786\n6861\n4358\n2771\n7467\n975\n4777\n605\n3543\n2600\n7584\n9299\n4530\n6477\n7364\n7328\n183\n4761\n7543\n304\n1196\n4623\n7839\n2139\n5519\n1953\n533\n5989\n7590\n7428\n6346\n6162\n1091\n1946\n6260\n4405\n5676\n8924\n7171\n8409\n1866\n6379\n3411\n2387\n3051\n7398\n154\n1185\n6442\n6004\n1611\n2165\n9018\n8323\n616\n3995\n8952\n1533\n7853\n4194\n213\n789\n4991\n3675\n7456\n5752\n175\n7556\n4195\n907\n2248\n9057\n8467\n4594\n1017\n7968\n880\n7446\n3304\n1666\n4942\n3867\n4802\n9156\n6357\n4621\n887\n6213\n5261\n1336\n521\n8928\n1818\n7864\n4792\n6742\n157\n1593\n823\n7235\n5303\n5633\n1100\n1692\n8047\n5993\n1460\n6714\n1630\n6440\n6307\n3608\n292\n212\n401\n5974\n7107\n8301\n8342\n2720\n4583\n2757\n7315\n833\n4466\n4236\n1282\n5273\n2149\n287\n8484\n2380\n8119\n7167\n737\n5076\n6598\n3596\n5382\n2650\n8980\n3421\n1356\n1954\n7823\n1172\n2226\n1941\n6136\n7274\n2256\n4928\n324\n1407\n4410\n4579\n1061\n7113\n486\n862\n3435\n6956\n2873\n1465\n6113\n8225\n8512\n6806\n272\n6008\n1241\n88\n5662\n3555\n689\n8733\n2812\n7453\n6282\n420\n2471\n4477\n7495\n1445\n594\n6939\n1564\n8704\n8590\n7992\n7374\n5796\n9298\n4213\n5713\n5864\n326\n5513\n402\n464\n608\n1951\n8640\n8180\n3347\n3459\n4162\n2690\n7478\n5856\n5240\n2389\n3022\n602\n5547\n1798\n1345\n9276\n599\n3673\n3277\n1635\n8625\n1567\n5928\n636\n5671\n2896\n3477\n412\n7575\n4201\n685\n4760\n1229\n4275\n8960\n3123\n4471\n5941\n3355\n3999\n7157\n6354\n7741\n6850\n
8783\n1943\n6769\n7330\n8721\n8477\n1381\n848\n778\n6408\n2644\n5817\n1441\n1723\n2144\n2776\n2368\n120\n367\n8839\n8749\n5353\n4158\n3148\n9114\n1233\n9228\n8857\n2895\n1286\n200\n6755\n5125\n5857\n1657\n7658\n5097\n5000\n942\n7020\n586\n784\n7078\n6194\n8658\n8957\n9325\n1851\n8911\n4862\n7004\n1186\n8824\n1651\n2999\n561\n7639\n4316\n5086\n3187\n7912\n2624\n9183\n8487\n5089\n8475\n7554\n4031\n6297\n6059\n5329\n115\n2058\n7650\n7634\n7121\n2485\n7805\n2241\n7713\n4352\n2409\n1026\n2745\n4549\n6474\n5124\n5201\n6556\n6617\n9091\n3945\n8402\n5648\n5257\n2192\n4901\n7750\n6131\n6027\n6352\n4625\n1254\n5498\n3720\n8261\n3939\n5576\n3685\n6713\n8472\n991\n8354\n8068\n5655\n5997\n1029\n7506\n6740\n2575\n2990\n4898\n583\n7402\n3290\n5388\n6715\n8235\n5361\n4970\n1363\n3338\n5731\n9014\n5358\n2216\n2856\n635\n1193\n3705\n6334\n7666\n5270\n1384\n6368\n8604\n3564\n1937\n2481\n1341\n721\n2100\n3958\n6551\n3813\n2592\n7980\n5385\n319\n2357\n8761\n8910\n8693\n1204\n489\n4827\n8024\n7832\n6427\n3895\n89\n9068\n8067\n1708\n1111\n8963\n1902\n9251\n5719\n9143\n5537\n9169\n77\n5365\n1840\n485\n4456\n2841\n1169\n3271\n7144\n6886\n9140\n7173\n6003\n1659\n1807\n8371\n2439\n274\n4660\n3448\n6623\n347\n2103\n3400\n2106\n9073\n8169\n3687\n3305\n4416\n8454\n6635\n332\n2433\n2909\n3839\n4063\n1944\n6509\n1296\n7770\n1880\n6610\n4075\n9331\n4484\n302\n418\n4219\n1333\n2350\n6498\n8424\n4694\n4883\n5269\n6580\n5007\n6722\n1669\n8470\n2571\n513\n3810\n7049\n6332\n7363\n3532\n8456\n2097\n297\n8841\n7180\n714\n1587\n5234\n4268\n2320\n7372\n660\n8503\n1668\n8847\n1101\n7275\n3336\n6460\n722\n7782\n3947\n502\n4258\n2132\n1835\n181\n3841\n427\n3446\n2551\n8324\n6963\n4284\n7297\n7577\n3399\n9148\n8213\n5656\n8440\n851\n657\n2446\n4292\n6992\n976\n1108\n2681\n3237\n8582\n377\n5969\n5287\n9209\n8523\n7178\n7833\n6175\n2126\n3023\n5090\n7491\n6640\n6077\n2221\n2780\n1694\n4094\n144\n6161\n3203\n7123\n749\n3625\n3848\n980\n2270\n7819\n3672\n7689\n7203\n2718\n1714\n2884\n3474\n3802\n3851\n4224\n7237\n5415\n7998\n7207\n4106\n9036\n1046\n8731\n5070\n6818\n4592\n6056\n693\n1328\n3309\n5791\n2629\n2736\n202\n388\n7886\n4417\n8786\n8822\n4035\n7718\n8492\n5505\n1192\n4388\n8941\n5019\n7538\n6732\n7296\n6389\n5923\n1405\n3278\n3917\n1688\n8374\n443\n4037\n9099\n5190\n6402\n4177\n9310\n7747\n4348\n7197\n4844\n4998\n5609\n4345\n29\n3332\n8648\n4107\n346\n2577\n3941\n1215\n3782\n8252\n4706\n2675\n3790\n7459\n6164\n7316\n1149\n6687\n582\n3139\n5040\n7645\n3882\n7322\n4034\n1861\n4701\n8757\n3208\n8801\n6349\n8907\n1823\n4528\n4789\n143\n4746\n9234\n3866\n9245\n1911\n1366\n4393\n2061\n859\n1959\n6967\n3138\n7382\n9031\n6237\n845\n80\n6911\n7163\n5229\n4736\n8738\n33\n8543\n357\n3193\n7262\n4448\n6796\n6793\n3321\n7569\n6411\n7692\n7340\n1417\n5847\n3836\n2678\n1188\n8727\n223\n8615\n7417\n5771\n3170\n8061\n2935\n8263\n8257\n6883\n1276\n1239\n812\n6258\n3922\n7525\n8117\n3039\n603\n8554\n7573\n2787\n3445\n5115\n3478\n962\n3961\n6570\n7722\n216\n2797\n5154\n2530\n4904\n2405\n7542\n4021\n3252\n5370\n9302\n236\n4532\n1361\n3373\n1716\n2183\n1583\n3783\n868\n1687\n8925\n1433\n6198\n8208\n6367\n7603\n882\n3469\n1645\n7654\n1176\n4231\n150\n7997\n5456\n7031\n4375\n8840\n5634\n6945\n705\n3442\n4774\n3822\n7148\n1922\n8459\n6249\n8713\n6197\n8599\n6071\n6756\n1634\n950\n5640\n7749\n5920\n6622\n4783\n7837\n7479\n7229\n3919\n1797\n5272\n8945\n4908\n5439\n6903\n5833\n6930\n8197\n9261\n1711\n5483\n6046\n4285\n8852\n7409\n8971\n8278\n7534\n7792\n2444\n7496\n8063\n1665\n248\n3894\n4585\n1982\n66\n6651\n4850\n1240\n7511\n7524\n9258\n2075\n3979\n4714\n75
92\n965\n2919\n8239\n1842\n8013\n4750\n2344\n6155\n3468\n31\n2087\n1599\n1573\n5883\n7613\n195\n3749\n644\n2189\n8779\n8743\n9005\n8081\n1040\n7785\n5820\n8830\n5495\n4867\n2710\n3843\n491\n7153\n6217\n1148\n4741\n1761\n5484\n3423\n5474\n6916\n5876\n7252\n1739\n8930\n6647\n5198\n4903\n8488\n7366\n2774\n2726\n2385\n7625\n3179\n2211\n8845\n6600\n399\n6810\n3447\n6684\n4915\n8368\n1867\n2325\n2101\n1335\n7734\n3722\n7437\n3716\n7025\n4000\n6897\n1408\n7154\n5013\n2204\n9233\n4225\n3817\n1877\n9161\n2197\n6991\n3390\n280\n1892\n1612\n7753\n2801\n7246\n7909\n6229\n9314\n8407\n1436\n3879\n6432\n6849\n5326\n5327\n8535\n7910\n7745\n5545\n7916\n207\n1783\n6158\n8517\n7361\n8070\n6430\n119\n6146\n4183\n1083\n7385\n4497\n9133\n1686\n3765\n5099\n595\n8046\n4418\n4043\n2361\n7915\n9149\n1717\n1141\n6375\n1018\n5602\n1262\n7485\n9178\n6629\n3339\n8934\n4648\n7988\n6252\n3440\n864\n5418\n3874\n7280\n6191\n8388\n4323\n6792\n4324\n2232\n7228\n8684\n7813\n6187\n6678\n3177\n3534\n4953\n4402\n7739\n6319\n2414\n8700\n5946\n8238\n4533\n6917\n4167\n4618\n2115\n2268\n3081\n1247\n4001\n8580\n7636\n3101\n2195\n1559\n3714\n2484\n7188\n6028\n7530\n2828\n1977\n3238\n6496\n2340\n110\n3247\n7532\n7541\n924\n1632\n484\n4487\n4439\n6447\n1319\n4944\n6347\n1791\n2285\n8087\n5452\n91\n1166\n162\n5185\n7933\n4743\n1627\n7259\n8620\n8525\n8207\n5845\n9011\n5525\n4269\n4700\n1824\n8186\n8872\n8299\n3957\n8242\n4558\n6439\n2666\n5943\n6958\n8112\n5121\n8806\n6170\n7688\n3486\n2082\n7436\n2778\n1096\n786\n2206\n5170\n1443\n6030\n3312\n9151\n8485\n6404\n8498\n2883\n8961\n2280\n8341\n9137\n4337\n2809\n2445\n809\n8298\n8643\n8316\n4951\n6853\n1572\n3215\n3938\n2249\n6515\n1337\n8328\n7712\n1429\n4117\n5441\n3230\n4152\n7225\n3513\n6953\n1507\n348\n3639\n5739\n2673\n1550\n6301\n1652\n8453\n204\n6833\n8056\n2200\n5217\n1854\n4711\n7368\n4572\n4032\n7531\n1013\n3634\n2875\n6058\n8307\n7609\n1766\n904\n667\n5410\n6578\n3601\n1664\n3233\n7390\n8178\n4486\n4952\n4427\n4876\n9166\n3107\n2772\n6295\n5001\n5296\n3371\n6518\n6327\n854\n1615\n8288\n1912\n5927\n6202\n5814\n9032\n1059\n3214\n6547\n7038\n5781\n6926\n4390\n6114\n1622\n4318\n5803\n5984\n736\n3561\n6554\n5045\n4277\n7386\n9081\n8462\n2034\n4955\n2701\n932\n1298\n7758\n7176\n9205\n2276\n3077\n3803\n3562\n8054\n7946\n295\n1843\n7728\n1629\n7768\n3663\n6363\n2971\n431\n9285\n2513\n1116\n3656\n4529\n6366\n5758\n6339\n8398\n816\n4153\n648\n2536\n1826\n7870\n8113\n7730\n7101\n6555\n9256\n6774\n1072\n4578\n2598\n3604\n5880\n861\n8273\n3350\n3117\n4685\n9219\n4334\n5165\n2035\n7224\n4066\n4253\n4447\n3815\n5038\n253\n3658\n2252\n330\n3967\n6443\n2143\n7336\n6135\n593\n2734\n8390\n4655\n7800\n1399\n1173\n5618\n2822\n7905\n7503\n4431\n2443\n1568\n3909\n1974\n2496\n4772\n5164\n4105\n2138\n2864\n3799\n3924\n4882\n8245\n1585\n5528\n5692\n5730\n5832\n137\n3175\n2894\n2062\n3899\n2752\n4028\n2113\n5411\n293\n2647\n730\n3758\n1667\n8879\n9303\n6653\n3698\n3968\n3053\n503\n2150\n4645\n2257\n4627\n8303\n7966\n8742\n4692\n5901\n8547\n2277\n5546\n986\n370\n4697\n8712\n4804\n4881\n1182\n6650\n7290\n3487\n2814\n5668\n7567\n5333\n3724\n4164\n3084\n8896\n3888\n6537\n17\n6882\n3531\n704\n1037\n8866\n5263\n6758\n3762\n1393\n3824\n5575\n5112\n214\n1439\n5700\n8932\n1306\n5011\n6928\n5173\n4098\n1132\n7352\n4778\n7723\n1368\n2390\n670\n2685\n5855\n1772\n6380\n3853\n940\n5424\n6091\n1748\n6193\n5297\n6572\n8877\n6874\n430\n5041\n5267\n1145\n7448\n620\n9112\n4294\n1432\n72\n130\n2393\n7920\n4597\n6614\n8889\n3697\n1895\n3462\n2616\n3978\n4791\n7846\n7780\n8372\n428\n6559\n8326\n9211\n2363\n1525\n5980\n7888\n3331
\n8118\n7899\n615\n7377\n791\n5930\n6627\n8322\n1138\n770\n8460\n5100\n8274\n8350\n6316\n2893\n7594\n9236\n5082\n8150\n1986\n1909\n8902\n2145\n3617\n3501\n7\n2426\n5056\n8016\n2702\n5360\n8135\n8385\n8378\n8018\n8574\n720\n8893\n3021\n1978\n4782\n1816\n2083\n4051\n1446\n5870\n971\n9097\n8006\n4222\n8287\n686\n1377\n611\n8153\n4920\n4808\n1536\n679\n4096\n3891\n4884\n432\n4615\n8988\n5560\n3451\n5589\n3514\n6169\n1414\n3244\n1490\n7100\n3588\n690\n7317\n4171\n2266\n6800\n108\n2793\n5151\n6977\n2587\n8188\n8752\n6318\n5815\n5116\n263\n3311\n5191\n5689\n289\n3392\n5755\n1022\n5548\n9319\n8937\n6011\n7632\n5328\n4993\n4141\n5407\n1865\n520\n7305\n7208\n526\n3645\n1859\n2520\n3523\n8629\n7304\n8881\n3076\n4005\n8329\n2205\n2214\n6925\n8691\n4136\n8883\n974\n7873\n7952\n3965\n5887\n7964\n7189\n2406\n2783\n8086\n405\n6568\n5147\n2021\n4727\n4826\n7674\n1600\n5078\n2949\n6624\n6541\n8986\n5740\n4679\n8500\n3591\n4434\n398\n983\n7544\n1478\n4570\n6012\n465\n9330\n7206\n808\n8737\n2356\n4959\n8812\n6955\n3599\n2168\n1420\n1721\n1794\n5897\n8422\n2\n4023\n2739\n3619\n8797\n5496\n8951\n8181\n6893\n9254\n1809\n5682\n4309\n6929\n2742\n5988\n3363\n4493\n8434\n4210\n1503\n1876\n5094\n4600\n4936\n4798\n3933\n5216\n646\n7660\n3098\n8773\n4076\n1576\n5335\n3746\n3327\n47\n4602\n8636\n4129\n363\n6417\n7416\n9025\n4377\n4766\n2779\n4151\n9046\n7860\n3154\n3476\n7620\n966\n2052\n8344\n1752\n7199\n4412\n8895\n8882\n2463\n339\n56\n5390\n4821\n7555\n6558\n1905\n5258\n8880\n4205\n3580\n6735\n1023\n4511\n3850\n161\n7395\n2532\n3349\n7055\n7387\n758\n1907\n872\n3006\n659\n815\n1961\n6902\n7668\n4708\n1904\n4433\n5159\n6816\n8664\n6918\n1016\n6513\n7314\n5364\n7480\n9313\n716\n3395\n6843\n2292\n918\n4329\n1035\n6344\n8593\n3404\n5212\n837\n480\n8524\n1342\n3690\n6797\n7414\n288\n8863\n3352\n1628\n24\n135\n3314\n2181\n8650\n5915\n8078\n6812\n1375\n6040\n906\n5635\n7126\n1387\n7458\n6119\n5591\n3795\n1531\n95\n1960\n7522\n3033\n898\n4607\n4921\n3913\n2623\n4430\n6268\n7063\n1326\n9075\n2505\n7400\n1284\n2951\n747\n6466\n1357\n6493\n7320\n5892\n576\n5107\n5559\n97\n2583\n6361\n8843\n3509\n7892\n6086\n1476\n4612\n7427\n4267\n9094\n7050\n6048\n8455\n8382\n2227\n284\n2898\n3221\n2353\n2157\n5990\n5810\n3581\n7279\n6188\n7859\n3549\n5539\n7918\n2022\n9066\n630\n2500\n5111\n6561\n5127\n8095\n5569\n6123\n1338\n8605\n3491\n4187\n8220\n7334\n9213\n3067\n6997\n2853\n4735\n4372\n1489\n5954\n6662\n2207\n973\n3361\n960\n6350\n4170\n7431\n8076\n1129\n750\n7559\n7194\n2261\n2300\n6590\n5893\n6889\n3125\n8788\n334\n7286\n3472\n8164\n7693\n1469\n1181\n669\n7515\n5563\n4773\n3210\n6324\n3113\n9070\n3638\n7551\n2541\n3506\n5138\n4069\n7198\n7560\n3306\n6100\n2932\n4473\n1741\n14\n4672\n7564\n8748\n8874\n3804\n3678\n2240\n2610\n2862\n1358\n5716\n42\n5176\n9326\n8464\n1038\n2993\n3017\n9072\n32\n4809\n4364\n2808\n4125\n448\n152\n7299\n5431\n6178\n793\n3444\n9120\n8410\n4963\n772\n5457\n6954\n3014\n6881\n286\n553\n1948\n6398\n6255\n3057\n8646\n6176\n2700\n7106\n5663\n6683\n1281\n6013\n8799\n7635\n9289\n1885\n442\n2225\n6294\n5054\n2674\n7884\n8730\n8216\n4203\n1488\n7111\n4013\n3623\n7950\n1971\n1966\n3248\n2900\n1553\n472\n3865\n7796\n6937\n4591\n8098\n5208\n294\n5627\n5691\n5687\n7149\n4879\n3624\n7005\n2773\n3112\n9185\n1633\n7830\n5101\n8707\n8469\n4678\n4860\n700\n5527\n9194\n2794\n5068\n2639\n1177\n4282\n6492\n8128\n5859\n5029\n5123\n2877\n522\n5048\n7230\n2104\n6642\n6731\n2717\n5149\n2043\n9059\n5277\n844\n1394\n3262\n5515\n6706\n3651\n9105\n7671\n2880\n3607\n6410\n2508\n8463\n2394\n1916\n1125\n5343\n3322\n5307\n4547\n1589\n8478\n8899
\n2955\n8028\n7293\n4619\n4058\n2781\n8715\n1272\n5734\n4474\n4863\n4367\n49\n8844\n5605\n8671\n6743\n4281\n7077\n1874\n2626\n2516\n258\n5249\n6186\n7958\n5432\n3801\n6288\n4732\n9121\n7558\n2527\n4661\n6819\n3835\n7508\n584\n215\n5036\n4261\n8978\n5228\n647\n4657\n2591\n5931\n5088\n9204\n929\n4381\n5421\n2965\n5050\n6495\n5033\n4799\n959\n6115\n3520\n1232\n5811\n317\n8976\n7705\n3842\n2178\n7187\n1373\n7112\n2694\n8627\n8493\n3991\n7441\n6308\n2589\n6462\n3406\n7673\n8660\n2902\n752\n1025\n849\n7682\n6982\n6652\n3612\n298\n5148\n4873\n3414\n1693\n1458\n327\n2016\n5002\n6768\n7016\n5583\n3270\n857\n8232\n7158\n7981\n4676\n4675\n2164\n8360\n6709\n8143\n365\n4062\n4527\n7928\n9009\n6228\n5818\n2533\n9305\n8887\n55\n2507\n8870\n6649\n5158\n76\n5595\n6693\n5306\n8666\n3020\n7527\n3082\n6304\n1591\n6145\n6868\n7205\n9107\n1165\n6773\n172\n1993\n4176\n8400\n4611\n7589\n8702\n5386\n6095\n6335\n1561\n8805\n5963\n7393\n3681\n2037\n4968\n7451\n3360\n7466\n8361\n4455\n4064\n5422\n1689\n3977\n7269\n362\n4178\n4145\n6127\n5162\n2399\n9225\n7068\n1650\n794\n3007\n1348\n7736\n444\n6081\n5298\n2026\n2543\n9087\n3593\n7425\n3730\n8468\n2641\n7529\n1720\n6377\n8732\n5851\n7956\n3150\n3785\n6485\n3611\n2869\n8510\n4775\n4463\n1251\n9124\n6873\n3391\n6505\n4118\n1617\n8837\n7051\n3213\n3668\n5347\n8452\n6289\n5840\n478\n3522\n453\n3376\n6190\n3342\n2237\n2870\n5178\n5567\n5952\n6919\n3005\n134\n3397\n7443\n8539\n6822\n5264\n3288\n5962\n8421\n6744\n8608\n4656\n1802\n2073\n4271\n1043\n2922\n8211\n2196\n5260\n3789\n7211\n7571\n7834\n5680\n2047\n5502\n3369\n3437\n3286\n5517\n3912\n8386\n1442\n6961\n2191\n2417\n9088\n5155\n6813\n4520\n7375\n1224\n811\n1891\n3748\n4123\n2789\n5305\n8419\n7248\n9237\n992\n4038\n4499\n2060\n5538\n850\n2669\n7612\n104\n9290\n2526\n1287\n4160\n4633\n7125\n742\n744\n4534\n2407\n7714\n4555\n8764\n7661\n4722\n7721\n3205\n6657\n1214\n3754\n6080\n4593\n3018\n8792\n2294\n4450\n7701\n9301\n127\n7069\n4513\n6243\n8025\n4010\n8632\n4715\n5284\n4574\n726\n4252\n4561\n7354\n299\n6088\n1090\n5012\n5684\n3489\n5639\n4888\n1584\n1969\n4846\n2915\n6804\n2775\n7306\n6506\n9306\n5231\n7740\n4283\n953\n6725\n458\n8290\n1504\n1539\n8885\n138\n3764\n1256\n257\n335\n1011\n7060\n5986\n9323\n4740\n8994\n4140\n6807\n8254\n3963\n9297\n2102\n2964\n9207\n4910\n8709\n4411\n1672\n457\n5852\n8037\n4932\n3679\n8794\n2362\n8592\n495\n8432\n1608\n2155\n7411\n2881\n9244\n37\n6535\n8219\n4505\n8635\n1928\n8384\n2570\n8996\n7610\n2128\n8728\n6656\n8935\n6681\n2070\n176\n9062\n972\n514\n1796\n4039\n6838\n2462\n230\n569\n5521\n4637\n4939\n4420\n2863\n672\n4995\n3807\n447\n1656\n2005\n5113\n3297\n8858\n2118\n6309\n1926\n481\n1156\n1509\n1228\n1787\n5978\n8678\n3951\n2929\n4980\n5039\n4713\n7002\n151\n5536\n8148\n3823\n4709\n2299\n142\n7067\n2372\n3761\n9\n2265\n5747\n2764\n724\n2913\n3151\n4525\n6370\n4247\n9329\n5494\n3721\n629\n3621\n7371\n59\n1999\n6704\n3734\n2698\n4691\n6938\n9117\n8415\n6353\n6750\n9077\n2679\n7623\n2478\n7321\n6611\n4007\n2076\n5772\n6416\n2264\n8348\n2672\n6546\n754\n6934\n7908\n8546\n4404\n592\n4748\n6625\n2129\n7944\n2377\n6\n8929\n8275\n3515\n4524\n3660\n8710\n419\n6878\n170\n8313\n7460\n8753\n2917\n6891\n6663\n4918\n7129\n396\n7256\n3500\n631\n5585\n8343\n2695\n6168\n6292\n3176\n5092\n5160\n3701\n9021\n7221\n7825\n1216\n1438\n3471\n2318\n8923\n6223\n2182\n7621\n8514\n9010\n8987\n1252\n1972\n1872\n1715\n8205\n6463\n8138\n8989\n5661\n2890\n565\n2427\n8946\n1303\n3718\n6000\n3620\n1560\n5276\n8089\n9260\n1467\n6173\n7641\n7520\n5061\n4677\n5757\n4400\n2620\n2719\n8995\n2079\n6644\n1683\n8141\n7754\n
5744\n2952\n7568\n654\n7457\n5368\n3310\n1510\n4440\n1513\n3072\n8034\n1456\n9164\n3163\n3035\n6111\n5042\n7161\n1401\n1084\n8000\n6672\n8531\n5404\n6550\n8379\n9141\n8681\n7752\n6394\n7011\n3739\n8253\n978\n4771\n6024\n4828\n7959\n1649\n1727\n7073\n8349\n6952\n661\n7283\n3159\n2590\n3496\n8741\n3969\n2956\n4565\n920\n1830\n8558\n1930\n6677\n6825\n8256\n7454\n7521\n4710\n1768\n3753\n6459\n5606\n5292\n1397\n240\n2733\n946\n6711\n3242\n2627\n4929\n5006\n3202\n132\n2295\n2746\n1293\n2124\n5405\n4065\n818\n7464\n1820\n4398\n1312\n6994\n6920\n261\n987\n6120\n3109\n331\n2986\n4338\n7774\n5122\n8396\n1364\n8969\n6712\n8161\n7083\n7595\n5940\n1566\n6419\n8634\n4432\n6047\n4749\n6076\n1161\n8217\n674\n8494\n3688\n2447\n4704\n969\n7477\n1160\n3243\n3173\n4979\n9288\n6860\n1662\n6171\n225\n5143\n313\n8327\n3275\n3385\n7626\n3103\n4401\n6794\n5600\n5043\n7664\n933\n6830\n4452\n3980\n1604\n5875\n6633\n4635\n5756\n3329\n1751\n8108\n4817\n1989\n1237\n1893\n2848\n9334\n51\n8875\n4981\n5417\n4134\n877\n6688\n3545\n4943\n5615\n2476\n1684\n3652\n7396\n1769\n1171\n6563\n3415\n3644\n340\n6630\n8284\n3256\n7240\n5371\n3405\n2108\n6360\n1734\n5612\n8638\n2343\n1103\n7803\n6809\n3055\n188\n8031\n3124\n3683\n4537\n988\n2297\n4893\n6499\n3396\n839\n4467\n5195\n4041\n6457\n4441\n6378\n6472\n6195\n4912\n6884\n5922\n7014\n1660\n38\n1595\n6752\n4554\n1292\n2709\n3800\n6057\n1980\n8775\n6587\n6392\n6263\n7214\n5219\n282\n309\n6685\n2253\n6311\n4092\n18\n7570\n5543\n4081\n2515\n6278\n8690\n5294\n6184\n5215\n9130\n6720\n250\n7250\n4983\n639\n3567\n7841\n2636\n4067\n8446\n5703\n8609\n2586\n7695\n1253\n6701\n7930\n6317\n5921\n7719\n8501\n7312\n4110\n6219\n4552\n5059\n4088\n7975\n9132\n6054\n692\n3412\n4079\n6754\n6950\n5281\n3028\n8321\n3877\n7614\n8939\n4188\n2223\n239\n4745\n6875\n7096\n5571\n4403\n2640\n5556\n1845\n6690\n1825\n4157\n314\n4682\n8825\n1003\n6206\n8093\n7215\n6465\n99\n8077\n6631\n4206\n2523\n366\n1208\n6043\n4640\n1457\n5475\n4985\n1351\n3090\n5625\n7307\n8466\n2003\n8854\n218\n1500\n4476\n2293\n1847\n5032\n2147\n866\n3710\n2552\n1749\n6692\n3926\n4112\n6458\n735\n9171\n60\n9304\n6726\n2630\n2882\n1178\n1151\n4922\n4662\n173\n7233\n1776\n6533\n4113\n2423\n2425\n4343\n5800\n970\n6372\n1009\n6607\n3068\n8435\n6423\n3126\n4813\n1709\n1201\n7104\n5620\n3932\n5701\n5724\n3366\n8050\n4984\n5023\n9203\n5079\n627\n290\n779\n5572\n5233\n1392\n4975\n8534\n8210\n2269\n1143\n2475\n2562\n905\n4546\n267\n3536\n8538\n449\n101\n7367\n2722\n4605\n7356\n6781\n8537\n8697\n6820\n8340\n8926\n3821\n2349\n2259\n6545\n8100\n8395\n2258\n2911\n5108\n3946\n1406\n8683\n8296\n5579\n2177\n8264\n1425\n3940\n957\n3647\n515\n5342\n8363\n2449\n3108\n1001\n2937\n3452\n5574\n4319\n9184\n8381\n945\n6876\n600\n5714\n4871\n8532\n1852\n8856\n392\n2018\n8878\n369\n5711\n9230\n5304\n7266\n1681\n7829\n2309\n4683\n8938\n2255\n6159\n3207\n4651\n2029\n4341\n5106\n5794\n9024\n4712\n2434\n7151\n7359\n6431\n1290\n5918\n8705\n3438\n5554\n8876\n7415\n6290\n5373\n3805\n2950\n2331\n6772\n8997\n6576\n2307\n8515\n4033\n3428\n6487\n6595\n45\n5792\n333\n762\n2383\n3388\n666\n2166\n460\n943\n364\n6980\n8223\n8221\n637\n6218\n4108\n5381\n4649\n5096\n1614\n8768\n5095\n3809\n5030\n984\n3538\n5120\n2498\n5222\n5613\n5486\n5119\n241\n5707\n9227\n544\n4109\n7771\n728\n3671\n9327\n1230\n9270\n1070\n8565\n4769\n7056\n5654\n7965\n1793\n5956\n7883\n1362\n5479\n8769\n8821\n8320\n1901\n1994\n2461\n5552\n389\n2839\n6467\n2762\n4763\n3499\n1487\n7599\n4488\n3241\n8272\n1131\n4496\n7006\n7265\n4897\n2747\n6618\n5291\n4563\n5146\n1939\n6369\n8548\n6163\n5526\n4068\n9030\n5349\n84
33\n748\n1477\n4265\n9200\n3878\n462\n6846\n9040\n4806\n3519\n6798\n5464\n5179\n546\n6044\n8114\n7216\n6276\n1495\n494\n8146\n5434\n856\n8403\n8071\n3972\n5544\n3337\n6855\n1546\n2824\n1718\n6009\n2042\n251\n9076\n3330\n5004\n192\n4717\n3797\n1146\n394\n7814\n7699\n4659\n4689\n4156\n7903\n9054\n7332\n7811\n1119\n5531\n6782\n5210\n8412\n2633\n7924\n4624\n8314\n5666\n3240\n2310\n4262\n8160\n4553\n8196\n2661\n7213\n7455\n7399\n870\n6126\n1227\n1226\n781\n937\n6343\n2578\n2892\n4124\n2792\n5696\n6865\n6455\n8312\n5193\n6026\n5251\n3787\n4460\n4687\n7923\n1140\n9106\n796\n2482\n9170\n8695\n2749\n6734\n4825\n114\n8319\n827\n4175\n390\n7611\n7484\n1249\n7727\n955\n579\n3629\n8915\n2958\n885\n7227\n1424\n4810\n4604\n1535\n774\n7518\n5428\n1955\n8233\n2645\n2167\n6484\n3855\n1502\n4861\n2333\n2973\n4829\n1906\n3966\n476\n9023\n6960\n3483\n2748\n5891\n8174\n7702\n8948\n5324\n4396\n1605\n2823\n7348\n7347\n5933\n310\n9082\n916\n4255\n203\n4239\n5976\n6200\n6435\n4425\n787\n1121\n6034\n13\n39\n3104\n5961\n5507\n5785\n1463\n7339\n1575\n7801\n5445\n8283\n5951\n6995\n999\n5163\n6023\n3786\n6536\n5850\n3524\n3528\n4508\n6674\n2939\n8227\n4598\n7550\n8495\n8622\n1152\n4538\n4003\n1318\n739\n3296\n8202\n1552\n6204\n5236\n3576\n4699\n9238\n1879\n488\n2274\n433\n5587\n1678\n9282\n7914\n8552\n6445\n7971\n8331\n6880\n7476\n7282\n1570\n7271\n3827\n6489\n8091\n9287\n7351\n1765\n5286\n6921\n542\n1762\n8553\n4987\n894\n3622\n7855\n92\n3131\n4811\n3590\n6517\n4510\n733\n4954\n1360\n5669\n2842\n8107\n5646\n5968\n1618\n1827\n7709\n8521\n5807\n5321\n9239\n5501\n3745\n4437\n1586\n7273\n5265\n6605\n7917\n1607\n6074\n4668\n7061\n1580\n8694\n8461\n4573\n618\n9173\n5243\n435\n8770\n2421\n7450\n3870\n8308\n2605\n2934\n9240\n6887\n4512\n1198\n7585\n7691\n7738\n2843\n8423\n7929\n6971\n7854\n86\n9128\n4298\n622\n790\n9155\n6579\n2203\n7716\n1265\n8645\n3834\n1174\n7380\n623\n8936\n4306\n8082\n4312\n8661\n5753\n7243\n2768\n8155\n85\n4143\n3047\n8479\n7809\n2833\n5555\n7578\n1637\n1936\n8130\n5549\n8062\n7143\n5522\n8966\n5614\n8105\n8719\n7655\n7502\n8268\n5760\n6695\n5565\n7615\n9226\n4870\n4507\n3160\n4835\n1598\n2465\n4422\n5248\n7867\n1078\n5015\n6660\n1676\n5354\n6391\n5351\n7184\n6280\n5936\n6124\n1327\n2906\n269\n8292\n2466\n8809\n5167\n8142\n8204\n2713\n1910\n2930\n2494\n5592\n7384\n7726\n5727\n625\n1735\n5710\n5518\n2491\n1410\n4989\n5183\n8777\n6562\n4947\n3692\n6129\n384\n1097\n2084\n5209\n3723\n7272\n6895\n2459\n543\n8621\n5394\n6211\n2074\n1511\n2524\n7776\n5055\n7191\n6207\n7922\n281\n8436\n2918\n3141\n4800\n6323\n7631\n8903\n2716\n3735\n3012\n5301\n3975\n2800\n7963\n105\n1920\n7391\n4909\n1754\n4816\n5488\n5145\n5098\n5139\n5268\n9317\n8631\n4346\n7318\n136\n3993\n1220\n2151\n308\n7483\n7582\n3071\n1339\n3777\n8191\n5378\n7087\n1056\n7465\n5608\n6564\n512\n2754\n2687\n1596\n5376\n1512\n566\n6382\n7360\n1757\n8035\n2296\n4264\n3551\n1053\n4716\n1537\n8518\n254\n6253\n7132\n8557\n3490\n9267\n5473\n2412\n7539\n7136\n6670\n3974\n891\n1323\n5958\n1217\n2879\n9118\n1259\n2317\n7033\n2467\n6665\n6244\n2180\n2140\n7098\n5126\n6395\n4150\n547\n4120\n4307\n1725\n2737\n8549\n8195\n1245\n6286\n935\n1756\n1701\n1626\n7379\n3492\n3717\n5802\n2817\n1234\n1005\n4101\n21\n2576\n4650\n3381\n1030\n2844\n1641\n936\n2729\n6469\n8913\n8369\n5994\n341\n81\n4083\n1685\n5152\n3380\n8739\n6615\n3829\n164\n7927\n4779\n829\n4216\n8528\n3641\n4606\n2769\n6970\n1545\n8850\n4971\n5489\n2008\n4564\n8682\n7784\n5768\n9252\n901\n438\n3577\n2765\n5904\n664\n3348\n6298\n3602\n2502\n8617\n7684\n4293\n5166\n5805\n4126\n2451\n6906\n7234\n9243\n3778\n2940
\n1087\n9053\n5026\n2504\n5283\n2820\n4242\n797\n3925\n1383\n8750\n7861\n1403\n6973\n7617\n968\n3065\n5395\n4347\n8144\n2688\n6527\n8597\n8673\n7327\n6331\n1422\n7115\n244\n7013\n2092\n54\n7970\n5742\n3464\n4823\n8588\n2938\n3060\n6406\n4149\n2375\n6616\n8803\n1555\n4369\n1380\n3011\n6144\n3367\n4990\n7370\n7131\n1995\n2602\n985\n8785\n8480\n9125\n1927\n3269\n3771\n1032\n7378\n6900\n5726\n2731\n2020\n4503\n3313\n6727\n8793\n2304\n523\n6036\n58\n7993\n5512\n5049\n2721\n8482\n673\n7937\n1168\n4472\n8247\n7287\n9017\n6421\n9190\n3584\n1819\n1792\n2810\n6033\n638\n6749\n7677\n981\n7160\n4726\n1886\n7845\n7911\n6975\n568\n7422\n4613\n4501\n2569\n4263\n3206\n4133\n2420\n3706\n8894\n2263\n5774\n4925\n9180\n8888\n2945\n2091\n1873\n6303\n729\n6728\n2156\n3267\n1860\n6597\n1374\n4930\n5253\n938\n580\n5825\n4839\n166\n8198\n6892\n8701\n74\n7094\n7284\n8954\n3156\n6140\n4279\n5594\n2229\n7535\n5466\n8413\n7105\n8192\n2632\n7638\n9308\n8530\n832\n4643\n2201\n3268\n4322\n6510\n2967\n262\n403\n7973\n1258\n8828\n4036\n5838\n9263\n8529\n2788\n4202\n237\n3838\n1291\n2305\n4056\n5628\n7281\n1430\n6476\n7935\n2850\n6041\n2013\n4016\n4576\n5312\n6827\n6321\n8669\n8439\n830\n1942\n1519\n2750\n6106\n6993\n6235\n5899\n7313\n5331\n4371\n7086\n4399\n8600\n2660\n5409\n3465\n5499\n6231\n5745\n1801\n5337\n4468\n1451\n4192\n1275\n8230\n2302\n1114\n4960\n8860\n3900\n6468\n5058\n1505\n8868\n5588\n3858\n1947\n2565\n1472\n8499\n243\n8442\n6583\n7085\n5374\n2250\n4291\n4426\n492\n2311\n8305\n3662\n5338\n8780\n7488\n3890\n5005\n2442\n4680\n7358\n9116\n4397\n5999\n587\n7902\n83\n3566\n2134\n8942\n4767\n6601\n2456\n1745\n5736\n5254\n8017\n4015\n7690\n3798\n8947\n1067\n2116\n7945\n590\n2547\n2535\n64\n2053\n5359\n2493\n6669\n4351\n6412\n7473\n6147\n7175\n6983\n5196\n745\n2657\n3497\n697\n3161\n7528\n2239\n5991\n3201\n7681\n2440\n5189\n2959\n2044\n8917\n2046\n6313\n6333\n5318\n2763\n4301\n2555\n2213\n2933\n4121\n1340\n3903\n4392\n7889\n5323\n1055\n707\n3857\n518\n6078\n5134\n6645\n9138\n1592\n680\n4446\n7943\n3461\n3887\n5601\n2321\n6621\n558\n4914\n913\n5637\n6453\n8511\n4531\n1218\n5508\n2603\n6802\n8426\n8297\n2947\n5971\n6552\n5262\n5935\n782\n7435\n8357\n6139\n1136\n1473\n5008\n3585\n3627\n2914\n5356\n2997\n2347\n881\n5652\n4849\n8808\n8351\n4017\n2010\n6836\n7616\n4391\n3630\n3712\n6099\n2969\n5238\n4333\n2301\n4406\n1236\n1050\n1864\n1104\n8408\n8251\n8795\n5879\n3365\n7481\n8206\n2452\n1767\n8859\n124\n3948\n4444\n8962\n4438\n5003\n1740\n8428\n3105\n5117\n1095\n1480\n8755\n7881\n3097\n4877\n155\n1917\n2455\n6042\n337\n6724\n6045\n8483\n7135\n2242\n4566\n1679\n834\n1746\n795\n3548\n2314\n2036\n4046\n9129\n6979\n7084\n5091\n2413\n8170\n5775\n1817\n529\n7220\n813\n2916\n5130\n8972\n126\n1243\n2370\n4831\n9122\n3010\n5104\n2613\n6761\n7482\n909\n2146\n4595\n5340\n3512\n6283\n2346\n653\n6121\n2615\n7421\n1869\n1002\n8834\n2991\n8992\n632\n1093\n4543\n645\n2352\n4115\n373\n1483\n6966\n8598\n3896\n3434\n5987\n8318\n1815\n1223\n1548\n6885\n5073\n6330\n2573\n1369\n4095\n1431\n2185\n5766\n1301\n7258\n8048\n7598\n2847\n1996\n2378\n8561\n743\n6381\n271\n1956\n7439\n7596\n7134\n6636\n5804\n1858\n6214\n4730\n8536\n1203\n3118\n9202\n1875\n5885\n8975\n168\n5898\n4014\n4186\n3346\n3041\n5558\n9296\n8157\n4339\n3234\n1738\n2604\n6803\n5387\n5590\n125\n2173\n8012\n8005\n4858\n3069\n651\n372\n378\n8366\n6299\n1449\n7793\n8541\n3235\n8043\n3086\n3983\n6949\n4690\n2176\n6494\n7637\n8406\n3856\n7408\n350\n7021\n8224\n7044\n7662\n6697\n7679\n169\n528\n7029\n2790\n7138\n7432\n7602\n8333\n1582\n1378\n519\n482\n9279\n8015\n6592\n4514\n3542\n2612\n6
28\n5053\n6699\n6227\n2094\n1621\n847\n3598\n2728\n8490\n7276\n6620\n8345\n9216\n4278\n4059\n9058\n5063\n5816\n4173\n8134\n1997\n3182\n3224\n8129\n5109\n4494\n189\n7640\n8243\n180\n2963\n1123\n5593\n3263\n4185\n7140\n8990\n6320\n9275\n4601\n4854\n5907\n1135\n8083\n5964\n7788\n1992\n8069\n9174\n6160\n35\n8572\n2865\n46\n3952\n6418\n2510\n5783\n20\n3816\n2715\n3930\n2548\n5204\n4122\n4103\n708\n7756\n3825\n777\n3550\n8502\n3929\n5440\n6751\n7764\n4070\n7331\n3743\n9131\n9206\n3828\n23\n41\n4197\n234\n5723\n7622\n8832\n4626\n2169\n5599\n2976\n5266\n1967\n1150\n5334\n90\n822\n2538\n3169\n6771\n7442\n498\n4967\n5580\n7581\n7680\n4728\n1115\n4040\n1064\n3106\n6266\n4415\n9294\n5597\n7059\n197\n7218\n6948\n5690\n4234\n1653\n4485\n4019\n3370\n919\n1330\n6085\n2078\n3768\n5427\n4545\n2435\n8862\n3633\n8145\n5221\n1388\n5913\n8140\n7471\n7156\n6989\n1190\n6832\n2830\n4387\n3454\n7469\n2910\n4526\n5187\n2410\n9223\n6247\n6912\n4681\n1300\n7407\n8612\n6523\n3616\n6894\n7253\n4515\n5874\n5448\n7137\n7957\n1130\n3092\n7054\n3516\n5797\n1000\n2727\n4336\n9090\n6403\n7255\n8919\n6522\n6760\n8898\n4803\n1938\n374\n8686\n9150\n3985\n7045\n3475\n6065\n7991\n1409\n7851\n6671\n6090\n5826\n7857\n1155\n8964\n1117\n7072\n6064\n2497\n4899\n2397\n3189\n2369\n15\n5027\n5754\n8950\n5617\n8391\n914\n6264\n279\n6174\n5184\n3733\n7392\n5278\n2924\n567\n7994\n352\n8084\n2148\n2723\n3359\n70\n1870\n7708\n220\n3994\n9013\n3191\n9220\n4155\n5717\n1110\n2198\n9179\n785\n5325\n4770\n4250\n52\n4634\n5072\n9037\n601\n8036\n7996\n2483\n7232\n8675\n8836\n1279\n5346\n7676\n6104\n1515\n4603\n5607\n7894\n5144\n2628\n68\n440\n3586\n3083\n4830\n4378\n7762\n1134\n4542\n7850\n6296\n2866\n4011\n8751\n4776\n7954\n7102\n5697\n2032\n5729\n5017\n6962\n2051\n1092\n764\n9019\n2759\n8581\n1484\n8618\n912\n2382\n4892\n8447\n8176\n5491\n5695\n5504\n1060\n7064\n709\n578\n4320\n2379\n7649\n8416\n1613\n5344\n7512\n7865\n3037\n6689\n6557\n1569\n5955\n3707\n9168\n8566\n1775\n5950\n6943\n7804\n434\n6179\n9300\n1142\n7947\n6456\n6291\n5789\n6538\n9134\n3049\n5075\n5399\n5161\n1623\n948\n6302\n6063\n7516\n117\n506\n3302\n7146\n355\n3854\n1081\n2827\n1496\n2574\n6167\n3183\n4287\n5482\n1722\n7319\n7277\n3860\n3443\n3298\n8364\n3826\n7254\n2360\n5093\n7039\n6325\n4230\n2567\n6241\n4443\n559\n2625\n4228\n8967\n6405\n1674\n3936\n4475\n8556\n8585\n896\n3713\n6259\n4297\n6718\n2392\n2279\n4927\n1283\n2374\n2860\n7665\n663\n596\n6293\n6805\n2811\n7383\n8306\n8330\n3153\n2153\n2618\n2441\n3615\n8092\n552\n5285\n5255\n8124\n9247\n5530\n8175\n6242\n5660\n3433\n1610\n1832\n3892\n3862\n640\n2127\n2474\n4196\n3495\n7217\n5206\n4836\n7759\n4376\n800\n4227\n3699\n9055\n5665\n6826\n7463\n9065\n4720\n5069\n3245\n3453\n3358\n6532\n5970\n7921\n4087\n1547\n3424\n8040\n7995\n6787\n9069\n8716\n2561\n8199\n1479\n2767\n7818\n7145\n604\n7597\n4896\n9281\n4666\n185\n8171\n7978\n3059\n9196\n9221\n2135\n1800\n2974\n1529\n5948\n446\n4436\n8672\n3508\n6208\n5673\n6998\n5203\n278\n7041\n9110\n5853\n8121\n1764\n3046\n2400\n6575\n4738\n2228\n7761\n9322\n7019\n6931\n6383\n6762\n283\n3935\n2534\n7717\n6785\n471\n8214\n231\n4241\n5310\n3844\n5746\n2011\n7209\n336\n6433\n756\n9167\n6741\n3345\n7685\n4018\n6682\n9147\n4790\n5836\n5906\n8747\n676\n3964\n6362\n3510\n7510\n2308\n1806\n5917\n1189\n4012\n3387\n1331\n5319\n5423\n8900\n147\n3780\n1696\n9111\n6783\n6497\n4104\n1898\n3987\n260\n4616\n2121\n9283\n1400\n2437\n4670\n2735\n1163\n2096\n6521\n1423\n4523\n2243\n6667\n6990\n3944\n6915\n6763\n5611\n404\n2691\n1015\n7092\n7562\n8624\n2291\n4193\n5934\n5503\n2326\n4408\n2960\n842\n1963\n3354\n55
68\n9050\n3806\n439\n9154\n6055\n6451\n2190\n7633\n688\n4354\n8890\n2813\n2872\n8102\n8317\n6609\n1497\n8389\n6449\n1682\n3594\n5103\n5812\n863\n268\n3054\n8079\n2260\n2027\n3091\n7687\n6703\n3557\n2019\n8427\n2799\n8182\n6641\n3168\n2284\n1934\n4865\n1077\n6507\n1658\n3811\n1774\n7897\n2238\n2943\n191\n3869\n3246\n4057\n3188\n414\n8072\n7838\n1382\n4962\n6010\n5363\n4042\n1983\n4077\n7429\n1833\n3583\n4044\n1109\n1295\n386\n5481\n3927\n311\n"
  },
  {
    "path": "lib/train/data_specs/got10k_val_split.txt",
    "content": "1349\n5651\n5878\n562\n2202\n8904\n765\n1501\n8654\n2975\n2689\n3680\n5180\n1900\n7707\n4723\n8912\n4029\n3579\n869\n2888\n8657\n6599\n741\n4288\n2244\n7357\n5704\n8791\n208\n8587\n7969\n4805\n8526\n4887\n8871\n7468\n3343\n886\n7794\n5764\n2646\n6454\n6101\n7885\n7744\n1297\n4119\n4856\n122\n2286\n2925\n5131\n3570\n5843\n3027\n5320\n5626\n540\n1862\n5401\n7335\n699\n7760\n9198\n3259\n7345\n8698\n1280\n6479\n3100\n3988\n1322\n5737\n1268\n3257\n6791\n3326\n4815\n7644\n1082\n2826\n6821\n8984\n2553\n5290\n5909\n4762\n9242\n8096\n8066\n4325\n6666\n7193\n7114\n8060\n2376\n7872\n6788\n3544\n5460\n3507\n2509\n6626\n3429\n5542\n4220\n2968\n5271\n4249\n3863\n1868\n5581\n2012\n6270\n8038\n4050\n121\n2845\n1565\n1998\n2275\n5524\n6068\n7624\n4913\n9277\n1506\n803\n8848\n5925\n2450\n2072\n8190\n4753\n9162\n1923\n825\n7303\n9028\n2088\n8516\n1556\n5937\n7847\n2367\n7549\n1049\n1521\n4739\n3931\n8958\n4130\n7877\n7876\n897\n5985\n7346\n7537\n111\n3700\n1126\n7896\n1288\n3419\n4673\n1051\n5720\n1068\n3458\n146\n291\n6256\n5514\n2857\n4580\n6239\n6525\n8717\n391\n4841\n6676\n4360\n1453\n4211\n73\n1675\n1987\n4025\n1321\n662\n8265\n6424\n2758\n7765\n7656\n3209\n7497\n7600\n9039\n7697\n5177\n2983\n5622\n9295\n1200\n3284\n964\n2024\n1269\n4551\n8088\n5659\n2212\n5199\n5551\n8607\n5573\n2247\n5200\n6341\n7951\n8429\n7720\n5919\n1273\n3529\n6707\n9176\n7552\n3255\n5649\n6110\n9235\n1137\n9272\n775\n788\n5786\n5186\n6746\n2667\n9145\n7630\n3953\n1828\n8827\n6471\n4702\n7815\n467\n6387\n3195\n6238\n6508\n2373\n5983\n4931\n2948\n921\n2438\n517\n3949\n2137\n3216\n5683\n3695\n1719\n4837\n9159\n6981\n860\n7410\n5497\n1770\n5557\n8810\n5194\n4857\n9100\n6329\n2609\n1925\n3686\n9041\n4924\n349\n9187\n3393\n3661\n7120\n6858\n4587\n3831\n3130\n5396\n5060\n6486\n3937\n8023\n824\n5398\n1354\n8861\n5534\n7292\n4389\n6029\n6226\n3505\n4326\n7445\n581\n6089\n3450\n7324\n6516\n6775\n1207\n4575\n5135\n9265\n3918\n9020\n3473\n3898\n7812\n6571\n6757\n6639\n2557\n1206\n6148\n7325\n8790\n4938\n7026\n4383\n8041\n1250\n7267\n1952\n7561\n8811\n4941\n8373\n4848\n6602\n8355\n8104\n5214\n6654\n4330\n995\n3181\n3422\n456\n1782\n3408\n6530\n719\n7587\n5910\n3058\n740\n2009\n4207\n5336\n2798\n9229\n8668\n2473\n4221\n1493\n3281\n171\n9157\n9139\n7766\n6220\n9127\n3324\n5308\n3708\n2431\n8080\n2093\n2585\n406\n7040\n5064\n5247\n4758\n6512\n2953\n4257\n4935\n2705\n2572\n3436\n8513\n5884\n1385\n4852\n2637\n7091\n2761\n6007\n8332\n6694\n2422\n4917\n2186\n6898\n1390\n6965\n3132\n7698\n475\n2002\n2692\n5024\n7365\n7373\n4091\n1731\n947\n3962\n8692\n1788\n8734\n8656\n6862\n6856\n1950\n1914\n5658\n3635\n1620\n4780\n2580\n1454\n2786\n687\n7238\n3648\n6452\n1197\n3190\n5900\n9043\n4958\n1935\n1821\n1187\n1153\n7737\n7223\n3820\n7169\n7350\n5674\n6254\n3025\n6680\n1690\n2899\n3893\n1577\n5728\n9189\n5077\n34\n3560\n2179\n5462\n1402\n3654\n1376\n7936\n4246\n5506\n1179\n5647\n4686\n8644\n1352\n2855\n6079\n2254\n2668\n2287\n2457\n3418\n7264\n677\n3074\n2655\n1042\n2210\n4504\n7089\n8309\n4209\n4280\n3258\n2977\n84\n4705\n1244\n3511\n6355\n8813\n3228\n9266\n1122\n613\n732\n5202\n8425\n2638\n6470\n2886\n3541\n8132\n2063\n8201\n5129\n2818\n7949\n6936\n8090\n4465\n7295\n5239\n7009\n9271\n8563\n2832\n952\n8136\n6776\n3565\n5188\n7288\n6999\n285\n5487\n7763\n7608\n8584\n2071\n7868\n2804\n3655\n7048\n6847\n3276\n4082\n4272\n3910\n3709\n1574\n4559\n7580\n7081\n5014\n7769\n8183\n6386\n7574\n356\n4937\n2487\n9315\n7572\n3040\n671\n2682\n8626\n3868\n8623\n387\n8679\n4074\n1481\n3527\n3595\n4754\n2453\n1579\n4638\n9123\n1829\n316\n3009\n3691\n763\n4875
\n3572\n4642\n3128\n4273\n2777\n6032\n4793\n233\n7147\n996\n3199\n8835\n3517\n7210\n6125\n6037\n3684\n8589\n3915\n3095\n8310\n3180\n7043\n4458\n2889\n57\n4483\n7667\n8375\n1434\n7493\n6986\n4733\n8471\n5827\n2111\n1313\n7986\n3075\n2614\n7547\n4977\n8527\n3212\n7300\n5842\n5244\n3291\n597\n1007\n2030\n227\n3830\n5540\n247\n5643\n9333\n1958\n3096\n1371\n5220\n7926\n2927\n1516\n7130\n193\n1522\n6165\n6923\n3794\n4223\n5535\n2472\n8630\n3971\n9101\n2946\n222\n4609\n7291\n8542\n6501\n7548\n4557\n6274\n1010\n5226\n7309\n1317\n9056\n6275\n1624\n1099\n4191\n4030\n7270\n5392\n2316\n3819\n1670\n8154\n8045\n4807\n8864\n2391\n5908\n8338\n8218\n6400\n9193\n3165\n843\n6613\n6941\n4380\n9332\n5629\n7557\n4321\n3702\n681\n734\n1159\n4665\n5959\n1697\n5509\n8774\n7389\n3832\n3751\n8637\n3079\n1680\n6841\n703\n684\n8293\n3682\n5733\n4818\n3231\n3078\n5562\n9001\n3889\n7024\n2519\n1713\n3287\n219\n6021\n8776\n2289\n7212\n4832\n4684\n4617\n4237\n2649\n8185\n6326\n3568\n551\n1426\n4181\n8869\n312\n2905\n4165\n8248\n2558\n900\n1044\n8613\n7743\n5437\n7604\n3122\n5708\n8649\n2878\n4695\n4491\n1929\n7533\n5223\n7711\n915\n1844\n5751\n3008\n8055\n961\n6142\n4636\n61\n198\n2271\n5698\n4596\n4500\n5709\n5819\n7972\n2992\n1643\n1048\n6281\n8886\n360\n4198\n1841\n6814\n3960\n2606\n7001\n5888\n450\n7133\n7015\n7034\n5153\n8920\n5066\n469\n1302\n8816\n463\n8651\n5869\n8193\n6582\n5578\n1231\n9274\n7260\n7751\n8052\n6799\n2089\n2342\n8451\n3260\n5550\n7795\n2288\n1205\n40\n496\n8367\n7836\n5973\n3908\n5242\n5062\n2706\n997\n6514\n5419\n9201\n1965\n6062\n3050\n5302\n8735\n358\n2398\n7470\n1644\n8179\n7047\n1549\n5414\n2539\n7381\n589\n8166\n8505\n6035\n3956\n4540\n6721\n8074\n1062\n2384\n2531\n7159\n3502\n3902\n4584\n2554\n264\n8720\n2849\n4916\n5218\n7202\n883\n4560\n1677\n4317\n7863\n4509\n6577\n2903\n1452\n1416\n5369\n473\n6233\n6359\n5992\n4934\n8059\n6834\n4907\n3320\n8267\n8280\n2066\n2402\n1485\n3772\n3732\n4764\n9126\n3575\n5564\n4768\n5641\n1884\n2330\n1804\n344\n698\n3089\n1532\n4454\n761\n7289\n8094\n3432\n1747\n6811\n8722\n8826\n4646\n3222\n8614\n2901\n7003\n652\n8663\n4266\n413\n810\n75\n3334\n4905\n6438\n4756\n5137\n6528\n6534\n6988\n6177\n8533\n889\n5384\n7201\n5132\n7802\n6864\n3973\n873\n4840\n1482\n8376\n3769\n5858\n6675\n4286\n2593\n5863\n4353\n7817\n7540\n4999\n4838\n2303\n6002\n7913\n1508\n5317\n7755\n2784\n4964\n3431\n6209\n3755\n6022\n6399\n6232\n3954\n455\n5416\n6448\n1558\n7591\n245\n140\n9210\n6585\n4084\n967\n7798\n6795\n7095\n6733\n3861\n9264\n361\n1045\n755\n8042\n7074\n7778\n6415\n4724\n6450\n2049\n1563\n1307\n3485\n1790\n7869\n3282\n6907\n3920\n2868\n5801\n5632\n1079\n5009\n3955\n7517\n5128\n3417\n3019\n2725\n1784\n2312\n2753\n6976\n342\n8266\n1849\n2273\n5037\n7880\n3793\n7401\n5412\n8279\n1257\n3670\n9049\n3266\n8955\n6519\n8916\n2858\n694\n5650\n1019\n4669\n1785\n3533\n5877\n2704\n8603\n3726\n6668\n497\n1085\n6815\n6157\n6646\n6964\n186\n8097\n5645\n8481\n8215\n3775\n2542\n7514\n5699\n4072\n3518\n5767\n3239\n3740\n1404\n8981\n4086\n6397\n6984\n4204\n6899\n682\n6589\n3317\n2944\n3456\n4340\n7424\n9208\n6504\n4409\n1\n145\n1882\n4620\n2634\n4992\n5453\n4481\n3377\n266\n7875\n530\n1235\n7605\n504\n1771\n8489\n345\n7353\n7797\n7174\n5914\n2871\n5721\n6067\n3582\n7653\n5467\n6234\n691\n8758\n2122\n1213\n2908\n1492\n1437\n2187\n1266\n2395\n7278\n8491\n5256\n1554\n8163\n5966\n7128\n7904\n1691\n6272\n1264\n3996\n1706\n1334\n1316\n6478\n6935\n1518\n6700\n8703\n8744\n8152\n8778\n5367\n4218\n9007\n6312\n606\n7565\n5293\n2891\n675\n2125\n2120\n826\n7008\n5705\n7748\n8010\n1498\n5330\n5472\n2215\n7627
\n3016\n6588\n1850\n4128\n8569\n6987\n7566\n148\n8151\n8789\n7907\n8596\n715\n6018\n9060\n3872\n1750\n5889\n4047\n5960\n3120\n3449\n1421\n1102\n3333\n9197\n8796\n8123\n8007\n2028\n8404\n1945\n1985\n8109\n5380\n8438\n3504\n6739\n4180\n5835\n4243\n25\n4002\n1976\n3482\n8392\n158\n5181\n4885\n8985\n11\n6872\n6425\n5926\n7062\n5083\n8394\n4259\n5844\n1990\n3942\n5532\n2220\n28\n5957\n149\n6748\n1663\n3559\n7647\n2566\n1359\n8787\n5259\n7010\n554\n8231\n4229\n6005\n8172\n8125\n1350\n3571\n9051\n1973\n1386\n1781\n5788\n159\n7007\n3220\n1846\n3093\n4445\n2056\n8370\n3211\n1113\n4384\n2231\n273\n4276\n642\n7663\n5311\n265\n226\n9012\n7879\n118\n7109\n7251\n1760\n8667\n2876\n7162\n3552\n6901\n6779\n5021\n6524\n4957\n3114\n4544\n441\n1848\n2136\n2458\n8662\n1127\n5541\n3026\n1080\n6780\n2224\n8259\n1073\n9000\n7244\n7977\n500\n4435\n7376\n7979\n1435\n9291\n7704\n3791\n3521\n210\n7388\n1039\n6269\n4052\n8570\n3285\n564\n8039\n3546\n6203\n1183\n6107\n4147\n6216\n2234\n7185\n3192\n7155\n2001\n7777\n876\n944\n908\n7791\n5465\n6784\n65\n9172\n5675\n7075\n3886\n7891\n2978\n1008\n5630\n591\n5067\n1139\n577\n9015\n574\n8137\n7786\n5765\n4900\n4090\n7842\n5741\n"
  },
  {
    "path": "lib/train/data_specs/got10k_vot_exclude.txt",
    "content": "GOT-10k_Train_000004\nGOT-10k_Train_000013\nGOT-10k_Train_000015\nGOT-10k_Train_000020\nGOT-10k_Train_000024\nGOT-10k_Train_000034\nGOT-10k_Train_000038\nGOT-10k_Train_000048\nGOT-10k_Train_000051\nGOT-10k_Train_000059\nGOT-10k_Train_000077\nGOT-10k_Train_000081\nGOT-10k_Train_000089\nGOT-10k_Train_000093\nGOT-10k_Train_000094\nGOT-10k_Train_000096\nGOT-10k_Train_000104\nGOT-10k_Train_000107\nGOT-10k_Train_000108\nGOT-10k_Train_000120\nGOT-10k_Train_000132\nGOT-10k_Train_000170\nGOT-10k_Train_000186\nGOT-10k_Train_000212\nGOT-10k_Train_000213\nGOT-10k_Train_000222\nGOT-10k_Train_000223\nGOT-10k_Train_000240\nGOT-10k_Train_000246\nGOT-10k_Train_000249\nGOT-10k_Train_000266\nGOT-10k_Train_000268\nGOT-10k_Train_000287\nGOT-10k_Train_000293\nGOT-10k_Train_000305\nGOT-10k_Train_000316\nGOT-10k_Train_000319\nGOT-10k_Train_000322\nGOT-10k_Train_000331\nGOT-10k_Train_000334\nGOT-10k_Train_000354\nGOT-10k_Train_000361\nGOT-10k_Train_000368\nGOT-10k_Train_000382\nGOT-10k_Train_000401\nGOT-10k_Train_000417\nGOT-10k_Train_000448\nGOT-10k_Train_000454\nGOT-10k_Train_000458\nGOT-10k_Train_000466\nGOT-10k_Train_000475\nGOT-10k_Train_000484\nGOT-10k_Train_000488\nGOT-10k_Train_000501\nGOT-10k_Train_000510\nGOT-10k_Train_000512\nGOT-10k_Train_000519\nGOT-10k_Train_000539\nGOT-10k_Train_000544\nGOT-10k_Train_000555\nGOT-10k_Train_000564\nGOT-10k_Train_000568\nGOT-10k_Train_000583\nGOT-10k_Train_000587\nGOT-10k_Train_000593\nGOT-10k_Train_000621\nGOT-10k_Train_000624\nGOT-10k_Train_000625\nGOT-10k_Train_000638\nGOT-10k_Train_000648\nGOT-10k_Train_000654\nGOT-10k_Train_000669\nGOT-10k_Train_000701\nGOT-10k_Train_000709\nGOT-10k_Train_000712\nGOT-10k_Train_000731\nGOT-10k_Train_000734\nGOT-10k_Train_000737\nGOT-10k_Train_000744\nGOT-10k_Train_000746\nGOT-10k_Train_000748\nGOT-10k_Train_000762\nGOT-10k_Train_000764\nGOT-10k_Train_000765\nGOT-10k_Train_000766\nGOT-10k_Train_000767\nGOT-10k_Train_000775\nGOT-10k_Train_000783\nGOT-10k_Train_000790\nGOT-10k_Train_000829\nGOT-10k_Train_000857\nGOT-10k_Train_000859\nGOT-10k_Train_000867\nGOT-10k_Train_000872\nGOT-10k_Train_000880\nGOT-10k_Train_000884\nGOT-10k_Train_000909\nGOT-10k_Train_000915\nGOT-10k_Train_000922\nGOT-10k_Train_000928\nGOT-10k_Train_000933\nGOT-10k_Train_000941\nGOT-10k_Train_000961\nGOT-10k_Train_000966\nGOT-10k_Train_000968\nGOT-10k_Train_000971\nGOT-10k_Train_000972\nGOT-10k_Train_000995\nGOT-10k_Train_001003\nGOT-10k_Train_001010\nGOT-10k_Train_001011\nGOT-10k_Train_001019\nGOT-10k_Train_001021\nGOT-10k_Train_001035\nGOT-10k_Train_001039\nGOT-10k_Train_001047\nGOT-10k_Train_001057\nGOT-10k_Train_001069\nGOT-10k_Train_001077\nGOT-10k_Train_001079\nGOT-10k_Train_001085\nGOT-10k_Train_001088\nGOT-10k_Train_001091\nGOT-10k_Train_001104\nGOT-10k_Train_001112\nGOT-10k_Train_001113\nGOT-10k_Train_001124\nGOT-10k_Train_001128\nGOT-10k_Train_001143\nGOT-10k_Train_001145\nGOT-10k_Train_001146\nGOT-10k_Train_001148\nGOT-10k_Train_001150\nGOT-10k_Train_001154\nGOT-10k_Train_001156\nGOT-10k_Train_001157\nGOT-10k_Train_001163\nGOT-10k_Train_001181\nGOT-10k_Train_001184\nGOT-10k_Train_001189\nGOT-10k_Train_001200\nGOT-10k_Train_001225\nGOT-10k_Train_001264\nGOT-10k_Train_001288\nGOT-10k_Train_001296\nGOT-10k_Train_001298\nGOT-10k_Train_001299\nGOT-10k_Train_001314\nGOT-10k_Train_001319\nGOT-10k_Train_001329\nGOT-10k_Train_001331\nGOT-10k_Train_001340\nGOT-10k_Train_001374\nGOT-10k_Train_001384\nGOT-10k_Train_001394\nGOT-10k_Train_001407\nGOT-10k_Train_001415\nGOT-10k_Train_001430\nGOT-10k_Train_001433\nGOT-10k_Train_001453\nGOT-10k_Train_00145
7\nGOT-10k_Train_001471\nGOT-10k_Train_001473\nGOT-10k_Train_001480\nGOT-10k_Train_001484\nGOT-10k_Train_001489\nGOT-10k_Train_001514\nGOT-10k_Train_001537\nGOT-10k_Train_001544\nGOT-10k_Train_001545\nGOT-10k_Train_001551\nGOT-10k_Train_001558\nGOT-10k_Train_001560\nGOT-10k_Train_001562\nGOT-10k_Train_001563\nGOT-10k_Train_001570\nGOT-10k_Train_001576\nGOT-10k_Train_001604\nGOT-10k_Train_001615\nGOT-10k_Train_001617\nGOT-10k_Train_001618\nGOT-10k_Train_001619\nGOT-10k_Train_001624\nGOT-10k_Train_001650\nGOT-10k_Train_001651\nGOT-10k_Train_001663\nGOT-10k_Train_001673\nGOT-10k_Train_001685\nGOT-10k_Train_001692\nGOT-10k_Train_001700\nGOT-10k_Train_001722\nGOT-10k_Train_001731\nGOT-10k_Train_001732\nGOT-10k_Train_001738\nGOT-10k_Train_001740\nGOT-10k_Train_001742\nGOT-10k_Train_001747\nGOT-10k_Train_001759\nGOT-10k_Train_001769\nGOT-10k_Train_001781\nGOT-10k_Train_001791\nGOT-10k_Train_001794\nGOT-10k_Train_001795\nGOT-10k_Train_001818\nGOT-10k_Train_001833\nGOT-10k_Train_001836\nGOT-10k_Train_001841\nGOT-10k_Train_001852\nGOT-10k_Train_001863\nGOT-10k_Train_001865\nGOT-10k_Train_001878\nGOT-10k_Train_001898\nGOT-10k_Train_001919\nGOT-10k_Train_001923\nGOT-10k_Train_001929\nGOT-10k_Train_001935\nGOT-10k_Train_001938\nGOT-10k_Train_001942\nGOT-10k_Train_001955\nGOT-10k_Train_001964\nGOT-10k_Train_001966\nGOT-10k_Train_001982\nGOT-10k_Train_002005\nGOT-10k_Train_002009\nGOT-10k_Train_002035\nGOT-10k_Train_002068\nGOT-10k_Train_002073\nGOT-10k_Train_002076\nGOT-10k_Train_002084\nGOT-10k_Train_002112\nGOT-10k_Train_002115\nGOT-10k_Train_002116\nGOT-10k_Train_002123\nGOT-10k_Train_002125\nGOT-10k_Train_002129\nGOT-10k_Train_002139\nGOT-10k_Train_002146\nGOT-10k_Train_002166\nGOT-10k_Train_002168\nGOT-10k_Train_002176\nGOT-10k_Train_002184\nGOT-10k_Train_002190\nGOT-10k_Train_002192\nGOT-10k_Train_002211\nGOT-10k_Train_002216\nGOT-10k_Train_002233\nGOT-10k_Train_002240\nGOT-10k_Train_002247\nGOT-10k_Train_002250\nGOT-10k_Train_002252\nGOT-10k_Train_002253\nGOT-10k_Train_002261\nGOT-10k_Train_002274\nGOT-10k_Train_002276\nGOT-10k_Train_002292\nGOT-10k_Train_002302\nGOT-10k_Train_002304\nGOT-10k_Train_002305\nGOT-10k_Train_002320\nGOT-10k_Train_002345\nGOT-10k_Train_002355\nGOT-10k_Train_002359\nGOT-10k_Train_002363\nGOT-10k_Train_002374\nGOT-10k_Train_002376\nGOT-10k_Train_002389\nGOT-10k_Train_002393\nGOT-10k_Train_002400\nGOT-10k_Train_002408\nGOT-10k_Train_002418\nGOT-10k_Train_002437\nGOT-10k_Train_002440\nGOT-10k_Train_002442\nGOT-10k_Train_002454\nGOT-10k_Train_002456\nGOT-10k_Train_002465\nGOT-10k_Train_002466\nGOT-10k_Train_002474\nGOT-10k_Train_002479\nGOT-10k_Train_002484\nGOT-10k_Train_002511\nGOT-10k_Train_002514\nGOT-10k_Train_002517\nGOT-10k_Train_002523\nGOT-10k_Train_002527\nGOT-10k_Train_002534\nGOT-10k_Train_002555\nGOT-10k_Train_002587\nGOT-10k_Train_002589\nGOT-10k_Train_002612\nGOT-10k_Train_002627\nGOT-10k_Train_002639\nGOT-10k_Train_002652\nGOT-10k_Train_002693\nGOT-10k_Train_002699\nGOT-10k_Train_002716\nGOT-10k_Train_002725\nGOT-10k_Train_002727\nGOT-10k_Train_002730\nGOT-10k_Train_002755\nGOT-10k_Train_002756\nGOT-10k_Train_002760\nGOT-10k_Train_002763\nGOT-10k_Train_002837\nGOT-10k_Train_002841\nGOT-10k_Train_002856\nGOT-10k_Train_002862\nGOT-10k_Train_002863\nGOT-10k_Train_002866\nGOT-10k_Train_002877\nGOT-10k_Train_002884\nGOT-10k_Train_002886\nGOT-10k_Train_002887\nGOT-10k_Train_002907\nGOT-10k_Train_002908\nGOT-10k_Train_002909\nGOT-10k_Train_002914\nGOT-10k_Train_002920\nGOT-10k_Train_002922\nGOT-10k_Train_002936\nGOT-10k_Train_002940\nGOT-10k_Train_002944\nGOT-10k_Tr
ain_002953\nGOT-10k_Train_002961\nGOT-10k_Train_002964\nGOT-10k_Train_002996\nGOT-10k_Train_003003\nGOT-10k_Train_003004\nGOT-10k_Train_003007\nGOT-10k_Train_003012\nGOT-10k_Train_003027\nGOT-10k_Train_003028\nGOT-10k_Train_003033\nGOT-10k_Train_003034\nGOT-10k_Train_003036\nGOT-10k_Train_003044\nGOT-10k_Train_003056\nGOT-10k_Train_003069\nGOT-10k_Train_003078\nGOT-10k_Train_003079\nGOT-10k_Train_003095\nGOT-10k_Train_003096\nGOT-10k_Train_003107\nGOT-10k_Train_003108\nGOT-10k_Train_003127\nGOT-10k_Train_003128\nGOT-10k_Train_003129\nGOT-10k_Train_003132\nGOT-10k_Train_003146\nGOT-10k_Train_003155\nGOT-10k_Train_003173\nGOT-10k_Train_003208\nGOT-10k_Train_003239\nGOT-10k_Train_003245\nGOT-10k_Train_003246\nGOT-10k_Train_003262\nGOT-10k_Train_003275\nGOT-10k_Train_003283\nGOT-10k_Train_003296\nGOT-10k_Train_003308\nGOT-10k_Train_003310\nGOT-10k_Train_003313\nGOT-10k_Train_003317\nGOT-10k_Train_003318\nGOT-10k_Train_003354\nGOT-10k_Train_003379\nGOT-10k_Train_003384\nGOT-10k_Train_003396\nGOT-10k_Train_003401\nGOT-10k_Train_003423\nGOT-10k_Train_003435\nGOT-10k_Train_003438\nGOT-10k_Train_003442\nGOT-10k_Train_003444\nGOT-10k_Train_003455\nGOT-10k_Train_003456\nGOT-10k_Train_003464\nGOT-10k_Train_003466\nGOT-10k_Train_003474\nGOT-10k_Train_003482\nGOT-10k_Train_003488\nGOT-10k_Train_003502\nGOT-10k_Train_003515\nGOT-10k_Train_003520\nGOT-10k_Train_003530\nGOT-10k_Train_003551\nGOT-10k_Train_003570\nGOT-10k_Train_003571\nGOT-10k_Train_003578\nGOT-10k_Train_003583\nGOT-10k_Train_003590\nGOT-10k_Train_003593\nGOT-10k_Train_003618\nGOT-10k_Train_003626\nGOT-10k_Train_003650\nGOT-10k_Train_003652\nGOT-10k_Train_003663\nGOT-10k_Train_003690\nGOT-10k_Train_003704\nGOT-10k_Train_003709\nGOT-10k_Train_003716\nGOT-10k_Train_003721\nGOT-10k_Train_003722\nGOT-10k_Train_003724\nGOT-10k_Train_003729\nGOT-10k_Train_003756\nGOT-10k_Train_003768\nGOT-10k_Train_003782\nGOT-10k_Train_003786\nGOT-10k_Train_003788\nGOT-10k_Train_003791\nGOT-10k_Train_003820\nGOT-10k_Train_003821\nGOT-10k_Train_003827\nGOT-10k_Train_003834\nGOT-10k_Train_003835\nGOT-10k_Train_003839\nGOT-10k_Train_003843\nGOT-10k_Train_003854\nGOT-10k_Train_003856\nGOT-10k_Train_003881\nGOT-10k_Train_003899\nGOT-10k_Train_003904\nGOT-10k_Train_003906\nGOT-10k_Train_003913\nGOT-10k_Train_003937\nGOT-10k_Train_003940\nGOT-10k_Train_003943\nGOT-10k_Train_003950\nGOT-10k_Train_003972\nGOT-10k_Train_003974\nGOT-10k_Train_003978\nGOT-10k_Train_003981\nGOT-10k_Train_003982\nGOT-10k_Train_004003\nGOT-10k_Train_004004\nGOT-10k_Train_004008\nGOT-10k_Train_004012\nGOT-10k_Train_004013\nGOT-10k_Train_004030\nGOT-10k_Train_004036\nGOT-10k_Train_004040\nGOT-10k_Train_004052\nGOT-10k_Train_004054\nGOT-10k_Train_004055\nGOT-10k_Train_004057\nGOT-10k_Train_004063\nGOT-10k_Train_004068\nGOT-10k_Train_004072\nGOT-10k_Train_004075\nGOT-10k_Train_004078\nGOT-10k_Train_004082\nGOT-10k_Train_004102\nGOT-10k_Train_004103\nGOT-10k_Train_004105\nGOT-10k_Train_004111\nGOT-10k_Train_004120\nGOT-10k_Train_004122\nGOT-10k_Train_004124\nGOT-10k_Train_004142\nGOT-10k_Train_004158\nGOT-10k_Train_004170\nGOT-10k_Train_004175\nGOT-10k_Train_004181\nGOT-10k_Train_004190\nGOT-10k_Train_004193\nGOT-10k_Train_004194\nGOT-10k_Train_004199\nGOT-10k_Train_004202\nGOT-10k_Train_004217\nGOT-10k_Train_004225\nGOT-10k_Train_004229\nGOT-10k_Train_004230\nGOT-10k_Train_004234\nGOT-10k_Train_004241\nGOT-10k_Train_004246\nGOT-10k_Train_004249\nGOT-10k_Train_004255\nGOT-10k_Train_004268\nGOT-10k_Train_004276\nGOT-10k_Train_004292\nGOT-10k_Train_004293\nGOT-10k_Train_004295\nGOT-10k_Train_004296\nG
OT-10k_Train_004302\nGOT-10k_Train_004324\nGOT-10k_Train_004337\nGOT-10k_Train_004342\nGOT-10k_Train_004351\nGOT-10k_Train_004356\nGOT-10k_Train_004376\nGOT-10k_Train_004380\nGOT-10k_Train_004395\nGOT-10k_Train_004398\nGOT-10k_Train_004399\nGOT-10k_Train_004408\nGOT-10k_Train_004430\nGOT-10k_Train_004439\nGOT-10k_Train_004440\nGOT-10k_Train_004462\nGOT-10k_Train_004473\nGOT-10k_Train_004476\nGOT-10k_Train_004478\nGOT-10k_Train_004481\nGOT-10k_Train_004483\nGOT-10k_Train_004484\nGOT-10k_Train_004503\nGOT-10k_Train_004513\nGOT-10k_Train_004517\nGOT-10k_Train_004533\nGOT-10k_Train_004536\nGOT-10k_Train_004594\nGOT-10k_Train_004595\nGOT-10k_Train_004607\nGOT-10k_Train_004619\nGOT-10k_Train_004626\nGOT-10k_Train_004642\nGOT-10k_Train_004646\nGOT-10k_Train_004652\nGOT-10k_Train_004658\nGOT-10k_Train_004660\nGOT-10k_Train_004661\nGOT-10k_Train_004668\nGOT-10k_Train_004673\nGOT-10k_Train_004679\nGOT-10k_Train_004694\nGOT-10k_Train_004702\nGOT-10k_Train_004709\nGOT-10k_Train_004717\nGOT-10k_Train_004757\nGOT-10k_Train_004768\nGOT-10k_Train_004824\nGOT-10k_Train_004826\nGOT-10k_Train_004833\nGOT-10k_Train_004839\nGOT-10k_Train_004843\nGOT-10k_Train_004852\nGOT-10k_Train_004862\nGOT-10k_Train_004865\nGOT-10k_Train_004878\nGOT-10k_Train_004880\nGOT-10k_Train_004881\nGOT-10k_Train_004902\nGOT-10k_Train_004906\nGOT-10k_Train_004920\nGOT-10k_Train_004950\nGOT-10k_Train_004951\nGOT-10k_Train_004952\nGOT-10k_Train_004973\nGOT-10k_Train_004983\nGOT-10k_Train_004984\nGOT-10k_Train_004990\nGOT-10k_Train_004993\nGOT-10k_Train_004995\nGOT-10k_Train_005004\nGOT-10k_Train_005007\nGOT-10k_Train_005022\nGOT-10k_Train_005024\nGOT-10k_Train_005040\nGOT-10k_Train_005046\nGOT-10k_Train_005047\nGOT-10k_Train_005058\nGOT-10k_Train_005063\nGOT-10k_Train_005072\nGOT-10k_Train_005097\nGOT-10k_Train_005098\nGOT-10k_Train_005099\nGOT-10k_Train_005108\nGOT-10k_Train_005113\nGOT-10k_Train_005119\nGOT-10k_Train_005126\nGOT-10k_Train_005146\nGOT-10k_Train_005166\nGOT-10k_Train_005191\nGOT-10k_Train_005207\nGOT-10k_Train_005255\nGOT-10k_Train_005269\nGOT-10k_Train_005280\nGOT-10k_Train_005310\nGOT-10k_Train_005317\nGOT-10k_Train_005319\nGOT-10k_Train_005334\nGOT-10k_Train_005338\nGOT-10k_Train_005339\nGOT-10k_Train_005354\nGOT-10k_Train_005364\nGOT-10k_Train_005382\nGOT-10k_Train_005385\nGOT-10k_Train_005389\nGOT-10k_Train_005390\nGOT-10k_Train_005396\nGOT-10k_Train_005398\nGOT-10k_Train_005399\nGOT-10k_Train_005401\nGOT-10k_Train_005413\nGOT-10k_Train_005415\nGOT-10k_Train_005420\nGOT-10k_Train_005457\nGOT-10k_Train_005465\nGOT-10k_Train_005488\nGOT-10k_Train_005493\nGOT-10k_Train_005510\nGOT-10k_Train_005523\nGOT-10k_Train_005538\nGOT-10k_Train_005553\nGOT-10k_Train_005556\nGOT-10k_Train_005575\nGOT-10k_Train_005577\nGOT-10k_Train_005582\nGOT-10k_Train_005594\nGOT-10k_Train_005606\nGOT-10k_Train_005611\nGOT-10k_Train_005636\nGOT-10k_Train_005639\nGOT-10k_Train_005642\nGOT-10k_Train_005651\nGOT-10k_Train_005652\nGOT-10k_Train_005653\nGOT-10k_Train_005681\nGOT-10k_Train_005686\nGOT-10k_Train_005689\nGOT-10k_Train_005701\nGOT-10k_Train_005712\nGOT-10k_Train_005716\nGOT-10k_Train_005724\nGOT-10k_Train_005731\nGOT-10k_Train_005732\nGOT-10k_Train_005734\nGOT-10k_Train_005741\nGOT-10k_Train_005764\nGOT-10k_Train_005767\nGOT-10k_Train_005788\nGOT-10k_Train_005791\nGOT-10k_Train_005800\nGOT-10k_Train_005813\nGOT-10k_Train_005816\nGOT-10k_Train_005830\nGOT-10k_Train_005852\nGOT-10k_Train_005876\nGOT-10k_Train_005877\nGOT-10k_Train_005884\nGOT-10k_Train_005910\nGOT-10k_Train_005929\nGOT-10k_Train_005943\nGOT-10k_Train_005958\nGOT-10k_Train_
005995\nGOT-10k_Train_006002\nGOT-10k_Train_006010\nGOT-10k_Train_006018\nGOT-10k_Train_006021\nGOT-10k_Train_006022\nGOT-10k_Train_006040\nGOT-10k_Train_006046\nGOT-10k_Train_006057\nGOT-10k_Train_006075\nGOT-10k_Train_006087\nGOT-10k_Train_006099\nGOT-10k_Train_006115\nGOT-10k_Train_006126\nGOT-10k_Train_006129\nGOT-10k_Train_006142\nGOT-10k_Train_006161\nGOT-10k_Train_006163\nGOT-10k_Train_006193\nGOT-10k_Train_006195\nGOT-10k_Train_006204\nGOT-10k_Train_006206\nGOT-10k_Train_006215\nGOT-10k_Train_006216\nGOT-10k_Train_006220\nGOT-10k_Train_006224\nGOT-10k_Train_006232\nGOT-10k_Train_006241\nGOT-10k_Train_006247\nGOT-10k_Train_006287\nGOT-10k_Train_006300\nGOT-10k_Train_006315\nGOT-10k_Train_006318\nGOT-10k_Train_006322\nGOT-10k_Train_006337\nGOT-10k_Train_006341\nGOT-10k_Train_006344\nGOT-10k_Train_006348\nGOT-10k_Train_006349\nGOT-10k_Train_006363\nGOT-10k_Train_006366\nGOT-10k_Train_006376\nGOT-10k_Train_006378\nGOT-10k_Train_006395\nGOT-10k_Train_006402\nGOT-10k_Train_006406\nGOT-10k_Train_006412\nGOT-10k_Train_006413\nGOT-10k_Train_006427\nGOT-10k_Train_006448\nGOT-10k_Train_006459\nGOT-10k_Train_006464\nGOT-10k_Train_006474\nGOT-10k_Train_006477\nGOT-10k_Train_006482\nGOT-10k_Train_006483\nGOT-10k_Train_006496\nGOT-10k_Train_006498\nGOT-10k_Train_006499\nGOT-10k_Train_006505\nGOT-10k_Train_006506\nGOT-10k_Train_006514\nGOT-10k_Train_006533\nGOT-10k_Train_006563\nGOT-10k_Train_006569\nGOT-10k_Train_006573\nGOT-10k_Train_006584\nGOT-10k_Train_006585\nGOT-10k_Train_006587\nGOT-10k_Train_006591\nGOT-10k_Train_006592\nGOT-10k_Train_006598\nGOT-10k_Train_006605\nGOT-10k_Train_006631\nGOT-10k_Train_006633\nGOT-10k_Train_006644\nGOT-10k_Train_006651\nGOT-10k_Train_006654\nGOT-10k_Train_006672\nGOT-10k_Train_006717\nGOT-10k_Train_006728\nGOT-10k_Train_006736\nGOT-10k_Train_006740\nGOT-10k_Train_006746\nGOT-10k_Train_006754\nGOT-10k_Train_006759\nGOT-10k_Train_006766\nGOT-10k_Train_006789\nGOT-10k_Train_006796\nGOT-10k_Train_006797\nGOT-10k_Train_006817\nGOT-10k_Train_006818\nGOT-10k_Train_006849\nGOT-10k_Train_006851\nGOT-10k_Train_006855\nGOT-10k_Train_006872\nGOT-10k_Train_006879\nGOT-10k_Train_006900\nGOT-10k_Train_006912\nGOT-10k_Train_006926\nGOT-10k_Train_006936\nGOT-10k_Train_006955\nGOT-10k_Train_006968\nGOT-10k_Train_006969\nGOT-10k_Train_006979\nGOT-10k_Train_006980\nGOT-10k_Train_006984\nGOT-10k_Train_006986\nGOT-10k_Train_006991\nGOT-10k_Train_007017\nGOT-10k_Train_007032\nGOT-10k_Train_007035\nGOT-10k_Train_007048\nGOT-10k_Train_007064\nGOT-10k_Train_007065\nGOT-10k_Train_007075\nGOT-10k_Train_007077\nGOT-10k_Train_007081\nGOT-10k_Train_007083\nGOT-10k_Train_007089\nGOT-10k_Train_007106\nGOT-10k_Train_007107\nGOT-10k_Train_007131\nGOT-10k_Train_007138\nGOT-10k_Train_007144\nGOT-10k_Train_007150\nGOT-10k_Train_007168\nGOT-10k_Train_007170\nGOT-10k_Train_007177\nGOT-10k_Train_007181\nGOT-10k_Train_007183\nGOT-10k_Train_007190\nGOT-10k_Train_007208\nGOT-10k_Train_007220\nGOT-10k_Train_007223\nGOT-10k_Train_007247\nGOT-10k_Train_007273\nGOT-10k_Train_007284\nGOT-10k_Train_007289\nGOT-10k_Train_007293\nGOT-10k_Train_007294\nGOT-10k_Train_007296\nGOT-10k_Train_007316\nGOT-10k_Train_007322\nGOT-10k_Train_007355\nGOT-10k_Train_007360\nGOT-10k_Train_007362\nGOT-10k_Train_007364\nGOT-10k_Train_007388\nGOT-10k_Train_007392\nGOT-10k_Train_007403\nGOT-10k_Train_007404\nGOT-10k_Train_007426\nGOT-10k_Train_007427\nGOT-10k_Train_007443\nGOT-10k_Train_007446\nGOT-10k_Train_007461\nGOT-10k_Train_007482\nGOT-10k_Train_007489\nGOT-10k_Train_007499\nGOT-10k_Train_007503\nGOT-10k_Train_007507\nGOT-1
0k_Train_007515\nGOT-10k_Train_007521\nGOT-10k_Train_007523\nGOT-10k_Train_007525\nGOT-10k_Train_007535\nGOT-10k_Train_007559\nGOT-10k_Train_007566\nGOT-10k_Train_007582\nGOT-10k_Train_007586\nGOT-10k_Train_007596\nGOT-10k_Train_007616\nGOT-10k_Train_007623\nGOT-10k_Train_007634\nGOT-10k_Train_007637\nGOT-10k_Train_007643\nGOT-10k_Train_007645\nGOT-10k_Train_007653\nGOT-10k_Train_007660\nGOT-10k_Train_007661\nGOT-10k_Train_007663\nGOT-10k_Train_007672\nGOT-10k_Train_007700\nGOT-10k_Train_007710\nGOT-10k_Train_007714\nGOT-10k_Train_007717\nGOT-10k_Train_007718\nGOT-10k_Train_007737\nGOT-10k_Train_007741\nGOT-10k_Train_007746\nGOT-10k_Train_007763\nGOT-10k_Train_007769\nGOT-10k_Train_007780\nGOT-10k_Train_007803\nGOT-10k_Train_007821\nGOT-10k_Train_007825\nGOT-10k_Train_007839\nGOT-10k_Train_007848\nGOT-10k_Train_007873\nGOT-10k_Train_007877\nGOT-10k_Train_007882\nGOT-10k_Train_007894\nGOT-10k_Train_007905\nGOT-10k_Train_007908\nGOT-10k_Train_007911\nGOT-10k_Train_007914\nGOT-10k_Train_007918\nGOT-10k_Train_007929\nGOT-10k_Train_007936\nGOT-10k_Train_007938\nGOT-10k_Train_007965\nGOT-10k_Train_007969\nGOT-10k_Train_007973\nGOT-10k_Train_007987\nGOT-10k_Train_007999\nGOT-10k_Train_008001\nGOT-10k_Train_008034\nGOT-10k_Train_008050\nGOT-10k_Train_008056\nGOT-10k_Train_008068\nGOT-10k_Train_008073\nGOT-10k_Train_008089\nGOT-10k_Train_008095\nGOT-10k_Train_008101\nGOT-10k_Train_008128\nGOT-10k_Train_008139\nGOT-10k_Train_008147\nGOT-10k_Train_008154\nGOT-10k_Train_008171\nGOT-10k_Train_008180\nGOT-10k_Train_008193\nGOT-10k_Train_008194\nGOT-10k_Train_008201\nGOT-10k_Train_008212\nGOT-10k_Train_008226\nGOT-10k_Train_008230\nGOT-10k_Train_008231\nGOT-10k_Train_008236\nGOT-10k_Train_008239\nGOT-10k_Train_008241\nGOT-10k_Train_008243\nGOT-10k_Train_008249\nGOT-10k_Train_008250\nGOT-10k_Train_008273\nGOT-10k_Train_008278\nGOT-10k_Train_008291\nGOT-10k_Train_008310\nGOT-10k_Train_008311\nGOT-10k_Train_008317\nGOT-10k_Train_008319\nGOT-10k_Train_008331\nGOT-10k_Train_008332\nGOT-10k_Train_008344\nGOT-10k_Train_008369\nGOT-10k_Train_008377\nGOT-10k_Train_008386\nGOT-10k_Train_008392\nGOT-10k_Train_008396\nGOT-10k_Train_008432\nGOT-10k_Train_008438\nGOT-10k_Train_008439\nGOT-10k_Train_008440\nGOT-10k_Train_008442\nGOT-10k_Train_008443\nGOT-10k_Train_008455\nGOT-10k_Train_008471\nGOT-10k_Train_008484\nGOT-10k_Train_008490\nGOT-10k_Train_008492\nGOT-10k_Train_008499\nGOT-10k_Train_008502\nGOT-10k_Train_008507\nGOT-10k_Train_008520\nGOT-10k_Train_008525\nGOT-10k_Train_008568\nGOT-10k_Train_008587\nGOT-10k_Train_008589\nGOT-10k_Train_008591\nGOT-10k_Train_008606\nGOT-10k_Train_008612\nGOT-10k_Train_008623\nGOT-10k_Train_008628\nGOT-10k_Train_008633\nGOT-10k_Train_008634\nGOT-10k_Train_008645\nGOT-10k_Train_008656\nGOT-10k_Train_008668\nGOT-10k_Train_008670\nGOT-10k_Train_008702\nGOT-10k_Train_008714\nGOT-10k_Train_008723\nGOT-10k_Train_008731\nGOT-10k_Train_008732\nGOT-10k_Train_008734\nGOT-10k_Train_008747\nGOT-10k_Train_008787\nGOT-10k_Train_008794\nGOT-10k_Train_008805\nGOT-10k_Train_008829\nGOT-10k_Train_008837\nGOT-10k_Train_008838\nGOT-10k_Train_008853\nGOT-10k_Train_008878\nGOT-10k_Train_008879\nGOT-10k_Train_008880\nGOT-10k_Train_008891\nGOT-10k_Train_008895\nGOT-10k_Train_008907\nGOT-10k_Train_008909\nGOT-10k_Train_008922\nGOT-10k_Train_008935\nGOT-10k_Train_008939\nGOT-10k_Train_008972\nGOT-10k_Train_008975\nGOT-10k_Train_008976\nGOT-10k_Train_009002\nGOT-10k_Train_009031\nGOT-10k_Train_009040\nGOT-10k_Train_009052\nGOT-10k_Train_009056\nGOT-10k_Train_009057\nGOT-10k_Train_009066\nGOT-10k_Train_0090
76\nGOT-10k_Train_009103\nGOT-10k_Train_009115\nGOT-10k_Train_009117\nGOT-10k_Train_009127\nGOT-10k_Train_009137\nGOT-10k_Train_009145\nGOT-10k_Train_009150\nGOT-10k_Train_009155\nGOT-10k_Train_009156\nGOT-10k_Train_009160\nGOT-10k_Train_009179\nGOT-10k_Train_009181\nGOT-10k_Train_009196\nGOT-10k_Train_009203\nGOT-10k_Train_009216\nGOT-10k_Train_009219\nGOT-10k_Train_009222\nGOT-10k_Train_009224\nGOT-10k_Train_009229\nGOT-10k_Train_009231\nGOT-10k_Train_009235\nGOT-10k_Train_009242\nGOT-10k_Train_009263\nGOT-10k_Train_009265\nGOT-10k_Train_009280\nGOT-10k_Train_009282\nGOT-10k_Train_009300\nGOT-10k_Train_009301\nGOT-10k_Train_009329\nGOT-10k_Train_009332\nGOT-10k_Train_009334"
  },
  {
    "path": "lib/train/data_specs/got10k_vot_train_split.txt",
    "content": "3784\n8998\n1631\n8277\n8358\n2338\n2988\n8302\n2662\n2663\n2825\n7447\n4781\n2218\n5860\n2819\n8075\n5391\n116\n3606\n7976\n7941\n1024\n4519\n1970\n557\n8579\n6908\n993\n7204\n1991\n3674\n8781\n6840\n5\n3225\n3763\n8688\n6778\n5777\n4794\n2744\n8126\n3864\n1733\n2923\n6829\n683\n2081\n1831\n2404\n1459\n2741\n5972\n7462\n2654\n103\n2174\n2989\n2506\n2766\n5912\n3295\n3986\n609\n4895\n6673\n801\n1098\n1602\n2490\n8476\n3186\n4784\n4270\n1812\n4226\n2267\n8873\n6544\n6112\n2381\n4752\n753\n3776\n6511\n6016\n2559\n7369\n5866\n563\n7731\n1105\n5603\n50\n4238\n2208\n8725\n4994\n4719\n1444\n8807\n7298\n8760\n8173\n2332\n4131\n1065\n8562\n3992\n4024\n2188\n9095\n6765\n1707\n6105\n6922\n5362\n1486\n7898\n4135\n6574\n998\n6565\n8127\n8927\n2544\n4365\n768\n3535\n3875\n6808\n2931\n487\n4451\n2470\n8111\n3493\n7338\n8281\n6390\n1271\n4373\n3667\n3494\n3757\n2966\n7840\n7827\n3300\n6261\n4163\n2217\n6549\n7236\n9136\n1857\n6691\n3470\n6271\n807\n516\n9311\n6098\n3144\n8420\n5425\n5694\n2643\n6696\n6072\n7285\n3781\n903\n8522\n6092\n5979\n2622\n2529\n855\n3420\n3261\n8953\n7866\n2492\n3157\n359\n1520\n2642\n7452\n759\n36\n8931\n1744\n4350\n1089\n9199\n1889\n1908\n4868\n4498\n1968\n3273\n7413\n4114\n5584\n4874\n1427\n5211\n7618\n1542\n1353\n8158\n4168\n3200\n6345\n8560\n5619\n5953\n3158\n8849\n5831\n1411\n8103\n6539\n7397\n1006\n5450\n3119\n4274\n5352\n4571\n2319\n4976\n902\n1814\n2651\n3299\n3398\n982\n2428\n5793\n1346\n7057\n3737\n7329\n4449\n2110\n7405\n1773\n958\n3901\n4127\n8234\n2994\n7066\n1289\n2995\n5871\n3556\n9085\n846\n2366\n585\n5516\n5230\n3481\n2732\n6658\n7423\n1855\n6384\n3554\n5823\n4948\n7058\n4667\n5377\n2503\n7694\n9191\n9144\n655\n3409\n62\n8019\n8970\n2323\n5750\n3178\n6548\n7501\n3280\n343\n2171\n8397\n1367\n8611\n6118\n6603\n7182\n9048\n7733\n7141\n3335\n4845\n5449\n3467\n6250\n163\n5168\n2040\n3609\n8352\n3426\n8567\n769\n187\n6151\n6437\n7028\n3970\n9146\n5028\n7492\n1661\n2815\n2469\n2563\n3814\n8430\n4305\n3479\n5678\n4132\n1211\n5459\n4814\n545\n4556\n238\n2724\n1260\n2581\n4632\n4313\n380\n1209\n5447\n3032\n7942\n8943\n806\n2432\n6130\n4314\n2131\n9045\n6531\n5706\n6747\n7724\n2017\n3292\n5469\n2743\n424\n4233\n8619\n5192\n4516\n9324\n3537\n9152\n8058\n7526\n8711\n1949\n5982\n6702\n7027\n6388\n7012\n328\n2130\n452\n306\n7669\n3134\n5761\n3703\n44\n4189\n695\n5224\n9215\n5644\n3143\n5443\n2348\n2328\n4725\n1418\n7810\n5759\n7226\n4535\n4385\n5397\n7249\n3204\n385\n2371\n2738\n3636\n9033\n2246\n2680\n6940\n4310\n2054\n9250\n9080\n4568\n5586\n4469\n2038\n3410\n7900\n4332\n6108\n678\n3319\n9079\n1054\n4048\n4751\n1320\n6890\n7931\n1398\n4349\n5299\n5025\n7932\n5738\n7787\n4590\n4020\n1274\n2488\n8497\n3372\n8965\n3219\n799\n3664\n6500\n7093\n4362\n6205\n4244\n5945\n6434\n2031\n2684\n6632\n4588\n8271\n3232\n5782\n2904\n7200\n3632\n5435\n8203\n3480\n4786\n7579\n3351\n1921\n798\n3646\n3094\n4359\n1654\n5975\n376\n5965\n780\n6738\n3185\n2133\n6248\n5996\n2834\n531\n5688\n2448\n7925\n7974\n5924\n6401\n5778\n6594\n5442\n8336\n4522\n3770\n6340\n6328\n4946\n4161\n2954\n2588\n8465\n2885\n1606\n5787\n3407\n3121\n7310\n1413\n1932\n4787\n2579\n3325\n508\n5610\n6480\n4290\n479\n3792\n6628\n2545\n6972\n2665\n6730\n3547\n6845\n3540\n8993\n1052\n2235\n8356\n3403\n8818\n8260\n572\n4159\n1180\n5348\n7948\n2676\n3539\n4866\n6422\n8365\n3217\n1310\n2059\n9177\n1419\n2283\n8892\n8162\n1212\n6277\n3725\n7806\n6149\n7874\n718\n6888\n7118\n277\n656\n8763\n8289\n4759\n5854\n8659\n3145\n5981\n1881\n5799\n6947\n1609\n6396\n2631\n318\n2550\n6132\n1736\n7816\n4304\n8133\n6698\n7779\n7732\n7
642\n7242\n711\n9262\n8033\n7440\n1913\n5480\n5570\n8594\n8772\n4654\n8974\n6128\n6183\n1071\n8449\n2142\n2298\n524\n1695\n820\n4053\n1856\n8641\n217\n1063\n9286\n3152\n221\n5461\n1270\n2006\n7164\n1199\n6951\n5604\n5400\n5309\n3498\n6407\n6661\n7097\n8165\n5169\n3852\n7070\n5702\n4344\n6648\n6904\n3272\n7119\n5795\n2365\n2659\n353\n5444\n1924\n2098\n2972\n6006\n5865\n8740\n7856\n5841\n598\n836\n1147\n931\n8897\n0\n6049\n1837\n865\n1871\n6116\n6831\n5773\n3587\n303\n1883\n2163\n3070\n1308\n7953\n6909\n853\n7301\n3279\n123\n7186\n3194\n5133\n1931\n4622\n4891\n5722\n5693\n8\n2339\n6596\n71\n379\n4506\n4370\n1238\n2707\n3344\n4254\n8767\n1726\n325\n4148\n5438\n5357\n548\n1332\n6824\n2290\n2335\n2594\n2315\n3389\n3885\n2621\n4116\n7412\n7222\n4894\n8595\n2000\n4978\n4721\n6444\n3796\n9321\n2236\n6409\n1523\n1468\n9249\n8270\n2341\n2874\n174\n4502\n4703\n9034\n9108\n5451\n2619\n9158\n490\n6540\n1466\n2962\n8771\n2712\n4539\n1581\n5638\n9246\n4308\n4363\n4647\n4470\n1636\n1311\n6560\n7519\n8027\n9217\n6364\n3779\n4822\n3563\n5896\n6655\n1524\n2846\n3137\n141\n1887\n6567\n8921\n4671\n6052\n8445\n8699\n7349\n3553\n2117\n7651\n5034\n5383\n649\n3818\n9022\n8414\n1012\n8159\n5081\n8571\n4765\n9135\n4361\n4073\n9142\n727\n2835\n8229\n3989\n4490\n4923\n5477\n1638\n3643\n9044\n2230\n499\n7166\n3172\n8431\n8401\n1470\n6356\n8817\n927\n4212\n2152\n3812\n4949\n1219\n1538\n3029\n6481\n9042\n7775\n7742\n423\n2085\n7715\n4541\n9061\n5916\n7420\n7406\n7046\n7808\n4911\n8804\n6927\n8820\n3264\n300\n2979\n252\n4407\n3383\n4688\n8504\n6723\n26\n3837\n2489\n4137\n8209\n229\n6490\n2364\n9016\n1763\n1728\n338\n8335\n9063\n2791\n641\n5454\n4581\n4548\n2840\n8508\n3463\n7231\n7619\n2560\n1755\n6201\n165\n6279\n5806\n6867\n5890\n2396\n3416\n1981\n6073\n5872\n3045\n4182\n7607\n4414\n2998\n6553\n7139\n5624\n3666\n723\n5110\n6932\n8200\n2222\n8399\n1041\n4138\n1594\n3569\n9253\n393\n7940\n8004\n1475\n5393\n1107\n2597\n878\n9309\n7576\n5250\n3142\n2015\n571\n3921\n1255\n7080\n893\n2160\n1355\n82\n9153\n8583\n4085\n4644\n7196\n9165\n3558\n4550\n6374\n7826\n8602\n4146\n9257\n6083\n874\n8383\n3731\n3374\n3653\n8222\n7344\n470\n1813\n6871\n7245\n6866\n3998\n7433\n276\n1915\n1988\n8168\n2518\n2686\n831\n6143\n5205\n8718\n1703\n7729\n2077\n7983\n8450\n1195\n9232\n507\n7989\n6974\n5828\n8655\n6679\n5245\n7783\n5886\n9098\n6491\n8782\n3525\n6542\n131\n8110\n9186\n9074\n4933\n9035\n2607\n2057\n6273\n2711\n5829\n3382\n2696\n3043\n2048\n619\n2499\n5295\n1162\n7807\n3694\n2194\n3149\n1940\n7934\n840\n3592\n8237\n4731\n1324\n8486\n8726\n8573\n2928\n9078\n2272\n2564\n1370\n5911\n7434\n8026\n407\n7546\n2004\n5849\n7887\n3425\n1118\n926\n3430\n5902\n2282\n2334\n129\n1372\n4842\n6473\n4382\n1028\n415\n8269\n6910\n2796\n3038\n5735\n5080\n2852\n6306\n8842\n9188\n3637\n1066\n532\n5485\n2838\n6753\n9008\n7984\n2816\n8819\n7103\n5977\n5044\n2064\n2599\n3249\n6446\n6638\n852\n1724\n3368\n892\n3250\n8258\n7962\n4300\n1616\n167\n8855\n2090\n4424\n879\n5136\n5350\n2635\n7828\n8506\n63\n3847\n3676\n1705\n6745\n1263\n5020\n1888\n7036\n1033\n3914\n5433\n3905\n4641\n228\n4801\n3766\n8085\n643\n6914\n3013\n5657\n3696\n1590\n8282\n2403\n416\n911\n3849\n4215\n1120\n5490\n296\n2306\n3140\n3742\n4819\n6153\n6414\n760\n3000\n7498\n7108\n6429\n3031\n5314\n751\n3357\n5808\n7505\n98\n7652\n4027\n6257\n1799\n8577\n4969\n9163\n2025\n6061\n4026\n588\n4961\n4940\n7152\n538\n706\n2802\n8983\n3375\n1246\n6593\n5837\n1789\n7939\n4997\n5939\n2411\n6133\n199\n7593\n1702\n5406\n6082\n2912\n6109\n100\n8149\n5470\n2807\n3362\n5621\n6019\n9241\n9268\n7703\n7967\n5458\n5492\n
6729\n4577\n106\n3774\n979\n7082\n4610\n1853\n9003\n9292\n2867\n6262\n2245\n3460\n1557\n4796\n2658\n5769\n6985\n421\n7990\n3289\n1540\n9316\n2251\n6896\n5947\n4965\n4480\n963\n9047\n7824\n3976\n6210\n7018\n7179\n5016\n7789\n6102\n6828\n7659\n9109\n9071\n8115\n7628\n7110\n16\n7513\n835\n939\n2351\n2322\n4945\n560\n6837\n6094\n6475\n7901\n3\n771\n8029\n3135\n8044\n7127\n3741\n5156\n7030\n113\n3747\n7042\n5232\n5225\n3002\n4747\n5379\n4886\n7192\n4184\n1896\n1834\n8689\n3665\n2957\n6913\n8009\n4851\n6420\n828\n8884\n8815\n3198\n8008\n194\n6251\n3303\n3934\n395\n1285\n4169\n1648\n1347\n3600\n4631\n509\n211\n6230\n7241\n2219\n2582\n8353\n7790\n7583\n9004\n6942\n1704\n8051\n2981\n5511\n6182\n7088\n1699\n1222\n6189\n1528\n5197\n6221\n7893\n7773\n8766\n2942\n8021\n614\n1786\n400\n133\n556\n5237\n3727\n1440\n3873\n8448\n6285\n8696\n8800\n4009\n3386\n4847\n5685\n9093\n5895\n6863\n4260\n8405\n8417\n7116\n255\n3223\n4737\n7852\n814\n710\n1094\n6103\n5809\n5882\n6336\n4974\n1499\n2806\n3744\n2664\n2436\n4482\n8665\n8918\n1076\n8676\n5725\n9248\n4755\n1447\n9328\n5500\n78\n2653\n792\n6854\n6093\n6172\n3378\n4492\n5529\n5476\n3846\n1391\n383\n4289\n3883\n2648\n3265\n2525\n5402\n4599\n6870\n6877\n4413\n2464\n8519\n2521\n1839\n5822\n5664\n7257\n5375\n6852\n6764\n5182\n8914\n3015\n8509\n3080\n4562\n8979\n6643\n8601\n6096\n4812\n5246\n7862\n527\n7849\n6737\n12\n2468\n7961\n275\n27\n5932\n3840\n7341\n4996\n8564\n2154\n6138\n7831\n4442\n757\n4464\n1170\n2568\n19\n323\n7675\n3441\n2067\n9027\n2486\n4379\n4744\n1737\n7563\n301\n3907\n4742\n6857\n1221\n9284\n8458\n2897\n1526\n5345\n4423\n6246\n8578\n3711\n4986\n4785\n3997\n7311\n4788\n8387\n2041\n2608\n6031\n3293\n541\n773\n8473\n2501\n5667\n804\n483\n1639\n696\n6060\n5429\n5762\n1527\n7342\n6225\n7895\n381\n8030\n8362\n4734\n3526\n9273\n2039\n5084\n875\n6905\n8968\n5275\n3052\n650\n7509\n232\n2595\n3631\n1810\n4355\n8315\n8908\n1777\n4834\n3164\n2336\n1543\n6212\n8346\n3024\n3719\n1242\n6265\n3133\n6150\n6358\n3316\n4089\n1647\n4629\n7117\n2596\n5366\n6371\n2209\n1428\n1158\n7648\n8765\n802\n153\n4639\n3657\n9320\n3294\n2617\n5052\n6305\n3227\n8784\n5868\n6716\n1671\n178\n2703\n954\n3254\n2262\n5743\n8647\n6393\n7706\n6604\n3728\n6978\n7474\n8754\n2740\n6038\n1491\n8814\n2080\n2358\n5944\n1164\n9259\n4518\n7343\n5748\n3897\n923\n5967\n2677\n3503\n1202\n4966\n6634\n1962\n9096\n9064\n977\n4049\n1464\n658\n536\n3402\n8064\n1309\n259\n8122\n910\n224\n6152\n7142\n6070\n8411\n9214\n9312\n8325\n6192\n626\n6025\n6240\n8708\n4630\n6777\n1075\n8906\n408\n9269\n6236\n9067\n2324\n156\n3136\n7878\n7308\n4335\n2065\n3845\n4453\n3356\n1450\n371\n7219\n5171\n201\n8642\n2099\n477\n1603\n8339\n7430\n3061\n235\n1133\n8474\n8653\n989\n4569\n9092\n8347\n3102\n1743\n9086\n5140\n7438\n1530\n2460\n7646\n5071\n5430\n6944\n610\n2803\n1448\n4696\n6156\n4386\n4248\n4256\n994\n805\n8011\n8276\n8999\n4956\n1712\n2795\n7553\n6436\n2158\n9083\n3184\n5784\n4428\n612\n5288\n6222\n1365\n5074\n6848\n575\n5213\n2175\n4240\n351\n2086\n2656\n5150\n9255\n8189\n7735\n1261\n1344\n4097\n8674\n2984\n4235\n5998\n6488\n537\n1267\n7486\n7124\n6245\n7955\n7337\n5436\n1194\n209\n1710\n7906\n4357\n4139\n5679\n2584\n2854\n1004\n8246\n8586\n5087\n4926\n6637\n3197\n7757\n6502\n1248\n990\n3928\n2770\n2751\n1020\n6426\n6839\n2671\n3871\n9212\n4179\n3394\n10\n5861\n5316\n6869\n2985\n8905\n8559\n4457\n2480\n2313\n4100\n6835\n7799\n7890\n2785\n5468\n7302\n5862\n1803\n3171\n717\n7053\n1655\n4489\n2522\n2921\n8555\n1984\n895\n8949\n1305\n738\n7606\n112\n3042\n1325\n437\n3167\n3340\n511\n3689\n8982\n69\n4421\n550\n8685\n31
47\n8956\n3166\n7023\n2014\n3573\n3880\n4045\n2069\n6051\n702\n6664\n8418\n6181\n4853\n4166\n7022\n7418\n3605\n7172\n5031\n4589\n7858\n6586\n6351\n8334\n7504\n634\n3759\n1890\n890\n6959\n5085\n4919\n2161\n1191\n256\n3610\n7079\n3427\n4071\n7323\n2982\n7263\n7444\n4251\n5846\n4864\n3649\n4311\n8120\n4582\n6373\n2805\n4872\n4869\n5867\n2670\n7099\n30\n8933\n930\n7919\n7261\n5289\n7449\n7772\n3613\n3196\n474\n205\n841\n2611\n6185\n3088\n409\n7239\n5938\n7871\n1343\n6705\n1027\n5596\n2199\n9113\n5471\n6134\n838\n8359\n4061\n1474\n3229\n270\n4245\n1979\n1517\n8652\n4006\n6137\n4693\n2528\n6996\n2926\n5798\n2477\n2549\n3341\n6014\n4479\n2861\n4208\n5175\n5174\n5118\n3736\n5463\n1588\n2327\n8380\n7982\n1058\n4586\n6608\n7985\n1822\n3628\n549\n1811\n2601\n4608\n2540\n6659\n3859\n307\n3767\n8167\n505\n4366\n5520\n461\n1933\n2401\n8106\n2055\n7844\n8544\n4797\n7419\n6686\n7670\n6039\n5672\n5141\n6543\n206\n5252\n4718\n888\n1601\n3218\n5114\n713\n4022\n4419\n6708\n397\n425\n6612\n5057\n1729\n4729\n4080\n1034\n534\n5598\n9218\n2424\n329\n4154\n1597\n109\n8823\n9038\n8437\n3307\n128\n8032\n1412\n7333\n8762\n8851\n8865\n468\n3808\n3064\n8798\n7052\n7767\n1086\n2162\n6566\n2109\n3439\n6122\n3642\n7696\n8610\n5279\n1808\n8687\n817\n6066\n3640\n6015\n7601\n4855\n6017\n87\n7071\n7268\n3614\n6084\n6117\n6924\n9102\n2829\n375\n8724\n2095\n22\n1541\n2970\n633\n139\n451\n4521\n179\n1396\n3876\n5824\n8020\n426\n4982\n4172\n190\n4859\n1455\n3110\n3323\n9104\n858\n6719\n6428\n4495\n8551\n2141\n3984\n3066\n67\n4299\n5821\n8444\n6581\n6097\n7090\n7781\n8944\n3085\n2114\n5355\n8901\n1461\n3301\n422\n7000\n4820\n5790\n1379\n7536\n8736\n8991\n5241\n1698\n1294\n1753\n196\n2987\n8680\n4144\n8639\n6441\n8255\n8156\n3677\n6385\n6520\n3760\n6001\n1144\n5478\n7394\n8057\n5018\n4232\n5235\n6844\n3111\n8802\n949\n7843\n573\n2278\n6801\n7629\n2714\n5105\n6946\n2697\n5315\n1571\n8677\n2537\n4374\n3833\n7820\n3750\n2033\n6526\n3884\n8706\n7195\n3603\n3001\n6284\n5873\n5718\n8576\n8457\n3589\n5839\n459\n6342\n8729\n6933\n607\n6053\n8228\n3773\n1805\n6365\n5142\n6069\n1389\n9026\n570\n4614\n5533\n2821\n1897\n819\n4060\n5905\n6842\n5446\n1277\n4303\n2836\n934\n1014\n7822\n7494\n665\n5881\n3328\n4664\n315\n1315\n1462\n8616\n7725\n5749\n1730\n8184\n4567\n5065\n8867\n1304\n3669\n9192\n410\n8177\n6710\n1210\n2329\n3911\n1899\n7686\n3315\n6180\n3116\n5341\n4394\n8337\n9182\n5715\n2172\n2782\n3715\n9195\n7960\n4890\n8294\n2337\n8014\n3353\n7475\n2193\n8831\n4200\n4653\n6196\n6957\n3063\n8959\n8973\n6529\n3457\n5274\n8002\n6823\n6154\n5561\n1780\n9318\n7657\n1758\n6503\n7678\n3274\n1625\n4327\n3236\n8575\n4707\n4331\n1494\n8756\n3174\n1074\n8116\n8295\n3048\n3752\n6050\n8003\n9175\n4674\n1642\n2556\n6166\n7165\n8441\n3990\n1640\n1778\n7500\n8304\n1395\n4315\n5949\n3364\n242\n5763\n1036\n2430\n8131\n411\n6267\n2045\n6606\n899\n8065\n5779\n5616\n2107\n5408\n2980\n6310\n5776\n4328\n821\n3251\n2354\n7076\n5313\n79\n3959\n5677\n7545\n160\n6790\n6859\n3659\n6770\n1106\n8846\n956\n7472\n2050\n8099\n4795\n8053\n9293\n7037\n1646\n9307\n5322\n5332\n2708\n8977\n917\n2419\n184\n2105\n1578\n3923\n5780\n1903\n2512\n429\n493\n4972\n445\n8286\n320\n8300\n617\n3413\n4459\n525\n5631\n6314\n5157\n5300\n8545\n182\n1031\n4429\n2495\n1534\n3099\n3916\n3738\n535\n2119\n177\n1838\n2159\n4099\n8285\n5172\n8540\n6020\n7683\n3073\n3115\n3087\n2416\n1894\n5942\n3597\n5834\n2007\n43\n1779\n4174\n2023\n2546\n2429\n9006\n436\n4214\n3693\n5426\n6767\n5903\n4368\n2170\n5051\n7490\n2859\n5035\n7835\n5372\n7122\n925\n3253\n6338\n8393\n4093\n5848\n7588\n2683\n8049\n5403\n5894
\n8745\n8550\n2941\n3484\n9029\n4461\n8022\n725\n3030\n1975\n5623\n2415\n1957\n6141\n9278\n3226\n3062\n5670\n7326\n8759\n8496\n6619\n8187\n8262\n6199\n951\n668\n2388\n4698\n8240\n2851\n871\n4988\n9084\n9089\n3162\n1167\n8244\n5227\n6461\n2831\n776\n5010\n5770\n5282\n3574\n5102\n1278\n2281\n5455\n4628\n4663\n9119\n7487\n8746\n4889\n1175\n102\n2386\n8940\n5566\n53\n8833\n1918\n321\n6786\n6861\n4358\n2771\n7467\n975\n4777\n605\n3543\n2600\n7584\n9299\n4530\n7328\n183\n4761\n7543\n304\n1196\n4623\n5519\n1953\n533\n5989\n7590\n7428\n6346\n6162\n1946\n6260\n4405\n5676\n8924\n7171\n8409\n1866\n6379\n3411\n2387\n3051\n7398\n154\n1185\n6442\n6004\n1611\n2165\n9018\n8323\n616\n3995\n8952\n1533\n7853\n789\n4991\n3675\n7456\n5752\n175\n7556\n4195\n907\n2248\n8467\n1017\n7968\n3304\n1666\n4942\n3867\n4802\n6357\n4621\n887\n6213\n5261\n1336\n521\n8928\n7864\n4792\n6742\n157\n1593\n823\n7235\n5303\n5633\n1100\n8047\n5993\n1460\n6714\n1630\n6440\n6307\n3608\n292\n5974\n8301\n8342\n2720\n4583\n2757\n7315\n833\n4466\n4236\n1282\n5273\n2149\n2380\n8119\n7167\n5076\n3596\n2650\n8980\n3421\n1356\n1954\n7823\n1172\n2226\n1941\n6136\n7274\n2256\n4928\n324\n4410\n4579\n1061\n7113\n486\n862\n6956\n2873\n1465\n6113\n8225\n8512\n6806\n272\n6008\n1241\n88\n5662\n3555\n689\n8733\n2812\n7453\n6282\n420\n2471\n4477\n7495\n1445\n594\n6939\n1564\n8704\n8590\n7992\n7374\n5796\n9298\n4213\n5713\n5864\n326\n5513\n402\n464\n608\n1951\n8640\n3347\n3459\n4162\n2690\n7478\n5856\n5240\n3022\n602\n5547\n1798\n1345\n9276\n599\n3673\n3277\n1635\n8625\n1567\n5928\n636\n5671\n2896\n3477\n412\n7575\n4201\n685\n4760\n1229\n4275\n8960\n3123\n4471\n5941\n3355\n3999\n7157\n6354\n6850\n8783\n1943\n6769\n7330\n8721\n8477\n1381\n848\n778\n6408\n2644\n5817\n1441\n1723\n2144\n2776\n2368\n367\n8839\n8749\n5353\n3148\n9114\n1233\n9228\n8857\n2895\n1286\n200\n6755\n5125\n5857\n1657\n7658\n5000\n942\n7020\n586\n784\n7078\n6194\n8658\n8957\n9325\n1851\n8911\n7004\n1186\n8824\n2999\n561\n7639\n4316\n5086\n3187\n7912\n2624\n9183\n8487\n5089\n8475\n7554\n4031\n6297\n6059\n5329\n115\n2058\n7650\n7121\n2485\n7805\n2241\n7713\n4352\n2409\n1026\n2745\n4549\n5124\n5201\n6556\n6617\n9091\n3945\n8402\n5648\n5257\n4901\n7750\n6131\n6027\n6352\n4625\n1254\n5498\n3720\n8261\n3939\n5576\n3685\n6713\n8472\n991\n8354\n5655\n5997\n1029\n7506\n2575\n2990\n4898\n7402\n3290\n5388\n6715\n8235\n5361\n4970\n1363\n3338\n9014\n5358\n635\n1193\n3705\n6334\n7666\n5270\n6368\n8604\n3564\n1937\n2481\n1341\n721\n2100\n3958\n6551\n3813\n2592\n7980\n2357\n8761\n8910\n8693\n1204\n489\n4827\n8024\n7832\n3895\n9068\n8067\n1708\n1111\n8963\n1902\n9251\n5719\n9143\n5537\n9169\n5365\n1840\n485\n4456\n1169\n3271\n6886\n9140\n7173\n6003\n1659\n1807\n8371\n2439\n274\n3448\n6623\n347\n2103\n3400\n2106\n9073\n8169\n3687\n3305\n4416\n8454\n6635\n332\n2433\n1944\n6509\n7770\n1880\n6610\n9331\n302\n418\n4219\n1333\n2350\n8424\n4883\n6580\n6722\n1669\n8470\n2571\n513\n3810\n7049\n6332\n7363\n3532\n8456\n2097\n297\n8841\n7180\n714\n1587\n5234\n7372\n660\n8503\n1668\n8847\n1101\n7275\n3336\n6460\n722\n7782\n3947\n502\n4258\n2132\n1835\n181\n3841\n427\n3446\n2551\n8324\n6963\n4284\n7297\n7577\n3399\n9148\n8213\n5656\n851\n657\n2446\n6992\n976\n1108\n2681\n3237\n8582\n377\n5969\n5287\n9209\n8523\n7178\n7833\n6175\n2126\n3023\n5090\n7491\n6640\n6077\n2221\n2780\n1694\n4094\n144\n3203\n7123\n749\n3625\n3848\n980\n2270\n7819\n3672\n7689\n7203\n2718\n1714\n3802\n3851\n4224\n7237\n7998\n7207\n4106\n9036\n1046\n5070\n4592\n6056\n693\n1328\n3309\n2629\n2736\n202\n388\n7886\n4417\n8786\n8822\n4035\n5505\n119
2\n4388\n8941\n5019\n7538\n6732\n6389\n5923\n1405\n3278\n3917\n1688\n8374\n443\n4037\n9099\n5190\n4177\n9310\n7747\n4348\n7197\n4844\n4998\n5609\n4345\n29\n3332\n8648\n4107\n346\n2577\n3941\n1215\n8252\n4706\n2675\n3790\n7459\n6164\n1149\n6687\n582\n3139\n3882\n4034\n1861\n4701\n8757\n8801\n1823\n4528\n4789\n143\n4746\n9234\n3866\n9245\n1911\n1366\n4393\n2061\n1959\n6967\n3138\n7382\n6237\n845\n80\n6911\n7163\n5229\n4736\n8738\n33\n8543\n357\n3193\n7262\n4448\n6793\n3321\n7569\n6411\n7692\n7340\n1417\n5847\n3836\n2678\n1188\n8727\n8615\n7417\n5771\n3170\n8061\n2935\n8263\n8257\n6883\n1276\n1239\n812\n6258\n3922\n8117\n3039\n603\n8554\n7573\n2787\n3445\n5115\n3478\n962\n3961\n6570\n7722\n216\n2797\n5154\n2530\n4904\n2405\n7542\n4021\n3252\n5370\n9302\n236\n4532\n1361\n3373\n1716\n2183\n1583\n3783\n868\n1687\n8925\n6198\n8208\n6367\n7603\n882\n3469\n1645\n7654\n1176\n4231\n150\n7997\n5456\n7031\n4375\n8840\n5634\n6945\n705\n4774\n3822\n7148\n1922\n8459\n6249\n8713\n6197\n8599\n6071\n6756\n1634\n950\n5640\n7749\n5920\n6622\n4783\n7837\n7479\n7229\n3919\n1797\n5272\n8945\n4908\n5439\n6903\n5833\n6930\n8197\n9261\n1711\n5483\n4285\n8852\n7409\n8971\n7534\n7792\n2444\n7496\n8063\n1665\n248\n3894\n4585\n66\n4850\n1240\n7511\n7524\n9258\n2075\n3979\n4714\n7592\n965\n2919\n1842\n8013\n4750\n2344\n6155\n3468\n31\n2087\n1599\n1573\n5883\n7613\n195\n3749\n644\n2189\n8779\n8743\n9005\n8081\n1040\n7785\n5820\n8830\n5495\n4867\n2710\n491\n7153\n6217\n4741\n1761\n5484\n5474\n6916\n7252\n1739\n8930\n6647\n5198\n4903\n8488\n7366\n2774\n2726\n2385\n7625\n3179\n8845\n6600\n399\n6810\n3447\n6684\n4915\n8368\n1867\n2325\n2101\n1335\n7734\n7437\n7025\n4000\n6897\n1408\n7154\n5013\n2204\n9233\n3817\n1877\n9161\n2197\n3390\n280\n1892\n1612\n7753\n2801\n7246\n7909\n6229\n9314\n8407\n1436\n3879\n6432\n5326\n5327\n8535\n7910\n7745\n5545\n7916\n207\n1783\n6158\n8517\n7361\n8070\n6430\n119\n6146\n4183\n1083\n7385\n4497\n9133\n1686\n3765\n595\n8046\n4418\n4043\n2361\n7915\n9149\n1717\n1141\n6375\n1018\n5602\n1262\n7485\n9178\n6629\n3339\n8934\n4648\n7988\n6252\n3440\n864\n5418\n3874\n7280\n6191\n8388\n4323\n6792\n2232\n7228\n8684\n7813\n6187\n6678\n3177\n3534\n4953\n4402\n7739\n6319\n2414\n8700\n5946\n8238\n6917\n4167\n4618\n2268\n3081\n1247\n4001\n8580\n7636\n3101\n2195\n1559\n3714\n7188\n6028\n7530\n2828\n1977\n3238\n2340\n110\n3247\n7532\n7541\n924\n1632\n4487\n6447\n4944\n6347\n2285\n8087\n5452\n91\n1166\n162\n5185\n7933\n4743\n1627\n7259\n8620\n8207\n5845\n9011\n5525\n4269\n4700\n1824\n8186\n8872\n8299\n3957\n8242\n4558\n6439\n2666\n6958\n8112\n5121\n8806\n6170\n7688\n3486\n2082\n7436\n2778\n1096\n786\n2206\n5170\n1443\n6030\n3312\n9151\n8485\n6404\n8498\n2883\n8961\n2280\n8341\n2809\n2445\n809\n8298\n8643\n8316\n6853\n1572\n3215\n3938\n2249\n6515\n1337\n8328\n7712\n1429\n4117\n5441\n3230\n4152\n7225\n3513\n6953\n1507\n348\n3639\n5739\n2673\n1550\n6301\n1652\n8453\n204\n6833\n2200\n5217\n1854\n4711\n7368\n4572\n4032\n7531\n1013\n3634\n2875\n6058\n8307\n7609\n1766\n904\n667\n5410\n6578\n3601\n1664\n3233\n7390\n8178\n4486\n4427\n4876\n9166\n2772\n6295\n5001\n5296\n3371\n6518\n6327\n854\n8288\n1912\n5927\n6202\n5814\n9032\n1059\n3214\n6547\n7038\n5781\n4390\n6114\n1622\n4318\n5803\n5984\n736\n3561\n6554\n5045\n4277\n7386\n9081\n8462\n2034\n4955\n2701\n932\n7758\n7176\n9205\n3077\n3803\n3562\n8054\n7946\n295\n1843\n7728\n1629\n7768\n2971\n431\n9285\n2513\n1116\n3656\n4529\n5758\n6339\n8398\n816\n4153\n2536\n1826\n7870\n8113\n7730\n7101\n6555\n9256\n6774\n1072\n4578\n2598\n3604\n5880\n861\n3350\n3117\n4685\n4334\n5165\n7
224\n4066\n4253\n4447\n3815\n5038\n253\n3658\n330\n3967\n6443\n2143\n7336\n6135\n2734\n8390\n4655\n7800\n1399\n1173\n5618\n2822\n4431\n2443\n1568\n3909\n1974\n2496\n4772\n5164\n2138\n2864\n3799\n3924\n4882\n8245\n1585\n5528\n5692\n5730\n5832\n137\n3175\n2894\n2062\n2752\n4028\n2113\n5411\n2647\n730\n3758\n1667\n9303\n6653\n3698\n3968\n3053\n503\n2150\n4645\n2257\n4627\n8303\n7966\n8742\n4692\n5901\n8547\n2277\n5546\n986\n370\n4697\n8712\n4804\n1182\n6650\n7290\n3487\n2814\n5668\n7567\n5333\n4164\n3084\n8896\n3888\n6537\n17\n6882\n3531\n704\n1037\n8866\n5263\n6758\n3762\n1393\n3824\n5112\n214\n1439\n5700\n8932\n1306\n5011\n6928\n5173\n4098\n1132\n7352\n4778\n7723\n1368\n2390\n670\n2685\n5855\n1772\n6380\n3853\n940\n5424\n6091\n1748\n5297\n6572\n8877\n6874\n430\n5041\n5267\n7448\n620\n9112\n4294\n1432\n72\n130\n7920\n4597\n6614\n8889\n3697\n1895\n3462\n2616\n4791\n7846\n8372\n428\n6559\n8326\n9211\n1525\n5980\n7888\n3331\n8118\n7899\n615\n7377\n791\n5930\n6627\n8322\n1138\n770\n8460\n5100\n8274\n8350\n6316\n2893\n7594\n9236\n5082\n8150\n1986\n1909\n8902\n2145\n3617\n3501\n7\n2426\n5056\n8016\n2702\n5360\n8135\n8385\n8378\n8018\n8574\n720\n8893\n3021\n1978\n4782\n1816\n2083\n4051\n1446\n5870\n9097\n8006\n4222\n8287\n686\n1377\n611\n8153\n4808\n1536\n679\n4096\n3891\n4884\n432\n4615\n8988\n5560\n3451\n5589\n3514\n6169\n1414\n3244\n1490\n7100\n3588\n690\n7317\n4171\n2266\n6800\n2793\n5151\n6977\n8188\n8752\n5815\n5116\n263\n3311\n289\n3392\n5755\n1022\n5548\n9319\n8937\n6011\n7632\n5328\n4141\n5407\n520\n7305\n526\n3645\n1859\n2520\n3523\n8629\n7304\n8881\n3076\n4005\n8329\n2205\n2214\n6925\n8691\n4136\n8883\n974\n7952\n3965\n5887\n7964\n7189\n2406\n2783\n8086\n405\n6568\n5147\n2021\n4727\n7674\n1600\n5078\n2949\n6624\n6541\n8986\n5740\n8500\n3591\n4434\n398\n983\n7544\n1478\n4570\n6012\n465\n9330\n7206\n808\n8737\n2356\n4959\n8812\n3599\n1420\n1721\n5897\n8422\n2\n4023\n2739\n3619\n8797\n5496\n8951\n8181\n6893\n9254\n1809\n5682\n4309\n6929\n2742\n5988\n3363\n4493\n8434\n4210\n1503\n1876\n5094\n4600\n4936\n4798\n3933\n5216\n646\n3098\n8773\n4076\n5335\n3746\n3327\n47\n4602\n8636\n4129\n363\n6417\n7416\n9025\n4377\n4766\n2779\n4151\n9046\n7860\n3154\n3476\n7620\n2052\n1752\n7199\n4412\n8882\n2463\n339\n56\n4821\n7555\n6558\n1905\n5258\n4205\n3580\n6735\n1023\n4511\n3850\n161\n7395\n2532\n3349\n7055\n7387\n758\n1907\n3006\n659\n815\n1961\n6902\n7668\n4708\n1904\n4433\n5159\n6816\n8664\n6918\n1016\n6513\n7314\n7480\n9313\n716\n3395\n6843\n918\n4329\n8593\n3404\n5212\n837\n480\n8524\n1342\n7414\n288\n8863\n3352\n1628\n135\n3314\n2181\n8650\n5915\n8078\n6812\n1375\n906\n5635\n7126\n1387\n7458\n6119\n5591\n3795\n1531\n95\n1960\n7522\n898\n4921\n2623\n6268\n7063\n1326\n9075\n2505\n7400\n1284\n2951\n747\n6466\n1357\n6493\n7320\n5892\n576\n5107\n5559\n97\n2583\n6361\n8843\n3509\n7892\n6086\n1476\n4612\n4267\n9094\n7050\n6048\n8382\n2227\n284\n2898\n3221\n2353\n2157\n5990\n5810\n3581\n7279\n6188\n7859\n3549\n5539\n2022\n630\n2500\n5111\n6561\n5127\n5569\n6123\n1338\n8605\n3491\n4187\n8220\n7334\n9213\n3067\n6997\n2853\n4735\n4372\n5954\n6662\n2207\n973\n3361\n960\n6350\n7431\n8076\n1129\n750\n7194\n2300\n6590\n5893\n6889\n3125\n8788\n7286\n3472\n8164\n7693\n1469\n5563\n4773\n3210\n6324\n3113\n9070\n3638\n7551\n2541\n3506\n5138\n4069\n7198\n7560\n3306\n6100\n2932\n1741\n14\n4672\n7564\n8748\n8874\n3804\n3678\n2610\n1358\n42\n5176\n9326\n8464\n1038\n2993\n3017\n9072\n32\n4809\n4364\n2808\n4125\n152\n7299\n5431\n6178\n793\n9120\n8410\n4963\n772\n6954\n3014\n6881\n286\n553\n1948\n6398\n6255\n3057\n8646\n6176\
n2700\n5663\n6683\n1281\n6013\n8799\n7635\n9289\n1885\n442\n2225\n6294\n5054\n2674\n7884\n8730\n8216\n4203\n1488\n7111\n3623\n7950\n1971\n3248\n2900\n1553\n472\n3865\n7796\n6937\n4591\n8098\n5208\n294\n5627\n5691\n5687\n7149\n4879\n3624\n7005\n2773\n3112\n9185\n1633\n7830\n5101\n8707\n8469\n4678\n4860\n700\n5527\n9194\n2794\n5068\n1177\n4282\n6492\n5859\n5029\n5123\n522\n5048\n7230\n2104\n6642\n6731\n2717\n5149\n2043\n9059\n5277\n844\n5515\n6706\n3651\n9105\n7671\n2880\n3607\n6410\n2508\n8463\n2394\n1916\n1125\n5343\n3322\n5307\n4547\n1589\n8478\n8899\n2955\n8028\n4058\n2781\n8715\n1272\n4474\n4863\n4367\n49\n8844\n5605\n8671\n6743\n4281\n1874\n2626\n2516\n258\n5249\n6186\n7958\n5432\n3801\n6288\n4732\n9121\n7558\n6819\n7508\n584\n215\n5036\n4261\n8978\n5228\n647\n4657\n2591\n5931\n5088\n9204\n929\n4381\n5421\n2965\n5050\n6495\n5033\n4799\n959\n1232\n5811\n317\n7705\n3842\n2178\n7187\n1373\n7112\n2694\n8627\n8493\n3991\n7441\n6308\n6462\n3406\n7673\n8660\n2902\n752\n1025\n849\n7682\n6982\n6652\n3612\n298\n5148\n4873\n3414\n1693\n1458\n327\n2016\n5002\n6768\n7016\n5583\n3270\n8232\n7158\n7981\n4676\n4675\n2164\n8360\n6709\n8143\n365\n4062\n4527\n7928\n9009\n6228\n5818\n2533\n9305\n8887\n55\n2507\n8870\n6649\n5158\n76\n5595\n6693\n5306\n8666\n3020\n7527\n3082\n6304\n1591\n6145\n6868\n7205\n9107\n1165\n6773\n172\n1993\n4176\n8400\n4611\n7589\n5386\n6095\n6335\n1561\n5963\n7393\n3681\n2037\n4968\n7451\n3360\n7466\n8361\n4455\n4064\n5422\n1689\n3977\n7269\n362\n4178\n4145\n6127\n5162\n2399\n9225\n7068\n794\n1348\n7736\n444\n6081\n5298\n2026\n2543\n9087\n7425\n3730\n8468\n2641\n7529\n1720\n6377\n5851\n7956\n3150\n3785\n6485\n3611\n2869\n8510\n4775\n4463\n1251\n9124\n6873\n3391\n4118\n7051\n3213\n3668\n5347\n8452\n6289\n5840\n478\n3522\n453\n3376\n6190\n3342\n2237\n2870\n5178\n5567\n5952\n6919\n3005\n134\n3397\n8539\n6822\n5264\n3288\n5962\n8421\n6744\n8608\n4656\n1802\n4271\n1043\n8211\n2196\n5260\n3789\n7211\n7571\n7834\n5680\n2047\n5502\n3369\n3437\n3286\n5517\n3912\n1442\n6961\n2191\n2417\n9088\n5155\n6813\n4520\n7375\n1224\n811\n1891\n3748\n4123\n2789\n5305\n8419\n7248\n9237\n992\n4038\n4499\n2060\n850\n2669\n7612\n9290\n2526\n1287\n4160\n4633\n7125\n742\n4534\n2407\n4555\n8764\n4722\n7721\n3205\n6657\n1214\n3754\n6080\n4593\n3018\n8792\n2294\n4450\n7701\n127\n7069\n6243\n8025\n4010\n8632\n4715\n5284\n4574\n726\n4252\n4561\n7354\n299\n6088\n1090\n5012\n5684\n3489\n4888\n1584\n1969\n4846\n2915\n6804\n2775\n7306\n9306\n5231\n7740\n4283\n953\n6725\n8290\n1504\n1539\n8885\n138\n3764\n1256\n257\n335\n7060\n5986\n9323\n4740\n8994\n4140\n6807\n8254\n3963\n9297\n2102\n9207\n4910\n8709\n4411\n1672\n457\n8037\n4932\n3679\n2362\n8592\n495\n1608\n2155\n7411\n2881\n9244\n37\n6535\n8219\n4505\n8635\n1928\n8384\n2570\n8996\n7610\n2128\n8728\n6656\n6681\n2070\n176\n9062\n514\n1796\n4039\n6838\n2462\n230\n569\n5521\n4637\n4939\n4420\n672\n3807\n447\n1656\n3297\n8858\n2118\n6309\n1926\n481\n1509\n1228\n1787\n5978\n8678\n3951\n2929\n4980\n5039\n4713\n7002\n151\n5536\n8148\n3823\n2299\n142\n7067\n2372\n3761\n9\n2265\n5747\n2764\n724\n2913\n3151\n4525\n6370\n4247\n5494\n629\n3621\n7371\n1999\n6704\n3734\n2698\n4691\n6938\n8415\n6353\n6750\n9077\n2679\n2478\n7321\n6611\n4007\n5772\n6416\n2264\n8348\n2672\n6546\n754\n6934\n8546\n4404\n592\n4748\n6625\n7944\n2377\n6\n8929\n8275\n4524\n3660\n8710\n419\n6878\n8313\n7460\n8753\n2917\n6891\n6663\n4918\n7129\n396\n7256\n3500\n631\n5585\n8343\n2695\n6168\n6292\n3176\n5092\n5160\n3701\n9021\n7221\n1216\n1438\n3471\n2318\n8923\n6223\n2182\n7621\n8514\n9010\n8987\n1252\n1972
\n1872\n1715\n8205\n6463\n8138\n8989\n5661\n2890\n565\n2427\n8946\n1303\n3718\n6000\n3620\n5276\n9260\n1467\n6173\n7641\n7520\n5061\n4677\n5757\n4400\n2620\n2719\n8995\n2079\n1683\n8141\n7754\n5744\n2952\n7568\n7457\n5368\n1510\n1513\n3072\n1456\n9164\n3163\n3035\n6111\n5042\n7161\n1401\n1084\n8000\n8531\n5404\n6550\n8379\n9141\n8681\n7752\n6394\n7011\n3739\n8253\n978\n4771\n6024\n4828\n7959\n1649\n1727\n7073\n8349\n6952\n661\n7283\n3159\n2590\n3496\n8741\n3969\n2956\n4565\n920\n1830\n8558\n1930\n6677\n6825\n8256\n7454\n4710\n1768\n3753\n5292\n1397\n2733\n946\n6711\n3242\n4929\n5006\n3202\n2295\n2746\n1293\n2124\n5405\n4065\n818\n7464\n1820\n1312\n6994\n6920\n261\n987\n6120\n3109\n2986\n4338\n7774\n5122\n1364\n8969\n6712\n8161\n7595\n5940\n1566\n6419\n4432\n6047\n4749\n6076\n1161\n8217\n674\n8494\n3688\n2447\n4704\n969\n7477\n1160\n3243\n4979\n9288\n6860\n1662\n6171\n225\n5143\n313\n8327\n3385\n7626\n3103\n4401\n6794\n5600\n5043\n7664\n6830\n4452\n3980\n5875\n4635\n5756\n3329\n1751\n8108\n4817\n1989\n1237\n1893\n2848\n8875\n4981\n5417\n4134\n877\n6688\n3545\n4943\n5615\n2476\n1684\n7396\n1171\n3415\n3644\n340\n6630\n8284\n3256\n7240\n5371\n3405\n2108\n6360\n1734\n5612\n8638\n2343\n1103\n6809\n3055\n188\n8031\n3124\n3683\n4537\n988\n2297\n4893\n839\n4467\n5195\n4041\n6457\n4441\n6472\n4912\n6884\n5922\n7014\n1660\n1595\n6752\n4554\n1292\n2709\n3800\n1980\n8775\n6392\n6263\n7214\n5219\n282\n309\n6685\n6311\n4092\n18\n7570\n5543\n4081\n2515\n6278\n8690\n5294\n6184\n5215\n9130\n6720\n250\n7250\n639\n3567\n7841\n2636\n4067\n8446\n5703\n8609\n2586\n7695\n1253\n6701\n7930\n6317\n5921\n7719\n8501\n7312\n4110\n6219\n4552\n5059\n4088\n7975\n9132\n6054\n692\n3412\n4079\n6950\n5281\n8321\n3877\n7614\n4188\n2223\n239\n4745\n6875\n7096\n5571\n4403\n2640\n1845\n6690\n1825\n4157\n314\n4682\n8825\n8093\n7215\n6465\n99\n8077\n4206\n366\n1208\n6043\n4640\n5475\n4985\n1351\n3090\n5625\n7307\n8466\n2003\n8854\n218\n1500\n2293\n1847\n5032\n2147\n866\n3710\n2552\n1749\n6692\n3926\n4112\n6458\n735\n9171\n60\n9304\n6726\n2630\n2882\n1178\n1151\n4922\n4662\n173\n7233\n1776\n4113\n2423\n2425\n4343\n970\n6372\n1009\n6607\n3068\n8435\n6423\n3126\n4813\n1709\n1201\n7104\n5620\n3932\n3366\n5023\n5079\n627\n290\n779\n5572\n5233\n1392\n4975\n8534\n8210\n2269\n2475\n2562\n905\n4546\n267\n3536\n8538\n449\n101\n7367\n2722\n4605\n7356\n6781\n8537\n8697\n6820\n8340\n8926\n2349\n2259\n6545\n8100\n8395\n2258\n2911\n3946\n1406\n8683\n8296\n5579\n2177\n8264\n1425\n957\n3647\n515\n5342\n8363\n2449\n1001\n2937\n3452\n5574\n4319\n9184\n8381\n945\n6876\n600\n5714\n4871\n8532\n8856\n392\n2018\n369\n5711\n9230\n5304\n7266\n1681\n7829\n2309\n4683\n8938\n2255\n6159\n3207\n4651\n2029\n4341\n5106\n5794\n9024\n4712\n2434\n7151\n7359\n6431\n1290\n5918\n8705\n5554\n8876\n7415\n6290\n5373\n3805\n2950\n2331\n6772\n8997\n6576\n2307\n8515\n4033\n3428\n6487\n6595\n45\n5792\n333\n2383\n3388\n666\n460\n943\n364\n8223\n8221\n637\n6218\n4108\n5381\n4649\n5096\n1614\n8768\n5095\n3809\n5030\n984\n3538\n5120\n2498\n5222\n5613\n5486\n241\n5707\n9227\n4109\n7771\n728\n3671\n9327\n1230\n9270\n1070\n8565\n4769\n7056\n5654\n1793\n5956\n7883\n1362\n5479\n8769\n8821\n8320\n1901\n1994\n2461\n5552\n389\n2839\n6467\n2762\n4763\n3499\n1487\n7599\n4488\n3241\n8272\n1131\n4496\n7006\n7265\n4897\n2747\n6618\n5291\n4563\n1939\n6369\n8548\n5526\n9030\n5349\n8433\n1477\n4265\n9200\n3878\n462\n6846\n4806\n3519\n6798\n5464\n5179\n546\n6044\n8114\n7216\n6276\n1495\n494\n8146\n5434\n856\n8403\n8071\n5544\n3337\n1546\n2824\n1718\n6009\n2042\n251\n3330\n192\n3797\n394\n7814\n769
9\n4659\n4689\n4156\n7903\n9054\n7332\n7811\n1119\n5531\n6782\n5210\n8412\n2633\n7924\n4624\n8314\n5666\n3240\n2310\n4262\n8160\n4553\n8196\n2661\n7213\n7455\n7399\n870\n1227\n1226\n781\n937\n6343\n2578\n2892\n2792\n5696\n6865\n6455\n8312\n5193\n6026\n5251\n3787\n4460\n4687\n7923\n1140\n9106\n796\n2482\n9170\n8695\n2749\n6734\n4825\n114\n827\n390\n7611\n7484\n1249\n7727\n955\n579\n3629\n8915\n2958\n885\n7227\n1424\n4810\n4604\n1535\n774\n7518\n5428\n8233\n2645\n2167\n6484\n3855\n1502\n4861\n2333\n2973\n4829\n1906\n3966\n476\n9023\n6960\n3483\n2748\n5891\n8174\n7702\n8948\n5324\n4396\n1605\n2823\n7348\n7347\n5933\n310\n9082\n916\n203\n4239\n5976\n6200\n6435\n4425\n787\n1121\n6034\n39\n3104\n5961\n5507\n5785\n1463\n7339\n1575\n7801\n5445\n8283\n5951\n6995\n999\n5163\n6023\n6536\n5850\n3524\n3528\n4508\n6674\n2939\n8227\n4598\n7550\n8495\n8622\n1152\n4538\n1318\n739\n8202\n1552\n5236\n3576\n4699\n9238\n1879\n433\n5587\n1678\n8552\n6445\n7971\n6880\n7476\n7282\n7271\n6489\n8091\n9287\n7351\n1765\n5286\n6921\n542\n1762\n8553\n4987\n894\n3622\n7855\n92\n3131\n4811\n6517\n4510\n733\n4954\n1360\n5669\n2842\n8107\n5646\n5968\n1827\n7709\n8521\n5807\n5321\n9239\n5501\n3745\n4437\n1586\n5265\n7917\n1607\n6074\n7061\n1580\n8694\n8461\n4573\n618\n9173\n5243\n435\n8770\n2421\n7450\n3870\n8308\n2605\n2934\n9240\n6887\n4512\n1198\n7585\n7691\n7738\n2843\n8423\n6971\n7854\n86\n9128\n4298\n622\n6579\n2203\n7716\n1265\n1174\n7380\n623\n8936\n4306\n8082\n4312\n8661\n5753\n7243\n2768\n8155\n85\n4143\n3047\n8479\n7809\n2833\n5555\n7578\n1637\n1936\n8130\n5549\n8062\n7143\n5522\n8966\n5614\n8105\n8719\n7655\n7502\n8268\n5760\n6695\n5565\n7615\n9226\n4870\n4507\n3160\n4835\n1598\n4422\n5248\n7867\n1078\n5015\n6660\n1676\n6391\n5351\n7184\n6280\n5936\n6124\n1327\n2906\n269\n8292\n8809\n5167\n8142\n8204\n2713\n1910\n2930\n2494\n5592\n7384\n7726\n5727\n1735\n5710\n5518\n2491\n1410\n4989\n5183\n8777\n6562\n4947\n3692\n384\n1097\n5209\n3723\n7272\n6895\n2459\n543\n8621\n5394\n6211\n2074\n1511\n2524\n7776\n5055\n7191\n6207\n7922\n281\n8436\n2918\n3141\n4800\n6323\n7631\n8903\n3735\n5301\n3975\n2800\n7963\n105\n1920\n7391\n4909\n1754\n4816\n5145\n5139\n5268\n9317\n8631\n4346\n7318\n136\n3993\n1220\n2151\n308\n7483\n3071\n1339\n3777\n8191\n5378\n7087\n1056\n7465\n5608\n6564\n2754\n2687\n1596\n5376\n1512\n566\n6382\n1757\n8035\n2296\n4264\n1053\n4716\n8518\n254\n6253\n7132\n8557\n3490\n9267\n5473\n2412\n7539\n7136\n6670\n891\n1323\n1217\n2879\n9118\n1259\n2317\n7033\n2467\n6665\n6244\n2180\n2140\n7098\n4150\n547\n4307\n1725\n2737\n8549\n8195\n1245\n6286\n935\n1756\n1701\n1626\n7379\n3492\n3717\n5802\n2817\n1234\n1005\n4101\n21\n2576\n4650\n3381\n1030\n2844\n1641\n936\n2729\n6469\n8913\n5994\n341\n4083\n5152\n3380\n8739\n6615\n3829\n164\n7927\n4779\n4216\n8528\n3641\n4606\n2769\n6970\n8850\n4971\n5489\n2008\n4564\n8682\n7784\n5768\n9252\n901\n438\n3577\n2765\n5904\n664\n3348\n6298\n3602\n2502\n8617\n7684\n5805\n4126\n2451\n6906\n7234\n9243\n3778\n1087\n9053\n5026\n2504\n5283\n2820\n4242\n797\n3925\n1383\n8750\n7861\n1403\n6973\n7617\n3065\n5395\n4347\n8144\n2688\n6527\n8597\n8673\n7327\n6331\n1422\n7115\n244\n7013\n2092\n54\n7970\n5742\n4823\n8588\n2938\n3060\n4149\n2375\n6616\n8803\n1555\n4369\n1380\n3011\n6144\n3367\n7370\n1995\n2602\n985\n8785\n8480\n9125\n1927\n3269\n3771\n1032\n7378\n5726\n2731\n2020\n6727\n8793\n523\n6036\n58\n7993\n5512\n5049\n2721\n8482\n673\n7937\n1168\n4472\n8247\n7287\n9017\n6421\n9190\n3584\n1819\n1792\n2810\n6033\n6749\n7677\n981\n7160\n4726\n1886\n7845\n6975\n7422\n4613\n4501\n2569\n4263\n3206
\n4133\n2420\n3706\n8894\n2263\n5774\n4925\n9180\n8888\n2945\n2091\n1873\n6303\n729\n2156\n3267\n1860\n6597\n4930\n5253\n938\n580\n5825\n166\n8198\n6892\n8701\n74\n7094\n8954\n3156\n6140\n4279\n2229\n5466\n8413\n7105\n8192\n2632\n7638\n9308\n8530\n832\n4643\n2201\n3268\n4322\n6510\n2967\n262\n403\n1258\n8828\n5838\n8529\n2788\n237\n3838\n1291\n4056\n5628\n7281\n6476\n7935\n2850\n6041\n2013\n4016\n4576\n5312\n6827\n6321\n8669\n830\n1519\n2750\n6106\n6993\n6235\n5899\n7313\n5331\n4371\n7086\n8600\n2660\n5409\n3465\n5499\n6231\n5745\n1801\n5337\n4468\n1451\n4192\n1275\n1114\n4960\n8860\n3900\n6468\n1505\n8868\n5588\n3858\n1947\n2565\n1472\n243\n6583\n7085\n5374\n4291\n4426\n492\n2311\n8305\n3662\n8780\n7488\n3890\n5005\n4680\n7358\n9116\n4397\n5999\n7902\n83\n3566\n2134\n8942\n4767\n6601\n1745\n5736\n5254\n8017\n4015\n7690\n3798\n8947\n1067\n7945\n590\n2547\n2535\n64\n2053\n5359\n2493\n6669\n7473\n6147\n7175\n6983\n5196\n745\n2657\n3497\n697\n3161\n7528\n2239\n5991\n3201\n7681\n5189\n2959\n2044\n8917\n2046\n6313\n6333\n5318\n4301\n2213\n2933\n4121\n3903\n4392\n7889\n5323\n1055\n707\n3857\n518\n6078\n5134\n6645\n9138\n1592\n680\n4446\n7943\n3461\n3887\n5601\n2321\n6621\n558\n4914\n913\n5637\n6453\n8511\n4531\n1218\n5508\n2603\n6802\n8426\n8297\n2947\n5971\n6552\n5262\n5935\n782\n7435\n8357\n6139\n1136\n5008\n3585\n3627\n5356\n2997\n2347\n881\n4849\n8808\n8351\n4017\n2010\n6836\n4391\n3630\n3712\n2969\n5238\n4333\n2301\n4406\n1236\n1050\n1864\n8408\n8251\n8795\n5879\n3365\n7481\n8206\n2452\n1767\n8859\n124\n3948\n4444\n8962\n4438\n5003\n8428\n3105\n5117\n1095\n8755\n7881\n3097\n4877\n155\n1917\n2455\n6042\n337\n6724\n6045\n8483\n7135\n2242\n4566\n1679\n834\n1746\n795\n3548\n2314\n2036\n4046\n9129\n7084\n5091\n2413\n8170\n5775\n1817\n529\n813\n2916\n5130\n126\n1243\n2370\n4831\n9122\n3010\n5104\n2613\n6761\n5340\n3512\n6283\n2346\n653\n6121\n2615\n7421\n1869\n1002\n8834\n2991\n8992\n632\n1093\n4543\n645\n2352\n4115\n373\n1483\n6966\n8598\n3896\n3434\n5987\n8318\n1815\n1223\n1548\n6885\n5073\n6330\n2573\n1369\n4095\n1431\n2185\n5766\n1301\n7258\n8048\n7598\n2847\n1996\n2378\n8561\n743\n6381\n271\n1956\n7439\n7134\n6636\n5804\n1858\n6214\n4730\n8536\n1203\n3118\n9202\n1875\n5885\n168\n5898\n4014\n4186\n3346\n3041\n5558\n9296\n8157\n4339\n3234\n2604\n6803\n5387\n5590\n125\n2173\n8012\n8005\n4858\n651\n372\n378\n8366\n6299\n1449\n7793\n8541\n3235\n8043\n3086\n3983\n6949\n4690\n6494\n8406\n7408\n350\n7021\n8224\n7044\n7662\n6697\n7679\n169\n528\n7029\n2790\n7432\n7602\n8333\n1582\n1378\n482\n9279\n8015\n4514\n3542\n628\n5053\n6699\n6227\n2094\n1621\n847\n3598\n2728\n7276\n6620\n8345\n4278\n4059\n9058\n4173\n8134\n1997\n3182\n3224\n8129\n5109\n4494\n189\n7640\n180\n2963\n1123\n5593\n3263\n4185\n7140\n8990\n6320\n9275\n4601\n4854\n5907\n1135\n8083\n5964\n7788\n1992\n8069\n9174\n6160\n35\n8572\n2865\n46\n3952\n6418\n2510\n5783\n3816\n2715\n3930\n2548\n5204\n708\n7756\n3825\n777\n3550\n3929\n5440\n6751\n7764\n4070\n7331\n3743\n9131\n9206\n3828\n23\n41\n4197\n234\n5723\n7622\n8832\n2169\n5599\n2976\n5266\n1967\n90\n822\n2538\n3169\n6771\n7442\n498\n4967\n5580\n7581\n7680\n4728\n1115\n1064\n3106\n6266\n4415\n9294\n5597\n7059\n197\n7218\n6948\n5690\n1653\n4485\n4019\n3370\n919\n1330\n6085\n2078\n5427\n4545\n2435\n8862\n3633\n8145\n5221\n1388\n5913\n8140\n7471\n7156\n6989\n1190\n6832\n2830\n4387\n3454\n7469\n2910\n4526\n5187\n2410\n9223\n4681\n1300\n7407\n6523\n3616\n6894\n7253\n4515\n5874\n5448\n7137\n7957\n1130\n3092\n7054\n3516\n5797\n1000\n4336\n9090\n6403\n7255\n8919\n6522\n6760\n8898\n4803\n374\n8686\n3
985\n7045\n3475\n6065\n7991\n1409\n7851\n6671\n6090\n5826\n7857\n1155\n8964\n1117\n7072\n6064\n2497\n4899\n2397\n3189\n2369\n5027\n5754\n8950\n5617\n8391\n914\n6264\n279\n6174\n5184\n3733\n5278\n2924\n567\n7994\n352\n8084\n2148\n2723\n3359\n70\n1870\n7708\n220\n3994\n9013\n3191\n9220\n4155\n5717\n1110\n2198\n785\n5325\n4770\n4250\n52\n4634\n9037\n601\n8036\n7996\n2483\n7232\n8675\n8836\n1279\n5346\n7676\n6104\n1515\n4603\n5607\n5144\n2628\n68\n440\n3586\n3083\n4830\n4378\n7762\n1134\n4542\n7850\n6296\n4011\n8751\n4776\n7954\n7102\n5697\n2032\n5729\n5017\n6962\n2051\n1092\n9019\n2759\n8581\n8618\n912\n2382\n4892\n8447\n8176\n5491\n5695\n5504\n1060\n578\n4320\n2379\n7649\n8416\n1613\n5344\n7512\n7865\n3037\n6689\n6557\n1569\n5955\n3707\n9168\n8566\n1775\n5950\n6943\n7804\n434\n6179\n1142\n7947\n6456\n6291\n5789\n6538\n9134\n3049\n5075\n5161\n1623\n948\n6302\n6063\n7516\n117\n506\n3302\n7146\n355\n1081\n2827\n1496\n2574\n6167\n3183\n4287\n5482\n7319\n7277\n3860\n3443\n3298\n8364\n3826\n7254\n2360\n5093\n7039\n6325\n2567\n4443\n559\n2625\n4228\n8967\n6405\n1674\n3936\n4475\n8556\n8585\n896\n3713\n6259\n4297\n6718\n2392\n2279\n4927\n1283\n2860\n7665\n663\n596\n6293\n6805\n2811\n7383\n8306\n8330\n3153\n2153\n2618\n2441\n3615\n8092\n552\n5285\n8124\n9247\n5530\n8175\n6242\n5660\n3433\n1610\n1832\n3892\n3862\n640\n2127\n4196\n3495\n7217\n5206\n4836\n7759\n800\n4227\n3699\n9055\n5665\n6826\n7463\n9065\n4720\n5069\n3453\n3358\n6532\n5970\n7921\n4087\n1547\n3424\n8040\n7995\n6787\n9069\n8716\n2561\n8199\n1479\n2767\n7818\n7145\n604\n7597\n4896\n9281\n4666\n185\n7978\n3059\n9221\n2135\n1800\n2974\n1529\n5948\n446\n4436\n8672\n3508\n6208\n5673\n6998\n5203\n278\n7041\n9110\n5853\n8121\n1764\n3046\n6575\n4738\n2228\n7761\n9322\n7019\n6931\n6383\n6762\n283\n3935\n6785\n471\n8214\n231\n3844\n5746\n2011\n7209\n336\n6433\n756\n9167\n6741\n3345\n7685\n4018\n6682\n9147\n4790\n5836\n5906\n676\n3964\n6362\n3510\n7510\n2308\n1806\n5917\n3387\n5423\n8900\n147\n3780\n1696\n9111\n6783\n6497\n4104\n3987\n260\n4616\n2121\n9283\n1400\n4670\n2735\n2096\n6521\n1423\n4523\n2243\n6667\n6990\n3944\n6915\n6763\n404\n2691\n1015\n7092\n7562\n8624\n2291\n5934\n5503\n2326\n2960\n842\n1963\n5568\n9050\n3806\n439\n9154\n6055\n6451\n7633\n688\n4354\n8890\n2813\n2872\n8102\n6609\n1497\n8389\n6449\n1682\n3594\n5103\n5812\n863\n3054\n8079\n2260\n2027\n3091\n7687\n6703\n3557\n2019\n8427\n2799\n8182\n6641\n3168\n2284\n1934\n6507\n1658\n3811\n1774\n7897\n2238\n2943\n191\n3869\n3188\n414\n8072\n7838\n1382\n4962\n5363\n4042\n1983\n4077\n7429\n4044\n1109\n1295\n386\n5481\n3927\n311\n"
  },
  {
    "path": "lib/train/data_specs/got10k_vot_val_split.txt",
    "content": "1349\n5878\n562\n2202\n8904\n1501\n8654\n2975\n2689\n3680\n5180\n1900\n7707\n4723\n8912\n4029\n3579\n869\n2888\n8657\n6599\n741\n4288\n2244\n7357\n5704\n8791\n208\n4805\n8526\n4887\n8871\n7468\n3343\n886\n7794\n2646\n6454\n6101\n7885\n7744\n1297\n4119\n4856\n122\n2286\n2925\n5131\n5843\n5320\n5626\n540\n1862\n7335\n699\n7760\n9198\n3259\n7345\n8698\n1280\n6479\n3100\n3988\n1322\n5737\n1268\n3257\n6791\n3326\n4815\n7644\n1082\n2826\n6821\n8984\n2553\n5290\n5909\n4762\n8096\n8066\n4325\n6666\n7193\n7114\n8060\n7872\n6788\n3544\n5460\n3507\n2509\n6626\n3429\n5542\n4220\n2968\n5271\n3863\n1868\n5581\n2012\n6270\n8038\n4050\n121\n2845\n1565\n1998\n2275\n5524\n6068\n7624\n4913\n9277\n1506\n803\n8848\n5925\n2450\n2072\n8190\n4753\n9162\n825\n7303\n9028\n2088\n8516\n1556\n5937\n7847\n2367\n7549\n1049\n1521\n4739\n3931\n8958\n4130\n7876\n897\n5985\n7346\n7537\n111\n3700\n1126\n7896\n3419\n1051\n5720\n1068\n3458\n146\n291\n6256\n5514\n2857\n4580\n6239\n6525\n8717\n391\n4841\n6676\n4360\n4211\n73\n1675\n1987\n4025\n1321\n662\n8265\n6424\n2758\n7765\n7656\n3209\n7497\n7600\n9039\n7697\n5177\n2983\n5622\n9295\n3284\n964\n2024\n1269\n4551\n8088\n5659\n2212\n5199\n5551\n8607\n5573\n5200\n7951\n8429\n7720\n5919\n1273\n3529\n6707\n9176\n7552\n3255\n5649\n6110\n1137\n9272\n788\n5786\n5186\n2667\n7630\n3953\n1828\n8827\n6471\n7815\n467\n6387\n3195\n6238\n6508\n2373\n5983\n4931\n2948\n921\n2438\n517\n3949\n2137\n3216\n5683\n3695\n1719\n4837\n9159\n6981\n860\n7410\n5497\n1770\n5557\n8810\n5194\n4857\n9100\n6329\n2609\n1925\n3686\n9041\n4924\n349\n9187\n3393\n3661\n7120\n6858\n4587\n3831\n3130\n5060\n6486\n8023\n824\n1354\n8861\n5534\n7292\n4389\n6029\n6226\n3505\n4326\n7445\n581\n6089\n3450\n7324\n6516\n6775\n1207\n4575\n5135\n3918\n9020\n3473\n3898\n7812\n6571\n6757\n6639\n2557\n1206\n6148\n7325\n8790\n4938\n7026\n4383\n8041\n1250\n7267\n1952\n7561\n8811\n4941\n8373\n4848\n6602\n8355\n8104\n5214\n4330\n3181\n3422\n456\n1782\n3408\n6530\n719\n7587\n3058\n740\n4207\n5336\n2798\n2473\n4221\n1493\n3281\n171\n9157\n9139\n7766\n3324\n5308\n3708\n2431\n8080\n2093\n2585\n406\n7040\n5064\n5247\n4758\n6512\n4257\n4935\n2705\n2572\n3436\n8513\n1385\n2637\n7091\n2761\n6007\n6694\n2422\n4917\n2186\n6898\n1390\n6965\n7698\n2002\n2692\n7365\n7373\n4091\n947\n3962\n8692\n1788\n6862\n6856\n1950\n1914\n5658\n3635\n1620\n4780\n2580\n1454\n2786\n687\n7238\n3648\n6452\n1197\n3190\n5900\n9043\n4958\n1821\n1187\n1153\n7169\n7350\n5674\n6254\n3025\n6680\n1690\n2899\n3893\n1577\n5728\n9189\n5077\n3560\n2179\n5462\n1402\n3654\n1376\n5506\n1179\n5647\n4686\n8644\n1352\n2855\n6079\n2254\n2668\n2287\n2457\n3418\n7264\n677\n3074\n2655\n1042\n2210\n4504\n8309\n4209\n4280\n3258\n2977\n84\n4705\n1244\n3511\n6355\n8813\n3228\n9266\n1122\n613\n732\n5202\n8425\n2638\n6470\n3541\n8132\n2063\n5129\n2818\n7949\n8090\n4465\n7295\n5239\n7009\n9271\n8563\n2832\n952\n8136\n6776\n3565\n5188\n7288\n6999\n285\n5487\n7608\n8584\n2071\n7868\n2804\n3655\n6847\n3276\n4272\n3910\n1574\n4559\n7580\n5014\n8183\n6386\n7574\n356\n4937\n2487\n9315\n7572\n3040\n671\n2682\n8626\n3868\n387\n8679\n4074\n1481\n3527\n3595\n4754\n2453\n1579\n4638\n9123\n1829\n3009\n3691\n763\n4875\n3572\n4273\n2777\n6032\n4793\n233\n7147\n996\n3199\n8835\n3517\n7210\n6125\n6037\n3684\n3915\n3180\n7043\n4458\n2889\n57\n7667\n8375\n1434\n7493\n4733\n5827\n2111\n1313\n7986\n3075\n2614\n7547\n4977\n8527\n3212\n7300\n5842\n5244\n3291\n597\n1007\n2030\n227\n3830\n5540\n247\n5643\n9333\n1958\n1371\n5220\n7926\n2927\n1516\n7130\n193\n1522\n6165\n6923\n3794\n4223\n5535\n2472\n8630\
n3971\n9101\n2946\n4609\n7291\n8542\n6501\n7548\n4557\n6274\n5226\n7309\n1317\n6275\n1099\n4191\n7270\n5392\n2316\n3819\n1670\n8045\n4807\n8864\n2391\n5908\n8338\n8218\n6400\n9193\n3165\n843\n6613\n6941\n5629\n7557\n4321\n3702\n681\n1159\n4665\n5959\n1697\n5509\n8774\n7389\n3832\n3751\n8637\n1680\n6841\n703\n684\n8293\n3682\n5733\n4818\n3231\n5562\n9001\n3889\n7024\n2519\n1713\n3287\n219\n8776\n2289\n7212\n4832\n4684\n4617\n4237\n2649\n8185\n6326\n3568\n551\n1426\n8869\n312\n2905\n4165\n8248\n2558\n900\n1044\n8613\n7743\n5437\n7604\n3122\n5708\n8649\n2878\n4695\n4491\n7533\n5223\n7711\n1844\n5751\n3008\n8055\n4636\n61\n198\n2271\n5698\n4596\n4500\n5709\n5819\n7972\n2992\n1643\n1048\n6281\n8886\n360\n4198\n6814\n3960\n2606\n7001\n5888\n450\n7133\n7015\n7034\n5153\n8920\n5066\n469\n1302\n8816\n463\n8651\n5869\n6582\n5578\n1231\n9274\n7260\n7751\n8052\n6799\n2089\n2342\n8451\n3260\n5550\n7795\n2288\n1205\n40\n496\n8367\n7836\n5973\n3908\n5242\n5062\n2706\n997\n5419\n9201\n1965\n6062\n3050\n5302\n8735\n358\n2398\n7470\n1644\n8179\n7047\n1549\n5414\n2539\n7381\n589\n8166\n8505\n6035\n3956\n4540\n6721\n8074\n1062\n2384\n2531\n7159\n3902\n4584\n2554\n264\n8720\n2849\n4916\n5218\n7202\n883\n4560\n1677\n4317\n7863\n4509\n6577\n2903\n1452\n1416\n5369\n473\n6233\n6359\n5992\n4934\n8059\n6834\n4907\n3320\n8267\n8280\n2066\n2402\n1485\n3772\n3732\n4764\n9126\n3575\n5564\n5641\n1884\n2330\n1804\n344\n698\n3089\n1532\n4454\n761\n8094\n3432\n6811\n8722\n8826\n3222\n8614\n2901\n7003\n652\n8663\n4266\n413\n810\n75\n3334\n4905\n6438\n4756\n5137\n6528\n6534\n6988\n6177\n8533\n889\n5384\n7201\n5132\n7802\n6864\n3973\n873\n4840\n1482\n8376\n3769\n5858\n6675\n4286\n2593\n5863\n4353\n7817\n7540\n4999\n4838\n2303\n7913\n1508\n7755\n2784\n4964\n3431\n6209\n3755\n6399\n3954\n455\n5416\n7591\n245\n140\n9210\n4084\n967\n7798\n6795\n7095\n6733\n3861\n9264\n1045\n755\n8042\n7074\n7778\n6415\n4724\n6450\n2049\n1307\n3485\n1790\n7869\n3282\n6907\n3920\n2868\n5801\n5632\n5009\n3955\n7517\n5128\n3417\n3019\n1784\n2312\n2753\n6976\n342\n8266\n1849\n2273\n5037\n7880\n3793\n7401\n5412\n8279\n1257\n3670\n9049\n3266\n8955\n6519\n8916\n2858\n694\n5650\n4669\n1785\n3533\n2704\n8603\n3726\n6668\n497\n6815\n6157\n6646\n6964\n8097\n5645\n8481\n8215\n3775\n2542\n7514\n5699\n3518\n3740\n1404\n8981\n4086\n6397\n4204\n6899\n682\n6589\n4340\n7424\n9208\n6504\n4409\n1\n145\n1882\n4620\n2634\n4992\n5453\n3377\n7875\n530\n1235\n7605\n504\n1771\n8489\n345\n7353\n7797\n7174\n5914\n2871\n5721\n6067\n3582\n5467\n6234\n691\n8758\n2122\n1213\n1492\n1437\n2187\n1266\n2395\n7278\n8491\n5256\n1554\n8163\n5966\n7128\n7904\n1691\n6272\n3996\n1706\n1334\n1316\n6478\n6935\n1518\n6700\n8703\n8744\n8152\n8778\n5367\n4218\n9007\n6312\n606\n7565\n5293\n2891\n675\n2120\n826\n7008\n5705\n7748\n8010\n1498\n5330\n5472\n2215\n7627\n3016\n6588\n1850\n4128\n8569\n6987\n148\n8151\n8789\n7907\n8596\n715\n9060\n3872\n1750\n5889\n4047\n5960\n3120\n3449\n1421\n1102\n3333\n9197\n8796\n8123\n8007\n2028\n8404\n1945\n1985\n8109\n5380\n3504\n6739\n4180\n5835\n4243\n25\n4002\n1976\n158\n5181\n4885\n8985\n11\n6425\n5926\n7062\n5083\n8394\n4259\n5844\n1990\n3942\n5532\n2220\n28\n5957\n149\n6748\n3559\n7647\n2566\n1359\n5259\n7010\n554\n6005\n8172\n8125\n1350\n9051\n1973\n1386\n159\n7007\n3220\n1846\n3093\n4445\n2056\n8370\n3211\n4384\n2231\n273\n642\n5311\n265\n226\n9012\n7879\n118\n7109\n7251\n1760\n8667\n2876\n7162\n3552\n6901\n6779\n5021\n6524\n4957\n3114\n4544\n441\n1848\n2136\n2458\n8662\n1127\n5541\n3026\n1080\n6780\n2224\n8259\n1073\n9000\n7244\n7977\n500\n4435\n7376\n797
9\n1435\n9291\n7704\n3521\n210\n6269\n8570\n3285\n8039\n3546\n6203\n1183\n6107\n4147\n2234\n7185\n3192\n7155\n2001\n7777\n876\n944\n908\n7791\n6784\n65\n9172\n5675\n3886\n7891\n2978\n1008\n5630\n591\n5067\n1139\n577\n9015\n574\n8137\n7786\n5765\n4900\n4090\n7842\n"
  },
  {
    "path": "lib/train/data_specs/lasot_train_split.txt",
    "content": "airplane-10\nairplane-11\nairplane-12\nairplane-14\nairplane-16\nairplane-17\nairplane-18\nairplane-19\nairplane-2\nairplane-20\nairplane-3\nairplane-4\nairplane-5\nairplane-6\nairplane-7\nairplane-8\nbasketball-10\nbasketball-12\nbasketball-13\nbasketball-14\nbasketball-15\nbasketball-16\nbasketball-17\nbasketball-18\nbasketball-19\nbasketball-2\nbasketball-20\nbasketball-3\nbasketball-4\nbasketball-5\nbasketball-8\nbasketball-9\nbear-1\nbear-10\nbear-11\nbear-12\nbear-13\nbear-14\nbear-15\nbear-16\nbear-18\nbear-19\nbear-20\nbear-3\nbear-5\nbear-7\nbear-8\nbear-9\nbicycle-1\nbicycle-10\nbicycle-11\nbicycle-12\nbicycle-13\nbicycle-14\nbicycle-15\nbicycle-16\nbicycle-17\nbicycle-19\nbicycle-20\nbicycle-3\nbicycle-4\nbicycle-5\nbicycle-6\nbicycle-8\nbird-1\nbird-10\nbird-11\nbird-12\nbird-13\nbird-14\nbird-16\nbird-18\nbird-19\nbird-20\nbird-4\nbird-5\nbird-6\nbird-7\nbird-8\nbird-9\nboat-1\nboat-10\nboat-11\nboat-13\nboat-14\nboat-15\nboat-16\nboat-18\nboat-19\nboat-2\nboat-20\nboat-5\nboat-6\nboat-7\nboat-8\nboat-9\nbook-1\nbook-12\nbook-13\nbook-14\nbook-15\nbook-16\nbook-17\nbook-18\nbook-2\nbook-20\nbook-4\nbook-5\nbook-6\nbook-7\nbook-8\nbook-9\nbottle-10\nbottle-11\nbottle-13\nbottle-15\nbottle-16\nbottle-17\nbottle-19\nbottle-2\nbottle-20\nbottle-3\nbottle-4\nbottle-5\nbottle-6\nbottle-7\nbottle-8\nbottle-9\nbus-1\nbus-10\nbus-11\nbus-12\nbus-13\nbus-14\nbus-15\nbus-16\nbus-18\nbus-20\nbus-3\nbus-4\nbus-6\nbus-7\nbus-8\nbus-9\ncar-1\ncar-10\ncar-11\ncar-12\ncar-13\ncar-14\ncar-15\ncar-16\ncar-18\ncar-19\ncar-20\ncar-3\ncar-4\ncar-5\ncar-7\ncar-8\ncat-10\ncat-11\ncat-12\ncat-13\ncat-14\ncat-15\ncat-16\ncat-17\ncat-19\ncat-2\ncat-4\ncat-5\ncat-6\ncat-7\ncat-8\ncat-9\ncattle-1\ncattle-10\ncattle-11\ncattle-14\ncattle-15\ncattle-16\ncattle-17\ncattle-18\ncattle-19\ncattle-20\ncattle-3\ncattle-4\ncattle-5\ncattle-6\ncattle-8\ncattle-9\nchameleon-1\nchameleon-10\nchameleon-12\nchameleon-13\nchameleon-14\nchameleon-15\nchameleon-16\nchameleon-17\nchameleon-18\nchameleon-19\nchameleon-2\nchameleon-4\nchameleon-5\nchameleon-7\nchameleon-8\nchameleon-9\ncoin-1\ncoin-10\ncoin-11\ncoin-12\ncoin-13\ncoin-14\ncoin-15\ncoin-16\ncoin-17\ncoin-19\ncoin-2\ncoin-20\ncoin-4\ncoin-5\ncoin-8\ncoin-9\ncrab-1\ncrab-10\ncrab-11\ncrab-13\ncrab-14\ncrab-15\ncrab-16\ncrab-17\ncrab-19\ncrab-2\ncrab-20\ncrab-4\ncrab-5\ncrab-7\ncrab-8\ncrab-9\ncrocodile-1\ncrocodile-11\ncrocodile-12\ncrocodile-13\ncrocodile-15\ncrocodile-16\ncrocodile-17\ncrocodile-18\ncrocodile-19\ncrocodile-2\ncrocodile-20\ncrocodile-5\ncrocodile-6\ncrocodile-7\ncrocodile-8\ncrocodile-9\ncup-10\ncup-11\ncup-12\ncup-13\ncup-14\ncup-15\ncup-16\ncup-18\ncup-19\ncup-2\ncup-20\ncup-3\ncup-5\ncup-6\ncup-8\ncup-9\ndeer-1\ndeer-11\ndeer-12\ndeer-13\ndeer-15\ndeer-16\ndeer-17\ndeer-18\ndeer-19\ndeer-2\ndeer-20\ndeer-3\ndeer-5\ndeer-6\ndeer-7\ndeer-9\ndog-10\ndog-11\ndog-12\ndog-13\ndog-14\ndog-16\ndog-17\ndog-18\ndog-2\ndog-20\ndog-3\ndog-4\ndog-5\ndog-6\ndog-8\ndog-9\ndrone-1\ndrone-10\ndrone-11\ndrone-12\ndrone-14\ndrone-16\ndrone-17\ndrone-18\ndrone-19\ndrone-20\ndrone-3\ndrone-4\ndrone-5\ndrone-6\ndrone-8\ndrone-9\nelectricfan-11\nelectricfan-12\nelectricfan-13\nelectricfan-14\nelectricfan-15\nelectricfan-16\nelectricfan-17\nelectricfan-19\nelectricfan-2\nelectricfan-3\nelectricfan-4\nelectricfan-5\nelectricfan-6\nelectricfan-7\nelectricfan-8\nelectricfan-9\nelephant-10\nelephant-11\nelephant-13\nelephant-14\nelephant-15\nelephant-17\nelephant-19\nelephant-2\nelephant-20\nelephant-3\nelephant-4\nelephant-5\nelephant-6\nelephant-7\nele
phant-8\nelephant-9\nflag-1\nflag-10\nflag-11\nflag-12\nflag-13\nflag-14\nflag-15\nflag-16\nflag-17\nflag-18\nflag-19\nflag-20\nflag-4\nflag-6\nflag-7\nflag-8\nfox-1\nfox-10\nfox-11\nfox-12\nfox-13\nfox-14\nfox-15\nfox-16\nfox-17\nfox-18\nfox-19\nfox-4\nfox-6\nfox-7\nfox-8\nfox-9\nfrog-1\nfrog-10\nfrog-11\nfrog-12\nfrog-13\nfrog-14\nfrog-15\nfrog-16\nfrog-17\nfrog-18\nfrog-19\nfrog-2\nfrog-5\nfrog-6\nfrog-7\nfrog-8\ngametarget-10\ngametarget-11\ngametarget-12\ngametarget-14\ngametarget-15\ngametarget-16\ngametarget-17\ngametarget-18\ngametarget-19\ngametarget-20\ngametarget-3\ngametarget-4\ngametarget-5\ngametarget-6\ngametarget-8\ngametarget-9\ngecko-10\ngecko-11\ngecko-12\ngecko-13\ngecko-14\ngecko-15\ngecko-17\ngecko-18\ngecko-2\ngecko-20\ngecko-3\ngecko-4\ngecko-6\ngecko-7\ngecko-8\ngecko-9\ngiraffe-1\ngiraffe-11\ngiraffe-12\ngiraffe-14\ngiraffe-16\ngiraffe-17\ngiraffe-18\ngiraffe-19\ngiraffe-20\ngiraffe-3\ngiraffe-4\ngiraffe-5\ngiraffe-6\ngiraffe-7\ngiraffe-8\ngiraffe-9\ngoldfish-1\ngoldfish-11\ngoldfish-12\ngoldfish-13\ngoldfish-14\ngoldfish-15\ngoldfish-16\ngoldfish-17\ngoldfish-18\ngoldfish-19\ngoldfish-2\ngoldfish-20\ngoldfish-4\ngoldfish-5\ngoldfish-6\ngoldfish-9\ngorilla-1\ngorilla-10\ngorilla-11\ngorilla-12\ngorilla-14\ngorilla-15\ngorilla-16\ngorilla-17\ngorilla-18\ngorilla-19\ngorilla-2\ngorilla-20\ngorilla-3\ngorilla-5\ngorilla-7\ngorilla-8\nguitar-1\nguitar-11\nguitar-12\nguitar-13\nguitar-14\nguitar-15\nguitar-17\nguitar-18\nguitar-19\nguitar-2\nguitar-20\nguitar-4\nguitar-5\nguitar-6\nguitar-7\nguitar-9\nhand-1\nhand-10\nhand-11\nhand-12\nhand-13\nhand-14\nhand-15\nhand-17\nhand-18\nhand-19\nhand-20\nhand-4\nhand-5\nhand-6\nhand-7\nhand-8\nhat-10\nhat-11\nhat-12\nhat-13\nhat-14\nhat-15\nhat-16\nhat-17\nhat-19\nhat-20\nhat-3\nhat-4\nhat-6\nhat-7\nhat-8\nhat-9\nhelmet-1\nhelmet-10\nhelmet-12\nhelmet-14\nhelmet-15\nhelmet-16\nhelmet-17\nhelmet-18\nhelmet-2\nhelmet-20\nhelmet-3\nhelmet-4\nhelmet-6\nhelmet-7\nhelmet-8\nhelmet-9\nhippo-10\nhippo-11\nhippo-12\nhippo-13\nhippo-14\nhippo-15\nhippo-16\nhippo-17\nhippo-18\nhippo-19\nhippo-2\nhippo-3\nhippo-4\nhippo-5\nhippo-6\nhippo-8\nhorse-10\nhorse-11\nhorse-13\nhorse-14\nhorse-16\nhorse-17\nhorse-18\nhorse-19\nhorse-2\nhorse-20\nhorse-3\nhorse-5\nhorse-6\nhorse-7\nhorse-8\nhorse-9\nkangaroo-1\nkangaroo-10\nkangaroo-12\nkangaroo-13\nkangaroo-15\nkangaroo-16\nkangaroo-17\nkangaroo-18\nkangaroo-19\nkangaroo-20\nkangaroo-3\nkangaroo-4\nkangaroo-6\nkangaroo-7\nkangaroo-8\nkangaroo-9\nkite-1\nkite-11\nkite-12\nkite-13\nkite-14\nkite-16\nkite-17\nkite-18\nkite-19\nkite-2\nkite-20\nkite-3\nkite-5\nkite-7\nkite-8\nkite-9\nleopard-10\nleopard-11\nleopard-12\nleopard-13\nleopard-14\nleopard-15\nleopard-17\nleopard-18\nleopard-19\nleopard-2\nleopard-3\nleopard-4\nleopard-5\nleopard-6\nleopard-8\nleopard-9\nlicenseplate-1\nlicenseplate-10\nlicenseplate-11\nlicenseplate-14\nlicenseplate-16\nlicenseplate-17\nlicenseplate-18\nlicenseplate-19\nlicenseplate-2\nlicenseplate-20\nlicenseplate-3\nlicenseplate-4\nlicenseplate-5\nlicenseplate-7\nlicenseplate-8\nlicenseplate-9\nlion-10\nlion-11\nlion-13\nlion-14\nlion-15\nlion-16\nlion-17\nlion-18\nlion-19\nlion-2\nlion-3\nlion-4\nlion-6\nlion-7\nlion-8\nlion-9\nlizard-10\nlizard-11\nlizard-12\nlizard-14\nlizard-15\nlizard-16\nlizard-17\nlizard-18\nlizard-19\nlizard-2\nlizard-20\nlizard-4\nlizard-5\nlizard-7\nlizard-8\nlizard-9\nmicrophone-1\nmicrophone-10\nmicrophone-11\nmicrophone-12\nmicrophone-13\nmicrophone-15\nmicrophone-17\nmicrophone-18\nmicrophone-19\nmicrophone-20\nmicrophone-3\nmicrophone-4\nm
icrophone-5\nmicrophone-7\nmicrophone-8\nmicrophone-9\nmonkey-1\nmonkey-10\nmonkey-11\nmonkey-12\nmonkey-13\nmonkey-14\nmonkey-15\nmonkey-16\nmonkey-18\nmonkey-19\nmonkey-2\nmonkey-20\nmonkey-5\nmonkey-6\nmonkey-7\nmonkey-8\nmotorcycle-10\nmotorcycle-11\nmotorcycle-12\nmotorcycle-13\nmotorcycle-14\nmotorcycle-15\nmotorcycle-16\nmotorcycle-17\nmotorcycle-19\nmotorcycle-2\nmotorcycle-20\nmotorcycle-4\nmotorcycle-5\nmotorcycle-6\nmotorcycle-7\nmotorcycle-8\nmouse-10\nmouse-11\nmouse-12\nmouse-13\nmouse-14\nmouse-15\nmouse-16\nmouse-18\nmouse-19\nmouse-2\nmouse-20\nmouse-3\nmouse-4\nmouse-5\nmouse-6\nmouse-7\nperson-11\nperson-13\nperson-14\nperson-15\nperson-16\nperson-17\nperson-18\nperson-19\nperson-2\nperson-20\nperson-3\nperson-4\nperson-6\nperson-7\nperson-8\nperson-9\npig-1\npig-11\npig-12\npig-14\npig-15\npig-16\npig-17\npig-19\npig-20\npig-3\npig-4\npig-5\npig-6\npig-7\npig-8\npig-9\npool-1\npool-10\npool-11\npool-13\npool-14\npool-16\npool-17\npool-18\npool-19\npool-2\npool-20\npool-4\npool-5\npool-6\npool-8\npool-9\nrabbit-1\nrabbit-11\nrabbit-12\nrabbit-14\nrabbit-15\nrabbit-16\nrabbit-18\nrabbit-2\nrabbit-20\nrabbit-3\nrabbit-4\nrabbit-5\nrabbit-6\nrabbit-7\nrabbit-8\nrabbit-9\nracing-1\nracing-11\nracing-12\nracing-13\nracing-14\nracing-17\nracing-18\nracing-19\nracing-2\nracing-3\nracing-4\nracing-5\nracing-6\nracing-7\nracing-8\nracing-9\nrobot-10\nrobot-11\nrobot-12\nrobot-13\nrobot-14\nrobot-15\nrobot-16\nrobot-17\nrobot-18\nrobot-2\nrobot-20\nrobot-3\nrobot-4\nrobot-6\nrobot-7\nrobot-9\nrubicCube-10\nrubicCube-11\nrubicCube-12\nrubicCube-13\nrubicCube-15\nrubicCube-16\nrubicCube-17\nrubicCube-18\nrubicCube-2\nrubicCube-20\nrubicCube-3\nrubicCube-4\nrubicCube-5\nrubicCube-7\nrubicCube-8\nrubicCube-9\nsepia-1\nsepia-10\nsepia-11\nsepia-12\nsepia-14\nsepia-15\nsepia-17\nsepia-18\nsepia-19\nsepia-2\nsepia-20\nsepia-3\nsepia-4\nsepia-5\nsepia-7\nsepia-9\nshark-1\nshark-10\nshark-11\nshark-12\nshark-13\nshark-14\nshark-15\nshark-16\nshark-17\nshark-18\nshark-19\nshark-20\nshark-4\nshark-7\nshark-8\nshark-9\nsheep-1\nsheep-10\nsheep-11\nsheep-12\nsheep-13\nsheep-14\nsheep-15\nsheep-16\nsheep-17\nsheep-18\nsheep-19\nsheep-2\nsheep-20\nsheep-4\nsheep-6\nsheep-8\nskateboard-1\nskateboard-10\nskateboard-11\nskateboard-12\nskateboard-13\nskateboard-14\nskateboard-15\nskateboard-17\nskateboard-18\nskateboard-2\nskateboard-20\nskateboard-4\nskateboard-5\nskateboard-6\nskateboard-7\nskateboard-9\nspider-1\nspider-10\nspider-11\nspider-12\nspider-13\nspider-15\nspider-17\nspider-19\nspider-2\nspider-3\nspider-4\nspider-5\nspider-6\nspider-7\nspider-8\nspider-9\nsquirrel-1\nsquirrel-10\nsquirrel-12\nsquirrel-14\nsquirrel-15\nsquirrel-16\nsquirrel-17\nsquirrel-18\nsquirrel-2\nsquirrel-20\nsquirrel-3\nsquirrel-4\nsquirrel-5\nsquirrel-6\nsquirrel-7\nsquirrel-9\nsurfboard-1\nsurfboard-10\nsurfboard-11\nsurfboard-13\nsurfboard-14\nsurfboard-15\nsurfboard-16\nsurfboard-17\nsurfboard-18\nsurfboard-19\nsurfboard-2\nsurfboard-20\nsurfboard-3\nsurfboard-6\nsurfboard-7\nsurfboard-9\nswing-1\nswing-11\nswing-12\nswing-13\nswing-15\nswing-16\nswing-18\nswing-19\nswing-2\nswing-3\nswing-4\nswing-5\nswing-6\nswing-7\nswing-8\nswing-9\ntank-1\ntank-10\ntank-11\ntank-12\ntank-13\ntank-15\ntank-17\ntank-18\ntank-19\ntank-2\ntank-20\ntank-3\ntank-4\ntank-5\ntank-7\ntank-8\ntiger-1\ntiger-10\ntiger-11\ntiger-13\ntiger-14\ntiger-15\ntiger-16\ntiger-17\ntiger-19\ntiger-2\ntiger-20\ntiger-3\ntiger-5\ntiger-7\ntiger-8\ntiger-9\ntrain-10\ntrain-12\ntrain-13\ntrain-14\ntrain-15\ntrain-16\ntrain-17\ntrain-18\ntrain-19\
ntrain-2\ntrain-3\ntrain-4\ntrain-5\ntrain-6\ntrain-8\ntrain-9\ntruck-1\ntruck-10\ntruck-11\ntruck-12\ntruck-13\ntruck-14\ntruck-15\ntruck-17\ntruck-18\ntruck-19\ntruck-2\ntruck-20\ntruck-4\ntruck-5\ntruck-8\ntruck-9\nturtle-1\nturtle-10\nturtle-11\nturtle-12\nturtle-13\nturtle-14\nturtle-15\nturtle-17\nturtle-18\nturtle-19\nturtle-2\nturtle-20\nturtle-3\nturtle-4\nturtle-6\nturtle-7\numbrella-1\numbrella-10\numbrella-11\numbrella-12\numbrella-13\numbrella-14\numbrella-15\numbrella-16\numbrella-18\numbrella-20\numbrella-3\numbrella-4\numbrella-5\numbrella-6\numbrella-7\numbrella-8\nvolleyball-10\nvolleyball-11\nvolleyball-12\nvolleyball-14\nvolleyball-15\nvolleyball-16\nvolleyball-17\nvolleyball-2\nvolleyball-20\nvolleyball-3\nvolleyball-4\nvolleyball-5\nvolleyball-6\nvolleyball-7\nvolleyball-8\nvolleyball-9\nyoyo-1\nyoyo-10\nyoyo-11\nyoyo-12\nyoyo-13\nyoyo-14\nyoyo-16\nyoyo-18\nyoyo-2\nyoyo-20\nyoyo-3\nyoyo-4\nyoyo-5\nyoyo-6\nyoyo-8\nyoyo-9\nzebra-1\nzebra-11\nzebra-12\nzebra-13\nzebra-15\nzebra-18\nzebra-19\nzebra-2\nzebra-20\nzebra-3\nzebra-4\nzebra-5\nzebra-6\nzebra-7\nzebra-8\nzebra-9\n"
  },
  {
    "path": "lib/train/data_specs/trackingnet_classmap.txt",
    "content": "Nf1aqv5Fg5o_0\tairplane\nAAB6lO-XiKE_0\tperson\nAACM71csS-Q_0\tperson\nAACM71csS-Q_1\tperson\nAARNQeeGCeM_1\tperson\nAARldOxX9Qc_0\tbird\nAATSbTthMRo_1\tperson\nAAVQ--F7Bk8_7\tbird\nAAVQ--F7Bk8_2\tbird\nAAVQ--F7Bk8_8\tbird\nAAWK6esRYaE_0\tperson\nAAWK6esRYaE_1\tperson\nAAjY2Ci68z8_0\tperson\nAA19zjGEPvg_1\tbear\nAA28Bcp5cJ4_0\ttrain\nABBGULxaufw_0\tperson\nABF8Qzi1y6k_1\tbear\nABIlEiPfEC4_0\tbird\nABJ_agLToOw_0\tbird\nABZMoeeFyek_0\tbicycle\nABny-jw1_S0_0\telephant\nABrhnT3LRWs_2\tcat\nABxlnMGfo5c_0\tumbrella\nAByCCGnybVU_1\tperson\nAB2MjrpRiEQ_0\thorse\nAB-q-hxh9XQ_4\tbus\nAB-q-hxh9XQ_1\tbus\nAB-q-hxh9XQ_3\tbus\nACDuy9fWQCs_1\tumbrella\nACFxVnoXE2k_1\thorse\nACMvGMt8Neo_0\tperson\nACM6PJWHfcM_0\tperson\nACOGOPL4ZH0_1\tperson\nACOGOPL4ZH0_0\tperson\nACS5TtaAdG8_0\ttruck\nACarEC5tuT8_0\ttruck\nACiNZsAvVTE_0\tperson\nACkYaVC9f9M_1\tumbrella\nACnQKLobnGE_4\tairplane\nACnQKLobnGE_5\tairplane\nAC0Z4yw1hf0_0\tperson\nAC0Z4yw1hf0_1\tperson\nAC-10OYYnLM_1\tperson\nAC-10OYYnLM_0\tperson\nADHNPU5iB_4_0\tcat\nADWpC6kDWFU_0\tperson\nADiIG2D8pds_2\tmotorcycle\nADiIG2D8pds_0\tmotorcycle\nADi674XOuRY_0\tdog\nADn8ZdVYOcc_0\ttrain\nADn8ZdVYOcc_2\ttrain\nAD1cVG81mpA_0\tperson\nAD4EACfWAIM_0\thorse\nAD4EACfWAIM_1\thorse\nAD531xkux4k_0\tperson\nAD7A6_o0Las_0\thorse\nAEQT6XxEeT0_0\tperson\nAEQT6XxEeT0_1\tperson\nAESfphazWKA_0\tperson\nAESfphazWKA_1\tperson\nAEokTVMPd4A_0\tperson\nAEtwwIR9UkI_0\tdog\nAE2TrzJHr2s_1\tmotorcycle\nAE3t_VNk3eo_0\tperson\nAE6G6W2CL9M_1\tperson\nAE7tEK8S9pk_0\tbird\nAE7tEK8S9pk_3\tbird\nAE-k9jcdaJk_1\tgiraffe\nAFLrK88FzTI_0\tmotorcycle\nAFOjy-9Kf-8_0\tperson\nAFSTw_O6inE_0\tperson\nAFSTw_O6inE_1\tperson\nAFT64SYoPTo_1\tperson\nAFeRUltwvNE_0\tknife\nAFeRUltwvNE_2\tknife\nAFf9I30fB6U_0\tperson\nAFkSCsJ_jeg_0\tperson\nAFkSCsJ_jeg_1\tperson\nAFnPp9mvoJs_0\thorse\nAFpVfranYCA_1\tknife\nAFrLubifeb4_0\tairplane\nAFrLubifeb4_2\tairplane\nAFsmSsZBS6I_1\tperson\nAFsmSsZBS6I_0\tperson\nAF0FDnfdpro_0\ttrain\nAF0-2lDeBME_1\tbird\nAF2bYjH_Q8c_0\tperson\nAF4nO1MeUis_1\ttrain\nAGV9gZ6ePKk_0\tairplane\nAGXVFK896Os_0\tcow\nAGYehDNUqx0_1\tairplane\nAGYehDNUqx0_0\tairplane\nAGdqwMVGRoU_0\thorse\nAGfcGfMXHPM_3\telephant\nAGsg2IV8FME_1\tskateboard\nZBPURFcpqDM_0\tmotorcycle\nZBXAMWkamQk_2\tknife\nZBXAMWkamQk_1\tknife\nZBcCcSynS3Y_1\tcar\nZBcTSnaCcqE_1\tperson\nZBcTSnaCcqE_0\tperson\nZBcjhADZaUk_0\tbear\nZBdz7fg01uE_0\tumbrella\nZBp5ICCzoK8_0\tperson\nZBriZpPQR6Q_0\tcat\nZBvEIHeKcKg_2\tzebra\nZBvEIHeKcKg_9\tzebra\nZBvEIHeKcKg_0\tzebra\nZBvEIHeKcKg_1\tzebra\nZBvEIHeKcKg_3\tzebra\nZBvEIHeKcKg_4\tzebra\nZBvEIHeKcKg_5\tzebra\nZBvEIHeKcKg_6\tzebra\nZBvEIHeKcKg_7\tzebra\nZBvEIHeKcKg_8\tzebra\nZB0EfmbWfng_0\thorse\nZB0kV8Ni0e8_0\tperson\nZB_pe6v1lVI_0\tperson\nZB_pe6v1lVI_2\tperson\nZCAOpABRfTI_10\telephant\nZCAOpABRfTI_0\telephant\nZCAOpABRfTI_3\telephant\nZCAOpABRfTI_4\telephant\nZCAOpABRfTI_6\telephant\nZCAOpABRfTI_7\telephant\nZCAOpABRfTI_8\telephant\nZCFCltdIjeg_1\tperson\nZCFCltdIjeg_0\tperson\nZCGB4r_lWmY_0\thorse\nZCS_eyAufDo_0\tperson\nZCTwXcewINc_0\tcow\nZCfqT4CDOYA_1\tbird\nZCgDbEHLsIg_0\tperson\nZClABNZVqqw_1\tperson\nZCmoG6WgVO4_1\tperson\nZCmoG6WgVO4_0\tperson\nZCnJ6weWtz8_1\tperson\nZCnJ6weWtz8_0\tperson\nZCnJ6weWtz8_2\tperson\nZCzrSOZhkx8_1\tperson\nZCzrSOZhkx8_2\tperson\nZC3Y42jSG_0_0\tperson\nZC5Jtr93Fc0_0\tcat\nZDDtjYsFrzY_0\tmotorcycle\nZDMLHna_uZU_1\tskateboard\nZDMSLfnIpw0_0\tperson\nZDS-TQTDheA_0\tperson\nZDWUEeCoa0c_0\tperson\nZDfRsMjEWrU_0\tperson\nZDucdx9SldA_0\tbicycle\nZDwG7VWIZ2E_0\tmotorcycle\nZDw-tgE8yQw_0\tperson\nZEA5lDwY3hY_0\tperson\nZERPmLuCNr0_1\tskateboard\nZEYy
XBrvcIU_0\tperson\nZEbxfeAOLec_1\tmotorcycle\nZEdGptkowmk_2\tcow\nZEdsROg2ZAk_2\thorse\nZEgcTqeZxOk_1\tperson\nZEiW5hvCQyM_0\tbird\nZE16Mis16oE_0\tbus\nZE3Vro7d4pA_0\tcat\nZE415SbIjYI_7\tbird\nZE5h8vmL_Vw_0\tboat\nZE6oeN8ZzDA_1\tperson\nZE6oeN8ZzDA_0\tperson\nZFKQ9r76HHU_1\telephant\nZFKYTz9Jkhw_0\tumbrella\nZFSspVdQ_1M_0\tperson\nZFSspVdQ_1M_1\tperson\nZFe5vGzmYgY_0\tbear\nZFe5vGzmYgY_4\tbear\nZFfH8M8dMH8_5\tbird\nZFk9b7tQz1g_0\tperson\nZFn422HSENU_2\tairplane\nZFw7fJO3h3U_0\tmotorcycle\nZF2yE0Tm8D0_0\tcow\nZF5yV-qvHfg_0\tbicycle\nZF8rySXBivY_0\tperson\nZF_u1UFqAvg_0\tperson\nZGHtP6pLosk_0\tperson\nZGT9Ky1jJ0E_0\thorse\nZGWqLNy2PDM_2\tbird\nZGeWYNFOH7U_0\tperson\nZGhdqsb3kNA_0\tcar\nZGhdqsb3kNA_3\tcar\nZGhdqsb3kNA_1\tcar\nZGkmBkelEBU_0\tperson\nZGpMZT1HUiw_0\thorse\nZGsHiz0oPuw_0\tbus\nZGvfU-Fgk40_1\tperson\nZGyWFwMmdbs_0\tperson\nZG9dVnPGocw_0\tperson\nZHDkDNgRSz0_0\ttrain\nZHFPykjdFAY_1\tperson\nZHPeB20mRyI_0\tcow\nZHPeB20mRyI_1\tcow\nZHX1xXuU_Jw_0\tperson\nZHlb-NoDPiE_1\telephant\nZHlb-NoDPiE_2\telephant\nZHlb-NoDPiE_4\telephant\nZHl7b8RItn0_0\thorse\nZHnW6ge8wBc_0\tcat\nZHodaPFcFYU_0\tperson\nZHovXJVH8xk_0\ttruck\nZHpZ3CGHl44_0\tperson\nZHrrW673jzQ_1\tperson\nZHrrW673jzQ_0\tperson\nZHrsTuxP7aI_1\thorse\nZHu6CNOlw3g_0\tcow\nZHu6CNOlw3g_1\tcow\nZHxx4jT0QY8_0\tperson\nZH1tP4KBq4c_0\tgiraffe\nZH5HXdNA_Vg_0\tperson\nZH-X6nu5grI_33\thorse\nZH-X6nu5grI_2\thorse\nZH-X6nu5grI_3\thorse\nZH-X6nu5grI_6\thorse\nZH-X6nu5grI_7\thorse\nZH-X6nu5grI_8\thorse\nZH_6GNzE7AE_0\tperson\nZIAnd6kIMac_0\tbird\nZIAnd6kIMac_1\tbird\nZICz-o8kLz0_0\tskateboard\nAGx9YQ6C-6o_7\tcar\nAG1KXUn4YG0_0\tperson\nAG_bCNeWGbQ_0\telephant\nAHARpIfT490_0\tdog\nAHIF--VOeQs_0\tperson\nAHJcPNPqKmI_0\thorse\nAHKFqtjfRZA_2\tbear\nAHLL47_EdEA_1\tperson\nAHLL47_EdEA_0\tperson\nAHNC2jifaeA_1\tairplane\nAHQLEaBATbw_0\tperson\nAHQW1ru8IzY_0\tairplane\nAHQrFFp5yq4_0\tairplane\nAHiwgwMi8HU_0\tdog\nAHjEWaIP4Us_0\tcow\nAHkvSb7kMDQ_0\tperson\nAHn7KxEbpSw_0\tperson\nAHvhccaU6e0_0\tbus\nAHx-m9m2WSM_0\tperson\nAIAtwCnT8D0_1\tperson\nAIBVp_3pm4U_1\tperson\nAIBVp_3pm4U_0\tperson\nAIFwUvUUIAU_1\tperson\nAIPKb-NMVjk_0\tairplane\nAIPKb-NMVjk_3\tairplane\nAIVpT8BRXaQ_1\thorse\nAIYDjtWzamM_0\tbear\nAIYDjtWzamM_1\tbear\nAIZGolX95Do_0\tperson\nAIbvvs9Mppk_0\tperson\nAIduTWoo-tY_0\tskateboard\nAIeFzUH7L38_1\ttrain\nAIkHZuaZGZc_1\telephant\nAIkHZuaZGZc_2\telephant\nAIpwAHaTBsI_0\ttrain\nAI00Hva5A8g_0\tperson\nAI38cuNcfsE_0\tknife\nAI73dwp8OlI_1\ttrain\nAJAy74dPvNA_0\tperson\nAJCXZxF7mEU_1\tskateboard\nAJDMiWpRbdY_0\tperson\nAJILdTCo1mA_0\tdog\nAJKXpUsj3I0_0\tbird\nAJRdbCnFyVo_0\telephant\nAJTfeXepoNQ_0\tbus\nAJZ65x_ashE_0\tairplane\nAJaOK6nLWLU_0\tperson\nAJaOK6nLWLU_1\tperson\nAJaOK6nLWLU_2\tperson\nAJh6EhObuEU_0\tperson\nAJiQZJH_ZsU_0\tbird\nAJiYw7-oCvA_1\tknife\nAJiYw7-oCvA_2\tknife\nAJiYw7-oCvA_0\tknife\nAJkWw2b2Qjg_0\thorse\nAJor90pfjM8_0\tcow\nAJtuQLfNvSs_0\tcat\nAKBoEjrtQwE_1\ttrain\nAKDi2KVrR1Q_0\tskateboard\nAKIcyYzL9C0_0\tcat\nAKMl62ZFICw_3\tbus\nAKMl62ZFICw_1\tbus\nAKN6nvHB7P0_2\tairplane\nAKN6nvHB7P0_3\tairplane\nAKPDvaUNx94_1\thorse\nAKPDvaUNx94_2\thorse\nAKVUSpeg9Jk_0\tknife\nAKxpzCrmsi8_0\tbus\nAK4AJfDZfEo_0\tcat\nAK64udGI1BA_0\tumbrella\nAK8imx-InYk_1\thorse\nAK8imx-InYk_2\thorse\nAK_J57sNeeo_1\telephant\nAK_0-KHw9wc_1\thorse\nALCj6V-0pU8_0\tperson\nALKBlOms7sk_0\ttruck\nALLYkPepYRc_0\ttrain\nALRR_HHP500_0\tperson\nALRzJ2FzEoY_0\tperson\nALYKJChPG6k_0\tknife\nALjxXEqJFTg_0\ttrain\nALpnjTPWIN4_0\tbird\nAL73oE_aovA_2\tbicycle\nAL73oE_aovA_3\tbicycle\nAMDjY36EpsU_0\ttruck\nAMEZhZVe7hk_0\tperson\nAMEZhZVe7hk_1\tperson\nAMI4Xu1mmNw_0\telephant\n
AMZeyszxY78_0\tknife\nAMn7aithVV8_0\tcar\nAMz8PhUkmpM_0\thorse\nAMz8PhUkmpM_3\thorse\nAMz8PhUkmpM_7\thorse\nAMz8PhUkmpM_2\thorse\nAMz8PhUkmpM_5\thorse\nAM5_HQ705r4_1\tgiraffe\nAM6sweCILPU_0\tairplane\nANHdxFi36CM_1\tbird\nANNbcEcj8Do_0\tperson\nANQZ1MB6gI4_0\tskateboard\nANVkluf6XZA_0\tcat\nANWtZTJoYYc_0\tdog\nANZDRJnX_Os_0\tperson\nANlhuKqnObE_1\tperson\nANlhuKqnObE_0\tperson\nANmJ_3l01rw_2\thorse\nANmJ_3l01rw_3\thorse\nANmkxc2V7qQ_0\tperson\nANufFQ7Fqao_0\tcar\nANufFQ7Fqao_1\tcar\nANvWNG7bZj0_0\tperson\nANwXehjlmOU_0\tgiraffe\nANwXehjlmOU_2\tgiraffe\nANwXehjlmOU_6\tgiraffe\nANwXehjlmOU_7\tgiraffe\nAOFbvqQZz1M_0\tperson\nAOJiO3o1Pgw_0\tperson\nAONi1Rhl0VI_2\tperson\nAONi1Rhl0VI_1\tperson\nAOmvm3OOZZQ_0\tperson\nAOn9I3GEHoU_0\tperson\nAOo1qXfZWsc_0\tbus\nAOq0zSQhX1E_0\tperson\nAOq0zSQhX1E_1\tperson\nAO9zthhr-og_0\tperson\nAO9zthhr-og_1\tperson\nAPAgxsDsZqs_0\tperson\nAPCppiM1SL4_0\tperson\nAPEd6F66jXU_1\tairplane\nAPHhGoshqFo_0\tumbrella\nAPIrIPchQwg_1\tperson\nAPIrIPchQwg_0\tperson\nAPJ4_CEV8HQ_0\tbus\nAPLJsXaOe1c_0\tperson\nAPQ99QCF6pA_0\tperson\nAPRuUBgcBZc_1\tperson\nAPYAGnOjUQQ_0\tperson\nAPa_Xoa9qgg_1\tmotorcycle\nAPcliMIvBe4_2\tperson\nAPcliMIvBe4_0\tperson\nAPcliMIvBe4_1\tperson\nAPp-0CsKxpY_1\tperson\nAPp-0CsKxpY_0\tperson\nAPqdtMhtWlU_0\tmotorcycle\nAPtqUIS_Hyo_0\tperson\nAPwqoNNZyaA_0\tperson\nAPyVeEcEt1U_0\tairplane\nAPyxRCm1XlY_0\tperson\nAP5QrGcnGoU_0\tcow\nAP_vNEBzhqM_0\tperson\nAQALHMjkeh0_1\tgiraffe\nAQKHDJ9HKck_0\tdog\nAQNEkyvgbeA_1\tcow\nAQRKvHpsUk8_0\tperson\nAQTk87BXkxk_0\tperson\nAQVhyDD8GEk_0\tperson\nAQVthZjIETQ_0\ttruck\nAQcg3TVkW1s_0\tperson\nAQcg3TVkW1s_1\tperson\nAQi0YSJ74cw_0\tperson\nAQj3enGQQeE_0\tboat\nAQminPRA2W8_0\tperson\nAQtIgG8RHRY_0\tperson\nAQvltP0EarU_0\tperson\nAQy7gL42wfo_0\tairplane\nAQzJp7Qi_yA_2\telephant\nAQzJp7Qi_yA_13\telephant\nAQ2bfY90nuU_0\tperson\nAQ7YDkmwB4M_0\tdog\nARAX6-JmsNQ_0\tzebra\nARAX6-JmsNQ_2\tzebra\nARFd2qxDhpQ_0\tairplane\nARNkmINZamQ_0\tcow\nARNkmINZamQ_1\tcow\nAROrQJq2sWY_0\tperson\nARRADkl3-30_0\tperson\nARW5DipSrBo_0\tdog\nARmfFWE2ruc_0\tperson\nARmsnBnMyPc_0\tperson\nARnGZQm8zOM_0\ttruck\nARqQUEVhu24_0\tperson\nARrbFDLoy0Q_1\tperson\nARtGNhHj2NU_0\tcat\nARyGQdkbuyM_0\tperson\nARyGQdkbuyM_1\tperson\nASBgE1svBKQ_0\tperson\nASD516fNs3g_0\tperson\nASExrIzixaM_0\ttruck\nASc0m6oxXVI_0\tperson\nASc0m6oxXVI_1\tperson\nASm_mkHCybA_0\tcat\nAS1xCm7MYs8_0\tperson\nAS1xCm7MYs8_1\tperson\nAS2tsNB9LBI_1\tknife\nAS5hg_3pOXM_0\tperson\nAS9kBpj7qvE_0\tperson\nATKytgCulZM_0\tumbrella\nATakdxmz3qU_0\tcar\nATkJNKtd8yo_0\tperson\nATk9e0fbxBk_0\thorse\nATk9e0fbxBk_1\thorse\nATk9e0fbxBk_2\thorse\nAT1zSxV6stw_0\tcat\nAT5urL0Fr0c_0\tbird\nAUGQ4XFEkGY_3\tknife\nAUI-RsDtk4s_0\tperson\nAUMHV6JiwU0_0\tbird\nAUZevw68t_s_0\tbear\nAUcOQ1L4Nj0_0\ttrain\nAUfaVvy5QxU_0\ttrain\nAUguk_8JO_U_0\tskateboard\nAUgw-t2MrtU_0\tperson\nAUzge-cBHfM_0\tbear\nAU0RtWdAXcU_0\tperson\nAU114x-Qif4_0\tperson\nAU3mKa0Npq4_0\tperson\nAU8GXMxyP9U_0\tperson\nAVHVVt5Srow_0\tbear\nZIGThAlQuUU_1\ttruck\nZIGkCx4o3G0_0\tperson\nZIMLdoIIFbg_0\tperson\nZIWkcVTlaRU_1\tperson\nZIamYwe-hJ8_0\tcar\nZIawXDt6JH4_0\tcat\nZIlyoSrDQQ8_0\tperson\nZImLYekhFBQ_3\tbus\nZI6J2WSiZy0_0\tgiraffe\nZI7DX2OSzzQ_0\tairplane\nZJCSQFa1W3M_0\tperson\nZJDAzZZQ38k_1\tknife\nZJDAzZZQ38k_0\tknife\nZJEQHkA9NLw_1\ttruck\nZJHeFXEtwNE_0\tknife\nZJJoit687Tc_0\tperson\nZJJpIPciUts_2\tskateboard\nZJL9WONxDB8_0\tperson\nZJMJBrWq8-o_0\tperson\nZJOVhmSGVMM_0\tperson\nZJXuyIEaSc4_0\thorse\nZJYXcUOxNRc_1\tperson\nZJdKrkzHR94_0\tperson\nZJdKrkzHR94_1\tmotorcycle\nZJe2QoJwNa0_0\thorse\nZJimYyH6VUI_0\tcar\nZJoQRLyRs8o_0\tperson\
nZJpozi2Piqc_0\tmotorcycle\nZJwWllfPFjo_0\tperson\nZJyDrvmQwY8_0\telephant\nZJyDrvmQwY8_1\telephant\nZJ5n1Y-yXqM_0\tperson\nZKF4kfqyu6U_0\tperson\nZKIuqz6GDSA_0\thorse\nZKJuI7-4560_0\tcat\nZKKalWR8MBM_0\tboat\nZKSF-y6kC1I_0\telephant\nZKSF-y6kC1I_1\telephant\nZKTseP8JqIw_0\tperson\nZKk703iOFmY_0\thorse\nZKrJdHuvvR8_0\tperson\nZKy67yESvjM_0\tperson\nZK1zKp1iJY4_5\telephant\nZK1zKp1iJY4_2\telephant\nZK3-Em8w4HE_0\thorse\nZK6pkPtSd_4_0\tcow\nZK_BL_TGwo0_1\ttrain\nZLFXKnOp0LM_1\tknife\nZLH6HbQ5Miw_0\tperson\nZLSqYLLWQLc_1\tcow\nZLSqYLLWQLc_2\tcow\nZLcGyr4ZfJU_1\tairplane\nZLdb8-YkoiY_0\tperson\nZLm8Hen6OFM_1\tbicycle\nZLm8Hen6OFM_2\tbicycle\nZLnf4vSxfgo_1\tumbrella\nZLqSGXI7FdM_3\tknife\nZLuY9hS-wd4_0\tbus\nZLuY9hS-wd4_1\tbus\nZLuY9hS-wd4_2\tbus\nZLupIiWNPOY_0\tperson\nZL18xmfIKH4_1\tmotorcycle\nZL18xmfIKH4_3\tmotorcycle\nZL18xmfIKH4_2\tmotorcycle\nZL3DgidLXjw_0\tperson\nZL5SCZpZWtA_1\thorse\nZL-60We4drw_0\tdog\nZMDe7QMaLa8_0\tperson\nZMD2tP69gaU_1\tperson\nZMKFhrS_QnY_0\tcow\nZML6VoRZ_Tk_0\tperson\nZMMDA6nYXZs_0\tbird\nZMPdl-1FCMQ_0\tperson\nZMZU_V7d3-I_1\tumbrella\nZMa0bYeg_NE_0\tdog\nZMdAlm9Zx_A_1\tcar\nZMeQ1Vc3HZk_0\tperson\nZMuwZKOfK1s_0\tmotorcycle\nZMvdpTH-1Ug_9\tairplane\nZMxu4wRDuqU_1\tperson\nZMyEEXdgJeA_0\tperson\nZM1xadWQqKQ_0\tbus\nZM2SMTrxUr0_0\ttrain\nZM3QVkm1izg_0\tperson\nZM5-iyB8rFk_1\tdog\nZM_TO-0UDp4_0\tperson\nZNJ8aytwo1E_0\tperson\nZNP23sy27W0_0\tperson\nZNTqZ3wERJE_0\tperson\nZNUBh1ppeyo_0\tskateboard\nZNXCWGzmxK8_0\tperson\nZNZx7hTxCQE_0\tairplane\nZNaTV3nGl6M_0\tperson\nZNcUW5m7eRw_0\tgiraffe\nZNg9OZgsMqc_0\tbear\nZNoQrAOf3Ns_0\ttruck\nZNqpyPcacjY_0\tmotorcycle\nZNv_LrEIljc_0\tumbrella\nZNxw9kVCouU_0\tbus\nZNzeI_r7GT4_0\ttruck\nZN2bt7wkvH0_1\tbear\nZN5ukEMKLY4_0\tcow\nZN_gFe4IzxE_0\ttruck\nZODUj9lsCzk_0\thorse\nZOEa1JGwnwE_0\tperson\nZOEa1JGwnwE_1\tperson\nZOGP8-XsFYc_0\tperson\nZOIuTsiGyRY_0\tbird\nZOJSvR5KOsE_0\tdog\nZOMPRnYycak_2\tcow\nZOMnEZ4dWMk_0\telephant\nZOStUYUIEdA_0\tskateboard\nZOTSBcRwdRA_0\tperson\nZOX1xH7rOus_0\ttrain\nZOthVGHUcjo_3\tcow\nZOwhFlp5EiA_0\tperson\nZOxDsYnvl0M_0\tperson\nZOymkqw58fw_0\tperson\nZOzQfVh1LN8_1\tmotorcycle\nZO_5hZ2ex6Y_0\tperson\nZPKaBLqoKvQ_0\tperson\nZPNr3zZg6jk_1\tperson\nZPNr3zZg6jk_0\tperson\nZPNr3zZg6jk_2\tperson\nZPQ0lqiH9uw_0\ttrain\nZPQ0lqiH9uw_1\ttrain\nZPQ3tbJp33I_0\ttrain\nZPVOrRypdRM_0\thorse\nZPZjgecd6OQ_1\tboat\nZPaWYb_4S8Y_0\tperson\nZPeRU9CLLew_0\tperson\nZPgUlFmZyP4_0\tperson\nZPjN0Rp_1ZA_0\thorse\nZPkO4x8HPaI_1\tperson\nZPqs3xJ8sMY_0\tperson\nZPqs3xJ8sMY_1\tperson\nZPq9qgTZ4XI_0\ttruck\nZPyxQD17Fq4_2\tperson\nZPyxQD17Fq4_4\tperson\nZP7SN9kW5kg_0\tperson\nZP7sET2Y9dU_0\tperson\nZP8YaHDM_qE_0\thorse\nZQCFPzE41bg_0\tcow\nZQDoAEWZCQk_0\tperson\nZQG5CpZ3fLM_0\tperson\nZQRzkpfy378_0\tbus\nZQZRNVrE9hk_0\tperson\nZQarE1lLDl4_0\tperson\nZQdhjMVGJrk_0\tperson\nZQdhjMVGJrk_1\tperson\nZQmTc5C-h8w_0\tperson\nZQrMMWQidx0_0\tperson\nZQuVUoqiT_I_0\tgiraffe\nZQuVUoqiT_I_1\tgiraffe\nZQ3LAYCIDf8_3\tbear\nZQ8X2cqYANs_0\ttrain\nZQ9G0UkTR1c_1\tperson\nZQ_vGl5xbKY_0\tcat\nZRFMzM7kxuI_3\tcow\nZRFMzM7kxuI_0\tcow\nZRFMzM7kxuI_1\tcow\nZRFMzM7kxuI_2\tcow\nZRLkkoSR8o8_0\tknife\nZRMOgw0VYRI_0\tperson\nZRNQrzQlVwA_0\tperson\nZRNgdckx504_0\tperson\nZRQug2qT1tc_0\tperson\nZRSRBBpyBG8_0\tperson\nZRXjiNMKvis_0\tairplane\nZRc8GDK_9hc_1\tumbrella\nZRkHgC0EAz8_0\tperson\nZRmkeBogj-U_0\tperson\nZRoz_bGkPaE_0\tperson\nZRuQ3ipcK3o_0\tbus\nZRzOWgIAwe8_0\tbird\nZRzOWgIAwe8_3\tbird\nZR0Qj5P8snw_1\tbear\nZR4yO1ASDwo_2\tperson\nZR_VWPjxLTU_0\tdog\nZSDCxbSs-Hs_0\tperson\nZSFzv92w5z4_0\tmotorcycle\nZSGJwERlcvM_0\tperson\nZSXoUfKY7t8_0\tperson\nZSdzUC2BB8Q_0\
ttrain\nZSdzUC2BB8Q_1\ttrain\nZSkkNWgXm6E_0\tskateboard\nZSkkNWgXm6E_1\tskateboard\nZSn4gRAJToo_0\tcat\nZSoJT194AtI_1\tskateboard\nZSoJT194AtI_0\tskateboard\nZSruK26cGuI_0\tdog\nZSs6Knma-Q0_0\tcow\nZSs6Knma-Q0_1\tcow\nZSu3GocMJzI_0\tcar\nZS29l3t9vK8_0\tperson\nZS6NQXztroI_0\tperson\nZS_wuZnVzbw_0\tperson\nZTLDJDjvSuQ_0\ttruck\nZTPTnzEs_Lc_0\tperson\nZTcRmNM1n8M_0\tperson\nZTjOZ-dZDEg_1\tcar\nZTmHHCmX7aw_0\tskateboard\nZTnEKCqMNHs_0\tperson\nZTo33r_63Wg_0\tknife\nZTw6Dkp-LPU_7\telephant\nZTw6Dkp-LPU_0\telephant\nZTw6Dkp-LPU_4\telephant\nZTw6Dkp-LPU_5\telephant\nZTw6Dkp-LPU_6\telephant\nZT5iwG3vEhM_0\tumbrella\nZUCf2cVBY08_0\tperson\nZUWSpLaJj4M_0\tbird\nZUYtIKrcaKo_0\tperson\nZUaHjAaQqF0_0\tbus\nZUdCQl7WU_U_1\tperson\nZUdCQl7WU_U_0\tperson\nZUd0IAbilBA_0\telephant\nZUoFqGf_ijs_0\telephant\nZUoJFmQ6ro4_0\tperson\nZUwniKcHERQ_0\thorse\nZU0WSpOWSak_1\tbear\nZU0_sT3EbVY_0\tzebra\nZU9LGiLzKJg_0\tmotorcycle\nZU-ZhVyhBpA_1\tbicycle\nZVAHreexSa0_0\tperson\nZVBjo5HM0Do_0\tknife\nZVD-ea5SjMg_0\tperson\nZVJpmiue5IA_0\ttruck\nZVKyUsgomW4_0\tperson\nZVOMkt8TORM_0\ttrain\nZVQo_9tFZGY_0\tbus\nZVY_873YYQY_0\tskateboard\nZVZJRbJ2h1A_0\tcat\nZViLnbCdjZM_1\tperson\nZVlOetMc3m4_0\tperson\nZVl8So4V1Ss_0\tcat\nZVnaHf8vAhA_0\tzebra\nZVtPRAs8Za0_0\tperson\nZV8NIO3XuLQ_0\tperson\nZV9eJe2grq4_1\tbear\nZWIPlBvd1DI_0\tperson\nZWIPlBvd1DI_1\tperson\nZWJv_-wAdws_1\tskateboard\nZWKHlq-W7_8_9\ttrain\nZWKHlq-W7_8_14\ttrain\nZWKHlq-W7_8_0\ttrain\nZWKHlq-W7_8_1\ttrain\nZWKHlq-W7_8_4\ttrain\nZWKHlq-W7_8_7\ttrain\nZWKHlq-W7_8_10\ttrain\nZWKHlq-W7_8_11\ttrain\nZWKHlq-W7_8_12\ttrain\nZWKHlq-W7_8_13\ttrain\nZWNe-zcl-IY_0\tboat\nZWNjUm5Uzh0_1\tbicycle\nZWNjUm5Uzh0_5\tbicycle\nZWXE7IAaWrg_0\tperson\nZWXSnELtawA_1\tknife\nZWXSnELtawA_3\tknife\nZWX1cGhJG98_0\tbicycle\nZWlTD6EbOTo_0\tperson\nZWqzdCz6UvY_0\tbird\nZWr6RECjqV0_1\thorse\nZWr6ZU_-ir4_1\tperson\nZWthtO1iGtQ_0\tperson\nZWwlzozPAk8_0\tperson\nZWxn8yT0bXo_0\tcow\nZW0HC4IRa64_0\tperson\nZW3CWoXzrn4_0\tbicycle\nZW3CWoXzrn4_1\tbicycle\nZW5VkDNSfWA_0\tcat\nZXMqiFE6KOE_0\tairplane\nZXRcWIcok2I_0\tperson\nZXgYAh2AWyk_0\thorse\nZXp6jOe8DUE_0\tperson\nZXyJafbGcBM_0\thorse\nZXzno8CjUyM_0\telephant\nZYB9yzoJ6jc_0\tperson\nZYG83auB9Lk_0\ttrain\nZYIgTdUmOWk_0\telephant\nZYKlgXftesk_0\tcow\nZYM0_4YzeeQ_0\tperson\nZYRgw5rNhE4_0\tperson\nZYS7WVlJbuU_0\tperson\nZYX53PWsBdk_0\tperson\nZYY8vkvB1zU_0\tperson\nZYkIkq9kfLc_0\tdog\nZYlANECCXnI_0\tperson\nZYocOIOyuqs_0\tperson\nZYsifQxv94s_1\tmotorcycle\nZYs7rbZt8Zw_0\tairplane\nZYs7rbZt8Zw_1\tairplane\nZYs7rbZt8Zw_2\tairplane\nZYtk2iVNC90_2\tairplane\nZYtk2iVNC90_0\tairplane\nZYxn9wmzRI4_0\tbicycle\nZYxn9wmzRI4_1\tbicycle\nZYzeKMdP2SE_0\tperson\nZYz6B5dwXcE_0\tperson\nZY_urkqeQLM_0\tbicycle\nZZANjG2Z5Jk_0\tperson\nZZFzCaL48sE_0\tcow\nZZNRG-ux4fw_0\tperson\nZZQDFjbEcHQ_1\tbird\nZZQDFjbEcHQ_2\tbird\nZZQSDwoLZ00_4\tknife\nZZSFKq4WH78_0\tcat\nZZVPKuh-2v8_0\tperson\nZZVx_IT4voA_0\tperson\nZZlf3LtDpH8_1\tbear\nZZpLkBcXUgs_1\tperson\nZZpLkBcXUgs_2\tperson\nZZxMtMlV-MM_0\tcow\nZZyW-2jZcIo_0\thorse\nZZyW-2jZcIo_1\thorse\nZZ20JXRExdg_0\tperson\nZZ8OuI39UTM_1\tperson\nZZ85EAvnAGU_0\tperson\nZZ85EAvnAGU_1\tperson\nZaDVUoq6h5o_1\tperson\nZaD5V9_Vw2w_0\tperson\nZaJb3JTan7Q_0\tperson\nZaLqPrH_aVo_0\ttrain\nZaLqPrH_aVo_1\ttrain\nZaNZV-lM-3o_0\tperson\nZaNZV-lM-3o_1\tperson\nZaPC288yVBg_1\tbicycle\nZaPC288yVBg_5\tbicycle\nZaPC288yVBg_7\tbicycle\nZaPltFe0S_o_1\ttruck\nZabt7ElK3jM_0\tperson\nZacHdhX9F9M_2\tdog\nZadGgAG3PzE_0\tperson\nZaew_bHz-PQ_11\tumbrella\nZaflj5gSZEw_0\tperson\nZanT0hXyJhk_0\tbird\nZavCWamLatc_2\tperson\nZavCWamLatc_1\tperson\nZa4BYhhaFFQ_1\tzebra
\nZa6oX4aQR34_0\tairplane\nZbB-tdDvITQ_0\tmotorcycle\nZbDu8V7ppZE_0\tmotorcycle\nZbHt1sn7oTI_0\tperson\nZbJvtTVTTV8_0\tknife\nZbQXzueqj4Y_0\thorse\nZbgfg8usx-k_0\tperson\nZbgfg8usx-k_1\tperson\nZbm5_qB8fEs_0\tperson\nZbrJHC_mHlo_1\tperson\nZbrJHC_mHlo_2\tperson\nZbrJHC_mHlo_0\tperson\nZbrqZYGiMvE_1\tcow\nZb2Vz655gh4_2\thorse\nZb755JeGMpU_2\tperson\nZb-JKfQ5emU_1\tperson\nZb-JKfQ5emU_2\tperson\nZb-JKfQ5emU_0\tperson\nZcJPap_gVyo_0\tperson\nZcXA6CyQBi8_0\tcat\nZchU4DxP5A8_0\tperson\nZcw7wSfd2JM_0\tperson\nZcw7wSfd2JM_1\tperson\nZdElKzM-US0_0\tumbrella\nZdKO1sC4o60_0\tperson\nZdMbx0IXDzs_0\tperson\nZdMm6j__cQM_8\tbicycle\nZdTZrRX0dv4_0\ttruck\nZdXrQlOU7iw_1\tbicycle\nZdaFXJzLLUs_0\tperson\nZdaFXJzLLUs_1\tperson\nZdeTj7nyN-s_0\tboat\nZdevf1MbY8U_0\ttrain\nZdevf1MbY8U_1\ttrain\nZdevf1MbY8U_2\ttrain\nZdirtQF_sjE_0\tperson\nZdlnVpHrDcg_0\tgiraffe\nZdlnVpHrDcg_2\tgiraffe\nZdq2csZeJr8_2\tperson\nZdrk4yHmMXA_0\tperson\nZdtUPHscS-s_0\tperson\nZdxD4gqVioQ_0\tcat\nZdxHWwaivLc_0\tcow\nZdyBZtlMq-M_2\tbear\nZd3j0bQV6NI_0\tperson\nZeHLf0q4Z1Q_0\tperson\nZeZAZbMg1zY_0\tperson\nZeaoaXZDhPw_0\tperson\nZemOY1F1bVo_0\ttruck\nZemOY1F1bVo_3\ttruck\nZemOY1F1bVo_1\ttruck\nZerHfx3SLxU_0\tperson\nZerYXYTyhoc_0\tperson\nZetcbIDyydg_1\tcar\nZetcbIDyydg_0\tcar\nZeuqVhpsVu0_0\thorse\nZe6GIOUVxZU_0\tperson\nZe8W47hBrrE_2\tskateboard\nZfAFALQjUwI_2\tperson\nZfAFALQjUwI_1\tperson\nZfAM39o5Cbc_0\tbird\nZfDkxwMowSk_4\telephant\nZfF5Z0hrOQw_0\tperson\nZfHSyDaLaw0_0\tairplane\nZfHSyDaLaw0_2\tairplane\nZfHSyDaLaw0_1\tairplane\nZfJvZeaN7Ro_1\tperson\nZfTTW39iHJQ_0\tperson\nZflcz9EKz4g_4\telephant\nZflcz9EKz4g_1\telephant\nZflcz9EKz4g_2\telephant\nZfmwrq2aghI_0\tperson\nZf86HoPHmBs_1\tbird\nZf86HoPHmBs_0\tbird\nZf-rSx5ZNB8_0\tperson\nZgK0Y4PgWSM_0\tperson\nZgOr7facaIw_0\tskateboard\nZgP7q-rIhs0_1\tperson\nZgTDthFY-aI_0\tbird\nZgZ18HIfCGc_1\tmotorcycle\nZggirLBvHSw_0\tdog\nZgjspuwgTAc_0\tperson\nZgtG8Zy63UQ_0\tperson\nZg18GZ5OFWw_1\tperson\nZg2YrzGNuZs_0\tperson\nZg4f2iY8_zo_1\tcat\nZg4f2iY8_zo_0\tcat\nZg5MdsCXRWM_1\tcow\nZg5MdsCXRWM_0\tcow\nZhLB-laOg_g_9\tbicycle\nZhLB-laOg_g_3\tbicycle\nZhLB-laOg_g_5\tbicycle\nZhLB-laOg_g_6\tbicycle\nZhLB-laOg_g_10\tbicycle\nZhLB-laOg_g_12\tbicycle\nZhPafr5WTEs_0\tperson\nZhtgT8q5Gm4_0\tperson\nZhtr_XhO6_4_0\ttrain\nZhtr_XhO6_4_1\ttrain\nZh6QWGGQ9dU_0\tperson\nZiJFOBVGah4_0\thorse\nZiPO1UcM3IY_0\tdog\nZiP2ydBHuPs_2\tperson\nZiSl_Dy1ZB4_0\tperson\nZibk3bXvHCY_0\tcat\nZig1VrVbQc0_0\thorse\nZimvCFcji0A_0\tperson\nZisoM7y_CS4_0\tperson\nZitUYI22J54_1\tknife\nZitUYI22J54_0\tknife\nZi1etYbSUmQ_1\tperson\nZjCbmE2jLo4_0\tperson\nZjFb1VLHvyg_1\thorse\nZjPmZ4grIFA_0\tperson\nZjPmZ4grIFA_1\tperson\nZjQqfJ1Docg_0\tperson\nZjQ9lIlCehk_0\tskateboard\nZjSloqSrfWU_1\tairplane\nZjSloqSrfWU_3\tairplane\nZjWBw4tZUO4_0\ttrain\nZjWBw4tZUO4_1\ttrain\nZjWBw4tZUO4_2\ttrain\nZjWBw4tZUO4_3\ttrain\nZjWBw4tZUO4_4\ttrain\nZjWBw4tZUO4_5\ttrain\nZjWBw4tZUO4_6\ttrain\nZjbhM1ZiKW8_0\tperson\nZjbhM1ZiKW8_2\tperson\nZjcEfOHRyLQ_0\ttruck\nZjcevqmMJvY_0\tperson\nZjgTSjb7Vh4_1\tcar\nZjnaerD1MHM_0\telephant\nZjn6uD43ewg_4\tairplane\nZjn6uD43ewg_5\tairplane\nZjn6uD43ewg_1\tairplane\nZjn6uD43ewg_2\tairplane\nZjpmS5k09Ug_1\tperson\nZjpzw1n9Lvc_0\tskateboard\nZjsEX7nNYdQ_0\tperson\nZjxiHzcXOAs_0\tperson\nZjxiHzcXOAs_1\tperson\nZj2HBun9kBY_0\tperson\nAVW26zY72Ns_0\tperson\nAVXWb0s5LZw_0\tperson\nAVqCe7X9Pp4_0\tcow\nAVragVmWr8M_0\tmotorcycle\nAVvnZ-Ky-ew_0\tperson\nAV9y4LnUV84_0\tdog\nAWAQTemnBJc_0\tperson\nAWCUoghX20A_0\tcow\nAWD_KAfvb0U_0\tskateboard\nAWOhJ9RZReg_0\tperson\nAWOhJ9RZReg_1\tperson\nAWPNd7zPJzg_0\tperson\nAWPNd7zPJzg_1\tperso
n\nAWZt9EdU3BU_3\tzebra\nAWdKXFitdJI_0\tboat\nAWh2S4rI6kc_0\tperson\nAW1SjuoheU8_0\tcat\nAW2cvkaExG4_0\tcow\nAW8munaOGqw_0\tperson\nAW--f4fsLFY_0\ttrain\nAXB4hYQKqUw_0\tperson\nAXB4hYQKqUw_2\tperson\nAXQlwoC_K0g_1\ttruck\nAXX66Oq_RkU_0\tperson\nAXhx8hncZvA_0\tboat\nAXm0KvcIchQ_0\ttrain\nAXtXzxTXTqI_0\telephant\nAX2rS0bpAmM_0\thorse\nAX4Hsfdm-Fo_0\telephant\nAX8WoOXfJDA_0\tperson\nAX-xVtjP42Q_0\tperson\nAYLoR7L3CMs_3\tbird\nAYLoR7L3CMs_1\tbird\nAYUGoWokN_0_0\tperson\nAYYdBxTI_54_1\ttrain\nAYakvLR8aVM_0\tperson\nAYe6Wf0URgo_0\ttruck\nAYgbgSVClN4_0\tperson\nAYg1V2ol96s_0\tdog\nAYj70IRvvwI_2\tairplane\nAYj70IRvvwI_3\tairplane\nAYn-qtOy_nc_0\tperson\nAY7foLy1uok_0\telephant\nAY7foLy1uok_1\telephant\nAY-AbrJPyY0_0\ttrain\nAZHYXkv5rMk_0\tbird\nAZJsII37MPY_0\tbird\nAZMW1TyN6Z4_0\tperson\nAZQjsUm-CXk_1\tperson\nAZhH2ej_x_g_0\tperson\nAZjZ1ZSyCeE_0\tperson\nAZk4MAu-j90_0\tperson\nAZleWF5zAxc_1\tbear\nAZl3Emy9K3A_0\thorse\nAZouBTtQrtM_0\tperson\nAZpAuvQryZo_0\tperson\nAZpAuvQryZo_1\tperson\nAZ9SW8bxD3E_0\tbicycle\nAaGwVQ6UjOE_0\tperson\nAaRVwgGBmWU_0\tperson\nAaTW4oc5bBU_0\tperson\nAaZsdPwg9qg_3\tbus\nAac18k-eLZI_0\tperson\nAac18k-eLZI_1\tperson\nAac18k-eLZI_2\tperson\nAakpjcyvFSo_0\tperson\nAalaqaXsEbs_3\tumbrella\nAalaqaXsEbs_0\tumbrella\nAalaqaXsEbs_1\tumbrella\nAalaqaXsEbs_2\tumbrella\nAaoK6DPQKII_0\tbus\nAaotWWHg4eU_0\ttruck\nAaotWWHg4eU_1\ttruck\nAaotWWHg4eU_2\ttruck\nAasksRmCk1g_0\tperson\nAatNkWo2ryE_0\tperson\nAa0FU2EIMZ4_0\tbird\nAa-wzDtjCGc_0\tperson\nAa_biYfYp08_0\tperson\nAbEsU9EX9XQ_0\telephant\nAbEsU9EX9XQ_2\telephant\nAbO_VrlyQ8I_0\tumbrella\nAbTxhwSueZw_0\tperson\nAbd7Vn-Nyt8_1\ttruck\nAbeOAFhMXBY_1\tbird\nAbeOAFhMXBY_2\tbird\nAblKd4XIjqk_0\tperson\nAbmnNkzkXFg_0\telephant\nAbmnNkzkXFg_1\telephant\nAbuMVYzS0mw_0\tskateboard\nAbvoOuTpLtA_0\tdog\nAbwI4m0H9Hk_2\ttrain\nAbx126RTs10_1\telephant\nAb9zgKJnr9Y_1\tperson\nAb-vGS2mqFQ_0\tcow\nAb-vGS2mqFQ_1\tcow\nAcCU5YAWXlw_0\tdog\nAcReGpoHOZI_0\tperson\nAcSmnBYhEsg_0\tperson\nAcTgPRNars0_1\ttruck\nAcUEWZRPoGA_0\tumbrella\nAcZNiBe0Fgo_0\tperson\nAcZukbBG7tI_0\tboat\nAcc1yTFpH2c_0\tdog\nAcpBKywfL4o_1\tcat\nAcpOxyI_YPI_0\tperson\nAcprJcYvkbY_0\tperson\nAdDiiRHwZ2E_0\tcat\nAdEH-oHs1Qo_3\ttrain\nAdEiQT7Nm0o_1\tmotorcycle\nAdE2jnpk6AM_0\tboat\nAdbsyVjq_Xs_0\tcow\nAddL-M622TI_0\tknife\nAdgTVbi_kus_0\tperson\nAdsPsjswSGQ_0\tmotorcycle\nAd044xbRhE8_0\tperson\nAd2TSmaLvX8_0\tperson\nAeDfdgrccVw_0\tperson\nAeHbZ3U8S8U_2\ttrain\nAeWBkNuJmEA_0\ttruck\nAeWBkNuJmEA_3\ttruck\nAeWBkNuJmEA_4\ttruck\nAeWBkNuJmEA_5\ttruck\nAeakbNNwcW0_0\ttrain\nAec4uweTSes_2\tskateboard\nAeflYi3Sxss_0\tperson\nAegDGWXkWNw_0\tperson\nAenVUPH1ils_0\tbird\nAendE1XHSps_0\tbicycle\nAerUXP3Mmks_0\tperson\nAe5qWkNt6RU_2\tcar\nAe7ucKj40mw_0\tdog\nAe9Zd3lP7bg_0\tperson\nAfHkdkvxhNs_0\telephant\nAfNCSPijpao_0\tperson\nAfNGR5iEpvU_0\tcat\nAfNtKiB_rD8_1\tmotorcycle\nAfWHElsVCyM_0\tcow\nAfWfexnwsHg_0\tperson\nAfWfexnwsHg_1\tperson\nAfkKO6j4jWc_0\tperson\nAfmMpft13ZU_0\tperson\nAfnQoNimSjc_0\tperson\nAfynslRqwxI_0\tcar\nAfz2VDV4UHg_1\tperson\nAfz2VDV4UHg_0\tperson\nAf2MGhdZAn8_0\tperson\nAf2VyQEZtfk_0\tperson\nAf6Ve26JUOg_0\tperson\nAgBaUhTbzxA_0\tairplane\nAgBaUhTbzxA_4\tairplane\nAgBaUhTbzxA_5\tairplane\nAgBaUhTbzxA_3\tairplane\nAgBaZRmz8IY_0\tskateboard\nAgJCf77qxsY_0\tperson\nAgP2HoU83S4_4\tknife\nAgYhFemsFag_0\tperson\nAgZ2iflIKWc_1\tperson\nAgaetfTOzc8_0\tperson\nAgdrEW8jmw4_0\ttruck\nAgqmhFD0R94_2\telephant\nAgqmhFD0R94_3\telephant\nAgqmhFD0R94_1\telephant\nAgrKeQXSU2M_0\telephant\nAgrKeQXSU2M_1\telephant\nAgrKeQXSU2M_2\telephant\nAgtCW50wfig_0\tperson\nAgvxdVNj5Oc_0\tskateboard\nAgw5t7
YSQbE_0\tskateboard\nAhAW4UKPzz0_0\tgiraffe\nAhE2vDF6Gbc_0\thorse\nAhE2vDF6Gbc_1\thorse\nAhjsDq9fEzQ_0\tperson\nAhv2jhPqRPg_0\tperson\nAhwGPZWtf3E_0\tperson\nAhxq6Rtu3lc_0\tperson\nAhx3IZujXDw_0\tbus\nAh0AGjta1qg_5\tbird\nAh04VeRs2hg_0\ttruck\nAh4x4EfR3BY_0\tmotorcycle\nAh4x4EfR3BY_1\tmotorcycle\nAiIc8FW3q98_0\tcar\nAiL_iCJ8HZI_1\tperson\nAiNLvzwt3_w_1\tbird\nAiNLvzwt3_w_2\tbird\nAiP7EOvTpK4_0\tmotorcycle\nAiP7EOvTpK4_2\tmotorcycle\nAiU_T3DZI2w_1\tbus\nAiU_T3DZI2w_2\tbus\nAieRY99VkmE_0\tperson\nAieVzbENJv0_3\tbicycle\nAiieCerOKpc_0\tperson\nAik2hirrxEo_3\tairplane\nAik2hirrxEo_0\tairplane\nAik2hirrxEo_1\tairplane\nAim6_lZQi4g_0\tperson\nAiqqXxqnPPM_1\tcow\nAiqqXxqnPPM_0\tcow\nAittR1dd2SI_0\ttrain\nAittR1dd2SI_1\ttrain\nAiv3XHMuVq8_0\ttrain\nAiyfw0Zh38k_0\tperson\nAi29fDmklxM_1\tperson\nAi29fDmklxM_0\tperson\nAi3S7n1Aofs_0\telephant\nAi-487iZv0E_0\tperson\nAjFhyF1XZw4_0\tperson\nAjJHvamHoMU_0\thorse\nAjPBAy1xgrY_0\tperson\nAjVe8d0vc1E_0\tperson\nAjamPk2Geuw_1\tbus\nAjg7q9zxJUo_0\tperson\nAjroIzI2OW8_1\ttruck\nAjroIzI2OW8_2\ttruck\nAjsu2bGngDw_1\tperson\nAjs4qdBK7Jk_0\telephant\nZkD_WAxZB3o_0\tcow\nZkHPsjy-YUQ_1\tknife\nZkbav-Qoxds_0\thorse\nZkbav-Qoxds_2\thorse\nZkbav-Qoxds_1\thorse\nZkidaaVx2VU_1\tbus\nZknqgRL504A_4\tbear\nZkqA2kLudwE_4\ttrain\nZkqA2kLudwE_0\ttrain\nZkqA2kLudwE_3\ttrain\nZku9JAotBZ0_0\tboat\nZkzM2jvV2AY_0\tperson\nZlBfF2yK2vg_1\tperson\nZlBfF2yK2vg_2\tperson\nZlBfF2yK2vg_0\tperson\nZlDsSDEHEzY_1\tcow\nZlDsSDEHEzY_0\tcow\nZlDsSDEHEzY_2\tcow\nZlFElBglnHA_0\tcat\nZlP8tmFYeyY_5\tbird\nZlfyrRfHDoc_0\tcow\nZljx0icnRa8_0\tperson\nZljx0icnRa8_1\tperson\nZlmsqen0qZo_0\tperson\nZln667JkWo8_0\tperson\nZmHKBIsSjQA_0\thorse\nZmHKBIsSjQA_1\thorse\nZmVLw9-fLDo_0\tcar\nZmbXlevaX2U_1\tboat\nZmgJjFt3JU4_0\tskateboard\nZmhKe4_d5Ag_0\tperson\nZmiCqFxUJSw_1\tairplane\nZmkKOYN1dRw_0\tperson\nZmrCaB8p3IM_0\tbear\nZmuzvhzN6EI_0\tcow\nZm3AU4TEpEw_0\tperson\nZm5VvBaQUwU_0\tbird\nZnRgQ1VBIGE_1\tperson\nZnWAM5ju8NM_0\tperson\nZne4XpVG2YQ_1\tperson\nZne4XpVG2YQ_0\tperson\nZnr-Uiobo-k_0\tperson\nZntDSf8cCPI_0\tperson\nZnvLWU_PCZ0_0\tmotorcycle\nZn-r14oEJwM_0\tairplane\nZoC1knYO0Tg_0\tcow\nZoJIup20AGU_0\tperson\nZoKfc3OL0JY_0\tperson\nZoK4wKRoZjY_0\tperson\nZoN4k6UNw6I_1\thorse\nZoOvu218D6M_0\tperson\nZoR1yoQzsbM_0\tperson\nZouHgocvjDI_0\tbird\nZo-8G7N2DXU_0\tperson\nZpAlbL-YE0E_0\tbus\nZpCrRb_a9QI_0\tperson\nZpCuVDLXQSw_1\thorse\nZpCuVDLXQSw_0\thorse\nZpSzmFLEm0c_1\tcar\nZpURI0wRgws_0\tperson\nZpXJ-0dv6Us_1\tcat\nZppFK22HdIk_0\tperson\nZpqXtZfe-3w_0\tcat\nZp1nQXN7dyg_0\thorse\nZp2CuvTAZLw_1\tperson\nZp740cgCPPE_0\tperson\nZp8GHxi_5l0_0\tknife\nZp8GHxi_5l0_1\tknife\nZqM9VL5DJ28_1\tperson\nZqOcOhiAI6k_0\tcow\nZqS1PqS3iT0_0\ttruck\nZqW027iDkCI_0\tperson\nZqXFvdeNrYI_1\tperson\nZqa0-AUnl9s_0\tperson\nZqm8A3wpeJQ_0\tperson\nZqtVs5joekw_0\tcow\nZq018zZzx1c_0\tperson\nZq1u84GLCHI_0\tmotorcycle\nZq5nK49UZ_o_2\telephant\nZq5nK49UZ_o_3\telephant\nZq5r3BwLg_c_0\tskateboard\nZq-RNCVoZFs_0\tperson\nZrA0NE09ipc_0\tdog\nZrDoGqu-A5A_0\ttrain\nZrI4ruv6B3o_0\tbird\nZrKpKmp29_o_1\tbird\nZrKpKmp29_o_3\tbird\nZrKpKmp29_o_6\tbird\nZrK5JKg83qU_0\tperson\nZrUx83OGIOk_0\tperson\nZrW7Si0hJKI_0\tperson\nZrbVa__ne-0_0\tperson\nZrfPtqkS_MY_0\tairplane\nZrfPtqkS_MY_1\tairplane\nZrfPtqkS_MY_5\tairplane\nZrfPtqkS_MY_6\tairplane\nZrfPtqkS_MY_7\tairplane\nZrgMnk8f_TA_0\tperson\nZrgMnk8f_TA_1\tperson\nZruJ2hhn9z0_1\tperson\nZrvWeRZ_dyU_1\tcow\nZrvWeRZ_dyU_0\tcow\nZrwXUWAxjIM_0\tgiraffe\nZrzdqF_ePkM_0\thorse\nZrzdqF_ePkM_2\thorse\nZr5eAtkuxQ0_0\tbear\nZr_AAxouNfg_0\tcow\nZsCaDsfPNec_0\tcow\nZsDDOO-bpFA_0\tperson\nZsDDOO-bpFA_1\tperson\nZs
ESx0nIYqI_0\telephant\nZsESx0nIYqI_6\telephant\nZsESx0nIYqI_7\telephant\nZsJCwiPEvkI_0\tperson\nZsLDBiZ0o14_0\tskateboard\nZsPVRik6m_c_1\tbear\nZsSkZhL-HOM_2\tbicycle\nZsb2ucv_mAg_0\tperson\nZsdv_3EWODM_0\tperson\nZsyMk67bjIM_0\tdog\nZs0j_1tuTDo_0\tperson\nZs1ltKMvRec_0\tperson\nZs1ltKMvRec_1\tperson\nZs79wUXMpx8_0\tbear\nZtA8n6dsH-w_4\tcar\nZtA8n6dsH-w_1\tcar\nZtA8n6dsH-w_2\tcar\nZtA8n6dsH-w_3\tcar\nZtDUifuLGrM_2\tbird\nZtEDTuHcM9U_0\tperson\nZtM6JRtVtpU_0\tmotorcycle\nZtToUMIMdYE_0\tperson\nZtlDJ70ap8Q_1\tbear\nZtlJcLPPjsg_0\tperson\nZtsGzhfZg9g_0\tperson\nZttTri7sEK4_0\ttrain\nZtyep9o6CLE_4\tbus\nZtyep9o6CLE_6\tbus\nZtyep9o6CLE_7\tbus\nZt9qKAA_xyA_0\tperson\nZuC0Jr3Y3s8_0\tcar\nZuGpcHtPLLA_0\tperson\nZuWlzE4F84c_0\ttruck\nZuhmoYvtP40_1\tperson\nZuicm6_fX9I_1\tbicycle\nZunjyc7DIP4_2\ttrain\nZuoBIQ-Kq74_0\tperson\nZuqXxaMAufU_1\tperson\nZuuL_Yi4FZQ_1\tdog\nZuuL_Yi4FZQ_0\tdog\nZuy59kV2M-0_1\tperson\nZu-vh46IwiU_0\tcow\nZu_dXJvDHdo_0\tperson\nZu_f8xuOweg_3\telephant\nZu_f8xuOweg_1\telephant\nZu_f8xuOweg_2\telephant\nZvDo2WbWL4g_1\tperson\nZvDo2WbWL4g_0\tperson\nZvJItzBdO04_1\tperson\nZvJrqHsPVL0_0\tbus\nZvSN_Y6vK3c_0\tperson\nZvV5mqJgbcQ_0\tcow\nZvfCrJvE1Tg_0\thorse\nZvfIYK-AWCw_0\tperson\nZvlx8vSlAPs_0\tbicycle\nZvtGPgtfhE8_0\tperson\nZvtuffxB5EY_0\tperson\nZvyOzgxu-4Y_0\ttruck\nZvzVi9irgvw_0\tbear\nZv6DWiKAux4_1\tperson\nZv9e9Vm6Vis_0\tmotorcycle\nZwDqCxCFpF4_0\tbicycle\nZwDqCxCFpF4_3\tbicycle\nZwH5xnh6Thw_0\tperson\nZwW6ybIP8ys_0\tbus\nZwdSYMz9ioo_0\tperson\nZwmRodW5wgg_0\thorse\nZwrtmR7ewc4_0\tperson\nZw7a69yU7f0_0\tmotorcycle\nZxAlVbDwlCc_2\tbird\nZxAuwcxhXxc_0\tperson\nZxE5MjV6i4w_0\tskateboard\nZxOVw-Lc-NI_0\tperson\nZxStkYy-wgo_0\tmotorcycle\nZxUKijmOWJc_0\tperson\nZxitXAY6Xsc_1\tknife\nZxqbwwO81Xc_0\ttrain\nZxv2BRQIWm0_4\tairplane\nZxv2BRQIWm0_5\tairplane\nZxv2BRQIWm0_7\tairplane\nZxv2BRQIWm0_8\tairplane\nAj7HWiU0iQg_0\tskateboard\nAj_E-ObfzoE_1\tperson\nAkGYKkcRyPM_0\tdog\nAkHT5Oo22rQ_0\tperson\nAkMpnm9JrLU_0\tperson\nAkWcVIeIx34_0\tboat\nAkaR-XgClv0_0\tperson\nAkaR-XgClv0_1\tperson\nAkeAdeJpbpg_0\ttrain\nAkeAdeJpbpg_3\ttrain\nAkeAdeJpbpg_1\ttrain\nAkh0VNTS6G4_0\tperson\nAkh0VNTS6G4_1\tperson\nAkh0VNTS6G4_2\tperson\nAkh0VNTS6G4_3\tperson\nAkkNBGH82Ic_0\thorse\nAknHhsIpRqc_0\tairplane\nAkxKeaxEnvQ_0\tdog\nAk3XQg9z8XQ_0\tperson\nAk8ygMb5ykk_0\tperson\nAk8y7dALcJI_0\tperson\nAlAUJSBL-e4_0\tdog\nAlNCPdpo1gg_2\tbicycle\nAlNCPdpo1gg_5\tbicycle\nAlNCPdpo1gg_6\tbicycle\nAlNCPdpo1gg_0\tbicycle\nAlNCPdpo1gg_3\tbicycle\nAlNCPdpo1gg_4\tbicycle\nAlPZeADzCKc_0\tperson\nAlPZeADzCKc_1\tperson\nAlXlVnkucyU_3\ttrain\nAlXlVnkucyU_1\ttrain\nAldX05MqOs0_0\tperson\nAleuxLN7VcU_1\tbird\nAlfbdsgKBAc_1\tperson\nAlhjN5qz_WI_0\ttrain\nAlikgfDMckk_0\tperson\nAlnIWAFamHE_0\tbear\nAltA5vQ7Icw_0\tbus\nAlzB8mXDcYc_0\thorse\nAl2hm71ia6E_0\tperson\nAl9l6-4QDz0_0\thorse\nAl9wCTPpSWM_0\tskateboard\nAmPe5gTOCTo_2\tperson\nAmPe5gTOCTo_0\tperson\nAmPe5gTOCTo_1\tperson\nAmQ_UrwLf3g_0\tperson\nAmRyW4hmSjw_0\tperson\nAmcAzvpvDRg_0\tbear\nAmeaTbvmKvo_0\tcar\nAmt8BGudD0w_0\tskateboard\nAmt8BGudD0w_2\tskateboard\nAmuX-Lv7OeM_2\tcow\nAmwvLxALyCw_0\tperson\nAm2wElVETcw_0\tcat\nAnD6ijSktyM_0\tperson\nAnEC6v3fXrE_0\tcow\nAnOwuTW7DKk_0\tcow\nAnOwuTW7DKk_1\tcow\nAnQ2ZY1JxAY_2\tperson\nAnWClR8yyu8_0\tperson\nAnZKri0xn-c_1\tcow\nAnZKri0xn-c_2\tcow\nAnb2IyxcJbk_0\thorse\nAnevw4PbqTo_0\tperson\nAnkgvW70F5E_0\tperson\nAnkgvW70F5E_1\tperson\nAn342tYqi5g_0\tperson\nAoI1hSI0PSI_2\tcar\nAoKs5jwMuHc_0\tperson\nAoP-So0vjIc_0\tcat\nAoSwFyY0f_A_0\tperson\nAoXHZgatpco_1\thorse\nAoXHZgatpco_2\thorse\nAoXHZgatpco_3\thorse\nAoXHZgatpco_4\thorse\nAo
f87CGS8NQ_1\tskateboard\nAoiCmKM8xz0_1\ttruck\nAojgueRMVCY_0\tperson\nAolLjcEFv5o_0\tperson\nAopGnIjKuEk_0\tmotorcycle\nAo0EDmBMIQk_0\tperson\nAo0EDmBMIQk_1\tperson\nAo7Iys-_lZs_0\tskateboard\nAo_b43xexzA_0\tperson\nApJMiJjCxCY_1\tcar\nApJMiJjCxCY_4\tcar\nApJMiJjCxCY_5\tcar\nApP4eoyM72g_1\tskateboard\nApWIa9pt-vk_0\tperson\nApilCZCROGI_0\tmotorcycle\nApjCOCv29N8_0\tperson\nAppgdYQTII8_0\ttruck\nAp1gZJZynL4_0\tperson\nAp-iaHj5SLk_4\telephant\nAp-iaHj5SLk_5\telephant\nAp-3HonA5go_0\tperson\nAqBYSr4wmpQ_0\tperson\nAqKP0V3Xj7E_0\tcow\nAqOxDunFl08_0\tairplane\nAqOxDunFl08_1\tairplane\nAqSP11-eje8_0\tboat\nAqUxRBRS-n0_0\tskateboard\nAqZhKjLLG70_2\tboat\nAqdAnSsQLI8_1\tperson\nAqdAnSsQLI8_0\tperson\nAqlHHwyJypE_0\tbird\nAqmXAZYmPJc_0\tperson\nAqmXAZYmPJc_1\tperson\nAqo5yZkzz8I_4\ttruck\nAqpinwPH8gM_1\tperson\nAqpinwPH8gM_0\tperson\nAqqs8XxA8gM_1\thorse\nAqqs8XxA8gM_0\thorse\nAqqs8XxA8gM_2\thorse\nAqqvZzLy3IE_0\tmotorcycle\nAqsuBaW1L0Q_0\tperson\nAqxTv7XRAH0_0\tperson\nAq_n86sub5o_2\tbicycle\nAq_n86sub5o_3\tbicycle\nArJNEsuLzDc_0\tperson\nArJaHKwfOEo_0\tperson\nArM6GXi6YnI_1\tdog\nArbpF1NIm-s_0\tcar\nArbpF1NIm-s_1\tcar\nArfeHbvYvKY_0\tmotorcycle\nAriIdq0ZPfE_1\telephant\nAroxRXjr3po_3\tbear\nArrB-hbOgf8_1\telephant\nArvYqb1hJSk_0\tperson\nAryOE3od43M_0\tperson\nAr7WaiToztg_0\tperson\nAr8Wk3m0uZ0_1\tperson\nAr8Wk3m0uZ0_0\tperson\nAr-vOeN30bM_0\tcat\nAsJt3MHLGiM_0\tperson\nAsKUm364aHg_0\tperson\nAsNy8gmdVec_0\tperson\nAsWWfQtZSHA_0\tperson\nAsY1dt4QojM_0\tperson\nAsZa3il8cZQ_0\tperson\nAsfAcK_laZA_2\thorse\nAsix5lGmXlg_0\tairplane\nAskNHLhn1t0_0\tcow\nAs_a3CyN-kQ_0\tbicycle\nAs_a3CyN-kQ_2\tbicycle\nAs_a3CyN-kQ_7\tbicycle\nAs_a3CyN-kQ_8\tbicycle\nAs_a3CyN-kQ_10\tbicycle\nAtFOIFqxLKs_0\tperson\nAtG98YoPQyg_0\tbird\nAtKUkiMSzfs_2\telephant\nAtKieG766oI_0\tperson\nAtawrCflbrM_0\tperson\nAtfXsIpaSgQ_0\tperson\nAtmVV-8Pjsg_0\tperson\nAtmVV-8Pjsg_1\tperson\nAt0-VpJyfBY_0\tskateboard\nAt81P33v_z8_0\tperson\nAuA4_FjCMvo_0\tperson\nAuJLIGyAoj4_1\thorse\nAuJalbdpJP8_0\ttrain\nAuLw9iNhPvw_0\tbird\nAuQYS5w13co_0\tbus\nAucK5ZDM060_1\tairplane\nAuchGbKLdmk_0\tperson\nAucxkj3w3nc_0\tperson\nAugnPC3tdso_0\tmotorcycle\nAunfkfLwN1w_0\tbear\nAunfkfLwN1w_3\tbear\nAunfkfLwN1w_2\tbear\nAutsbWiMLoY_0\tperson\nAuuZLhOpxcI_1\telephant\nAuuZLhOpxcI_6\telephant\nAvGLANxpJ-Y_1\tperson\nAvJexx39uCE_0\tperson\nAvOpMSLKXTM_1\tperson\nAvOpMSLKXTM_0\tperson\nAvP_DY8SuU4_0\tperson\nAvQgdEmyoFA_0\tairplane\nAvVBLLWgeWo_0\thorse\nAvdUsPyX5lE_0\tperson\nAvdgweWTeeg_0\tcat\nAvgusAC7DUU_0\tbird\nAvlg_B60Z0E_0\tbear\nAvlg_B60Z0E_4\tbear\nAvp80BzoG9Y_1\tperson\nAvp80BzoG9Y_0\tperson\nAvr6FKguO2o_4\tskateboard\nAvr6FKguO2o_1\tskateboard\nAvvWfbj5x88_0\tperson\nAv78r-lWmCs_0\thorse\nAv8Hkyi1fdc_1\tknife\nAv8k98IyQhs_0\tperson\nAwAX85eLJH4_0\tcow\nAwDIxdZSWKQ_0\tperson\nAwECiro8_h4_1\telephant\nAwEtKHnfKJ8_1\tcow\nAwEtKHnfKJ8_2\tcow\nAwFA2LuUWN8_0\tperson\nAwM3QWX5Jsc_0\tperson\nAwOJkAFe8Xs_0\tbicycle\nAwZ6nHwMMuA_0\tdog\nAwqZ_9G0pWg_0\tperson\nAwsAA0Xk1J8_0\tperson\nAw-D6USSthk_0\tbear\nAxAIZDsViZw_0\tperson\nAxAIZDsViZw_1\tperson\nAxAkf4tRXbI_0\tperson\nAxLiwCy5umU_0\tperson\nAxUFYNgnIq4_0\tperson\nAxg0nab1SDc_0\tperson\nAxvrCidcYqM_1\tperson\nAx2iIXU4Gyc_0\tperson\nAx5dd2_2sFA_1\tcar\nAx5dd2_2sFA_0\tcar\nAyAAL3Rd_Rg_3\tbicycle\nAyAAL3Rd_Rg_5\tbicycle\nAyAA5q5B-84_0\tperson\nAyAA5q5B-84_1\tperson\nAyH0zvW0ndQ_1\tbird\nAyKf0Ufaa_o_0\tperson\nAyfmwf4oW_k_0\tperson\nAyhXfIgl4Kk_0\tknife\nAyo9w6aKSY0_0\tperson\nAyqiYJuONPs_0\tairplane\nAyqvDNKC1CQ_0\tperson\nAy2VXLYZW50_1\tperson\nAzFaa7gRy0k_0\tperson\nAzMHek-Oow0_0\tcat\nAzNf4dneWFU_1\tperson\n
AzVMbaXM_QM_1\tboat\nAzVoOWc-ueY_0\tperson\nAzaUz9OpHMI_0\ttruck\nAzeA4K-S0CI_1\tperson\nAzew3w3WZfI_5\tskateboard\nAzew3w3WZfI_1\tskateboard\nAzew3w3WZfI_3\tskateboard\nAze0ijK2t2M_0\tperson\nAze_lfqL6mw_0\tcow\nAzhTPVtwJVk_0\tperson\nAzh82KkzMVs_0\tbird\nAzh82KkzMVs_1\tbird\nAz0Hr5pa_Pw_0\tperson\nAz5vE5ssYxk_0\tperson\nAz5vE5ssYxk_1\tperson\nAz7glF28oOw_0\tperson\nAz_5XR0RSv0_1\tperson\nAz_5XR0RSv0_2\tperson\nA0JB0OdZ2NE_1\tknife\nA0L6M_8fDyM_0\tperson\nA0Nx4JbdXO0_0\tperson\nA0PQ6Si3nOU_0\tairplane\nA0XGvY-NO00_5\tairplane\nA0jhzA4HvrY_0\tumbrella\nA0n7dLEgCjo_0\tcow\nA02wb1V5W0A_1\tperson\nA02wb1V5W0A_0\tperson\nA08TTc4NLik_0\tperson\nA1Hvxm2NCpk_1\tairplane\nA1H8wrYSPlQ_0\tbicycle\nA1NBheOGWNE_0\tbird\nA1fdw6WBO_w_0\tcat\nA1oQZf9EXPg_0\tperson\nA1oQZf9EXPg_1\tperson\nA1oQZf9EXPg_2\tperson\nA1r3FpgoeP0_0\telephant\nA1unjHSiYuk_0\tskateboard\nA1w5Z9ryeJI_0\telephant\nA1w5Z9ryeJI_2\telephant\nA1w5Z9ryeJI_1\telephant\nA11L_7hymDI_0\ttrain\nA2ODL8T477o_0\tumbrella\nA2UiM17u3Ao_0\tbear\nA2Vhzr_2AAY_0\tperson\nA2WfZtUfAy4_1\tperson\nA2gisYdnTi0_0\tbird\nA2iD7VC-A9g_1\tcow\nA2p7Z_Ia9Ak_0\tperson\nA2p7Z_Ia9Ak_1\tperson\nA2rOJWkWoRo_0\tperson\nA23nZy9maYk_1\tperson\nA23nZy9maYk_0\tperson\nA29DgqMHeEQ_0\tperson\nA3EcM1p8r14_0\tperson\nA3FTEFw2Bo0_3\thorse\nA3JmvJSIxeU_0\tperson\nA3Lmb8E3Ovw_0\tperson\nA3L2pdrSYdE_0\tperson\nA3MpR785VH8_0\tperson\nA3MpR785VH8_1\tperson\nA3UoQh4P1_o_0\tperson\nA3ZIKfh-QPo_0\tperson\nA3b1bCXjWWE_1\tknife\nA3eocVVFaX8_0\tperson\nA3vXSLx3blY_0\tperson\nA4BVLpu2EQI_1\tcow\nA4CYcvyDGec_0\tperson\nA4P_7hjid7Q_0\tperson\nA4gw9TbmL54_0\ttrain\nA4ijVvmthCQ_0\tperson\nA4oNmb9PiYQ_0\tperson\nA4t4imYj0tA_1\tdog\nA4u61iOuzr0_0\tperson\nA4u61iOuzr0_1\tperson\nA4u61iOuzr0_2\tperson\nA4wLmZZODQU_1\tperson\nA4zzoIg6-W4_0\tskateboard\nA42uEePHr8c_0\tperson\nA438LRj4MN0_0\thorse\nA5Ho_qla_bQ_0\tskateboard\nA5Kii0lU4h4_0\tperson\nA5ZAKa7xw_I_0\tperson\nA5ciZloGW2o_2\thorse\nA5nuZ-mKcBE_4\tairplane\nA5nuZ-mKcBE_7\tairplane\nA5-RNkQ5yzU_0\tperson\nA5-yfb7-1NM_1\tperson\nA6DfgaqbLDM_0\tperson\nA6GND629_dg_0\tperson\nA6IIHamstQo_0\tperson\nA6KXKalaC7M_0\ttrain\nA6KXKalaC7M_1\ttrain\nA6LmIR6_mtk_1\ttruck\nA6L7XcS8oF4_0\tperson\nA6MkQdxLBSI_1\tbicycle\nA6MkQdxLBSI_6\tbicycle\nA6SipDli3dE_0\tperson\nA6Tx9smTdyo_0\tboat\nA6Zbpn5hd6Q_0\tperson\nA6jEv9bIawA_1\tbus\nA6rxrML8vyk_0\thorse\nA66pUkVBt_M_0\tperson\nA7GxuMCyr50_0\tcat\nA7KLi_xOQFc_0\tperson\nA7SDQoaalEY_0\tperson\nA7SIvy9srFU_0\tperson\nA7Zz2ESO-PM_2\tbear\nA7aEqy5QRJ4_0\tcat\nA7cjjAkLjfQ_1\tperson\nA7cjjAkLjfQ_0\tperson\nA7coVhNQrSs_0\tcow\nA7c_1Wcr5hM_0\tcow\nA7ltojA7WTk_0\tperson\nA729VkZvy_s_0\tperson\nA7_WDIFj23s_0\tcow\nA7_hPlvWyGc_0\tcow\nA8F5UnJOU5A_0\tboat\nA8MGPGEOAWk_0\ttrain\nA8PGaHrBO-g_0\tbus\nA8PlfHNTHVQ_0\tperson\nA8RztgyPvCE_1\thorse\nA8U5HWirVCk_0\tperson\nA8gL-e9dRa8_2\tbear\nA8oMFSrcteU_0\tbicycle\nA80V1BVUvf4_0\tairplane\nA89eQvkZ4go_1\tcar\nA89eQvkZ4go_0\tcar\nA89tFE_-szI_0\tperson\nA9ACfqLHRIM_0\tperson\nA9ACfqLHRIM_1\tperson\nA9LEZHrMOh8_0\tperson\nA9Mw5uHZ7WM_0\tdog\nA9UlOqoTO3A_0\tcar\nA9WAS-oLC8Q_1\ttrain\nA9WAS-oLC8Q_2\ttrain\nA9etwHCHkQM_0\tperson\nA9fblLjEn7E_1\tperson\nA9fblLjEn7E_0\tperson\nA9f0bktW-uM_0\ttrain\nA9sznaQipiM_1\tperson\nA9sznaQipiM_3\tperson\nA9tOXINxUeA_2\tperson\nA-BcgCHWiLE_1\tknife\nA-JRl34Jmok_0\telephant\nA-JRl34Jmok_1\telephant\nA-JRl34Jmok_2\telephant\nA-JRl34Jmok_3\telephant\nA-MMqq_FLXo_0\tperson\nA-R5A0HMT3w_0\tboat\nA-SdlQGGdZg_1\tperson\nA-Vo3GQZrd8_0\tskateboard\nA-gQnulNzVo_0\tperson\nA-gZpG3OWNM_0\tperson\nA-jGPkEGCdo_0\tperson\nA-qT3DcitzM_0\tskateboard\nA-0o6fFroLk_3\tb
ird\nA-1_sR8c39g_0\tskateboard\nA-1_sR8c39g_3\tskateboard\nA-37XpNHfQw_0\tcow\nA_AbA6K8Ouc_0\tperson\nA_AbA6K8Ouc_1\tperson\nA_B83i3dvWQ_0\tperson\nA_CDsn7za4c_1\tperson\nA_CDsn7za4c_0\tperson\nA_DqzmxTyPQ_0\tdog\nA_Eaoo5O71M_0\tskateboard\nA_Eaoo5O71M_3\tskateboard\nA_Nb1jSK7vY_0\tperson\nA_RHSgWC24U_0\telephant\nA_R7iK_MLgM_0\telephant\nA_Z7Cj10nKA_0\ttruck\nA_aN9LUuMY8_0\tperson\nA_g6G7vBr8I_1\tperson\nA_qnLTG_VBg_0\tperson\nA_uC3UuAVQE_0\tcow\nA_uxGLJDf9I_0\tperson\nA_xtvYH_7vg_0\tperson\nA__fHCZfwtM_0\tperson\nBACWpC6GdxY_5\tairplane\nBACWpC6GdxY_3\tairplane\nBANdhsMHpw0_0\tperson\nBANdhsMHpw0_1\tperson\nBANdhsMHpw0_2\tperson\nBAOR6YBIb8U_1\tskateboard\nBAO0Uce3vXA_0\tcat\nBARELTt_9Ko_0\telephant\nBAWN6Xpw7sg_0\tperson\nZx3x1-cBu7I_0\tperson\nZx3x1-cBu7I_1\tperson\nZx8LkdyJzG8_0\tperson\nZyDqefuyQfU_1\tcat\nZyDqefuyQfU_2\tcat\nZyNwfXl7s2w_0\tmotorcycle\nZyQL8Ugiq4Y_0\tperson\nZyQxolWsw2o_0\tcat\nZyQ_gFztNXU_0\ttrain\nZyQ_gFztNXU_2\ttrain\nZyqvHk5Ugjk_0\tbird\nZyrTKvb3Uq4_0\tperson\nZyuoNtTPexE_0\tperson\nZywGdneFaWs_0\tdog\nZyw6pIArS1g_0\ttrain\nZy04v73t_oU_0\tperson\nZy4s6kQgRAs_0\tperson\nZy7a1FYT_2I_0\tperson\nZy9BXzUqORk_0\thorse\nZzAgbPU4qoA_0\tperson\nZzBP5IPOX7Q_0\tperson\nZzBP5IPOX7Q_1\tperson\nZzFvfG2mfRU_0\tcow\nZzIeftZXBMw_0\tperson\nZzPUlKXnUgE_0\tperson\nZzRMRSyCzzU_0\tperson\nZzS_a0D4AhE_1\tskateboard\nZzWMnTc1LBY_0\tperson\nZzdl60FMu48_0\tperson\nZzeCPtqruzg_0\tperson\nZzgU7APbNfs_0\tperson\nZzgoobk2eIA_0\tperson\nZzgoobk2eIA_1\tmotorcycle\nZzhCWdZJAQY_0\tperson\nZzic21J3Ea8_0\tperson\nZznEoJsdkVI_0\tperson\nZzpccfyFyL0_0\tperson\nZzpccfyFyL0_1\tperson\nZzq_S3HujTo_0\tperson\nZztD-tmxwyc_0\tperson\nZzwlUbCfscM_1\tdog\nZzxRC2pLBVA_0\tperson\nZz2oIdSVB6Q_0\tperson\nZz5GwCMuMj0_0\tperson\nZ0D6uKz7v5Q_0\tperson\nZ0m37r4St5Q_3\ttruck\nZ0pLWU6Wg-o_0\tdog\nZ0stjlmfTpU_0\tcat\nZ0xYA5PwrjI_0\tperson\nZ02r-T2hINk_0\telephant\nZ04k6LBSuRk_1\tperson\nZ1G9pYdQwCY_0\tperson\nZ1HK6zDIJhg_0\tperson\nZ1MvNM4bmxs_0\tperson\nZ1SML4zVPik_0\tperson\nZ1U7Wnf_WiA_0\tcat\nZ1XafO8l8gs_0\tperson\nZ1aU1CigISE_0\tperson\nZ1a8Tqg-yjE_0\tperson\nZ1e-5FLWf6I_0\tcat\nZ1gxFkBk4EY_0\thorse\nZ1j81keSb9Q_0\tmotorcycle\nZ1j81keSb9Q_1\tmotorcycle\nZ1nr46t7EVk_0\tairplane\nZ1pv5a0as9c_0\ttrain\nZ1rB_fu2lKY_0\tdog\nZ1x8sEeQIuI_1\tmotorcycle\nZ13O2uGP1nE_0\tcar\nZ14p6heAJRc_2\tperson\nZ14p6heAJRc_0\tperson\nZ14p6heAJRc_1\tperson\nZ15QqHX1Z6M_1\ttrain\nZ2HF5_tyxR4_0\tbus\nZ2K03YbfcGg_0\telephant\nZ2QWOKCHkM8_0\tcow\nZ2QWOKCHkM8_2\tcow\nZ2QWOKCHkM8_1\tcow\nZ2SljfwK58g_0\tskateboard\nZ2SljfwK58g_1\tskateboard\nZ2VI7eM7BB0_0\tbear\nZ2acpS-e_cg_0\tperson\nZ2cvYI55Dps_0\tskateboard\nZ2dab1zmqv8_0\thorse\nZ2gvlPrX5HA_5\telephant\nZ2gvlPrX5HA_6\telephant\nZ2kcVxTMZtM_0\tperson\nZ2n2a39MxJQ_7\tbicycle\nZ2n2a39MxJQ_1\tbicycle\nZ2n2a39MxJQ_2\tbicycle\nZ2n2a39MxJQ_3\tbicycle\nZ2n2a39MxJQ_4\tbicycle\nZ2n2a39MxJQ_6\tbicycle\nZ21DONVXY1Q_2\tzebra\nZ23Gg06mNj8_0\tperson\nZ236ql8Tpvg_0\tperson\nZ23_3K28VSI_1\tgiraffe\nZ3AHrAB9qhw_0\tcat\nZ3AplkSO6kA_1\tcar\nZ3KMX_N6WSg_0\tperson\nZ3KMX_N6WSg_1\tperson\nZ3KMX_N6WSg_2\tperson\nZ3PzgfwbjLk_0\ttruck\nZ3i5sys0boU_0\tperson\nZ3i5sys0boU_1\tperson\nZ3sRLCOCxMY_0\tcat\nZ37dIpwPIqI_3\tbicycle\nZ4DQoYcs5mM_2\tperson\nZ4DQoYcs5mM_0\tperson\nZ4DQoYcs5mM_1\tperson\nZ4XLmQjbg7Y_0\tperson\nZ4XLmQjbg7Y_1\tperson\nZ4ZKg0KbSm4_0\tbicycle\nZ4ZPyzSGdRU_0\tdog\nZ4bO8cpjQZI_0\tperson\nZ4bO8cpjQZI_1\tperson\nZ4bW8HHeYP8_0\tcar\nZ4mYWGPFVkw_0\tperson\nZ4n5ieSA6cM_0\tcow\nZ4tOSluXWnE_1\tumbrella\nZ4u3PPkCYOs_0\tperson\nZ4u4zasFeAw_1\tbird\nZ4u4zasFeAw_0\tbird\nZ4vRtZE1WjQ_0\tdog\nZ4voZ3h_Dyk_1\tperson\
nZ4xVMaYAqJ4_1\tbicycle\nZ446P08C8vE_0\tperson\nZ5KGx49qaAE_3\tbird\nZ5KGx49qaAE_5\tbird\nZ5KGx49qaAE_6\tbird\nZ5Qo8xdb8os_0\telephant\nZ5RKMhlNHEE_0\tperson\nZ5ZBRI0sc4Q_0\tbicycle\nZ5iJRTvm-Kw_1\tperson\nZ5iV683VDk0_0\tperson\nZ5ls93B1bBk_0\tperson\nZ5mQ_0ttu74_1\telephant\nZ5mQ_0ttu74_2\telephant\nZ5yNMm-TIjI_0\tbus\nZ5zGHZ82r9A_0\tperson\nZ53B8-gR640_0\tperson\nZ6BVtmEMfkI_0\tperson\nZ6FikDWrKkA_0\tperson\nZ6MfvYa9hCs_2\tcar\nZ6MfvYa9hCs_3\tcar\nZ6PyYboRq5c_0\tdog\nZ6Q3LdMwgi4_0\tcat\nZ6WrlM4ZZKA_0\tperson\nZ6j-7La25S4_0\tperson\nZ6j-7La25S4_1\tperson\nZ6j-7La25S4_2\tperson\nZ6k1unwmsfA_1\tperson\nZ6sd800eFC4_0\tperson\nZ6tGpP8q53A_9\telephant\nZ6tGpP8q53A_2\telephant\nZ6tGpP8q53A_4\telephant\nZ6vCDHs6NrM_0\tperson\nZ6yNyxXPPOw_0\telephant\nZ60iXtKpGMQ_0\tbus\nZ61B0fShfbs_1\tcow\nZ7AqkWEBwV8_0\tperson\nZ7DGMMQP79U_0\tcat\nZ7I8r1AqMhU_0\tperson\nZ7JHCdt48hA_0\tairplane\nZ7KEzuE_7hQ_0\tperson\nZ7LfnFm4OHs_0\tperson\nZ7WaJYiX_1o_0\tperson\nZ7WaJYiX_1o_1\tperson\nZ7bMdjLGiAo_0\tperson\nZ7eGCBjkKrU_0\tdog\nZ7gxE6ZSQXI_0\tairplane\nZ7iq45DtCTM_4\thorse\nZ7iq45DtCTM_5\thorse\nZ7zeXJ5lJRY_1\tperson\nZ7zeXJ5lJRY_0\tperson\nZ72sIqrQAF4_0\tskateboard\nZ74EGXvFjFM_0\tperson\nZ76Y_PNOgK4_1\tperson\nZ76Y_PNOgK4_0\tperson\nZ78P87kjtu4_0\tperson\nZ8CXvEObu4c_0\tdog\nZ8NfZN7WDKw_0\tperson\nZ8Oi5HJEyS4_0\tskateboard\nZ8k0TTq5BC8_0\thorse\nZ8s-Kg1PuSg_0\thorse\nZ86E7eIS9t8_1\tairplane\nZ89mG68LE2k_0\tperson\nZ8942_IPiTo_0\tbicycle\nZ8942_IPiTo_2\tbicycle\nZ9SwanypLJM_0\tbear\nZ9SwanypLJM_1\tbear\nZ9XS4cvVVy4_2\tperson\nZ9awHnw5J4o_0\ttruck\nZ9bt3xT5dCc_0\tcat\nZ9f--QLEQqI_1\tmotorcycle\nZ9jDpr533Cg_0\tcat\nZ9o5BEm1UeI_0\tperson\nZ9pHCguAO5c_0\tperson\nZ9wO9tftNG0_0\tbus\nZ9x_cPvKErA_0\tperson\nZ98EscJ1IG8_0\tperson\nZ98GFnZo-LA_0\tperson\nZ-I0S45eRT0_0\tperson\nZ-J0UQfvb5M_0\tperson\nZ-MvTXpMdm4_0\ttruck\nZ-PMnTjqAS8_0\tperson\nZ-QO3lrbh7c_1\tskateboard\nZ-VVWO3Ovgs_0\tperson\nZ-djkrj-5Cs_0\thorse\nZ-glDeBd2xA_0\tboat\nZ-lrIzXr9ck_0\ttrain\nZ-mTl_ipVa4_0\tumbrella\nZ-mXYrvubn8_0\tdog\nZ-zy-BzjLT0_0\tmotorcycle\nZ-zy-BzjLT0_1\tmotorcycle\nZ-7W_lh96xg_1\tairplane\nZ_JXyC6v_-s_0\tperson\nZ_KItWz0mTI_0\telephant\nZ_PViIzihe8_0\tperson\nZ_QVuM8wEmQ_0\tperson\nZ_QVuM8wEmQ_1\tperson\nZ_kPrUEqYXE_0\tbird\nZ_p4gYNjwG0_0\tperson\nZ_85vV3FHUg_0\tperson\nZ_85vV3FHUg_1\tperson\naACqXYewohQ_0\tperson\naAI7SN5_3CY_4\tbus\nBAhHrnCKvcM_2\tboat\nBAhHrnCKvcM_3\tboat\nBAhHrnCKvcM_5\tboat\nBAmy5TQke7w_0\tperson\nBAnfbsB8rIY_0\tbear\nBAnn4L-iNLE_0\tperson\nBAq_fnyQ6z4_0\tperson\nBA4ZGv8flRA_0\tperson\nBBCBbdz3Qvs_0\tdog\nBBCBbdz3Qvs_1\tdog\nBBLAyHVLHh8_0\tperson\nBBOd-YBAUgw_0\tbicycle\nBBPlqTbAphY_1\tperson\nBBQ2xu9OehQ_1\tdog\nBBS5owVJaTU_1\tskateboard\nBBS5owVJaTU_0\tperson\nBBVPb5z0x7k_0\tcat\nBBXs1J4j2mA_0\tskateboard\nBBdA1qc9H-g_0\tskateboard\nBBk7ZnOEjMA_0\tperson\nBBopEl_n3Fc_0\tperson\nBBpFu8j2fBc_0\tbus\nBBpFu8j2fBc_1\tbus\nBBqTHwpYeEc_0\ttrain\nBBrfgTTduuI_0\tperson\nBB9l_znmPls_0\tumbrella\nBCBCK2k2Bdw_0\tperson\nBCBgjRWuOcA_0\tperson\nBCGB6zaBDpg_1\tperson\nBCGB6zaBDpg_0\tperson\nBCI91i3aEek_0\tmotorcycle\nBCJbf6um28s_1\tairplane\nBCKVauIBDFM_2\tbear\nBCin0MjzM8Y_0\tcow\nBCoTKGNhMVw_0\tdog\nBCoTKGNhMVw_1\tdog\nBCo8e6n2dYQ_1\tdog\nBCqYnyGIols_1\tbicycle\nBCsmPvRqaNk_0\tperson\nBCuzA73UTl4_0\tperson\nBCwAdqAouFU_0\tboat\nBCwyoTwckSE_0\ttruck\nBDFBV8JbIF8_0\tperson\nBDFVkc87amI_0\tperson\nBDHUAJn9nnc_0\tperson\nBDHsXkbkS-w_0\tskateboard\nBDOemJGz04I_1\tperson\nBDcTOMebCHs_0\tperson\nBDcTOMebCHs_2\tperson\nBDcTOMebCHs_1\tperson\nBDdIKtFwnjA_1\ttrain\nBDdbk3ZQrP0_0\tcat\nBDdhenNSY9o_0\tperson\nBDk-BklqSdI_0\tperson\nB
DroGke9Ogg_0\thorse\nBDroGke9Ogg_2\thorse\nBDtGFVFexaU_0\tperson\nBDzXi4ukhN0_1\tperson\nBDzXi4ukhN0_0\telephant\nBD30MTvTuYU_0\tperson\nBD7TQWBytfQ_0\tknife\nBEArUGKSB-Y_0\ttrain\nBEArUGKSB-Y_1\ttrain\nBEKMcritl6M_1\tperson\nBEMcwkY2beQ_0\tperson\nBERvmKL4Glc_0\tperson\nBESdHwoIDsA_0\tdog\nBEUB64a3AIY_0\telephant\nBEUB64a3AIY_1\telephant\nBEYy-ZRSWSk_0\tskateboard\nBEa_8wp0528_0\tcow\nBEqG56tHTEI_2\tbus\nBEqPniAgjaY_0\tcat\nBErty5GnulU_0\tperson\nBEuXjB1zLeE_1\tcar\nBExSp8l17GY_0\tperson\nBExlFv0scM0_0\tperson\nBE10HJUHUHw_1\tperson\nBE8KS4PZH54_0\telephant\nBE-crlUXSSE_0\tdog\nBFC3DWxOces_2\tairplane\nBFC3DWxOces_1\tairplane\nBFC3DWxOces_3\tairplane\nBFC3DWxOces_4\tairplane\nBFC3DWxOces_5\tairplane\nBFJ4v-XlKAg_0\tskateboard\nBFPQCoJqTRk_0\tperson\nBFeIwErwdS8_0\tperson\nBFeIwErwdS8_1\tperson\nBFggPKKt6wk_0\tperson\nBFggPKKt6wk_1\tperson\nBFhh8z0Fmk0_0\tperson\nBFponHgVsdA_0\tperson\nBFs239KuGa8_1\tperson\nBFxUyTrqZhU_2\thorse\nBFxUyTrqZhU_4\thorse\nBF4YTMGtDs8_1\tskateboard\nBGAQlsAiJ_0_0\tairplane\nBGAQlsAiJ_0_1\tairplane\nBGAQlsAiJ_0_2\tairplane\nBGAQlsAiJ_0_3\tairplane\nBGAQlsAiJ_0_4\tairplane\nBGAQlsAiJ_0_5\tairplane\nBGAQlsAiJ_0_6\tairplane\nBGLM4yl_Ka4_2\thorse\nBGO3DBbNozc_0\tskateboard\nBGR1gMrCTpA_0\tperson\nBGT-p0CgoFg_1\tperson\nBGW9SDHTWKY_1\tperson\nBGW9SDHTWKY_0\tperson\nBGee3Ar-Fbg_0\tairplane\nBGpx9Xow9Ew_0\tcat\nBGqNnzNtWkc_0\tperson\nBGq6TeZHkLU_0\telephant\nBGshZfVDb5w_0\tperson\nBG4QyYPKYvg_0\tperson\nBG4QyYPKYvg_1\tperson\nBG_x-4YUtFE_0\tdog\nBHA5UUg4lCw_2\ttrain\nBHH2sTfHwks_0\tperson\nBHH2sTfHwks_1\tperson\nBHPSyq8L5S8_1\tperson\nBHQkdwmXrtI_1\tskateboard\nBHQkdwmXrtI_2\tskateboard\nBHYrJ1yaM-w_0\tcar\nBHdbqcxv3Vw_0\ttruck\nBHfXgxJCcrw_0\tboat\nBH5fxWFpHvE_0\tairplane\nBH5npOcPlY0_0\tcar\nBH6nqU68dWo_0\tperson\nBH74QV_0vtc_0\tbird\nBH9Ob6Uiw1w_1\tperson\nBH_SlBCiQ_8_0\tperson\nBIETPRRGGgY_4\telephant\nBIETPRRGGgY_5\telephant\nBIIU36E15Vo_0\tperson\nBIMggdk7AHQ_0\tcat\nBIQeL2o_Ogg_0\tperson\nBIUQ935UkDo_0\tcow\nBIVLmUTNYbk_0\tperson\nBIV-1bNQ7pI_0\tskateboard\nBIfqcruNiic_0\tperson\nBIkDAHYmcFw_0\tperson\nBIkDAHYmcFw_1\tperson\nBInC--gFqHM_0\tperson\nBIvTK9qvP1w_0\tskateboard\nBIxCP9ck4-8_0\tcat\nBI5i3aDb_FQ_1\tperson\nBI-kr0tFSDg_0\tperson\nBJIZYdOZHzg_0\tumbrella\nBJK_SXpLtnI_0\tbird\nBJMP05du3Eg_0\tperson\nBJQstPOa8Wk_0\tperson\nBJS2YLbErJg_1\tperson\nBJfRrRcfmF4_0\tskateboard\nBJf9nFjqLvg_1\tbird\nBJlcWhfsg_g_0\tperson\nBJriJT6zJl8_1\tskateboard\nBJwoZcHbBK0_0\tumbrella\nBJ05o1_UKzw_0\tdog\nBJ44CIPaDf8_0\tperson\nBKAo6GZ_kNs_0\ttrain\nBKTCaKgjiag_2\tperson\nBKUKi0vTt0A_0\tperson\nBKdSO_PNJ4U_1\tperson\nBKdSO_PNJ4U_2\tperson\nBKdSO_PNJ4U_0\tperson\nBKl0wLRzoD8_0\tperson\nBKw9UQxZ3a8_1\thorse\nBK-rIrwen6U_1\tmotorcycle\nBLB0F-XD8IA_1\tperson\nBLB0F-XD8IA_0\tperson\nBLEdcnrUmEo_0\tcat\nBLE9cZ8L3a0_1\tskateboard\nBLFYe-dU9ZU_0\tairplane\nBLO7KJUu8t4_0\telephant\nBLSwwE9mtTQ_1\tknife\nBLcOGv-0-dc_1\tdog\nBLfmgLou27o_0\tcat\nBLvowRU6z7s_0\tbird\nBLxsg2_sjDM_1\tperson\nBLy6RcifNl0_0\tbus\nBLy6RcifNl0_1\tbus\nBLy6RcifNl0_3\tbus\nBL6tcorHrT4_0\tbicycle\nBMH2ReDeKuc_0\tperson\nBMUnKa8FUGQ_0\tperson\nBMUnKa8FUGQ_1\tperson\nBMavrQABR1Y_0\tperson\nBMa4xJ1U3Zk_0\tperson\nBMbZc-jxEfo_0\tperson\nBMbZc-jxEfo_1\tperson\nBMfsf9tDz8o_0\tcow\nBMfsf9tDz8o_1\tcow\nBMhy1f7EuXM_0\telephant\nBMptIGI1Il8_0\tcar\nBMuO2fjJoOw_0\tcar\nBMweJTmvCBg_0\tperson\nBMweJTmvCBg_1\tperson\nBMypDovEOEE_0\tperson\nBMypDovEOEE_1\tperson\nBM0QiiStqd8_1\tskateboard\nBM6XrBQQ7NE_0\tperson\nBM6609PpfO0_1\tperson\nBM6609PpfO0_0\tperson\nBNGDM8sFM8Y_0\tperson\nBNIVhG5pZh8_1\tdog\nBNJwAx3eUKc_0\tperson\nBNK68rC
7RdI_0\tumbrella\nBNTS3OPHAP4_0\thorse\nBNXKRPSr66c_0\tperson\nBNXKRPSr66c_3\tperson\nBNXKRPSr66c_1\tperson\nBNXKRPSr66c_2\tperson\nBNbPQGMLs2w_0\tperson\nBNbPQGMLs2w_1\tperson\nBNbSUPI8feg_0\tperson\nBNcj3161E9o_0\tperson\nBNeWUyqXAC0_1\tairplane\nBNmMB68b1PA_0\tperson\nBNnVfaIfBx0_0\tairplane\nBNnVfaIfBx0_1\tairplane\nBNyK_4tt2fg_0\tcar\nBNybc47kPjg_0\tperson\nBN1HT0FOOhI_0\tdog\nBN7YfmbYuVs_0\telephant\nBOE82LEqzWw_0\tcow\nBOF3tFvEu0o_0\tperson\nBOHE8JNUcQc_0\tboat\nBOMeyjZNH5k_0\tbicycle\nBOQiuL9QlIo_1\tperson\nBOUcPea33eY_2\tskateboard\nBOfgzvAgVQw_0\tbus\naAMhdGuR5DE_0\tcat\naARa5-CLhG8_0\tperson\naAVaqjgY1m8_1\tperson\naAZ2fVjhcIE_0\tperson\naAj0EN1Rnc0_0\tbird\naAj0EN1Rnc0_1\tbird\naAlTiBaLr8M_0\tperson\naAmVIu8X7p4_1\tperson\naAma36YlaAo_0\tzebra\naAsr-Rf6rEE_0\tperson\naAsr-Rf6rEE_1\tperson\naAuz7EfR_fU_0\tcow\naAyTLM_PmzA_0\tskateboard\naAzpA1iK_bE_0\tperson\naA0FrWtkjXk_0\tperson\naA3okCsYx6Y_0\tbird\naA5DYzky6o4_0\tcow\naA8Tz4nZ99g_0\tperson\naBBtHXQoEtM_2\tperson\naBBtHXQoEtM_1\tperson\naBQm5kN1TfY_0\tcat\naBexNnNkORk_0\tairplane\naBq4NF1upak_0\tperson\naBvvXrP1BJs_0\tperson\naB-tGXFmyFU_0\tperson\naCQAel27T4o_2\tperson\naCSzhpU1heQ_0\tcow\naCXfvvg8CF8_0\tairplane\naCiDDC9KFS8_0\tmotorcycle\naClye1Ctc9E_3\ttruck\naCl98J6O9Hk_1\tperson\naCuXZ3LmfSo_0\tperson\naDGpg2xtDk8_1\tperson\naDRE08tF2Wc_1\tbus\naDTQRnSeu_E_0\tskateboard\naDTTYd0Z5Vk_1\tperson\naDjhOS5Xa9Q_0\tboat\naDmLwCb_o30_0\tdog\naDtJSv7XR90_0\tcar\naDte-e70l7U_0\tcow\naDte-e70l7U_2\tcow\naDte-e70l7U_3\tcow\naDt4Puik-kU_0\thorse\naDwTy9yiOms_0\tumbrella\naDxRlCI40wo_0\tperson\naD2q00X0-eg_0\tperson\naD2q00X0-eg_1\tperson\naEJy28mvKPk_0\tperson\naEJy28mvKPk_1\tperson\naEMPa2NvIl4_0\thorse\naERed6pg_h8_0\tperson\naER-VrHLWwY_0\tperson\naER-VrHLWwY_1\tperson\naEZ9vBpXNKU_0\tperson\naEw_vtKlegE_0\telephant\naExRtJpfZEs_0\tknife\naE1veVneq04_0\tperson\naFC2Zy2-0dY_0\tperson\naFFKeUdtPcQ_4\tknife\naFL2V522q9A_0\tperson\naFZ03eEOZFE_0\tbird\naFbVlCimys8_0\tbird\naFdPuo5xB-c_0\tperson\naFhKp8gVZSE_0\tperson\naF86vrld8V4_0\tperson\naF-CmWo8ooM_0\tperson\naF-CmWo8ooM_1\tperson\naGAB6WQFklc_0\tperson\naGE8AphnkNU_0\tknife\naGGiVuwB1p8_0\tbear\naGY3LCiYRnQ_0\tmotorcycle\naGgnovv6T3U_0\tdog\naGgxdwCpAN0_1\thorse\naGhNzJSHCOU_1\tknife\naGmxZatPe60_0\tperson\naGmxZatPe60_1\tperson\naGuWVv6XS8Q_0\tperson\naGuWVv6XS8Q_1\tperson\naGwPRbsru-4_0\tcat\naGxOl5SXjtM_0\tperson\naG1c8x5Dl-w_3\tbicycle\naG1c8x5Dl-w_2\tbicycle\naG1c8x5Dl-w_4\tbicycle\naG20iwkTd_o_0\tperson\naG6D_te6V3s_0\tperson\naHEFx7Zz6E4_0\tperson\naHb4yEpCinw_0\ttruck\naHiGSUMMfBQ_0\tperson\naHnMWEvjLzI_0\tcar\naHrTcxckS-A_0\tperson\naHrTcxckS-A_1\tperson\naHsgQAyd8ss_0\tperson\naH2ZxImdwaU_1\tmotorcycle\naH2ZxImdwaU_2\tmotorcycle\naH5Cd20kdJw_0\telephant\naILjXrLJpHw_0\tumbrella\naIQf8LQ5QPU_0\tperson\naISEbZGZH68_1\tcar\naITryMUZ2b8_0\tperson\naIUYT8pblHs_0\ttruck\naIU5E5tHvdc_1\tperson\naIVWVNBI-n0_0\telephant\naIcFi8LMv0w_0\tairplane\naIjLf6T_K3o_1\tbear\naIoZO3mu_tQ_0\tperson\naI311E3BWwI_0\telephant\naI7axTZFW4A_0\ttruck\naI80ysvYFG4_0\tperson\naJChqX9Ki8A_6\tairplane\naJChqX9Ki8A_1\tairplane\naJChqX9Ki8A_2\tairplane\naJChqX9Ki8A_5\tairplane\naJN9lRsvUv8_0\tperson\naJQ9scZQmz8_0\tperson\naJTABCCQtK4_0\thorse\naJYmkpuijrk_0\tmotorcycle\naJYurtxV0Og_0\ttrain\naJYurtxV0Og_1\ttrain\naJcPyWppCcI_0\tmotorcycle\naJgpAyFnpeI_0\tcat\naJ0dUcEIE_U_0\tperson\naJ1SzcgNcxI_0\tcat\naJ8w4L7E368_0\tperson\naKLf2yC2diM_0\tcar\naKMqeCkIJSg_0\tperson\naKOMIxz2RsM_0\tperson\naKOMIxz2RsM_1\tperson\naKiwOUy71Lo_1\tperson\naKiwOUy71Lo_0\tperson\naKqrwq-Sigg_0\tskateboard\naKtBD-3wFMA_2\tbe
ar\naKtBD-3wFMA_1\tbear\naKu-1-TFl1g_0\tknife\naK-rgio7orw_2\tbus\naLDq7roX-SU_0\tcat\naLFDqtBMblI_0\tcat\naLFxGnCM1zs_0\tperson\naLIa7x90hQc_0\tperson\naLUSnANtUlE_0\tairplane\naLX9cIe12C8_0\tskateboard\naLZAMgiWcXk_0\tbird\naLZ0lbLzg8Y_0\tperson\naLZ0wCY2j2s_1\tperson\naLeeoZ1uVcc_0\tboat\naLjomcNk9fc_0\tperson\naLj4N9Tp6C0_0\tskateboard\naLj4N9Tp6C0_1\tskateboard\naLo-gekX9j0_0\tperson\naLo-gekX9j0_1\tperson\naLuNNRUC09A_1\tbus\naLuNNRUC09A_6\tbus\naLvCIWJQJbY_0\tcar\naLvg1CWrY0Q_0\ttruck\naLxJ8T4CFuM_0\tperson\naLzL_Gldhzk_1\tperson\naLzhO0EqNcc_3\thorse\naL6H2Jatw0k_0\tcat\naL70_drPJtA_0\ttrain\naL8hELYDnTc_0\tperson\naMAKznXul5M_2\tknife\naMAYLrcEnZY_0\tbus\naMAeSegIdJg_0\tperson\naMAeSegIdJg_1\tperson\naMHtvIvWTBU_0\tbear\naMNbQ1Cl5GY_0\tmotorcycle\naMRtQFBcLNM_0\tperson\naMX0jhSq6UY_0\tperson\naMb78Ixlbfw_0\tskateboard\naMqHsdXJ7UU_0\tperson\naMzZxN9uvMc_2\thorse\naNB5rIhRL7g_0\tairplane\naNEpBEnAUhw_0\tmotorcycle\naNF18KgxGHA_0\tskateboard\naNJuTWrnIfo_0\tperson\naNJuTWrnIfo_1\tperson\naNKleFpxS4M_0\tperson\naNKleFpxS4M_1\tperson\naNNWNDoOM_4_0\tperson\naNNWNDoOM_4_1\tperson\naNOXvvKZ3qU_0\tperson\naNZMe4tov6w_0\tcow\naNdJrRu4imo_0\tperson\naNjs-khPjiU_0\tperson\naNj1xwowXYU_0\tperson\naNqkQnGfWEc_2\tskateboard\naNqkQnGfWEc_0\tskateboard\naNwIHwPqFPc_0\tcar\naN4Na3OaY4I_0\tbicycle\naN4NmH-GafU_0\tperson\naN770kOQCD8_0\tperson\naN82X1hXgEE_0\tperson\naN82X1hXgEE_1\tperson\naN9XAd7-rzE_0\tperson\naN9XAd7-rzE_1\tperson\naN_3Pwk-7oY_0\tperson\naOHPVt_93RE_0\tbicycle\naON6RKmi-YQ_2\ttrain\naOPbvY62dMQ_0\tairplane\naOQ-8RoQYEU_0\tperson\naOQ-8RoQYEU_1\tperson\naOQ-8RoQYEU_2\tperson\naOW81s5KlyA_0\tperson\naOcGv3kcyhg_0\tbear\naOcGv3kcyhg_3\tbear\naOjjUIWuG6Q_1\telephant\naOp2NlwNeoY_0\tcat\naOz0l6mLHmA_1\tdog\nBOlBcGufEU8_0\tperson\nBOlBcGufEU8_1\tperson\nBOmgqlRxGlM_1\tperson\nBOmgqlRxGlM_0\tperson\nBOnvGIZd58M_0\tperson\nBOowRuwiNhU_0\tperson\nBOowRuwiNhU_1\tperson\nBOr7CffDWEU_0\tperson\nBOsNz8L3PXI_0\tperson\nBOtfIOm5kag_0\tdog\nBO1T_-iFGdM_5\tbird\nBO1T_-iFGdM_2\tbird\nBO1T_-iFGdM_3\tbird\nBO3UKxe7nyo_0\tperson\nBO5EdP_PO9M_0\tperson\nBO7sWBaaL7g_0\tperson\nBO7sWBaaL7g_1\tperson\nBO-3uvHhUdI_0\tperson\nBO-3uvHhUdI_1\tperson\nBPBBMIdFoiE_0\tperson\nBPEwUVhfaOk_1\tknife\nBPVpq7UrI-k_0\tperson\nBPX5EquoyCU_0\tmotorcycle\nBPX5EquoyCU_3\tmotorcycle\nBPX5EquoyCU_1\tmotorcycle\nBPX5EquoyCU_2\tmotorcycle\nBPiWTYUA7eI_0\tperson\nBPjkQ-lEqcw_0\tperson\nBPrrZpiDdo4_0\tcow\nBPsTDg4C4o0_1\tperson\nBPsTDg4C4o0_0\tperson\nBPxPfFzwlQA_0\ttruck\nBP-GGAbCOhE_1\tbus\nBQDxNNWRtas_0\tcar\nBQDxNNWRtas_1\tcar\nBQEzj9pP1SU_0\tperson\nBQIO94PF6RE_0\tperson\nBQIO94PF6RE_1\tperson\nBQVcvMWyWpU_1\tperson\nBQZGptzIdjE_0\tcow\nBQgPk0vRreM_0\tbird\nBQgPk0vRreM_1\tbird\nBQgPk0vRreM_3\tbird\nBQgPk0vRreM_6\tbird\nBQgPk0vRreM_9\tbird\nBQh5Ib9nynM_0\ttruck\nBQtDUi4BxRg_0\tperson\nBQwLGv7fgQg_0\tperson\nBQxCcefrjSk_0\tcat\nBQyowuIZqFQ_0\tperson\nBQzzKQ9ejzw_1\tknife\nBRCb183ELe0_0\tperson\nBRHPsi_0nTg_0\tmotorcycle\nBRQiSnowTss_0\thorse\nBRVNuDR5WzI_0\tcow\nBRcQS0dQqEU_0\tcar\nBRfegSv5VEk_0\tperson\nBRfegSv5VEk_1\tperson\nBRi_AMaK3kc_0\tdog\nBRjvUtQdukg_0\thorse\nBRlWBt4WHdU_1\thorse\nBRnsmPzoEsM_0\tskateboard\nBRtCCpXG_N8_1\telephant\nBRt1o8xqxFs_0\tperson\nBRt5hLASRMU_0\tbird\nBRxrw0-skYM_0\telephant\nBR0SGq2ioqU_2\ttrain\nBR0SGq2ioqU_7\ttrain\nBR1gOlJPEdk_2\telephant\nBR8cOV8KYX4_0\tperson\nBR-XwELzLV0_1\tdog\nBSDy_dzOSS4_0\tcow\nBSHg9I0V6Yc_2\tbus\nBSJgV2iO0jc_0\tperson\nBSOCno_3bfI_0\tperson\nBSSyaPq1EoM_0\ttrain\nBSWNCcyXeR4_1\thorse\nBSWpwtIPQ9U_0\telephant\nBSWpwtIPQ9U_1\telephant\nBSWpwtIPQ9U_2\telephant\nBSWp
wtIPQ9U_3\telephant\nBSqz3i60KPw_4\tbicycle\nBSqz3i60KPw_1\tbicycle\nBSqz3i60KPw_2\tbicycle\nBSutEBx3H4A_0\ttruck\nBSvCnoryvn4_0\telephant\nBSyxB7X9SH0_5\ttruck\nBSyxB7X9SH0_7\ttruck\nBS1lexD0ugY_1\tperson\nBS1lexD0ugY_0\tperson\nBS5mJ0Y7Rys_0\tperson\nBS-S0nYSwkQ_0\tperson\nBS-S0nYSwkQ_1\tperson\nBTBmlFGHK-8_2\tperson\nBTBmlFGHK-8_0\tperson\nBTBmlFGHK-8_1\tperson\nBTKLizyvgcA_0\tperson\nBTR83oP1vpo_0\tperson\nBTlwglCdzOk_0\telephant\nBTpBteZfK7Q_0\tcat\nBTxSuijXVPY_0\tperson\nBTywlpNCABw_0\tcow\nBTzWqg8vHQI_0\tcar\nBT9sKGDb0Qw_0\ttrain\nBT9sKGDb0Qw_1\ttrain\nBUF45g7KGB8_0\tmotorcycle\nBUX8raEGFZk_0\tdog\nBUX8raEGFZk_2\tdog\nBUX8raEGFZk_3\tdog\nBUY-_l8_v9s_0\tperson\nBUZ7x7JaQ1k_0\tperson\nBUrMlyUBryI_0\thorse\nBU4SnrK9UiY_0\thorse\nBU4SnrK9UiY_2\thorse\nBU4yiA6qKAQ_0\tbicycle\nBU5PaU-UTss_0\tperson\nBVAi_zqhIeg_1\tperson\nBVCe2emxuTQ_0\thorse\nBVFYmsvoNTA_0\tcow\nBVS5Q8eBmRs_0\tperson\nBVWEvs3lq0Y_0\tperson\nBVWEvs3lq0Y_1\tperson\nBVXMpcHTg80_2\tmotorcycle\nBVm9KRW0iu8_0\tmotorcycle\nBVo3XdFnAJM_0\thorse\nBVxr6TGFsMQ_1\tperson\nBV5tXmVwddI_1\tperson\nBV-UtDJNS2w_1\tmotorcycle\nBWA5eWlt6Lg_0\tcar\nBWFYpOE-8yo_0\tperson\nBWcaU8lR4rM_0\tperson\nBWdhK5cwgt0_0\tbus\nBWjRZ-aKRX4_1\tperson\nBWlnPrI8FLk_0\tperson\nBWnFU-Li_8E_0\tperson\nBWn3QGOyZJc_0\telephant\nBWn7EPWkJ2I_1\tbear\nBWp2oVJMG1A_0\tperson\nBWqYVuIKaNA_0\tperson\nBW5r0Kv6h2U_0\tboat\nBW56O_QhBmc_0\tperson\nBW7uP0jcst8_0\thorse\nBXA3uMFAA9M_0\tcow\nBXCd65rDsk4_0\tdog\nBXCrD4eGGWw_0\tperson\nBXHktSPnW24_0\tperson\nBXTGSkuESqU_0\tperson\nBXUL3aLVZM4_0\tperson\nBXWXLNGacmc_1\tmotorcycle\nBXWXLNGacmc_0\tmotorcycle\nBXdMv9s3Rtw_0\tperson\nBXiQhR0Zj70_0\tperson\nBXrwbMjK_ZU_0\ttrain\nBX8AJD8uL3U_2\tperson\nBX-SAZsC6yc_2\tknife\nBX-SAZsC6yc_4\tknife\nBYQfvvAP9rY_0\tperson\nBYRNeh3RRZs_0\tperson\nBYS-DmtMpWE_0\tcat\nBYVhHLCSZ_M_1\tdog\nBYYakMVK6Ko_0\tperson\nBYi8dYVDYak_0\tperson\nBYkytpBqzHQ_0\tairplane\nBYq45niURL8_1\ttruck\nBYq45niURL8_0\ttruck\nBYud6fy8t8A_1\tknife\nBYud6fy8t8A_0\tknife\nBYud6fy8t8A_2\tknife\nBYud6fy8t8A_3\tknife\nBYxg5sQjvQ4_0\tperson\nBYyATiWsxZs_2\tcar\nBYyATiWsxZs_0\tcar\nBYyATiWsxZs_1\tcar\nBYyrXwDFF5U_0\tperson\nBY0XhpATtuI_0\tumbrella\nBY2Fs4KDDbU_0\tmotorcycle\nBY7KYQ_Qf3Y_0\tcow\nBY8mmPl_K_A_0\tperson\nBY-5sA1BbFE_0\tdog\nBY-5sA1BbFE_2\tdog\nBZDa7e9EFvI_0\tknife\nBZERyxrpvg4_1\tperson\nBZIzw3XdAgI_1\tperson\nBZI3ovXxotQ_0\tknife\nBZeIe9Nkb1E_0\tcat\nBZgZ1H4t3hQ_0\tperson\nBZgxjWSM7Vc_0\tbicycle\nBZhfYzqKuu8_0\tperson\nBZkYWI_qxz4_1\tbird\nBZldivEoOo8_0\tperson\nBZli_iMMV8k_0\tbear\nBZli_iMMV8k_7\tbear\nBZ94WX4wHn0_0\tskateboard\nBaDQg_CCQpU_0\tperson\nBaDQg_CCQpU_2\tperson\nBaHS1WcgbbE_0\tbird\nBaHS1WcgbbE_1\tbird\nBaJTQLa-vuU_0\tperson\nBaOQYsYuC6A_1\telephant\nBaRsW_taGVY_0\tcat\nBaWQb_lSjYs_0\ttrain\nBaYLeM_yk_Q_1\tskateboard\nBafH7BetIyk_0\tperson\nBakCr5HeDNE_2\tboat\nBakCr5HeDNE_0\tboat\nBauKE-faLzM_1\tperson\nBavQVUFfmBU_1\tperson\nBavQVUFfmBU_0\tperson\nBavoG7kb0wo_0\tcar\nBaxc5TW06FU_1\tknife\nBa1sC-X1OF8_0\tperson\nBa1sC-X1OF8_1\tperson\nBa2T3joy6BQ_0\tperson\nBa3CWVKFpBE_0\tboat\nBa5BO-nvDnE_1\thorse\nBa-SiAqH09k_2\ttruck\nBbAdBjyFFEA_0\tbird\nBbAdBjyFFEA_1\tbird\nBbAdBjyFFEA_2\tbird\nBbEfZ9mUKOY_0\tcat\nBbOabnT5V-E_0\tperson\nBbQyfmZx-2Y_2\tbear\nBbRarKH6D_Q_0\thorse\nBbYZ7Ee3Ixs_0\tperson\nBbYqjT1OzLY_0\tperson\nBbYqjT1OzLY_1\tperson\nBbfOXQD21Ac_1\tmotorcycle\nBbnSU5sRdBs_0\tperson\nBbnxzNL5tMk_0\tperson\nBbq8h83cFE8_0\tperson\nBbu_YM_GBG4_3\tbird\nBbu_YM_GBG4_0\tbird\nBbv9Y9Goufk_5\telephant\nBbv9Y9Goufk_0\telephant\nBbv9Y9Goufk_1\telephant\nBbv9Y9Goufk_2\telephant\nBb4uwSjmtKk_2\tbird\nBcHl4OuJLT4
_0\tperson\nBcHl4OuJLT4_1\tperson\nBcSXX5O_YDw_0\tbicycle\nBcVn38vI_Zk_0\tperson\nBcV5QdDIrMg_0\tperson\nBcg-TsdpO-Q_0\tperson\nBcjVHV-6WWM_0\tperson\nBcjZaclf1m0_3\tbird\naO4uLNN4Gt0_0\tbear\naPCEyodWBU4_0\tperson\naPPUf7JUJRo_0\tperson\naPf5SoOgmhQ_0\tmotorcycle\naPheJtUTSps_1\tboat\naPm89i_7aKs_0\ttrain\naPm89i_7aKs_1\ttrain\naPswSvCaFDQ_0\telephant\naPvqWgeR03U_0\tperson\naQAieL0LKIo_0\thorse\naQB2gAnqQi0_1\tperson\naQGQKDLwRqM_0\tperson\naQVn7fJi_l4_0\tcat\naQaKnTZ4hDg_0\tperson\naQfQqr5W5uI_1\ttruck\naQfQqr5W5uI_2\ttruck\naQfQqr5W5uI_4\ttruck\naQlLjT95Hgs_3\thorse\naQub6VGWKzQ_0\tcar\naQzKS5Sn9u0_0\tperson\naQ1c75hfANo_0\tperson\naQ6larydXgI_4\telephant\naQ6larydXgI_0\telephant\naRBWB79BIIg_1\tumbrella\naRHGn50eToQ_0\tbear\naRQQ75s9Ni4_0\tboat\naRRUAfurxVU_0\tperson\naRcw_PTSf4o_0\tperson\naRdAN9jVvqQ_1\tdog\naRnJ4lIPIL4_0\tbus\naRueDRgWEOs_0\ttruck\naRzwrPXsTRI_0\ttruck\naR6P3PtMIZc_0\tperson\naSDuIU0pzYY_0\tperson\naSH88cb0kww_0\tperson\naSMzQpOjAc8_0\ttrain\naSUtY_pSN0k_0\tbird\naSWGbO-Nfcg_0\ttrain\naSWGbO-Nfcg_1\ttrain\naSb-LY3vBsg_0\tgiraffe\naSkBoJ55w2Y_0\tperson\naSqwAZJaQIk_0\tbus\naSqwAZJaQIk_2\tbus\naSsjyvISV94_0\ttrain\naSw1yhbXHuA_0\telephant\naS2Zw7-j7p4_0\tcar\naTBr31jkThQ_3\tbus\naTOn74Inw24_0\tbird\naTR3FylgTkA_1\tperson\naTR3FylgTkA_2\tperson\naTS8hur_yyo_0\tperson\naTcDiEXEhhk_1\thorse\naTdIOtWasSE_0\tperson\naTeFjqoG9fM_0\tperson\naTeFjqoG9fM_1\tperson\naTj38bNIsQo_0\tcow\naTvgsqSb5aA_0\tperson\naTvoRXrEvG4_0\tbicycle\naTvoRXrEvG4_2\tbicycle\naT3idINTybY_0\tumbrella\naUFHlj5AVrU_0\tperson\naUNlQPWMFHo_0\tcar\naUQh47P34C0_0\tperson\naUQh47P34C0_1\tperson\naUX-HZraWQs_3\tzebra\naUh41vv5vdE_3\ttrain\naUh41vv5vdE_0\ttrain\naUh41vv5vdE_2\ttrain\naUv4LjbJxLs_0\tbus\naU5AZMYHZ2o_0\tdog\naU5tePXE5qE_1\telephant\naVFbcdQrobU_0\tperson\naVGtibXVt40_0\ttrain\naVMpwmT7ojA_0\ttruck\naVPIHMyNEw8_0\ttruck\naVZJ8qaxG3s_0\tperson\naVif6Qc9Prw_0\tcow\naVknWcQimJA_0\tbus\naVm9jp_ttsk_0\telephant\naVm9jp_ttsk_1\telephant\naVm9jp_ttsk_4\telephant\naVm9jp_ttsk_5\telephant\naVm9jp_ttsk_6\telephant\naVm9jp_ttsk_7\telephant\naVm9jp_ttsk_8\telephant\naVo-jvGoUGs_1\tboat\naVo-jvGoUGs_0\tboat\naVq4ezzbcTc_0\tbird\naVvuGEexwy0_1\tperson\naVy9mhLlo5U_0\tumbrella\naV2_0JBmw8o_1\tperson\naV7mSkydynI_4\tbicycle\naV7mSkydynI_1\tbicycle\naV7mSkydynI_2\tbicycle\naWCNGGW4Qew_0\tperson\naWDtrDYqivs_0\tperson\naWQxqFyyzng_0\tcow\naWQxqFyyzng_1\tcow\naWWMT0webCY_0\tperson\naWWtWhgt_V0_0\tcow\naWYoUCAev64_2\tbicycle\naWYoUCAev64_0\tbicycle\naWcaF85RIM8_3\telephant\naWgSKxQO5Ps_0\tcat\naWi51gAEIkY_0\tperson\naWma4eTtHv0_0\tperson\naWqBSBc-XpU_2\tknife\naWt13fGkYuA_0\tcow\naW9D5rT3GCo_0\tbear\naXFFLOGR_yI_0\tperson\naXFgCWZLFj8_0\thorse\naXFgCWZLFj8_5\thorse\naXFgCWZLFj8_1\thorse\naXKbkyjRqkU_8\tbear\naXKbkyjRqkU_0\tbear\naXKbkyjRqkU_7\tbear\naXOPdDTpvxc_0\tperson\naXWkAKNw0Dg_0\tbird\naXXfrIsIqi0_0\tperson\naXhd5BhT4hs_0\tcow\naXhd5BhT4hs_1\tcow\naXml5kCJyDY_0\tskateboard\naXml5kCJyDY_2\tskateboard\naXn1cwN8vng_0\tairplane\naXn1cwN8vng_1\tairplane\naXxKLf5m61g_1\tperson\naXxKLf5m61g_0\tperson\naXxPxBeZjQI_0\tperson\naX0JOJY-BDc_0\tperson\naX0JOJY-BDc_2\tperson\naYCA7dz0nbI_0\tperson\naYJzxhE8-Rs_5\tknife\naYPCTMucy6A_0\tperson\naYgA8AxT0V4_0\tgiraffe\naY1i2TADX0c_0\tperson\naY1i2TADX0c_1\tperson\naY1i2TADX0c_2\tperson\naY4dOYabpbs_0\tcow\naY6lI7qO6kI_0\tperson\naZF83PK7HKU_0\tperson\naZF83PK7HKU_1\tperson\naZGZbrCAFl4_0\tperson\naZGZbrCAFl4_1\tperson\naZHznZSD2uE_0\tperson\naZJ_vArnOC0_0\tcow\naZL_n-gon0U_0\tboat\naZT_v5WnLio_0\tperson\naZVtxAF_Imw_0\tdog\naZZcXyRJwyI_0\tperson\naZ4tzgju18s_1\ttrain\naZ-3j
ypmJiY_0\tperson\naaAAXDB7ml4_0\telephant\naaAAXDB7ml4_1\telephant\naaA_qcyN3eM_1\tcow\naaBf3fxpR7E_1\tperson\naaQjh2_8aVw_1\tmotorcycle\naaQjh2_8aVw_0\tmotorcycle\naaUXN-xWi1c_0\tperson\naaWV0TEIbhM_0\tskateboard\naaWV0TEIbhM_2\tskateboard\naacFWGARp08_0\tperson\naacLCDo8Zus_0\tumbrella\naacZc8VUtxg_0\tbird\naaoYsiVAFDY_0\tairplane\naas39xgvbfg_0\tcat\naatdoixvb4w_0\tdog\naazC6OJV2GY_0\tperson\naa0jo00Yxz0_2\tboat\naa-J6xg9RH4_0\tperson\nabCu1bwDisA_0\tumbrella\nabHvXnWduQQ_0\tperson\nabQ7YCx3QQM_0\ttrain\nabbympAEM_k_0\tcow\nablCJGTLCow_1\telephant\nablCJGTLCow_3\telephant\nablCJGTLCow_4\telephant\nablCJGTLCow_0\telephant\nable--ZWvkg_1\tperson\nabnCzyC9R28_0\tperson\nabpyt2p-uMg_1\tbird\nabrKRGgLV0o_0\tdog\nabrKRGgLV0o_1\tdog\nabxcR1X4UIo_1\tbird\nabxuxX4aHFI_1\thorse\nabxuxX4aHFI_2\thorse\nab1RpuefUA0_3\tbicycle\nab2b2WA-fQs_1\tperson\nab2b2WA-fQs_0\tperson\nab2b2WA-fQs_2\tperson\nacDY2Ono9WA_0\tdog\nacL58vxHnnc_0\tperson\nacOdf26jldk_0\tperson\nacYxvpS0b7s_2\tairplane\nacZFDZif1ww_0\ttrain\naciCzrBQsM0_0\tperson\nacnOEnTXwJY_0\tcow\nacnOEnTXwJY_1\tcow\nac4feYMso4k_0\ttrain\nac6NdTBtc6U_1\tperson\nadAkRe99CDA_0\ttruck\nadE0Nk3CKyI_0\tcar\nadKIteGSOIM_1\tskateboard\nadY8EtfOO_w_0\ttrain\nadcv2A70AoA_0\tperson\nadiBUyRiBfo_1\tperson\nadiBUyRiBfo_0\tperson\nadskAqVAdFQ_1\telephant\nad2C17MGAEo_0\tbus\nad94BZD75ck_1\tcow\naeAjL4rCjIM_1\ttruck\naeAjL4rCjIM_0\ttruck\naeIzIOSHZek_0\tperson\naeJKW7m42xo_2\tairplane\naeJKW7m42xo_0\tairplane\naeKckIdL0io_0\tbird\naeUVIIEtwdw_1\tmotorcycle\naeUVIIEtwdw_2\tmotorcycle\naeUVIIEtwdw_3\tmotorcycle\naeUVIIEtwdw_4\tmotorcycle\naeboOU_vdjo_0\tperson\naeboOU_vdjo_1\tperson\nBc2pPI9s8bM_2\thorse\nBc26F0eEyBg_0\tperson\nBc5QvTVd-04_0\tperson\nBc64C5jdZDg_0\tperson\nBc7NXuSycR4_0\tskateboard\nBc-b4WhkWxw_0\tperson\nBdBZuvI8oak_0\ttruck\nBdBZuvI8oak_8\ttruck\nBdBZuvI8oak_1\ttruck\nBdBZuvI8oak_2\ttruck\nBdBZuvI8oak_3\ttruck\nBdBZuvI8oak_4\ttruck\nBdBZuvI8oak_7\ttruck\nBdB6NgtqioE_1\tbear\nBdCnusBWLuw_0\tbicycle\nBdC5wdGWMCw_0\tperson\nBdLMnBBX7rc_0\tperson\nBdQ8AC4jpkk_0\tperson\nBdR02myBXHY_0\tperson\nBdTRTQRbNqI_1\tskateboard\nBdT2u0kYx90_0\tbicycle\nBdT2u0kYx90_1\tbicycle\nBdT2u0kYx90_2\tbicycle\nBdT2u0kYx90_4\tbicycle\nBdZOawocL-c_0\tperson\nBddRmrmaI6M_0\tperson\nBd0JDJL6yXk_0\tairplane\nBd21KrWCyCg_0\tcat\nBd-WW1Hs9kk_1\ttrain\nBeAD9m4Yu_U_0\tperson\nBeCQkxXRRww_1\tperson\nBeCQkxXRRww_0\tperson\nBeCmkGB-RCw_0\thorse\nBeQWoctTF5I_0\tbear\nBeQWoctTF5I_2\tbear\nBeQupBkL2y8_0\ttrain\nBeTu3Ag6XIw_4\tbicycle\nBeTu3Ag6XIw_1\tbicycle\nBeVqWRYzPkY_0\tknife\nBebzr4dP1Ug_2\tperson\nBebzr4dP1Ug_0\tperson\nBedgXkpLAOs_0\tperson\nBefMC4f6Z3s_0\tperson\nBefMC4f6Z3s_1\tperson\nBefq3kL0E7o_0\tperson\nBegwn2Da_j8_0\tperson\nBepRWdKn0QA_0\tcat\nBetAKo6E3rw_0\tperson\nBezlbA5t77I_1\tperson\nBe4NCK9GwQU_0\tperson\nBe4V9lpSpJw_0\tknife\nBfIBlw1RkXc_1\ttruck\nBfJUkGEnxvE_0\tperson\nBfOXYUOsSf8_0\tairplane\nBfSxTA9yZak_0\tperson\nBfT3bVAeXLU_2\tboat\nBfWpLwfDFbc_0\tperson\nBffFognyZOA_1\tskateboard\nBffFognyZOA_0\tskateboard\nBfkXvdTkYF4_0\tperson\nBfkXvdTkYF4_1\tperson\nBfwHmAlZdKA_0\tperson\nBf1cF3BfY18_0\tperson\nBgBDqhuoTr0_0\tdog\nBgHvkS4H7w0_0\tperson\nBgamGCKlzTI_0\tperson\nBgbxYgCIde8_0\tcow\nBggPqcJz12g_1\telephant\nBgjdCfaJfsE_0\telephant\nBglxBESIjlE_0\tperson\nBgsTkbznAjI_0\tperson\nBgwZN0Ui-Q8_0\tperson\nBg0_DcQLOys_1\tknife\nBg3Zox43xGI_0\tskateboard\nBg4NtG5QkwM_0\tperson\nBg_cKljiGGE_2\tperson\nBg_cKljiGGE_0\tperson\nBhA7KMeJYAE_0\tskateboard\nBhL184lkUcw_0\tperson\nBhPyQcTHRmg_0\tboat\nBhXpOqm8Q5o_0\tbird\nBhZl6ZTtKDo_0\tperson\nBha-PhOr-bU_0\tbird\nBhdcIu_nQYs_
0\tbus\nBhqZrCcQpD4_0\telephant\nBh4QFujTqIo_0\ttrain\nBh5wIL7IE9A_0\tperson\nBh5wIL7IE9A_1\tperson\nBiGYFhnDhMI_0\tairplane\nBiQ4cYnaGPo_0\tperson\nBiYzQbOwhWY_1\ttrain\nBiYzQbOwhWY_2\ttrain\nBipPdxUV2PY_3\tboat\nBirMOPf7k0I_0\tknife\nBizSBnzOzy0_0\tperson\nBizSBnzOzy0_1\tperson\nBi1KsDpJT8w_0\tperson\nBi1KsDpJT8w_1\tperson\nBjGhd-Eq5ig_1\tcar\nBjGhd-Eq5ig_7\tcar\nBjJSECIrsd0_0\tdog\nBjLJqIPSyUM_0\tbicycle\nBjQO2ipch-w_1\tdog\nBjRyA1cPxA4_0\tcow\nBjZ9JRI_WkM_0\tperson\nBjbCdEHhCjI_0\tperson\nBjfwCDsBoeg_2\tbicycle\nBjhITTFavAk_0\tperson\nBjiJ7HAaOj8_0\tperson\nBjj4KdIbDBY_0\tperson\nBjk2IA4thIE_0\tbear\nBjogwheL3BI_0\thorse\nBjpX2nla914_1\tcar\nBjqdFABBqxA_0\tperson\nBjqdFABBqxA_1\tperson\nBjraW0bXW-0_0\tperson\nBj8lO8Jag3Y_0\tperson\nBj9wPwHXNQo_1\thorse\nBj9wPwHXNQo_2\thorse\nBj9wPwHXNQo_3\thorse\nBj_fS2abD9o_1\tbird\nBkFws1J8IM0_0\tbird\nBkMb48QM-zQ_0\tperson\nBkco3wJWvp0_0\tperson\nBkdBnU65i7Y_0\tperson\nBkdWJT3sWro_3\tairplane\nBkdWJT3sWro_4\tairplane\nBkfKa-zgphc_1\tairplane\nBklBU6Epydc_4\thorse\nBklBU6Epydc_1\thorse\nBkoQ8_W4drM_0\tumbrella\nBkteTGu81tQ_0\tbus\nBkwpJBHM_DM_0\tdog\nBk3VbRagAwg_0\tdog\nBlXhR1rRct8_0\tbicycle\nBlfVNiQZtko_1\tcow\nBlfVNiQZtko_0\tcow\nBlhT8WFfI54_0\tperson\nBlj4FY__L6Y_0\tperson\nBllnWV-BIDo_0\tbird\nBlqsGIq2hNg_0\tperson\nBlqsGIq2hNg_1\tperson\nBlzUBgB6BEc_0\tperson\nBl-1081HLyM_0\tmotorcycle\nBl--N1EQpuA_5\tairplane\nBmCAiO-WNmE_0\tskateboard\nBmG7dEBuS6s_0\tcow\nBmHShiZ1Xus_3\tairplane\nBmNwfiFBeRo_0\tperson\nBmNzw5vNQNI_0\tskateboard\nBmRZWeMzQLg_3\tbicycle\nBmRZWeMzQLg_0\tbicycle\nBmSBpZrrEt8_0\tcat\nBmXdIzhVZ0Q_2\tbear\nBmZN0ljGa84_2\tmotorcycle\nBmfHrAPEMrk_2\tperson\nBmfHrAPEMrk_0\tperson\nBmfHrAPEMrk_1\tperson\nBmjBM58PfZE_0\tcow\nBmjEEjKDJVI_0\tperson\nBmjLZgp38NI_0\tcat\nBm3l_RLjYpo_0\tmotorcycle\nBm3wZ63Ymvo_2\tmotorcycle\nBm7e-qOAcKQ_0\tperson\nBm8qAGd91Gg_0\ttrain\nBnADRMlWOsM_0\tairplane\nBnNJUP6xfG8_0\tbear\nBniJFr7IJRo_1\tperson\nBniJFr7IJRo_0\tperson\nBniJr-iCh9M_1\ttruck\nBnkIFwVPh8w_0\thorse\nBnkIFwVPh8w_2\thorse\nBnkIFwVPh8w_4\thorse\nBnkU89Dq2IQ_0\tperson\nBoA6CUl4t70_0\tcow\nBoGAxXRzHWs_0\tcow\nBoLSvTrm3d8_3\tcow\nBoNtUpvusGM_3\tmotorcycle\nBoNtUpvusGM_4\tmotorcycle\nBoNtUpvusGM_0\tmotorcycle\nBoNtUpvusGM_1\tmotorcycle\nBoNtUpvusGM_2\tmotorcycle\nBoOANS5_U9I_0\tmotorcycle\nBoPj2W_G2Qg_0\tairplane\nBoYvNfndu60_0\tskateboard\nBoZ3ZvdEZ4o_0\tcar\nBoZ3ZvdEZ4o_1\tcar\nBoiPpDeQ2mQ_0\tairplane\nBomNEWAGolQ_0\tperson\nBomVU8_LL_Y_2\tdog\nBowyw_fhWZ8_0\tperson\nBoy5toMvMwo_0\tgiraffe\nBo2qsQNYATk_3\tskateboard\nBo5bT8QP_Og_0\tperson\nBpDLFqS9EAE_0\tperson\nBpVyiSvjk4o_1\tdog\nBpdZmCkSHco_0\tgiraffe\nBpjdKB7AJ8U_0\tskateboard\nBpkMUQLoJUM_0\tperson\nBpoWgamMMro_0\tcow\nBp1zluIhHzc_0\tperson\nBp4vXfVIVxA_0\tskateboard\nBqBkvlijWKg_1\tperson\nBqDnDPIE18k_3\thorse\nBqPcqKW3uAM_0\tdog\nBqoRxXUz7q4_2\ttruck\nBqpA7iBOQ_s_0\tperson\nBqqPm3F1F_w_0\tperson\nBq4id5zA48c_2\tbear\nBq_emgXftMI_0\tperson\nBrDdbgxB7qI_1\tbird\nBrHDj1biLlA_0\tairplane\nBrHDj1biLlA_1\tairplane\nBrJiBbRF25U_0\tperson\nBrKgWUQnUWI_0\tcow\nBrQNhzCKfxs_0\tperson\naelph1Y8yPk_0\tskateboard\nae161Zq0QBg_0\tskateboard\nafCYMTTgbMw_1\tdog\nafD_y2ZEHn4_0\tskateboard\nafLO-CD48TI_0\tmotorcycle\nafLO-CD48TI_1\tmotorcycle\nafWl3lTglsw_0\tperson\nafbS6cTlE5Q_0\tperson\nafu5-raaJEc_1\telephant\naf9Z_LR-L7M_0\tperson\naf-MtTvmPic_0\tperson\nagFlIZmS0zU_0\tperson\nagF_eyIgF3g_0\tperson\nagGuxSx4UdI_0\tmotorcycle\nagIme93Q6WA_0\tperson\nagMdtESL5kE_2\tcow\nagSpfpV4EsQ_0\tperson\nagVHBb-qLAw_1\tbus\nagWS48KnYWk_0\tmotorcycle\nagXPzkjMl4c_0\tbird\nagYR35aJ1no_0\tperson\nag1ohTMq9Iw_0\tcar\nag
5Gy7ZNbfw_2\tknife\nag5Gy7ZNbfw_3\tknife\nag6NY6nrTvw_0\tbear\nahE37MgcoUs_0\tperson\nahMgOG4Bpcw_0\tcar\nahQD9PpYoqE_1\ttrain\nahYD0J4XzC0_0\tcat\naheVwPx1egw_0\ttruck\nahiO1CwoaY4_0\tperson\nahnbyNWfvpM_1\tcow\nahsHWgQGPNI_0\tperson\nahv6_xBxvmg_0\tperson\nah03BOnPUqs_0\tcow\nah-2yN1cKOg_0\tbus\naiINQVIMx5o_0\tperson\naiNcNIUbY3E_1\tdog\naiX8ymgR1g0_0\tboat\naiX8ymgR1g0_3\tboat\naierZPItkn8_0\tbicycle\naierZPItkn8_1\tbicycle\naiiN3X-f5Ss_0\tperson\naiklFoEJX1Q_0\tperson\nainWSZibSIM_1\tbicycle\naio5SboRXGU_0\tperson\naio5SboRXGU_1\tperson\naizJI68M2SY_2\ttruck\naizJI68M2SY_1\ttruck\nai1CTuarr50_0\tbus\nai3xYb_xvFA_0\tperson\nai7WTyMnl1g_2\thorse\nai7WTyMnl1g_3\tperson\nai7WTyMnl1g_0\thorse\nai7WTyMnl1g_1\thorse\nai9-_EMwk4U_0\tskateboard\nai_jmsLJTR0_0\tperson\najAuKSOFBKQ_2\tbus\najAuKSOFBKQ_3\tbus\najB-QUVDyXI_0\tcat\najO4xx5beuE_1\tbicycle\najPP5EY_nAo_0\tperson\najPP5EY_nAo_1\tperson\najPY1htweXM_0\tperson\najPY1htweXM_1\tperson\najtvjEY9TPA_0\tairplane\najxcj5ovYdw_0\tskateboard\naj0Ll84jtZs_0\tperson\naj0Ll84jtZs_1\tperson\naj3UwQNtZPo_0\ttrain\naj6sqeG0k54_0\tumbrella\nakH9ouIrOds_0\tskateboard\nakIlFKpZAtk_0\tperson\nakOLIpAsxqc_1\tperson\nakQU-s0RCWE_1\tbus\nakoVZ50spRM_0\tperson\nak6iAVUNU7c_0\tdog\nak6iAVUNU7c_2\tdog\nak6iAVUNU7c_1\tdog\nak89dpHVmHc_1\tperson\nalAFNWeSJts_0\tskateboard\nalDkqPNUFLU_0\tperson\nalDkqPNUFLU_1\tperson\nalKgZTVxcV4_0\tmotorcycle\nalX9MOY80Aw_0\tperson\naluZTs_Ys8I_0\tcar\nalvKKzlOBKM_0\tperson\nalzWhOivD0E_0\tperson\nal2Vh0In4HU_0\tbear\nal2Vh0In4HU_2\tbear\nal2Vh0In4HU_3\tbear\nal8Of2FWy80_0\tcat\nal8vzWgNDbs_2\tbicycle\nal8vzWgNDbs_7\tbicycle\nal8vzWgNDbs_8\tbicycle\namIvXQ6aZkE_0\tcow\namL9Dar_hp0_0\tperson\namTcWqrgBBg_3\tairplane\namjpcHzuYb4_0\tperson\nams9MCDF15I_1\tperson\nams9MCDF15I_0\tperson\namvLPTONS1U_0\tcow\nam-3XKJkCqg_0\ttrain\nanAXVexurxo_2\tdog\nanJbsuTwShw_0\tperson\nanLTttUpag0_0\tskateboard\nanR9cuXRv6Q_0\tperson\nanWxwjzPRBA_0\tperson\nanYy3XNTTGw_0\tperson\nanZ9lxr24eY_0\tperson\nangay7OmUwA_0\ttruck\naniCxSPm8Uc_0\tcar\nanlydfnmv7g_0\tperson\nannQpJsk6NI_0\tbus\nanpsTMr_HIo_0\tcat\nanrBShdHOz4_0\tperson\nanvk-OdKLBE_0\tperson\nanvngue8Qh8_0\tcat\nanzrRzyYAAc_0\tdog\nan-QcnhNhL4_0\tperson\nan-mFuTYuCk_0\tperson\nan_FRcZ669c_0\tperson\naoBqV2Guvso_0\tperson\naoDJu0KrrQs_0\tmotorcycle\naoOJR-0sPM0_0\tperson\naoSWWKtf8mU_0\tperson\naohLKKJxjIM_0\tperson\naoizdynEVYU_0\tdog\naoqMoScEfqE_1\thorse\naotBl0tvpFs_0\ttrain\naotBl0tvpFs_1\ttrain\nao9uUinn2WY_1\ttruck\napKAwFA4oP0_0\tbird\napQKmVEucLQ_0\tperson\napZAEWvk8XY_0\tperson\napcgot45Ql0_0\tperson\napdP6_tCdls_0\tperson\napfZjUpoTy0_0\tskateboard\napfZjUpoTy0_1\tskateboard\napprUmnQTcI_2\tcow\naqGKBg0azPA_0\tcow\naqGp6tCGLOU_0\tmotorcycle\naqKiwfY3Oqc_6\tbus\naqKiwfY3Oqc_5\tbus\naqKiwfY3Oqc_7\tbus\naqNz8TCica4_0\tzebra\naqUHuS5ALXE_0\tcow\naqWN-Q0wDHI_0\tperson\naqWN-Q0wDHI_1\tperson\naqZfqhHJPLo_0\tperson\naqdSuLpYlwQ_0\tperson\naqe_mdIg6k0_0\tperson\naqmie50AFwE_0\tdog\naq2UMxzwliQ_0\tperson\naq50xKvuSFg_0\tskateboard\naq59B_-6ilw_0\tperson\naq9Sfxn9vMg_5\tknife\naq-QzG14KJ4_0\tperson\narFKRc7lAo0_0\tperson\narFKRc7lAo0_1\tperson\narPGoY7uh4E_0\tperson\narS7aqpkAU0_0\tmotorcycle\narT4jZLX8pg_1\tknife\narW0ZUPkah8_0\tperson\narZ_mIhaJMo_0\tcat\nare5LvOB2nQ_1\tskateboard\nare9NykT9FM_0\ttruck\narn0j0l_IWI_0\tperson\nartWKQTC7CQ_0\tperson\nartcASpzYrU_0\tperson\narwZ6ZPJuN4_0\tcat\nar7TRjurXMY_0\tperson\nar-fzXT8Juc_0\ttruck\nasT-GJNeJok_0\tperson\naseOdDcbIRE_2\tperson\naseOdDcbIRE_0\tperson\naseOdDcbIRE_1\tperson\nashHnkqFz7g_0\tbicycle\nashHnkqFz7g_3\tbicycle\nasl-XTE0jsE_0\tp
erson\nasrDocOfGQE_0\tcar\nasrDocOfGQE_1\tcar\nasrDocOfGQE_3\tcar\nasrDocOfGQE_4\tcar\nasrDocOfGQE_5\tcar\nasrDocOfGQE_6\tcar\nastLiScyoaQ_0\tperson\nasx2CkH0O6I_0\telephant\nas1twjKe3Cw_0\tskateboard\nas6Y3-EaaCg_0\tperson\nas6Y3-EaaCg_1\tperson\nBrgRnN_LBGk_1\tperson\nBrgRnN_LBGk_0\tperson\nBrhMkJ6n-hQ_1\ttrain\nBrnBTne3NBw_0\tbear\nBrnBTne3NBw_1\tbear\nBroiAN_qtCI_0\tperson\nBrpRmX410DU_0\tperson\nBrrAlsmwDnk_1\tperson\nBrrAlsmwDnk_0\tperson\nBrrlyds8g1A_0\tperson\nBrwABvccCWs_0\tperson\nBrzEfM8nWCw_0\tcow\nBr3M-xsvXFQ_0\tperson\nBr9CVteHFEc_0\tperson\nBsCH_ABy0WE_0\tperson\nBsRC5xbG6uY_0\tperson\nBsXphFpnOxE_0\tbird\nBsXwLsR6dm8_0\tperson\nBsv8dNYzPkY_0\tbear\nBs1rRAtP7bw_1\tbear\nBs3BPJZMD9E_0\tperson\nBs94h8vMmwg_0\tperson\nBs_9E_Rq524_0\tperson\nBs_9E_Rq524_1\tperson\nBtFwcgeJjsY_0\tperson\nBtKVAhU1LdI_0\tknife\nBtKl-iqkgoY_0\tcat\nBtN0FlaISuY_0\tperson\nBt19SM8BenY_0\tperson\nBt41QF0ze6E_1\tperson\nBt7B7nkGO_4_0\ttruck\nBt7B7nkGO_4_1\ttruck\nBuFYI1vYj1k_1\tperson\nBuH65mVX5yM_0\tperson\nBuPWtDPEJ-0_0\tperson\nBuXvxclES0s_0\tbird\nBuco16wWyFA_1\tmotorcycle\nBuco16wWyFA_2\tmotorcycle\nBuco16wWyFA_3\tmotorcycle\nBuco16wWyFA_0\tmotorcycle\nBufY7NdKUlM_2\tmotorcycle\nBufY7NdKUlM_4\tmotorcycle\nBunvBFXoGPg_0\tbus\nBuqljdjPWWc_0\tknife\nBuqljdjPWWc_1\tknife\nBuumm7rgDPY_0\tperson\nBu0gJwoDkRw_0\tcat\nBu5Bgr9asUU_0\tperson\nBu_HdLSyLSI_0\tperson\nBu_3ep-qAi0_0\tperson\nBvEAIc3hmkk_0\tmotorcycle\nBvHzGHjR6rk_0\tperson\nBvLCgNWIHfA_0\tperson\nBvLJZAhIR3A_1\ttruck\nBvTLdUcIH5I_1\tperson\nBvTbuvBeunI_0\tairplane\nBvTjf9mG5MU_0\tperson\nBvZ8DqslB-U_1\tairplane\nBvZ8DqslB-U_2\tairplane\nBviGbtAujq0_0\ttruck\nBvrORC4d2yg_0\ttrain\nBvrORC4d2yg_1\ttrain\nBv4rjfW9RsM_0\tdog\nBv9IXbrDYLk_0\tbird\nBwDccOS7_vw_0\tperson\nBwIoxW7Ee8M_4\ttrain\nBwUYR-ZnpX8_0\thorse\nBwW4Fs1eTRg_0\tairplane\nBwW4Fs1eTRg_1\tairplane\nBwergWBqOOs_2\ttrain\nBwgJmjOzlRk_0\tperson\nBwoTsoC3hvQ_3\thorse\nBwo1MaJvxRs_0\tperson\nBwrh4q5KLVg_1\tdog\nBwsHsSpS0dQ_0\tbird\nBw2RhmesY5g_0\tperson\nBw5iwcbP4eM_0\tgiraffe\nBw6f2OXYtSo_0\tcow\nBxHIRvoGZMM_0\tperson\nBxMoEE7XwL8_0\tperson\nBxNE34BGZ-4_0\tperson\nBxQp3-SCUGs_0\tperson\nBxQp3-SCUGs_1\tperson\nBxWs9aINEEI_0\tperson\nBxWs9aINEEI_2\tperson\nBxWs9aINEEI_1\tperson\nBxYdU6vB2YQ_1\tmotorcycle\nBxaEaD7zeX4_0\tperson\nBxhktnvjtLA_0\ttruck\nBxmeqCev3Kw_2\tboat\nBxmeqCev3Kw_3\tboat\nBxm3EvRZAI0_0\tskateboard\nBxvlWueS9vA_0\tmotorcycle\nBxwmNnxcI7o_1\tperson\nBxzVlf9-SLc_14\tbicycle\nBxzVlf9-SLc_4\tbicycle\nBxzVlf9-SLc_6\tbicycle\nBxzVlf9-SLc_8\tbicycle\nBx2YQSFETcw_1\tperson\nBx4ELKBw9PU_0\tcow\nBx4ngxnRjvM_0\tmotorcycle\nBx-is-dL1ko_0\tperson\nBx_z_4bt8O4_0\tperson\nBx_z_4bt8O4_1\tskateboard\nByBWtiJJNqk_0\tperson\nByBWtiJJNqk_1\tperson\nByFCiUvKd4E_0\tcow\nByFCiUvKd4E_1\tcow\nByFCiUvKd4E_2\tcow\nByJNGLp-Q1Q_0\tboat\nByRne1VtDow_1\tperson\nByfeHjkm0NA_0\tbus\nByhpLi9sRUs_4\ttrain\nByhpLi9sRUs_5\ttrain\nByhpLi9sRUs_0\ttrain\nByn2Qo7ghaQ_1\tperson\nByvWskJDMGg_0\tairplane\nByvW2VADH6w_0\tmotorcycle\nBy1cSo8DcUw_0\tbicycle\nBy8jq7bVrkw_0\tperson\nBzKADkfj5sM_0\tcow\nBzNlO4ccRRY_0\tperson\nBzOo01dGJkw_0\tperson\nBzT8xDTB14c_2\ttruck\nBzWiQPw-vQc_0\tperson\nBzX2DmrGvp0_0\ttrain\nBzeW7KdQ818_0\tskateboard\nBzeW7KdQ818_1\tskateboard\nBzehenf5vSI_0\tairplane\nBzgqI8VBlSE_0\tperson\nBzpY-JMNW4c_0\tperson\nBzrM5QG9q2o_0\ttrain\nBzr3gVS8SzI_1\tboat\nBz5rpBZ1dzs_0\tperson\nBz7A9QxD1nY_0\tknife\nBz9MqNlU7KM_0\tperson\nB0AazXeFQIU_0\tperson\nB0BXcxFMgrk_0\tknife\nB0EZ9LIObGc_1\tmotorcycle\nB0FupWyYbG8_1\tperson\nB0NJSrhuWwA_1\tperson\nB0NJSrhuWwA_0\tperson\nB0QFrtXczzE_0\tperson\nB0SYog80Y78_0\tpers
on\nB0WaLst2GGg_1\tperson\nB0YrdZ7s3UY_1\tperson\nB0YrdZ7s3UY_2\tperson\nB0aFuZP3nYE_0\tperson\nB0aFuZP3nYE_1\tperson\nB01lwUoyl90_0\tperson\nB03gLj0lJrk_0\thorse\nB0-L6VbxLcU_0\tcat\nB0-lAJ4tBN4_0\ttrain\nB0-lAJ4tBN4_1\ttrain\nB1IQyTNE7eg_0\tskateboard\nB1Ojfucympw_0\tperson\nB1Ojfucympw_1\tperson\nB1YzUGPZQWo_0\ttrain\nB1hkAet1OQI_0\tperson\nB1isEeljBFI_0\tperson\nB1pC6hfF_Do_0\tperson\nB1qSE-7JgXE_0\tperson\nB1yiSrv4Ocw_1\thorse\nB1zPD20nhTg_0\tperson\nB12C84by_eA_0\tperson\nB12C84by_eA_3\telephant\nB12C84by_eA_1\tperson\nB12C84by_eA_2\tperson\nB12C84by_eA_4\tperson\nB12C84by_eA_5\tperson\nB12C84by_eA_7\tperson\nB12C84by_eA_10\tperson\nB12C84by_eA_11\tperson\nB2EMVGU5pNA_4\ttrain\nB2VryVb5p54_0\thorse\nB2VryVb5p54_2\tcow\nB2V7kk7fqSc_0\tperson\nB2X9JzMNZb0_0\tperson\nB2ZpqEJpVX0_0\tperson\nB2fTIk9eCNc_1\telephant\nB2gJVve4I58_0\tperson\nB2hKNbDmBtM_0\tcat\nB2lAxi3jIR0_0\tperson\nB2lAxi3jIR0_1\tperson\nB2lAxi3jIR0_2\tperson\nB2xcdU4Qoz8_0\tbicycle\nB2xcdU4Qoz8_12\tbicycle\nB23TpirETNE_0\thorse\nB26AQtx7Xic_0\tperson\nB3HZSrALQYc_0\tskateboard\nB3IjPORG3_w_1\tbird\nB3J2umsYK7E_0\tperson\nB3QykPv8TnI_0\tperson\nB3X5wDENAUw_0\tcat\nB3kTu0B4OjM_0\tperson\nB32uNSxqzgs_0\tcow\nB33seWCiea4_1\tperson\nB33seWCiea4_0\tperson\nB4Q6pRC_mZ8_0\tbicycle\nB4Q6pRC_mZ8_1\tbicycle\nB4Srj2O1AWQ_0\tcow\nB4dFepwxEOU_0\tperson\nB4iP6lAoNYo_0\tperson\nB4jbThMFW00_0\tperson\nB4mWkc8-_6A_0\tbird\nB4oO-miJ6VU_0\tumbrella\nB4vM2iKb8cs_0\tperson\nB4_mRuPC7o0_0\tperson\nB5BNEoIaQL4_0\tperson\nB5GwJoM3aX8_0\tperson\nB5NgN9mocgI_0\tperson\nB5PHI2HVtuc_0\tperson\nB5fv91yB4Gw_0\tbicycle\nB5qSvRpXLS8_0\tcat\nas7rVUFzyzg_0\tskateboard\nas_Rz9F3slw_0\tcat\natA-Cgv2XHY_0\tperson\natE1O6J4Wls_0\tperson\natLGWZUbEuM_1\ttrain\natMjLEIbsBI_0\tcow\natxnLL4Vjuo_0\tperson\nat2dmAEDdmg_1\tperson\nat4pXKjEDic_0\tperson\nat4pXKjEDic_1\tperson\nat5edW3lMVA_0\tperson\nauA-q9fWwn4_0\telephant\nauDJ1xtxFlw_0\tperson\nauDJ1xtxFlw_1\tperson\nauFLAZb-gD8_4\ttruck\nauGyhsy8iLA_0\tcow\nauNciV4eLVo_0\tbus\nauOl1mbGUlk_0\tbicycle\nauOo1Lg_wvU_0\tdog\naubLDLbxxsk_0\tperson\naueT5WO4e_c_0\tgiraffe\naueT5WO4e_c_1\tgiraffe\naugKp60fa5Q_1\tcar\nauiPa0HNOEQ_0\tperson\nauu_tYb3G1Y_0\tperson\nauzy4oPzM5Q_0\tmotorcycle\navCqOSeS7WU_0\tperson\navC67gaD1NM_0\tcat\navHbY1Q3vyw_1\telephant\navLxYBedm_c_1\telephant\navT7Q6Wibdg_0\tperson\navl9d-bL57Q_0\tairplane\navl9d-bL57Q_1\tairplane\navob12vGzmU_0\thorse\navonCFmxPyg_0\tperson\navonCFmxPyg_1\tperson\navpWY3czerE_1\tcar\navpf9VVT6CU_0\tmotorcycle\navvQ5wNPiew_1\tperson\nav475qBV4QY_0\tskateboard\nawC9zxAeP54_0\tperson\nawQ1n9aQEco_0\tperson\nawVBieSP5Zw_0\tperson\nawVa7pqR9DU_0\thorse\nawfg9NsCVQ0_0\tperson\nawjHSQ5uPi4_0\tbus\nawkpYVN-fJw_1\thorse\nawmHGFkxxlw_0\tperson\nawwWMuOKe3c_0\tperson\naw059qHbVm0_0\tbus\naw2lOvXUAPg_0\ttruck\naw5C9nQgLcA_0\tperson\naxB1Gk85UtQ_0\tperson\naxEK7nZ8W3I_0\tperson\naxJZ92uWnkA_0\tperson\naxXs2oUd4ow_0\tbear\naxcDoOd0G0s_0\ttruck\naxjSgDsN6t8_0\thorse\naxltu5Qf6ok_0\tskateboard\naxn6QuPBPqA_0\tperson\naxulii3UXSQ_1\tperson\naxulii3UXSQ_0\tperson\nax4YUE-PcF8_0\tairplane\nax4YUE-PcF8_2\tairplane\nayD3RJIjplM_0\tdog\nayRmnUb2LAI_0\tairplane\nayax5k3PJMs_0\tperson\naybdlOdul0U_1\tperson\naybdlOdul0U_0\tperson\naydxF0r6n9s_0\tperson\naydxF0r6n9s_1\tperson\nayg0x1glF2s_2\thorse\nayg0x1glF2s_0\thorse\nayg0x1glF2s_1\thorse\naylBB_8cv60_0\tumbrella\naysqPEtZvsg_0\tperson\nayuF_8chcKM_0\tperson\naywW_Wvo49w_0\tperson\nayzzG8M0fzo_1\tperson\nay1d8NBbrl0_2\tbird\nay1d8NBbrl0_3\tbird\nay5RnrQple4_0\ttrain\nay5tx1Rovwk_0\tcat\nay7LLDO9Ecc_0\tdog\nazC7-_wC8N8_0\tbus\nazDn4DU7cGA_0\tp
erson\nazKKcIb4Ufw_1\tboat\nazOInI_CMHM_0\tbus\nazbls7-iaEU_0\tperson\nazbls7-iaEU_1\tperson\nazbls7-iaEU_2\tperson\nazfLb8VvI-4_0\tperson\nazfLb8VvI-4_1\tperson\nazfLb8VvI-4_2\tperson\nazlRI_Jydpw_4\tcow\nazmZDijLihI_0\tperson\nazmZDijLihI_1\tperson\na0FDxoXtFyM_1\tairplane\na0NOwUio_n8_1\tperson\na0NOwUio_n8_2\tperson\na0NdjlW5H_U_0\tcow\na0N_vetshbg_0\tperson\na0N_vetshbg_1\tperson\na0OjB7xzRx4_0\tperson\na0RusP9ATfw_0\tperson\na0dHPtoBS3U_0\tperson\na0hRgBpppWs_0\tperson\na0jpiOFS7eM_0\tbear\na0oeBV6-20U_0\tperson\na0uoJdAwobA_0\tperson\na085oeXd0RE_0\tperson\na0-Pmmyi8js_1\tperson\na1ADw1megCI_1\tairplane\na1Fzn7iUHO8_1\tmotorcycle\na1RVXQl4rlY_1\tcat\na1RinDI9Hgw_2\tknife\na1SaKvoO2Og_0\tcow\na1U6U_pntMo_0\tperson\na1XDxiP1hNA_0\tperson\na1ctjjNUZ-4_0\tdog\na1kLNA-KACs_1\tbicycle\na1lQwuhicQI_0\tperson\na1lQwuhicQI_1\tperson\na14VlgxHS3M_0\tperson\na2AT0Xo7uLY_0\tperson\na2Osa5aleJ0_1\tbus\na2Qp2Grx3_8_0\tperson\na2XMK6mjiZg_0\tdog\na2XvXs2guuE_1\tperson\na2XvXs2guuE_0\tperson\na2fEq8oS3M8_0\tbus\na2gYRtJhP1E_0\thorse\na2gYRtJhP1E_1\thorse\na2hv4szlq-Q_0\ttrain\na2kH2_9zoWU_0\tairplane\na2o_-GSpXXk_0\tcat\na2qmS6AhUYk_0\tmotorcycle\na2vx_F1NOas_0\tperson\na26mRIQUPoU_0\tdog\na26mRIQUPoU_1\tdog\na26mRIQUPoU_2\tdog\na27UC8vu1hI_1\ttruck\na29AS00WJrY_0\tcow\na3AIwQnG0Ek_0\tcow\na3FLLhQu768_0\tperson\na3THrQYDkqw_1\tbird\na3UCtF8nZIY_1\tskateboard\na3dbdHben-o_0\telephant\na3dbdHben-o_3\telephant\na3dbdHben-o_9\telephant\na3dbdHben-o_1\telephant\na3dbdHben-o_2\telephant\na3dbdHben-o_4\telephant\na3dbdHben-o_6\telephant\na3rGEI8MdMs_0\tcow\na3uvEIsI1no_2\tperson\na32oJ0GsAYw_0\tperson\na35UuVw16Ks_0\tperson\na37D3FoqIJA_1\tknife\na3-oi7T-Lw0_1\tzebra\na3-tURw95Xo_2\tperson\na4IU4va7hp0_1\ttruck\na4LYVAPbEwI_0\tmotorcycle\na4LaeeZXIc0_2\tskateboard\na4Nt5QxFqmY_1\tboat\na4PwZfJZVPA_2\tbear\na4arqJgXHDA_0\tperson\na4pR_YBd4yY_1\tbicycle\na4uNoGpllg4_3\tbear\na4v1ptMpyi0_0\tcow\na4v1ptMpyi0_3\tcow\na41TZwhyyP0_0\tcow\na46BqT5Mo5I_0\tcow\na5HZnFcvdyA_1\thorse\na5P8pVrcSRk_0\tmotorcycle\na5brvs-fct0_0\tperson\na5tSaF5GCKE_1\tcat\na5ye5BUJFlY_1\tperson\na5znd3aNwLk_3\tbicycle\na58tMy0mhIk_0\tperson\na6G_DBEFdFA_0\thorse\na6ZXi7Qqls0_0\tperson\na6fBYYEgBvs_0\tdog\na6jDeIJbF7Q_0\tperson\na6uyjrBkkXs_0\tboat\na61piN6ffE4_0\tperson\na67zz0CSEpk_0\tperson\na67zz0CSEpk_1\tperson\na7B81Zeqgfw_2\ttruck\na7HKuyv2qLQ_0\telephant\na7HKuyv2qLQ_1\telephant\na7Q6eb6feT8_0\tperson\na7S9rFNKVMI_7\tmotorcycle\na7Zr0-1LIPc_1\tdog\na7hwm4TORvY_0\tperson\na7pC7IjO2ik_0\ttruck\na7peWR4xJwQ_1\tcow\na7ygZsaDMis_0\tperson\nB5_Hyk-p7kE_0\tcat\nB6E15pe4UR8_0\thorse\nB6LGwD1E9SQ_1\tperson\nB6P8B8BO-6U_0\tgiraffe\nB6SaDYczlDQ_1\tperson\nB6U92N9hh6k_2\thorse\nB6V4xqX67OA_0\ttruck\nB6bDVhRNw00_0\tairplane\nB6cEdaWTjeU_0\tperson\nB6dBkoOhfBU_0\tcar\nB6lU93wtaDA_1\tboat\nB6mP9KsnQPc_1\tbear\nB6mngUQtFJ4_0\tcow\nB6nlTJYtmws_0\tcow\nB6pXMjH4geU_3\tboat\nB6qshzfLYzs_0\tperson\nB6x2dNbgPjM_0\tcow\nB6y439-imys_0\tperson\nB6z7eCsgfM0_0\tbear\nB61Wf8NFvcU_0\tairplane\nB645r0hkdmg_0\tperson\nB645r0hkdmg_1\tperson\nB67FwwZfIEA_0\tperson\nB6_IcyhOHpE_0\tperson\nB7BjhnnQ2K4_0\tperson\nB7GRNv2opSY_0\tbird\nB7MHQOUO4f8_0\tumbrella\nB7Z9UV6aQuM_0\tbird\nB7a8WkaWmH4_0\tperson\nB7a8WkaWmH4_1\tperson\nB7cXCz7jJKQ_0\tcow\nB7gX18_mDyQ_0\tperson\nB7hmqrwe88o_1\telephant\nB7hmqrwe88o_2\telephant\nB7iAvi5riV8_0\tmotorcycle\nB7nwfSMbEL8_0\tcow\nB7pEEUJ-J1g_1\tmotorcycle\nB7rCxgg3F_s_0\ttrain\nB8Bp9yKWV9c_0\tperson\nB8D4fPARFvo_0\tperson\nB8HQglK444U_2\tairplane\nB8HQglK444U_0\tairplane\nB8HQglK444U_4\tairplane\nB8LGL1Tt_wg_0\tperson\nB8MxJKDkvkE_0\t
person\nB8eeoykmq1E_1\tperson\nB8eeoykmq1E_2\tperson\nB8f7NnYq5sg_0\tperson\nB8f7NnYq5sg_1\tperson\nB8sWL2syyA8_0\tperson\nB8uIyRkm9YA_0\tairplane\nB8zGkBkQw4c_0\tperson\nB87W__RIE-E_0\tperson\nB8_Z7m50I_E_0\tmotorcycle\nB9AXF91pIUs_0\tairplane\nB9Ed_vAN9mc_0\tdog\nB9Y_LrDVbg4_0\tperson\nB9aqDsvGy5Q_0\tperson\nB9aqDsvGy5Q_1\tperson\nB9j233QxEuQ_0\tperson\nB9oJSA_NJ2s_0\tbicycle\nB9z17FOPd5A_0\tperson\nB99mIPKaChY_3\tcow\nB-CR7vl67W8_0\tperson\nB-QiQvJcSVk_0\tperson\nB-T1YNe09SU_4\tbear\nB-T1YNe09SU_3\tbear\nB-bDxAN93a4_0\tairplane\nB-dlnlRKA5s_2\tairplane\nB-dlnlRKA5s_7\tairplane\nB-tukWZbXp8_0\tperson\nB-wJpt4zl0c_0\tperson\nB-x2pu-ux3w_0\thorse\nB-z1uE4iuz4_0\ttruck\nB-0WNs2QYPk_1\telephant\nB-48lEXzIS8_0\tumbrella\nB-7cqxw95Ro_0\tperson\nB_BqrY2eeCY_0\tmotorcycle\nB_Gjc7J18qg_1\tperson\nB_Gjc7J18qg_0\tperson\nB_M6X41emhY_0\tperson\nB_O8idmfoCQ_0\tperson\nB_Tj79jaRXs_1\tperson\nB_Tmq51dx1g_0\tperson\nB_jGC2tlhRo_0\tperson\nB_k6vEEPHK0_0\tperson\nB_lEJv31TlI_1\tperson\nB_lEJv31TlI_2\tperson\nB_nZdcreecE_0\tperson\nB_wWPH9kbxM_0\tperson\nB_ylVg-TN2Q_0\tskateboard\nB_4Kfa8_9ms_0\tperson\nB_4eJYakoRY_0\tmotorcycle\nCAEqRvJLY-M_1\tmotorcycle\nCAe1SZKZ9T0_1\tcar\nCAq4CxCpeQE_0\tcat\nCA4UqnJCs58_0\tmotorcycle\nCA9SLI7TOKQ_0\tperson\nCBASqWyp4yk_0\tperson\nCBJQ5dL6Df8_2\thorse\nCBNqNe7G-QQ_0\tperson\nCBnYDFRfYgo_1\tbus\nCBqyVKttAwU_0\tcow\nCBtgGOzZtLQ_0\tperson\nCBz3ZOrTAjI_0\telephant\nCBz3ZOrTAjI_2\telephant\nCCAsEc2oRAM_0\telephant\nCCGg17i4vMU_0\tperson\nCCHay2RSnJI_0\tskateboard\nCCHay2RSnJI_1\tskateboard\nCCLRdGNDgdc_0\tcat\nCCoGim--jEg_0\ttrain\nCCp6NLBil8k_0\tbicycle\nCCwovjgEx1k_0\tperson\nCCwovjgEx1k_1\tperson\nCC0aX78fQFo_0\tcat\nCC-qoxEyocI_0\tperson\nCDCLLCkr87I_0\tcow\nCDY4TXCreQ0_0\tperson\nCDbWYF89944_0\tperson\nCDb6uyrYrZA_0\tcar\nCDfjcWI7iBQ_0\tboat\nCDgBHxiVkFw_0\ttruck\nCDnrG74PXbI_0\tperson\nCDpQZEjohRc_1\tcow\nCDpQZEjohRc_0\tcow\nCDrU-q6QdEs_0\tperson\nCD0cWR7d9yI_0\tperson\nCD4SGfIdfSg_3\telephant\nCEDTshbJOaI_0\tperson\nCEJoHSbb4gg_0\tperson\nCEJoHSbb4gg_1\tperson\nCEMCCDAYzQs_0\tperson\nCENd4xI4dnY_0\tperson\nCETUG_G0I4k_0\tcow\nCETUG_G0I4k_1\tcow\nCETUG_G0I4k_2\tcow\nCEUjuyvgrB0_0\tperson\nCEUqqi8y4sg_0\tcat\nCEVHrP5OzJ0_1\tknife\nCEafe_JTk8g_0\tknife\nCEqA0cqMfzg_1\tcow\nCEsjzJHOUBw_0\tdog\nCEzWiyTQOMA_0\ttruck\nCE1gHqc8aqU_0\tperson\nCE3KdY0X0QE_1\tperson\nCFD0NOl12CA_1\ttrain\nCFD6d4OweGQ_3\tmotorcycle\nCFD6d4OweGQ_1\tmotorcycle\nCFD6d4OweGQ_2\tmotorcycle\nCFD-UQW1aQU_1\tcar\nCFD-UQW1aQU_2\tcar\nCFRsGLeMJKc_0\tperson\nCFXkKgig7Io_0\tperson\nCFee6F2rbjc_1\tbird\nCFxObg2ebKQ_0\tairplane\nCFxObg2ebKQ_1\tairplane\nCF0JmXACTww_0\tperson\nCF01UBuV76Q_0\tperson\nCF7DZCaSqIg_0\tbird\nCF7DZCaSqIg_1\tbird\nCF7KYbTChlg_0\tperson\nCF71f3YLQ9U_1\tperson\nCF-cX0etaAw_1\tcat\nCF_NSKkrwjg_0\tperson\nCGCNTZsml7Y_0\tcow\nCGQoaYTzfaU_0\ttrain\nCGQoaYTzfaU_5\ttrain\nCGQoaYTzfaU_7\ttrain\nCGgxp3ycSWs_0\telephant\nCGoqd4n_qJg_0\tperson\nCGsUTzKzV4U_1\ttrain\nCGwrXZ2fUqg_0\tperson\nCGy0nn1MCqY_0\tperson\nCGy0nn1MCqY_1\tperson\nCG1sXlDy2Yg_4\thorse\nCG1sXlDy2Yg_5\thorse\nCHH1SlvOzfI_0\tperson\nCHIVYSnFst8_1\tbear\nCHJFpAcH8NM_8\tbicycle\nCHMzSMq0ui4_0\tskateboard\nCHZU6sP-loU_0\tperson\nCHZU6sP-loU_1\tperson\nCHbhzxurZNM_1\tperson\nCHbhzxurZNM_0\tperson\nCHnWGkGAnos_0\tperson\nCHo3jSv3HIA_0\ttrain\nCHwNoZ55z6c_0\tcat\nCH6ptLNxppU_0\tperson\nCH8zCsamj44_0\tperson\nCH-_pvq3am4_0\tperson\nCIJ-q_X_y7E_0\tperson\nCIKrCLz06-4_0\tcat\nCIQLvytEu6E_0\tperson\nCIQz5we_nHI_0\tperson\nCITgpk4GyMA_0\tbear\nCITgpk4GyMA_9\tbear\nCIV_VaLTf5c_0\tmotorcycle\nCIc1KbOeijU_0\tperson\nCIgzZOf3uA0_0\tperson\nCIgzZ
Of3uA0_1\tperson\nCIlb5C929mc_0\tknife\nCImmRnndBuo_0\tperson\nCItr4F49wO4_0\tperson\nCIxs-77bPrM_1\tperson\nCI2GrLRwQR4_0\tperson\nCI3rFXxUPtI_0\tbird\nCI6fYr7IJJM_0\tperson\nCI_9TEXzQE8_0\tperson\nCJD7b_dMrVE_0\tperson\nCJG8ou9QuY0_0\tperson\nCJIpdb7wZEc_0\tperson\nCJNAMf-R_J4_0\ttruck\nCJNj2wqp8QU_0\tbear\nCJOJBhvHmCE_0\tperson\na79_ETe4ego_0\tperson\na7_ixAbhsRI_0\telephant\na8MHgXPiRZU_0\tperson\na8as0DkifS0_0\tperson\na8eQTqlG-6o_0\tperson\na8insUA82jQ_1\tdog\na8insUA82jQ_2\tdog\na8insUA82jQ_3\tdog\na8r9Xss8Es0_0\tperson\na8wT4T21reQ_0\tperson\na8z4RhTT02c_0\thorse\na82uXl_fE7A_2\tcow\na82uXl_fE7A_3\tcow\na892r_pD5PM_0\tperson\na9FI5hfZsG0_0\tboat\na9GBRb_g82o_1\tbicycle\na9GBRb_g82o_2\tbicycle\na9YciDJw4wo_0\tdog\na9Y2Jm4-FDM_0\tperson\na9ZvcKL6lEg_0\tperson\na9fG2p2YO7k_0\tbus\na9fG2p2YO7k_2\tbus\na9g4dt8Lszw_0\tperson\na9g4dt8Lszw_1\tperson\na9riNB4_uhk_0\thorse\na90AssqciQk_1\telephant\na90AssqciQk_2\telephant\na-EIC5v0X4o_0\tdog\na-EIC5v0X4o_1\tdog\na-MNXAJ2mZo_0\tperson\na-NocjWzZtY_2\tperson\na-QTXZfMMT4_0\tperson\na-ZWAMyDG3o_0\tperson\na-iJ2J3oI-A_0\tperson\na-lm-MyKchM_0\tcow\na-s461-Ddxc_0\tskateboard\na-u5tm8bZnc_0\thorse\na-yRjCC5TTM_0\thorse\na-1bMCU5aj8_0\tmotorcycle\na-8RK3OMAOo_0\tskateboard\na-8RK3OMAOo_1\tskateboard\na_KVzTF1RIA_0\tperson\na_KZ5mevNfs_0\tbear\na_OkB8q7LMc_1\tperson\na_SryCna8Rk_0\tperson\na_UjbYab9UM_0\ttrain\na_YIQ1VvpcU_0\tperson\na_YIQ1VvpcU_1\tperson\na_gLFD5d04A_0\tperson\na_wdiSqtOK4_0\tairplane\na_xkGO87GsU_0\tskateboard\na_1zKb6B-bs_0\tperson\na_6uxh_4kb8_0\tperson\na_-WUUfn_l4_0\tperson\na__R_Y49D54_0\tperson\nbALr5X95BQ8_1\tperson\nbAMbXytHB7Y_0\tperson\nbAMbXytHB7Y_1\tperson\nbAdtKFYWQcE_0\tperson\nbAfpD53Vjic_0\thorse\nbAinSo2I3HI_0\tperson\nbAinSo2I3HI_1\tperson\nbAp653-8UZI_0\tperson\nbAtWugkhW88_0\tbus\nbAutb-z3rvw_0\tcow\nbAwVg4MVWds_1\telephant\nbAwVg4MVWds_0\telephant\nbAwVg4MVWds_5\telephant\nbAwVg4MVWds_9\telephant\nbAwVg4MVWds_10\telephant\nbAwVg4MVWds_11\telephant\nbA2bnjEnbus_0\tperson\nbA4v5gLC700_0\tperson\nbA5elX54rTQ_0\tcat\nbA6JRlAu2yE_0\tperson\nbA8lz4kTY-0_0\tbicycle\nbA8lz4kTY-0_3\tbicycle\nbA8lz4kTY-0_5\tbicycle\nbA8lz4kTY-0_6\tbicycle\nbA_NwRpP6Tw_0\tperson\nbA_6OElyKFo_0\ttrain\nbBPPJNf59kQ_0\tumbrella\nbBT4o_qtgWU_0\tperson\nbBgRYIPlqAQ_0\tperson\nbBm9VYnMO9g_0\tbird\nbBt5A6pwnxY_0\tperson\nbB1rIuXXQFA_1\tbus\nbB4Xm1LS9CI_0\tdog\nbB6PWM19eMo_0\tperson\nbCB5mMgiGnk_0\tperson\nbCRN4AZbr6o_0\ttrain\nbCbqiJ6Ales_0\tperson\nbCuWk5NSB0k_0\tperson\nbCuWk5NSB0k_1\tperson\nbCuuL9wxM7E_0\tperson\nbCuuL9wxM7E_1\tperson\nbCvbst3iM94_0\tmotorcycle\nbCwUgQIL5cE_0\tknife\nbCx54wbopXs_1\thorse\nbDBjT69DcT4_0\tcow\nbDEPo_ZJ8BY_0\ttruck\nbDJyFQqK69A_0\tperson\nbDOeksOYoHc_0\ttruck\nbDOeksOYoHc_1\ttruck\nbDOeksOYoHc_2\ttruck\nbDOeksOYoHc_3\ttruck\nbDO5jSIN9C4_0\tperson\nbDZrANNzYZY_0\tskateboard\nbDaTeoyWI4g_0\ttrain\nbDcapf9qqwU_0\tperson\nbDjiXPhFyUA_0\tperson\nbDu9DwJEoHs_0\tcow\nbDu9DwJEoHs_1\tcow\nbDu9DwJEoHs_2\tcow\nbDxvHkJLr2M_0\tbus\nbD9LGwYECDw_0\tcat\nbD-NwifgK0w_1\tskateboard\nbEDI6tCMZXU_0\tperson\nbEIh6sX-Tl4_0\tperson\nbEKdkY9RBEY_0\tperson\nbEM1_c0lvzs_0\tbear\nbEOBKFTwR2Q_0\tgiraffe\nbETxZfOvyHY_3\tbear\nbEUZ0kW5UxE_1\tperson\nbEawSJKPt-Q_0\tperson\nbEhFibV8au4_0\tperson\nbEqXwB3xaWk_0\tperson\nbErIbiSkE10_0\tskateboard\nbEwALd1GaT4_0\tbicycle\nbEzk1Y4QUKs_0\tbus\nbE2p5KejqaA_0\tperson\nbE54N9ho-us_0\telephant\nbE9RuKWeuuo_0\tperson\nbE--xARlZGI_1\tbird\nbFA9McooYzo_0\tcar\nbFCSt5rQdmU_0\tperson\nbFEO4MHzBto_0\tperson\nbFIAwyZ6uuE_2\tperson\nbFIAwyZ6uuE_0\tperson\nbFIAwyZ6uuE_1\tperson\nbFNUtoXNMlQ_0\tbus\nbFORQXIUbxA
_0\tperson\nbFXutLP--Cw_0\tcow\nbFXutLP--Cw_1\tcow\nbFXutLP--Cw_2\tcow\nbFXutLP--Cw_3\tcow\nbFYfbtcZvsM_1\thorse\nbFe5fer15nk_1\tbus\nbFm95kiEE_Q_0\tbicycle\nbFnZbMhDMQ8_0\tperson\nbFrVmI5XvFw_0\tperson\nbF2D0pMJqLQ_1\tknife\nbF65L0Tc9w8_0\tperson\nbF8lUYDQNgc_0\tperson\nbGCRyP03o54_1\tskateboard\nbGFqTDkSuTA_1\tbird\nbGMKF81Sy6c_0\tperson\nbGcugFPOZ98_0\tperson\nbGeFOznVAdA_0\telephant\nbGmggiJ7Hrk_2\tboat\nbGpuuVQyMOY_0\tperson\nbGsY4wldptk_1\thorse\nbGsY4wldptk_0\thorse\nbGyLNR-ZWRY_1\tcat\nbG7btkvllWc_0\tskateboard\nbG9Q1zv6YZ4_1\tperson\nbG-X3irBEO0_0\tperson\nbHALJVsPIWo_0\tperson\nbHBuapxTSS0_0\tperson\nbHB5zkcU4DY_0\tperson\nbHO746jxL2Y_0\tskateboard\nbHP9bh7-qNQ_0\ttruck\nbHWmtSkc1qY_0\tperson\nbHWmtSkc1qY_1\tperson\nbHbgFvCFkb0_0\thorse\nbHcNLuPTrTk_0\tperson\nbHcbcNIxs_o_0\tknife\nbHdxB4LnmGY_2\tmotorcycle\nbHdxB4LnmGY_0\tmotorcycle\nbHdypdEXRYY_0\tskateboard\nbHim6VG9R7E_1\tboat\nbHoVPJGd7EU_1\ttruck\nbHoVPJGd7EU_2\ttruck\nbHoVPJGd7EU_3\ttruck\nbHvVd9-u80E_0\tperson\nbH5d5crxmiw_0\tcat\nbIFUXEvQb_4_0\ttruck\nbIFUXEvQb_4_1\ttruck\nbIV7YZEPqTo_0\tperson\nbIiV4e5w280_0\tperson\nbInwFKVbP2c_0\tperson\nbIqcbjzOQ0Y_0\tcar\nbIslKUiw6YQ_4\tairplane\nbIslKUiw6YQ_0\tairplane\nbIyfjvesRuY_0\tboat\nbIzzvd9q2po_0\tcow\nbI19pnS1D7Q_0\tmotorcycle\nbI8htXUqQkI_0\tcat\nbJADjJacbIY_1\tperson\nbJAxqtGR-MY_0\tperson\nbJBnGIqBiuw_0\thorse\nbJDJ5yePi6M_0\tperson\nbJITjrxz5Ns_0\tperson\nbJI1844s-tU_0\thorse\nbJKrgOW0nMk_0\tperson\nbJMS4sT7XRo_5\thorse\nbJMS4sT7XRo_6\thorse\nbJMS4sT7XRo_8\thorse\nbJMS4sT7XRo_9\thorse\nbJMS4sT7XRo_0\thorse\nbJMS4sT7XRo_1\thorse\nbJWTtXkyZHg_0\tperson\nbJcrA1AOfI4_2\ttrain\nbJcrA1AOfI4_3\ttrain\nbJfHVvueTbo_0\tperson\nbJh3iPv6jYc_0\tcow\nbJqhWaDN0hQ_1\tdog\nbJ0SdP6bjnQ_0\tperson\nbJ24-WqB1xs_0\tperson\nbJ6hIJWstDo_0\ttruck\nbJ6-RBgHmRU_0\tperson\nbJ8k9v22vJA_0\tperson\nbKBLXhOMUi8_0\tdog\nbKCfbZIUSZI_0\tperson\nbKCjZrT7jIY_0\ttruck\nCJfXDO8EqQ4_0\tperson\nCJfXDO8EqQ4_1\tperson\nCJm40KxFN5E_1\tperson\nCJm40KxFN5E_0\tperson\nCJqFjtBvN9Y_0\tskateboard\nCJqHpmU9iSk_2\tperson\nCJqHpmU9iSk_0\tperson\nCJrxPkQa2GE_1\ttrain\nCJ0sXsga9bM_0\tbus\nCJ35smVDZW0_0\tperson\nCJ4qgeMiaOQ_0\tairplane\nCJ6n8mmO1b4_0\tcat\nCKB_--5AbfU_0\ttrain\nCKC6BopJKyk_0\tperson\nCKGpdOkI6P4_0\tperson\nCKNmSha1fz0_0\tperson\nCKQHLTDcKyk_1\tbird\nCKSN1SlM9ug_0\tcat\nCKZ1xRX4dh8_4\tknife\nCKcBs841bV0_0\tperson\nCKhADB_ssaI_0\telephant\nCKjQxzl__Fw_0\tbicycle\nCKkp1wLGtks_0\tperson\nCKmTbQn6J9U_1\tperson\nCKsvfQdlYfo_0\tperson\nCKuBMM3fZ84_0\tairplane\nCKxmvXSrPIg_0\tbicycle\nCKzh_WuJFng_0\tperson\nCK29cIxMNP0_0\tperson\nCK39c3vr6gc_0\tskateboard\nCLAjvvAM-K4_0\tperson\nCLB6UiAOkP0_1\tbus\nCLMUcOgZdNQ_2\tcow\nCLQOTITDBeo_0\tperson\nCLXlbsB7sLY_0\tperson\nCLdyznsISW8_2\tcar\nCLosaFzMFeI_1\tperson\nCLzV3TNXkFo_0\tperson\nCL1Bt58elWc_1\tperson\nCL1Bt58elWc_0\tperson\nCL1z2IBwWkA_0\tperson\nCL1z2IBwWkA_1\tperson\nCL4fc23TpVo_0\tperson\nCL5zmQikk-A_0\tperson\nCMBw6j8-QzY_0\tperson\nCMBw6j8-QzY_1\tperson\nCMIMzbsGXk8_0\tbus\nCMLOYaDEQ9g_0\tperson\nCMMGX4SFyIs_2\tperson\nCMOEwqoxxwo_0\tperson\nCMP-dHylUas_1\tperson\nCMlE5HjD19w_0\ttruck\nCMlNU8W7Lsk_0\tcow\nCMrJ3Hog9z4_0\telephant\nCMrJ3Hog9z4_1\telephant\nCMrJ3Hog9z4_2\telephant\nCMsMnTwn9o8_1\ttruck\nCMwy_JpVNwc_3\tbird\nCMwy_JpVNwc_1\tbird\nCMwy_JpVNwc_2\tbird\nCNDd5De0h98_0\tperson\nCNEdjudh1lE_0\tperson\nCNID7GMZCtU_1\thorse\nCNiuz-9TxDo_0\tperson\nCNqKVUmynPk_0\tairplane\nCNt_itMBqgs_0\tperson\nCNua3gOk0oM_0\tbus\nCNwRXN4wSAk_0\tknife\nCN6-VQgDfe4_0\tperson\nCN8AktLgwN8_0\tgiraffe\nCN8AktLgwN8_6\telephant\nCOAed-b3LTY_0\tperson\nCOFcQrVSFcc_0\tperson\nCOTylr
R16zU_1\tboat\nCOc8fmI9wQ4_0\thorse\nCOh7aoqTWjY_0\telephant\nCOj_p56dMLI_0\tmotorcycle\nCOksm121JZ0_0\ttrain\nCOxq73j4_rY_0\tperson\nCOyU6vUfxXQ_1\tperson\nCOyU6vUfxXQ_0\tperson\nCO2cK7r8MNQ_0\tperson\nCO33VpWw45s_0\tskateboard\nCO_0l5Z12kw_0\tcat\nCPManZ0i9vw_0\ttruck\nCPN9sc_XrbM_0\telephant\nCPOp_zZsQJk_0\tcow\nCPQXOFjv2LM_0\tperson\nCPXyJXYL8yY_0\tmotorcycle\nCPXyJXYL8yY_4\tmotorcycle\nCPYxpWVVj_M_0\tcow\nCPZSesZALiI_1\tcat\nCPuy90LHgrc_0\tbus\nCP3cZfEx36E_2\tbear\nCP3u7XjYteQ_1\tperson\nCP3u7XjYteQ_0\tperson\nCQEjDKzTc3Y_2\tperson\nCQE_vEzLzMQ_0\tperson\nCQPAMu_3qwY_0\tbear\nCQUUCXr0Idg_0\tperson\nCQU9LkJ1PlA_0\tperson\nCQU9LkJ1PlA_1\tperson\nCQbUivUBlJ8_1\tbear\nCQbUivUBlJ8_3\tbear\nCQihoSP1KLM_0\tperson\nCQite5jXihw_2\tperson\nCQlL5sCIaM4_2\ttrain\nCQlL5sCIaM4_0\ttrain\nCQlL5sCIaM4_1\ttrain\nCQmCFDEszdc_0\tcat\nCQyxRGB9-_o_1\telephant\nCQzQkumb_iw_0\tperson\nCQ0hdku_Mu0_3\telephant\nCQ0hdku_Mu0_4\telephant\nCQ0hdku_Mu0_6\telephant\nCQ0hdku_Mu0_8\telephant\nCQ0hdku_Mu0_11\telephant\nCQ2pa82Muc4_0\tperson\nCRGhEOLOPLw_0\tbus\nCRHfpplogUY_2\tcar\nCRHfpplogUY_1\tcar\nCRPfcUOT10Q_0\ttrain\nCRQ8kzUgpGE_0\tcat\nCRS3P9ePDug_8\ttrain\nCRS3P9ePDug_0\ttrain\nCRS3P9ePDug_4\ttrain\nCRS3P9ePDug_7\ttrain\nCRS3P9ePDug_9\ttrain\nCRS3P9ePDug_1\ttrain\nCRYLa0UnCJY_0\tdog\nCRZQQc-7Cr4_0\tperson\nCRZQQc-7Cr4_1\tperson\nCRcL9sc8Z_Q_0\tperson\nCRihNgUldQg_0\tperson\nCRpG5Auclh4_0\ttrain\nCRscoQhOT24_0\telephant\nCRteSMMhdfo_1\tperson\nCR2Qbth78ug_0\tperson\nCR7gNMR7aFk_0\tperson\nCSBnYbN-fwQ_0\tperson\nCSBnYbN-fwQ_1\tperson\nCSCN35ZL4gk_0\tperson\nCSCmLaLpgec_1\ttrain\nCSGkGWkJnIo_0\tperson\nCSKOzx-8MRM_0\tperson\nCSKhQtYbLiY_0\tperson\nCSTEfDaVq_w_3\thorse\nCSgIyZrF2Xw_6\tbear\nCShE1WLp4V4_0\tperson\nCSlYtyS3ekI_0\tcat\nCStjlkpuH8I_0\tknife\nCSwiprmAnWk_0\tperson\nCS4LhFaTdRc_1\tperson\nCS4TVHuh-OI_1\tperson\nCS4TVHuh-OI_0\tperson\nCTBCSXpoCNw_0\tknife\nCTGjM7vaWkc_0\tcar\nCTNN0vCWthk_0\tcow\nCTOTTFDvM9g_0\telephant\nCTOTTFDvM9g_1\telephant\nCTpK5Ywqj4E_0\tperson\nCTtActqncZs_1\tperson\nCTtActqncZs_0\tperson\nCTty0Fesx4k_1\telephant\nCTty0Fesx4k_2\telephant\nCT6O84zfmoY_0\tperson\nCT8VKdB074U_0\tdog\nCUB_Y4U0gNU_0\tperson\nCUE1Oj2b7oo_0\tperson\nCUIv9zU0_7M_2\tdog\nCUQZtS7SlyM_0\ttruck\nCUVQtlpfthI_0\tperson\nCUVqn-7LP_k_0\tcat\nCUjEVN0BT58_0\tperson\nCUjbAz30mdA_0\tperson\nCUvi-gOiEak_0\tairplane\nCUvi-gOiEak_1\tairplane\nCUzrNlKejnA_0\tperson\nCU-5HeXnZag_0\tperson\nCU_cxu2KrzY_1\tcow\nCU_4MsJSWGw_0\thorse\nCVCPdF3TevY_0\tcar\nCVJEcVS63rM_0\tperson\nCVJu9kpxa0o_0\tskateboard\nCVQq3Lnsmb8_0\tskateboard\nCVRQkAzvHOI_0\tcat\nCVXbWRarjGI_3\tbicycle\nCVa-tmxG3G8_0\tbus\nCVfXcK9LvU4_0\tcat\nCVnQzQjIfdo_0\tperson\nCVnQzQjIfdo_1\tperson\nCVtUo7t1tg4_0\tknife\nCVtdQUWrMFo_0\tperson\nCV1gdpxyUvQ_0\tumbrella\nCV7yBA-RY-s_0\tperson\nCV9Mv-Z5ywo_1\tknife\nCV9_qaQ3bOc_0\tdog\nCWNPg3hbbCc_0\tperson\nCWRUw47fnHQ_0\tdog\nCWcpGIObSb4_0\tperson\nCWcpGIObSb4_1\tperson\nCWhtecFS3Ps_0\tperson\nCWh66yU69HI_1\tperson\nCWq2nbpnjkw_0\tperson\nCWsgkyp-Wv8_1\tperson\nCWsgkyp-Wv8_0\tperson\nCWu6nT2qW2Q_0\tperson\nCWydCxGJyck_0\tcat\nCW0GVWegie4_0\tperson\nCXEi_k33z08_0\tperson\nCXF-MNV21Uw_1\tperson\nCXF-MNV21Uw_2\tperson\nCXF-MNV21Uw_0\tperson\nbKIEzYSD9LU_0\tbird\nbKM4LmiXX5k_3\tknife\nbKM4LmiXX5k_0\tknife\nbKQQdBiIraA_0\tdog\nbKT6s25xsS4_0\tperson\nbKh8FyKvOq8_0\tumbrella\nbKic74m-XKg_0\thorse\nbKnsY1ytgqc_0\tperson\nbK0HzQHKqhg_0\tmotorcycle\nbK0HzQHKqhg_1\tmotorcycle\nbK0IN2qoSjQ_1\tperson\nbK7Wo0UxDyQ_0\tperson\nbLBmIVS2T-0_0\tperson\nbLLFtAMqoF0_0\tperson\nbLOW53I2oWw_0\tknife\nbLU0G55kWgs_0\tcar\nbLU0G55kWgs_1\tcar\nbLYGpY
iiF7Q_0\tperson\nbLg0SdwRkKc_0\tperson\nbLneVyHWwdk_0\tperson\nbLoyRVgQcTk_0\tcat\nbLoyRVgQcTk_1\tcat\nbLoyRVgQcTk_2\tcat\nbLs4dUFZzcQ_0\tperson\nbLs4dUFZzcQ_1\tperson\nbMEbcFBdRsA_0\tairplane\nbMM1OZMZ_WY_0\tperson\nbMNzE6F4WK4_0\ttruck\nbMPPnTHvu8c_1\tcow\nbMQlfzj9vCE_0\tperson\nbMZPcnVc1K0_0\tperson\nbMakr2vwfqQ_0\tperson\nbMdfLBSo6jw_0\tbicycle\nbMfQw6tBALo_0\tcow\nbMgWjlwilqA_0\tbicycle\nbMk8JyTyvUo_0\tskateboard\nbMojajeogfY_0\tperson\nbMphaUsZuqU_2\telephant\nbMrDB2JI0QM_0\telephant\nbMuSXdxvaWY_0\tbicycle\nbMumJTM0f28_0\tperson\nbM3OcevX9F4_0\tperson\nbM6fRimkPZg_0\tcow\nbM6peJ4lQyU_0\telephant\nbNGoGllCEj0_0\tcar\nbNGoGllCEj0_1\tcar\nbNJ5ygVB-GI_0\tperson\nbNPtMp-AuhY_5\ttrain\nbNPtMp-AuhY_4\ttrain\nbNR89JLsh7Q_0\tmotorcycle\nbNZe9vwuE8E_0\tcar\nbNcTCIgwqNY_0\tboat\nbNinDD5s0LQ_0\tperson\nbNo2RseLYYs_0\tperson\nbNqXgNLQX3s_0\tperson\nbNtivYIWtQE_0\tperson\nbNtivYIWtQE_1\tperson\nbNtivYIWtQE_2\tperson\nbNyyHqBZnmQ_0\tairplane\nbN4vggzwxWI_0\tperson\nbN-epcJfRJ8_0\tperson\nbOPvxhSlnZI_0\ttruck\nbORQv_d22gA_0\tbear\nbOTYFfq_264_0\tperson\nbOXM6ibmbG0_0\ttruck\nbOarvmUMdLs_0\tperson\nbOarvmUMdLs_1\tperson\nbOb4k6pTF-k_0\tmotorcycle\nbOeUzXPOIWw_0\tmotorcycle\nbOfrPHjROWI_0\tdog\nbOm9Qgnl2KI_1\tumbrella\nbOor15z5M5Y_1\ttruck\nbOuuxRt7ugE_0\tbear\nbOwOVcqeajs_1\tboat\nbPAO0nyCO8Y_2\tcow\nbPLKx5uJaZY_0\tbear\nbPTTPAsH7v8_0\tairplane\nbPZdC3oRr1c_0\tdog\nbPanGwtU82U_0\tairplane\nbPavgNJxZnI_0\thorse\nbPavgNJxZnI_4\thorse\nbPcXQrlHs60_0\tzebra\nbPddyJH2fm4_0\tcow\nbPeFwxV66_s_1\tcow\nbPfaS8RIHVw_1\ttrain\nbPjZsDes9ck_0\tbird\nbPvvA8Wm5Ts_0\tperson\nbPw91vtx0rY_0\tdog\nbP17881jyH4_0\telephant\nbP17881jyH4_2\telephant\nbP17881jyH4_1\telephant\nbP6QvQUfZSI_0\tperson\nbP7ZU4wl_xs_1\tperson\nbP7lN2WyBTg_2\tbird\nbP7lN2WyBTg_0\tbird\nbP7pux4nQa4_0\tperson\nbQJQKEfdctc_1\tperson\nbQKuVB3YmRI_1\tknife\nbQNLK-43XKM_0\tperson\nbQNXrSVq4r4_0\tperson\nbQQS-amRhxU_0\tperson\nbQQr8FzMTHE_0\tperson\nbQR6KxB4qjg_1\ttrain\nbQWO4r5DLWY_7\tbicycle\nbQWO4r5DLWY_8\tbicycle\nbQZ8WQ2mS9o_0\thorse\nbQd1k1RNZZA_0\tperson\nbQwDt3XOok0_1\tskateboard\nbQy9W_tIPJg_0\tcat\nbQ7FEMZ309U_0\tbicycle\nbRElYolSzbI_2\thorse\nbRKfUmz_7hE_0\tbicycle\nbRKfUmz_7hE_5\tbicycle\nbRP4TElBetA_0\tskateboard\nbRUtCCY00Yw_0\tperson\nbRd_NGjRFpU_0\tcow\nbRgNc063rsk_0\tperson\nbRgNc063rsk_1\tperson\nbRiVaIWzo4k_0\tperson\nbRiVaIWzo4k_1\tperson\nbRpbblTb1VU_1\tperson\nbRq06zdCv4k_0\tdog\nbRsjD1GTjeE_0\ttruck\nbRuSrTOibGY_0\tskateboard\nbRw2PFlL8l8_0\tcat\nbRxyuZTXkWo_0\tperson\nbR61bP65wdI_0\tperson\nbR_EeaX8Kns_0\tcat\nbSC7MwTZ0Og_0\tperson\nbSJbBDA3-rI_0\tperson\nbSJbBDA3-rI_1\tperson\nbSSSYoS7HhY_2\tperson\nbSSX8qJnGak_0\tperson\nbSVCTx_L7lU_0\tperson\nbSbZuDkimC8_1\tcow\nbScFgdC-DH8_0\tmotorcycle\nbSkEsUu7aBI_0\tcat\nbSqX5D_GrEc_0\tperson\nbS4mTtP-Ud4_0\tperson\nbS4mTtP-Ud4_1\tperson\nbTAxiISsPNE_0\tcow\nbTHRXr-yw54_0\tperson\nbTOZp15gd24_0\tairplane\nbTOZp15gd24_1\tairplane\nbTOZp15gd24_2\tairplane\nbTO9Pid9808_1\tcow\nbThFysASYJg_0\tperson\nbThX-5t7OWM_3\tbus\nbTl-dt761p8_2\tbird\nbTp1hk4dhPE_0\tperson\nbTuho6CpJpg_0\thorse\nbT7mzx9P1Yo_6\tbird\nbT7mzx9P1Yo_8\tbird\nbUDYPhSFyyw_0\tairplane\nbUFCsL247kY_1\tperson\nbUFCsL247kY_0\tperson\nbUIov_O62GU_0\ttrain\nbUVi7VVygmM_0\tperson\nbUa61WY6E38_1\tperson\nbUu6iW_nRvM_0\tperson\nbU8cBepgoMY_4\telephant\nbU8cBepgoMY_1\telephant\nbU8cBepgoMY_3\telephant\nbU8r7rNDaHQ_0\tmotorcycle\nbU8r7rNDaHQ_1\tmotorcycle\nbVCLNxl4PQY_0\tperson\nbVPgCZmg1CY_0\tperson\nbVTzUiTPtww_0\tperson\nbVZixqlT1AI_0\tperson\nbVbT4F3I0s4_0\tperson\nbVbdO8rj6TQ_0\tperson\nbVbdO8rj6TQ_1\tperson\nbVdjQbIzOGc_0\thorse\nbVgK
e0-_228_0\tbear\nbVkYqw1YJ6c_0\tperson\nbVnmeQsd3xk_1\tcar\nbVph6GZ3jLE_0\tskateboard\nbVrck_XYsR8_0\tbicycle\nbVtMukuPx9A_0\tmotorcycle\nbVtWuhD1L1s_0\tcar\nbVvVMOxHOT4_0\tcat\nbVwWkzYdrvk_0\tperson\nbVw9txmBeX0_0\tperson\nbVz-pHuWNfc_0\tperson\nbV3UXbGCshc_3\telephant\nbV3UXbGCshc_4\telephant\nbV3UXbGCshc_0\telephant\nbV3UXbGCshc_2\telephant\nbV8k_w0cphI_0\tperson\nbV9tUYWi-9o_0\ttruck\nbV9tUYWi-9o_1\ttruck\nbWCW4QZTIXE_0\tperson\nbWCxObc3uVo_0\tperson\nbWEnwFThRlA_0\tperson\nbWEtMBeQQCA_0\tbus\nbWEw8rNQ-kI_0\tperson\nbWJg9jatoBY_0\tperson\nbWLcKJauKIs_0\tperson\nbWO4NBx37Vk_4\tairplane\nbWdWgIB371Y_0\tperson\nbWdWgIB371Y_1\tperson\nbWkKy-_YzW8_0\tumbrella\nbWotjBNgmiA_1\tmotorcycle\nbWotjBNgmiA_2\tmotorcycle\nbWo4CzHWaZ8_0\tdog\nbWqayCqhqVQ_0\tperson\nbWtXkAzA6zE_0\tperson\nbWtXkAzA6zE_1\tperson\nbW1JoZnZpXs_0\tbicycle\nbW1JoZnZpXs_2\tbicycle\nbW2I1hUiWgg_1\tbear\nbW2I1hUiWgg_3\tbear\nbW2I1hUiWgg_2\tbear\nbW6PJACBEFo_0\tboat\nbW6PJACBEFo_1\tboat\nbW7x14tLsxU_0\tcow\nbW7x14tLsxU_1\tcow\nbXGa-FIGViQ_0\ttruck\nCXOKkaurfXo_0\tperson\nCXVmfrDfalE_0\tperson\nCXVyHpmc_fU_1\tcat\nCXXWvUVLBBE_1\ttrain\nCXXWvUVLBBE_3\ttrain\nCXaF0E3wEzI_4\tboat\nCXaF0E3wEzI_1\tboat\nCXaF0E3wEzI_2\tboat\nCXdGDPRtlo4_1\tcat\nCXdjIo4q-w4_0\tdog\nCXoeLQPShqU_3\thorse\nCXoeLQPShqU_0\thorse\nCXrwHki5ShI_0\tperson\nCXw5HMRQwEk_7\tbear\nCXxPPuZcT2k_0\tknife\nCXyujV2S5aE_0\tperson\nCX1US3Y-2jI_0\tperson\nCX5Y01eJ_g0_0\tknife\nCX838M4iPkw_1\tbear\nCX_YxpWurRk_0\tperson\nCYEtgx1uVTM_0\ttrain\nCYEtgx1uVTM_1\ttrain\nCYFtiy8FtgM_0\tperson\nCYGBUw8HZ8Q_0\tperson\nCYKbj5BgaiI_0\tperson\nCYPFpTJXCp8_1\tperson\nCYXd3muNlJ8_0\tperson\nCYcxxdqG02k_0\tperson\nCYcxxdqG02k_1\tperson\nCYghFhQySik_1\tperson\nCYghFhQySik_2\tperson\nCYghFhQySik_0\tperson\nCYg8fy66poA_0\ttrain\nCYjEASXRoys_0\tperson\nCYkow-sm2pA_0\tperson\nCYmpj4UFFtA_0\tcow\nCYsgb4GhJ_0_1\tcat\nCYtehjvIIIE_0\tcat\nCYw9ONxIi0M_4\tbear\nCY3-VTI7lQU_1\tcow\nCY48729zIgM_0\tbus\nCZAt34OJpoI_0\telephant\nCZGoAqWEDQM_1\thorse\nCZJz6zZt3cE_0\tperson\nCZXHqexGqfI_0\tcow\nCZduQndn_Eg_0\ttrain\nCZfMxEFk9hc_0\tmotorcycle\nCZfe1GuZxPI_1\tperson\nCZws8sfLA8M_0\tperson\nCZ8bjG4wdZU_0\tperson\nCZ9MT7tZZ2E_0\tknife\nCZ-Kodbg_2A_0\tbus\nCaA-PFuqaXw_0\ttruck\nCaFlo5YQHXw_0\ttrain\nCag3vCKRh6c_0\tbicycle\nCajF9IxbOvI_0\tperson\nCajF9IxbOvI_1\tperson\nCam_wHie6XQ_1\tperson\nCa4_dI-Ii8o_0\tperson\nCa5GzZ-rifE_2\thorse\nCa5GzZ-rifE_0\thorse\nCa5GzZ-rifE_3\thorse\nCa5mOzqFz70_2\tbear\nCa6g367yxss_3\tdog\nCa9JsTGifmQ_1\tknife\nCa-l5zpgIL0_0\thorse\nCa-wDaXxSn8_0\ttrain\nCa_LwXljv5I_2\tdog\nCbBrv9GkBDM_0\tperson\nCbKVR2EGoWU_0\tcat\nCbO4r5w5NEM_0\tcat\nCbTbpHHYfGo_1\tcow\nCbYQk8GFQwY_0\tperson\nCbYXzAv9G40_0\tperson\nCbZA75LYWsk_0\tboat\nCbZA75LYWsk_4\tboat\nCbZA75LYWsk_7\tboat\nCbZA75LYWsk_8\tboat\nCbZA75LYWsk_6\tboat\nCbbsxxHKQBs_1\tbicycle\nCbbsxxHKQBs_3\tbicycle\nCbfML92fBFc_0\tperson\nCbrOGI6D5oo_0\tdog\nCbz0hgvZtyM_0\tperson\nCb0EbSTABAw_0\tperson\nCb31aGVbcGE_0\tperson\nCcJ-51mUw00_0\tperson\nCcNfpk8tVxA_2\tperson\nCcNfpk8tVxA_0\tperson\nCcNfpk8tVxA_1\tperson\nCcadL-XHA8w_0\tperson\nCccC-FK79hM_0\tskateboard\nCceETksmvEc_0\tbus\nCcfAKl1kCRM_0\tperson\nCcl3EZzzNhc_2\tbird\nCcl3EZzzNhc_3\tbird\nCcmiWGPbuT4_0\tcar\nCcyRYeSG3sQ_0\ttruck\nCcyqd4ZzDtQ_0\tperson\nCc5DUip1-eE_0\tperson\nCc9-Kd--ejs_0\tcar\nCdA-Gg7O6d4_0\tperson\nCdD0W0pS7gk_0\tskateboard\nCdG8sd9UZFM_1\telephant\nCdG8sd9UZFM_3\telephant\nCdOwMZqCiMs_0\tbird\nCdRgo9V_e_U_0\tperson\nCdTDo40rdz4_3\tumbrella\nCdVnK1TcGcQ_0\tknife\nCdW2qTShGbY_2\tperson\nCdW2qTShGbY_1\tperson\nCdYkEASWMqQ_0\tperson\nCddXUsFqg4Q_10\tbicycle\nCddXUsFqg4Q_12
\tbicycle\nCdeUORbvfgs_0\tperson\nCdkbBdQwTX0_0\tperson\nCdmrCOVxj8c_0\tperson\nCdosWRXaOgY_0\tperson\nCdtY-oTmACc_0\telephant\nCd3qxnZC6s4_0\tairplane\nCd8dfcT-D9U_0\thorse\nCd8zY0wsrLc_0\tumbrella\nCd_ZgXZ7qKw_0\tperson\nCd_ZgXZ7qKw_1\tperson\nCeCnRUGvs9Q_1\thorse\nCeEMUoHNeVA_0\tperson\nCeICmGeQXOk_0\tmotorcycle\nCeICmGeQXOk_1\tmotorcycle\nCeVjsWpfoCY_0\tperson\nCekBpSMLr08_0\thorse\nCetmVa_LV2A_0\tbird\nCetw-N1I1bA_0\tdog\nCew6y9K7ynI_0\tcat\nCezGmkW4sRY_0\tperson\nCe1tW6uV_lw_0\tperson\nCe1tW6uV_lw_1\tperson\nCe_dgPawIkU_0\tperson\nCfC--i0DQ-o_0\tcar\nCfThv8Vk-oM_0\tumbrella\nCfbzDUZ6PyQ_0\ttruck\nCfqtCB_f_Z8_3\tskateboard\nCfwk3niR9Uc_0\tmotorcycle\nCfyvbbrxquI_0\tcat\nCf_GVLLQaTA_0\tperson\nCgB0fwUOZd4_2\tbus\nCgDcN1Lk7ag_0\tcar\nCgDcN1Lk7ag_1\tcar\nCgDcN1Lk7ag_2\tcar\nCgDyrbc-LLo_0\tperson\nCgHCCqADKys_0\tperson\nCgQl21vwrqk_0\tperson\nCgQv6o97KqY_0\tperson\nCglmlO92nKA_0\tperson\nCglmlO92nKA_1\tperson\nCgod2p17L48_0\tperson\nCgwHXWDGAak_1\tperson\nCgzt1Kv6Sqg_0\tcow\nCg9H20lr5Uk_0\tperson\nCg9H20lr5Uk_1\tperson\nCg9H20lr5Uk_2\tperson\nChBKKPEO8N0_0\tperson\nChOKPIVr5XE_2\tbicycle\nChPBGkSbJ0g_0\telephant\nChRNCk9Bq-k_0\tcat\nChZB3vAX8sk_0\tperson\nChc7poZ9r-k_3\tskateboard\nChmcE3Lz1Vc_0\tperson\nCh2_CQg4r1o_0\tperson\nCh-PosNzqZ8_4\telephant\nCh-PosNzqZ8_0\telephant\nCiCqdFq_a7U_1\tperson\nCiCqdFq_a7U_0\tperson\nCiLbnwjSJ9w_0\tperson\nCiQOmR8VCzs_0\tperson\nCiQOmR8VCzs_1\tperson\nCiQS0RMaLZQ_0\ttruck\nCiT09gfBJPA_1\tperson\nCiVwjoLvdAs_1\thorse\nCiWhBWV1zGM_0\tcow\nCiWhBWV1zGM_1\tcow\nCiYOn9VW1eY_0\thorse\nCihCAad2Duo_0\tperson\nCilRWTfS8e4_0\tperson\nCiwaaMNfvCo_0\tairplane\nCi0S27Qp1w4_0\tcat\nCi2vW1OGHe0_0\tcat\nCi6mTJ6BqYI_0\tperson\nCjJ3l2smqAc_0\tperson\nCjMaorKuwf0_1\thorse\nCjRX9J2BM4Y_0\tskateboard\nCjUf3D9IsCQ_0\tperson\nCje7Ip85T1I_0\tperson\nCjm9Wky44TM_0\telephant\nCjm9Wky44TM_1\telephant\nCjn-mt97y-w_0\tperson\nCjq3dda3PlA_1\tperson\nCjq3dda3PlA_0\tperson\nCjw2f0M_eB8_0\tbird\nCj1CpXDG_Qw_0\tperson\nCj3PTZcRbd4_0\tperson\nCj3ZEx4SDe4_0\tcow\nCj-a9t9yiiA_0\tperson\nCj-a9t9yiiA_1\tperson\nCkBGaJnF9vo_0\tperson\nCkC43WVctnk_0\tcat\nCkKQhDP2FGY_1\tperson\nCkKQhDP2FGY_0\tperson\nCkKQhDP2FGY_2\tperson\nCkLE-s6CsgY_0\tcow\nCkLwgOIBF_I_0\tperson\nCkLwgOIBF_I_1\tperson\nCkP_70u-2zU_1\tboat\nCkX8laawskQ_2\thorse\nCkZeki9RVDI_0\tperson\nCkZhHtevDk8_0\tperson\nCknHFY05prw_0\tperson\nCkoK8C4Rzj0_0\tperson\nCkvEr5T38Wc_0\tperson\nCkvEr5T38Wc_1\tperson\nCkvEr5T38Wc_2\tperson\nCkyU5jU74Js_1\tdog\nCkyU5jU74Js_0\tdog\nCk8GRgUrpoE_0\tperson\nClBCXl7l2pw_0\tskateboard\nClH2-R5LeVo_0\tcat\nClLZcmIHrTw_0\tperson\nClM3Ftm0S7o_0\tcow\nClRLFlpMUhU_1\thorse\nClSzHW4AuJ0_0\tperson\nClV1oHNuF9o_0\tperson\nClV6A8WNCvw_0\tcow\nbXcKQNGRBvw_3\tairplane\nbXcKQNGRBvw_0\tairplane\nbXcKQNGRBvw_1\tairplane\nbXcKQNGRBvw_2\tairplane\nbXjVvJ8eOJc_0\tskateboard\nbXkjwotai0Y_0\tbicycle\nbXnvGCFA9Dg_0\tperson\nbX9TcejzzTM_0\tperson\nbYCvd_BTMsk_0\tdog\nbYE-vUOh10s_0\tboat\nbYN8lkupLt4_0\tbird\nbYQiCAwebzs_1\tbicycle\nbYSbuWYiixQ_0\tperson\nbYVgzwF1hNw_0\tbicycle\nbYWGnwi8nDQ_0\tmotorcycle\nbYm9aUK2zzk_0\tperson\nbYpG750b7pE_0\tmotorcycle\nbYvzSXZ0w_I_1\tperson\nbYwwOO6vMAw_0\tperson\nbYwwOO6vMAw_1\tperson\nbYyFEbIGMfo_1\tdog\nbY3sDu5BZDI_0\telephant\nbY3sDu5BZDI_1\telephant\nbY3sDu5BZDI_2\telephant\nbY6vPIaJDGA_0\tperson\nbY8BdyCsCAw_0\tperson\nbZL41d9eFyc_0\tcow\nbZRpdnJtcT4_0\ttrain\nbZRpdnJtcT4_2\ttrain\nbZVMygQQgNg_0\tperson\nbZVZbn0oTjo_1\tgiraffe\nbZdq8Rk75M8_0\tknife\nbZgZihlL0IU_0\tperson\nbZsoMlw4CnI_2\tbus\nbZuOWV67gnY_0\tcat\nbZwJl6ye9Cc_0\tmotorcycle\nbZwJl6ye9Cc_1\tmotorcycle\nbZzzlD0C8Jg_0\ttrai
n\nbZ2u1x38Qbg_1\tairplane\nbZ6gk6FLGss_0\tperson\nbaDesUZ9Pyc_0\tbear\nbaRyXrRn_ls_1\tmotorcycle\nbaWLnj87FOc_0\tcat\nbabQ3FBdeqQ_0\tcow\nbagbzsb-tg4_0\tperson\nba1hwKdPRx8_0\tcow\nba3cGHmc_OA_0\tperson\nba5407XQYAQ_0\tcow\nbbHdRyrdpDA_0\tboat\nbbH4CQx07Go_2\tknife\nbbLW6902ITg_0\tperson\nbbLW6902ITg_1\tperson\nbbLW6902ITg_3\tperson\nbbLW6902ITg_4\tperson\nbbM0SbH_pgk_2\tbear\nbbZAdo3awRs_0\tcar\nbbZeVbzmLVw_0\telephant\nbbaUzB0Na2o_0\tperson\nbbfDHSIT9ys_0\tperson\nbbhyEgEjfvQ_0\tcow\nbbjuucY5QQc_0\tperson\nbbkjnF0iGrs_0\thorse\nbbkjnF0iGrs_2\thorse\nbbkjnF0iGrs_3\thorse\nbbkjnF0iGrs_6\thorse\nbbnb-beW0p0_0\thorse\nbb0DRm0ueKk_0\thorse\nbb4sgALviyc_0\tbear\nbb5OO1wMKr8_0\tperson\nbcJ1MAj_A_w_1\tperson\nbcLW7YqnUGs_0\tskateboard\nbcdQmV1-Z5k_0\tmotorcycle\nbcgTPCycRIw_0\tskateboard\nbcksTLjC1fs_0\tmotorcycle\nbcrQdxrU_vI_0\tperson\nbc1C8HrNVqE_0\thorse\nbc28CjoKODI_0\tperson\nbc28CjoKODI_1\tperson\nbc3rySF6iao_0\tperson\nbc6jeLN-DUo_0\ttrain\nbdU9JALjnmw_0\tperson\nbdYKw4SpkQQ_0\tzebra\nbdZpXHSW4Ps_0\tcat\nbdbVAdua3uI_0\tairplane\nbdbVAdua3uI_1\tairplane\nbdcoNmelRw4_1\tdog\nbdcoNmelRw4_2\tdog\nbdcwT2ufUBg_0\tbird\nbddes6RyfCI_0\tskateboard\nbddes6RyfCI_1\tskateboard\nbdeoe5gmCd4_0\telephant\nbdeoe5gmCd4_2\telephant\nbdgSMIY2A8Q_0\thorse\nbdoNsiMM1RY_0\tbird\nbdwlZMpXPJo_8\tbird\nbdwlZMpXPJo_7\tbird\nbd--DVCeT-s_0\tcow\nbeE5VOzxibM_0\tgiraffe\nbeLTv9YiY78_0\tdog\nbeLTv9YiY78_1\tdog\nbeLTv9YiY78_2\tdog\nbeQOHdCA8KM_16\telephant\nbeQOHdCA8KM_3\telephant\nbeQOHdCA8KM_6\telephant\nbeQOHdCA8KM_7\telephant\nbeQOHdCA8KM_10\telephant\nbeQOHdCA8KM_12\telephant\nbeSTl1azmTY_1\tskateboard\nbeVVM2pBQdA_0\tcow\nbeVVM2pBQdA_1\tcow\nbecTICXjrg4_0\tperson\nbeliMXc3JE8_0\ttrain\nbesXR1P9Oew_0\tcar\nbeu-edT1daM_0\tperson\nbe9BCy6kHvY_2\tperson\nbe9CXLatX9I_0\thorse\nbe-ggiVD4V0_0\tknife\nbe-5ARU_aHA_0\tperson\nbe_IhYef3hE_0\tperson\nbfBZLLwpNWA_0\tgiraffe\nbfJaD1qZ2gE_0\tbus\nbfJaD1qZ2gE_3\tbus\nbfJtapJ86Gw_0\tperson\nbfRgL9oanEc_1\tperson\nbfRgL9oanEc_0\tperson\nbfS8FB_HOlY_0\tperson\nbfZfMA1mLrQ_0\tdog\nbfZfMA1mLrQ_1\tdog\nbfaMdaYiK90_0\tcat\nbffC89pE6fo_0\tperson\nbffC89pE6fo_1\tperson\nbfkNVFr6Cwg_0\tcow\nbfkNVFr6Cwg_1\tcow\nbflVgDgAHSo_0\tumbrella\nbfrY2wEePwY_0\tperson\nbfrY2wEePwY_2\tperson\nbfwWF0XO7bE_0\tboat\nbf9YySHJcdQ_0\tperson\nbgAOYaooc18_0\tperson\nbgAo5vgwe2M_0\tzebra\nbgBK4sMnLig_0\tcow\nbgBK4sMnLig_1\tcow\nbgC-r6p-XHU_2\telephant\nbgE_uy3Ml6g_1\tumbrella\nbgHMLwWY4Qo_0\tperson\nbgV-FqQ8Tv8_0\tumbrella\nbgXZ3BpIOh8_0\ttrain\nbgaD7K2iEPI_0\tperson\nbgbS11O9lSw_0\tbus\nbgelX1blhpQ_0\ttruck\nbglPgA_0LAk_0\tmotorcycle\nbgpB-A04RLI_0\tperson\nbgyEHsMav4U_0\tperson\nbhBMa8wQ5KA_0\tbird\nbhGJ9gZmP90_0\tperson\nbhGJ9gZmP90_1\tperson\nbhH_pqCQ3Co_0\tcow\nbhJGFbgXlts_1\tperson\nbhNfsUPLKDg_1\ttrain\nbhWmpmnXSlc_0\tperson\nbhZZubkX8_o_1\tbird\nbhdtzsUvieg_1\tperson\nbhqr680CLr0_0\tperson\nbhrOzwB-7qA_0\tperson\nbhsCCw1J_JU_0\tperson\nbhuOX61sk8M_0\tperson\nbhz6HG2KpnI_0\tskateboard\nbh0ZZ4Z76cc_0\tperson\nbh3QacG9JYk_0\tairplane\nbh3QacG9JYk_1\tairplane\nbh3QacG9JYk_2\tairplane\nbh8aMNVny8s_1\ttruck\nbiAdsjypETI_0\tknife\nbiFm-y7gSrc_0\thorse\nbiGJ8vHOsZM_0\tumbrella\nbiLY6NMsqJU_0\tcat\nbiUFB3c0Ucc_0\tbus\nbiZU5SOHQvc_0\tumbrella\nbibJ3Bv5YmQ_0\tmotorcycle\nbik9GuCughc_1\tbird\nbiuEbYnn68k_0\tbus\nbiwbqbVsZeE_1\telephant\nbiyu3sxIOYc_0\tperson\nbi1kYvu5Irg_0\ttrain\nbi1kYvu5Irg_1\ttrain\nbi3GSUnfzd8_0\tperson\nbi5Bkz2MVP4_0\tbird\nbi5Bkz2MVP4_3\tbird\nbi6BNwvsR_0_0\tperson\nbi-GKlUZMR8_0\tmotorcycle\nbjBwCQ5z4IQ_0\tcat\nbjH2OQR68Vc_0\tperson\nbjRQ69TaeKs_2\tperson\nbjgooTfy3JM_0\ttrain\nbjgooTfy3JM_
1\ttrain\nbjgooTfy3JM_2\ttrain\nbjhEqucWULo_0\tcow\nbjq8de0pw5M_0\tperson\nbjq8de0pw5M_1\tperson\nbjrq_Kj-wSU_0\tairplane\nbjrq_Kj-wSU_1\tairplane\nbjrq_Kj-wSU_2\tairplane\nbjrq_Kj-wSU_3\tairplane\nbjwdTl5zyaI_0\tskateboard\nbjx96uw-Q24_0\tperson\nbj-Grf4s790_0\telephant\nbkElaSUqJjM_0\ttrain\nbkIBcqXKARI_0\tperson\nbkMU7xViDvA_0\tperson\nbkXBjOrn2yI_0\tperson\nbkggwniG4vc_0\tperson\nbkiQTbQF_TA_0\telephant\nbkigtjV1zA0_1\tmotorcycle\nbklheVvsfac_0\ttruck\nbkoOiNz6Zmo_0\tperson\nbkok3wr4188_0\tperson\nbk2l-O9wSEc_0\tperson\nbk8UlOzFy7U_1\tperson\nblAiGXbJxmI_0\ttrain\nblIpNvBakFI_0\tperson\nblW8z3TPVvo_0\tmotorcycle\nblhCjXE5cRo_0\tperson\nbli5Z83QY_U_0\tperson\nblnFzQdaVRc_0\tperson\nbluU1CAbJfo_0\tperson\nblubKbt8mLE_0\tcar\nbluqyqDv2eE_0\tcar\nblv0QslQ524_5\tbus\nblv0QslQ524_6\tbus\nblzDAgvxJMw_0\tperson\nbl1XJCtyP2E_0\ttruck\nbl2xZSpcZqs_0\tcat\nbl6wIjxfuJo_1\tbicycle\nbl6wIjxfuJo_2\tbicycle\nCloG2hcM5nU_9\tbicycle\nCloLHr7NJqg_0\tperson\nCloOQkTkYfY_0\tbus\nClpDLu1qCx4_2\tperson\nClpDLu1qCx4_3\tperson\nClpDLu1qCx4_1\tperson\nClvAi34e1zM_1\telephant\nCl1mEpQ3wy4_0\tboat\nCl1mEpQ3wy4_1\tboat\nCmEoz728tlo_2\tbear\nCmGSMnkcvrg_1\ttrain\nCmIXZuJDwt0_0\tperson\nCmNv_yKt5oM_0\tperson\nCmOIqZyQpPI_3\tbird\nCmOIqZyQpPI_1\tbird\nCmVoggJ6fxY_1\thorse\nCmYL2EyELbA_0\telephant\nCmezWT8A2i8_0\tbus\nCmjUCOwcOT8_4\tbicycle\nCmjUCOwcOT8_11\tbicycle\nCmjjEuS9_Ww_0\tbicycle\nCmjw8kbfDCw_1\tknife\nCmoknpL1cMA_0\tperson\nCmqXoT7CXJs_0\tdog\nCmq1qVX-Ugo_1\tcat\nCmsqpFOcosw_0\tperson\nCmtmoydPH08_0\tcow\nCmxhIEztsyg_1\tskateboard\nCm1y7USHcrg_0\tperson\nCm3tYZlSc0o_0\tskateboard\nCnBJ9TMTRAA_0\tperson\nCnBJ9TMTRAA_1\tperson\nCnCTVtsK5Kw_2\tbear\nCnEXHgq3AE4_2\telephant\nCnGp9Wq2rTs_0\tbear\nCniS9Q6Y200_0\tperson\nCn0UKsWocEI_0\telephant\nCn0UKsWocEI_1\telephant\nCn1dXZ_p3dw_1\tperson\nCn9Bj5B29UI_0\tmotorcycle\nCoBuNWx_OwM_0\tperson\nCoDB7ZeilsQ_0\tperson\nCoKMowfrd5Q_2\ttruck\nCoKMowfrd5Q_3\ttruck\nCoKVaYX3c1k_0\tperson\nCoKVaYX3c1k_1\tperson\nCoKVaYX3c1k_2\tperson\nCoOwm7ccDrs_0\ttruck\nCoSIyrW5lvA_1\tskateboard\nCoSSvI2-U_w_1\tbicycle\nCoZY8o0c-h8_0\telephant\nCoZY8o0c-h8_1\telephant\nCocSNWws-Qo_0\tperson\nCodelARKQ10_0\tskateboard\nCosYvoW04Uk_0\tperson\nCot7Xj8C308_0\tboat\nCoz9g_0N91c_0\tperson\nCo_XBpd6lxE_0\tperson\nCpDHwc5JmK8_3\telephant\nCpFiT_6KvM4_0\tperson\nCpF-80dM2aY_0\tperson\nCpF-80dM2aY_1\tperson\nCpxxxHYsJy8_0\tperson\nCp0lT2opaL0_1\tperson\nCqANE5ByBvY_0\tperson\nCqDjHjvw8T0_0\telephant\nCqDjHjvw8T0_1\telephant\nCqVeLNnA0vk_0\thorse\nCqZz9FnLLjk_0\tknife\nCqkhrld_7LU_0\tperson\nCqzahbOVzO4_0\tperson\nCq02-pFNn6w_0\tmotorcycle\nCq02-pFNn6w_1\tmotorcycle\nCq4KAVAWq7g_0\tperson\nCrAxPJajbcs_0\tairplane\nCrCNqDd18fw_0\tumbrella\nCrUmEDCjFtU_0\tperson\nCrUmEDCjFtU_1\tperson\nCraDHWuN4Q0_0\tperson\nCrgMhrCYmOo_2\tmotorcycle\nCriTKYemGmo_0\tperson\nCrmzwYKpLAY_0\tumbrella\nCrn24ZKAP1k_0\tperson\nCrsjxpJoY5Q_0\tperson\nCru8KBJqhng_0\tperson\nCrz3l2CEDzA_0\tperson\nCr0SWcS1qX0_0\tcow\nCr_B3I0QPEQ_6\tairplane\nCsM_GTD0TZE_0\tperson\nCsPLGd2dgl0_1\tairplane\nCsTntmE8EWs_0\tperson\nCsa542XNEXo_0\tperson\nCsfkuwD6-nA_0\tperson\nCsh_4yR8bFk_1\ttruck\nCsh_4yR8bFk_2\ttruck\nCsii4vkefsM_0\tboat\nCsii4vkefsM_2\tboat\nCsw3kLrhjoM_0\tperson\nCs38JY7Gqjo_3\tskateboard\nCs-Vx_ym23o_1\tbicycle\nCtC2yC9NGTk_0\tbird\nCtD4wnIU0Pw_0\tbicycle\nCtF9IxfLhaQ_1\tperson\nCtF9IxfLhaQ_2\tperson\nCtF9IxfLhaQ_0\tperson\nCtHIoS1lGKA_0\tperson\nCtLVK2j48gA_0\tperson\nCtO5dmTdzYQ_0\tperson\nCtPEAoFPnE4_0\tperson\nCtQPPKpIEIc_0\tperson\nCtTcyoZvRvU_2\tskateboard\nCtUPPSKU8cE_0\tbus\nCtVUqIFqqr8_2\tbus\nCtYDJRkhtpg_1\tumbrella\
nCtYDJRkhtpg_5\tumbrella\nCtfPPnpBKHs_2\tbird\nCtipU0GHAEo_1\telephant\nCtjTAe-FFe4_3\telephant\nCtkjh9fntpQ_0\tbird\nCtkjh9fntpQ_4\tbird\nCtkjh9fntpQ_5\tbird\nCtkjh9fntpQ_2\tbird\nCtkjh9fntpQ_3\tbird\nCtnjw80kgcw_0\tperson\nCtxK3wGlqx0_2\tmotorcycle\nCt1QrXUgBGg_0\tperson\nCt1QrXUgBGg_1\tperson\nCt8S9nC7sfk_1\tperson\nCt870xrnBGU_0\tperson\nCuDfCpgoIjg_6\tboat\nCuGfRQMwYd8_0\tcat\nCuHF9Hd0uwI_0\tperson\nCuIkNejeZrY_0\tcat\nCuUJUrjEcc4_0\tperson\nCuWdZPYMLww_0\tperson\nCvDW2A8hD78_0\tperson\nCvRJwKt7FfY_1\tskateboard\nCvVVS4SUiuw_1\ttrain\nCvZaA28QUK4_1\tknife\nCvajmAL3sjQ_0\tperson\nCvda-hutmbg_0\tdog\nCvqylkq9fwI_0\ttruck\nCvxsoaCV1_8_0\tperson\nCvzsX_s6tek_0\tperson\nCv2T8U0uQcQ_2\tperson\nCwAdBrBzIcA_0\ttruck\nCwBiMh4zHWQ_0\tperson\nCwFcmrnz1yw_0\telephant\nCwFcmrnz1yw_1\telephant\nCwFcmrnz1yw_2\telephant\nCwR2tJptu0Y_2\tmotorcycle\nCwVLRawns04_0\tperson\nCwVTSONqnVw_6\tknife\nCwnHi50fuuQ_0\tperson\nCwnHi50fuuQ_1\tperson\nCw22-zpE1UY_0\tperson\nCw3iLs4yV4g_0\tperson\nCxFRYsUCyWc_0\tcat\nCxH8vGqLVM0_0\tbicycle\nCxH8vGqLVM0_1\tbicycle\nCxH8vGqLVM0_3\tbicycle\nCxH8vGqLVM0_6\tbicycle\nCxJ7Uww1mSk_0\telephant\nCxN5CG94Q5Q_1\tairplane\nCxN-YEErXFg_0\ttrain\nCxPyIeBtRec_2\ttruck\nCxWaiU0rF9g_1\tcow\nCxWaiU0rF9g_0\tcow\nCxXdw0Cqr4Y_2\tairplane\nCxa8q3QXoRs_0\tperson\nCxgqklOxSfo_0\tairplane\nCxgqklOxSfo_2\tairplane\nCxnCTBBNWCY_0\tperson\nCxnCTBBNWCY_1\tperson\nCxoZT0--IBo_0\tperson\nCxooWldim98_0\tperson\nCxs-xZDDZWw_0\tperson\nCxug83tjWyc_0\thorse\nCxzJV_HYpAc_0\tairplane\nCxzJV_HYpAc_1\tairplane\nCx0XeFKQ06o_1\ttrain\nCx7ZY8oqOmE_10\tbicycle\nCx7ZY8oqOmE_6\tbicycle\nCx7ZY8oqOmE_8\tbicycle\nCx9efnltcUY_0\tperson\nCyE1kuECzfg_0\tperson\nCyH0woBc0zU_0\tboat\nCyI7nyp65bI_0\tperson\nCyI7nyp65bI_1\tperson\nCyLLTzV_lAg_0\tcat\nCyOXSqLm7ao_1\tperson\nCyb4-vF1WMM_0\tairplane\nCyedl__okwE_0\tperson\nCyedl__okwE_1\tperson\nCynfaDsQ1AI_0\tzebra\nCysFfEkdDT4_0\tbear\nCytiPd_Wbkg_0\tairplane\nCytiPd_Wbkg_1\tairplane\nCyvInNqvQyE_0\ttruck\nCy002CigJRQ_0\tperson\nCy_hvqOd0RY_0\tknife\nCzFRG22Jmvs_0\tcow\nCzHeIzQZUEg_0\tperson\nCzNFSb4N6p8_0\tperson\nCzQ03Z7Dv5U_2\tskateboard\nCzQ03Z7Dv5U_3\tskateboard\nCzQ03Z7Dv5U_6\tskateboard\nCza2-_wwpd4_0\tperson\nCza2-_wwpd4_1\tperson\nCzcwXF0Z1TQ_0\tcow\nCzt8McI8UTE_0\tperson\nCzze2Jy6Ook_0\tcat\nC0Tk6QryTA0_0\tbus\nC0Tk6QryTA0_1\tbus\nC0a9pkujXQg_1\tperson\nC0lvs-UEqKs_0\tperson\nC0pOQ36uosU_0\tperson\nC0pOQ36uosU_1\tperson\nC0qbh7OJTHI_2\tskateboard\nC0tGKqnFyZA_0\tperson\nC0xTDmlUYSA_0\tperson\nC0xZYHsXNws_0\tperson\nC0xZYHsXNws_1\tperson\nC0xjvq51pVA_0\thorse\nC0xl46ieUxg_0\tskateboard\nC0zUOQoeQrA_0\tperson\nC0zrmcMf8D4_0\tbird\nC05P4mCw-xA_0\tbear\nC1DCcNlUQDk_0\tboat\nC1DX9TjKTrE_0\tbus\nC1MfcNYih9c_1\tperson\nC1RCXQFjvvc_1\tperson\nC1RCXQFjvvc_0\tperson\nC1bdSMUVy2Q_1\ttruck\nC1bdSMUVy2Q_0\ttruck\nC16ZlJRDfUc_0\tbird\nC16_rFYBwUA_0\tperson\nC17jwrOnSCI_0\thorse\nC19rR4b8CSQ_0\tdog\nC1_gk-bIL6Y_0\tairplane\nC1_tauCAYjs_0\tperson\nC2GvHXU8mIc_0\tperson\nC2HZBTrCAf8_0\thorse\nC2Hcs2itPTc_1\telephant\nC2H_P7MX3zw_0\tbus\nC2H_P7MX3zw_1\tbus\nC2IJYHPWHJM_1\tcow\nC2K7zu49SKw_0\tperson\nC2K7zu49SKw_1\tperson\nC2LdkQMjxJk_0\tcow\nC2ROFMcXam4_0\tcat\nC2S4CV9mnC0_0\ttruck\nC2VjZHe3ID8_0\tperson\nC2r9VGslxTE_0\tperson\nC2v7hcs3Ax0_0\tzebra\nC2zRn25TBOo_1\tairplane\nC2zRn25TBOo_2\tairplane\nC2zRn25TBOo_4\tairplane\nC2zRn25TBOo_6\tairplane\nC23ZGYnWhgo_0\tperson\nC26HiGgIjYg_0\tperson\nC2-glFtt9Vw_0\tumbrella\nC3LbuiUjzvo_0\tcat\nC3LbuiUjzvo_1\tcat\nC3LbuiUjzvo_2\tcat\nC3Qu-KUydyg_1\tcow\nC3UX9hrlLeE_0\tperson\nC3YcvZKgCgY_0\tperson\nC3terpXzPm4_0\tperson\nC3z1zbkmwdU_0\tbi
rd\nC30B6KXg9vs_0\tperson\nC3399zrSQ6A_0\thorse\nC34_EkCWJaU_0\tmotorcycle\nC4HzsadhLW0_0\tboat\nC4QHknuNLYI_0\tperson\nC4RAj-omUMo_0\tperson\nC4W_g9eheB8_0\tskateboard\nC4XGGPoj4q8_0\tperson\nC4dV8SPq6Mk_0\tperson\nC4e-5QS1FmU_0\tumbrella\nC4e-5QS1FmU_1\tumbrella\nC4irKghQYTE_0\thorse\nC4jghf6KKYI_0\tskateboard\nC4vFHmzTY-s_0\tcat\nC4xJ3_Wrrn4_0\ttrain\nC4yVuAqcr0U_0\ttrain\nC409K0fAxiM_0\tperson\nC42397qio9c_1\tskateboard\nC4317zxtzKA_0\tperson\nC4-k1XW5O3U_0\tdog\nC5DAyL_gEQU_0\tcow\nC5GJx1VFRm8_2\tcow\nC5HT9La1jDY_0\tperson\nC5JobuZa590_0\tskateboard\nC5MJ8fSfmLw_2\tbear\nC5dPwnswp8Q_0\tcat\nC5jo-fCBqmA_0\tperson\nC5jo-fCBqmA_1\tperson\nC5jo-fCBqmA_2\tperson\nC5pop0SvnOM_0\tperson\nC5r41vkLsKE_0\tperson\nC5sXGZRLfmU_4\ttruck\nC5sXGZRLfmU_6\ttruck\nC5umaWklWFQ_0\tboat\nC5ybfGh51LM_0\tcat\nC55z9Fe6H7A_0\tdog\nC56Bp4toMG8_0\tperson\nC6NYuB7zIzs_0\tperson\nC6NYuB7zIzs_1\tperson\nC6XCgppHkHA_0\tbus\nC6Yy8uEd0bQ_0\tperson\nC6aB6M0DHrU_0\tperson\nC6cOmWIisxU_0\tperson\nC6eN6sMtuXY_1\tboat\nC6gNbZUU7xg_0\tperson\nC6ia-W4TV1U_0\thorse\nC6nHtSy67OY_0\tcow\nC6n6ECY5h84_0\tcow\nC6qWzx58kxo_0\telephant\nC6qWzx58kxo_2\telephant\nC6rqmPvlIlI_0\tperson\nC6upTeuDG4E_1\tskateboard\nC6xv6Wmy97M_0\thorse\nC62nD-_VXpM_0\thorse\nC62nD-_VXpM_1\thorse\nC66OM90TFXI_0\ttrain\nC66OM90TFXI_1\ttrain\nC66z-I_UHqQ_0\tairplane\nC6_p7BXwCTA_0\telephant\nC7CB2A_bxa0_0\tperson\nC7COsB9pcOQ_0\tperson\nC7CXGBdoJWo_0\tcat\nC7KZnM_0j8s_0\tperson\nC7QYoT22ZYo_0\ttrain\nC7W0oxkg-nU_0\tbicycle\nC7kKR6pqYzw_0\thorse\nC7to6tRsC9U_0\tperson\nC72k6hv1NPM_1\tcow\nC72k6hv1NPM_0\tcow\nC7-sqpILAXM_0\tperson\nC7-sqpILAXM_1\tperson\nC7_HhvBNDSw_0\tperson\nC8ETc2K6ef0_0\ttrain\nC8G_kcqjspU_0\tknife\nC8IE7aLZvIA_0\tperson\nC8IUB4Opf44_0\tperson\nC8IUB4Opf44_1\tperson\nC8PqOHn0izQ_6\tbird\nC8Zex-ptYyk_0\tperson\nC8daRmtyPo0_0\tperson\nC8fcFW4HKGs_0\tairplane\nC8mEWe-TWYs_0\tknife\nC8n1dTEDWvk_0\tskateboard\nC8ukXeoRjbI_0\tcow\nC9Zq_rDHwgg_1\tcow\nC9dD6oS_Zs0_0\tperson\nC9je005HOlA_0\tbus\nC9jqFBMRyPs_1\tperson\nC9vG5qPPhzE_1\ttrain\nC9wgqGACPso_2\telephant\nC95TX0IOPa8_0\tskateboard\nC97oHqKqdBk_0\tperson\nC97t3TGT2oc_0\tperson\nC-AoVBwcBUw_0\tperson\nC-FX5hgFDd0_2\tperson\nC-Q9RDsPyOw_0\tperson\nC-Q9RDsPyOw_1\tperson\nC-S34-Drg7M_0\tcow\nC-TWHpbtVNY_1\tperson\nC-WsGZQoLx0_0\tboat\nC-cL2hzThKI_3\tairplane\nC-cL2hzThKI_6\tairplane\nC-omy9mzD7E_0\tperson\nC-q9nO8X1rs_0\tperson\nC-seg-BCK0U_0\tbird\nC-v3Ttrvuo8_0\tairplane\nC-38hraIyOs_0\tperson\nC-47EdafspI_1\tairplane\nC-54wttM4AA_0\tperson\nC-9LBJqCMm0_0\ttrain\nC-_ebeJtjyE_0\tperson\nC_BX3dg-lc4_0\tperson\nC_DOGAVETwk_1\tbird\nC_EMJm-Z2I8_1\tbird\nC_EMJm-Z2I8_2\tbird\nC_EwPB6zgIA_0\tperson\nC_EwPB6zgIA_1\tperson\nC_GnC_IEwJM_0\tperson\nC_GnC_IEwJM_1\tperson\nC_HBU7EUsoE_1\tperson\nC_HBU7EUsoE_0\tperson\nC_IjqR1NOxw_0\tperson\nC_POS7ndKw0_0\ttruck\nC_PXq5TsPRQ_1\ttrain\nC_TfufSsuEU_1\tperson\nC_VePcGhr10_0\tknife\nC_aP0fKyudQ_0\thorse\nC_aYcFttRC8_1\tperson\nC_aYcFttRC8_0\tperson\nC_cUky_0p2Q_0\tcow\nC_uGdKk79X0_1\tperson\nC_ykabkQ2U0_2\tperson\nC_2EFIuyDSA_0\tperson\nC_2p_N8Kvpk_0\tperson\nDAJkfl5W8Vc_0\thorse\nDANymtBuoIs_0\tdog\nDAOBGjTf7xI_0\tperson\nDAQ9-YTrpp0_0\tcat\nDAU6UNdxbRI_0\tperson\nDAn4fH-1Ucs_0\tperson\nDApkEgrJX0Q_0\tperson\nDAqHnZA6tBQ_0\ttruck\nDAtSTeTmg8I_1\thorse\nDAwdyKiZyzM_0\tperson\nDA1bsx2RsGA_0\tperson\nDA1bsx2RsGA_1\tperson\nDA4LF3u2VTI_0\tcar\nDA5X-ADHM1w_0\tperson\nDBFMXaS9LRg_1\tumbrella\nDBLaZSSthxo_0\tperson\nDBR0l2rW6Ew_0\thorse\nDBVbRonJkb8_0\tperson\nDBaAVcI4Ftw_0\tperson\nDBaAVcI4Ftw_1\tperson\nDBmVOTuCJ8Q_0\tperson\nDBvOm1qnWrA_0\tcow\nDBySPDEqsO8_0\tpe
rson\nDB1Cvyyike0_0\tairplane\nDB3lsf7fD84_0\tdog\nDB6TJh9r1Dw_0\tperson\nDCE8Dg_ycjo_0\ttruck\nDCHv6sxfCAs_0\tperson\nDCPk1uyVNlU_0\tperson\nbmHyfvCZWsg_0\telephant\nbmHyfvCZWsg_2\telephant\nbmHyfvCZWsg_3\telephant\nbmLLdC88ohM_0\ttrain\nbmMB6Mr1uKI_1\tperson\nbmPhh5NpV7U_0\tperson\nbmQbHpw-4fY_1\tbird\nbmUFMo3pjyo_1\tairplane\nbmhSkbKIg0U_0\tcow\nbmhSkbKIg0U_2\tcow\nbmhSkbKIg0U_1\tcow\nbmhfPSKCY8I_1\tdog\nbmqPIwMWGj4_0\tperson\nbmuIwo4T6rk_0\tcow\nbmvh7yxyWcY_1\thorse\nbm2eU4uLgQE_0\tskateboard\nbm8MRDfmerA_2\tperson\nbm8MRDfmerA_0\tperson\nbnOUoCjxIvA_0\tbird\nbnWQnn3a2xE_0\tcat\nbnZwZd6xdHY_0\tperson\nbnc1LyPUCLg_0\ttrain\nbnfN43NoRbA_0\tperson\nbnqbJR2oSPk_1\tperson\nbnqbJR2oSPk_0\tperson\nbnsuTEBQy44_0\tperson\nbnw6G0Prvc0_0\tbus\nbnyALwWqo4Y_3\tcow\nbn8epY7auRE_1\tperson\nbn8epY7auRE_0\tperson\nbn9y-iIDoUU_0\tperson\nbn9y-iIDoUU_1\tperson\nboHeJDDjRf4_1\tperson\nboIKCyPzxr8_0\tbicycle\nboNYwNYmh1E_0\tcat\nboVVWwoXNDw_0\ttruck\nboZ6xZrNpzc_0\tperson\nboadjC5Lci8_0\tperson\nbocql7vYA4o_0\tbus\nboja3N4XQVo_0\tperson\nborBr_AiOmM_0\tperson\nbornws-twE0_4\tairplane\nbosTHwpZ8Ao_1\tdog\nbo7P3hYkeog_0\tperson\nbo9sUjViaHQ_0\tperson\nbo-qyHCKssw_0\tbird\nbo-qyHCKssw_4\tbird\nbpI4nUgSqbE_2\tperson\nbpI4nUgSqbE_0\tperson\nbpI4nUgSqbE_1\tperson\nbpJNbivFLKE_0\tskateboard\nbpdgYRz5hPs_0\tperson\nbpiM4FHf540_0\tperson\nbpjVhXyB4M0_0\tairplane\nbpjVhXyB4M0_2\tairplane\nbpsMni7yj3M_0\ttruck\nbps3HXPsekI_0\tbear\nbpu9NYWxcEE_0\tskateboard\nbpyH8PRkBQM_0\tperson\nbp1zW8j_ajo_3\tbus\nbp26IdTs4XE_0\tperson\nbp3rDJju8n4_0\tperson\nbp3xwI_FfOI_0\telephant\nbp6K7EUtORo_0\tcow\nbqBtysMz94c_0\tperson\nbqEmBkEnR1c_0\tperson\nbqGkchWbZYE_0\tcar\nbqJcZwUB1Go_0\tperson\nbqPKigpT9AY_0\tperson\nbqQk37pcpVA_0\tperson\nbqaeUBH6J3Y_0\tperson\nbqhQG8t_2XA_0\tperson\nbqjcNzWyaC4_1\tairplane\nbqoG__OO_5g_0\tperson\nbquLxAXnaww_0\ttruck\nbqwFWjwCZas_0\ttruck\nbq6n9q-Qpv8_0\tperson\nbq6870eY1a8_7\tbicycle\nbrDq8RFzVTo_1\ttruck\nbrIIDuCmk-E_0\tperson\nbrLbzZeRz1o_0\tperson\nbrLeJHMfMXQ_0\thorse\nbrNR68fKeMk_0\tbus\nbrWg7FAeBEA_0\tperson\nbrZj8bv9oxY_1\tperson\nbrhA4NqjrgQ_0\thorse\nbrh4hrmrs0Y_1\tskateboard\nbrpbaoTNe4s_4\tbicycle\nbrpbaoTNe4s_0\tbicycle\nbr3e--6oH8Y_0\tairplane\nbsGmFJGua4w_0\telephant\nbsR9KXIHlCM_0\tumbrella\nbsVBX8u9pW8_0\tbus\nbsXpGvnXpmk_0\tcow\nbsa-G_HEllM_0\tperson\nbsbzpk_ejJk_0\tperson\nbsbzpk_ejJk_1\tperson\nbsgdfqE8ySk_0\tperson\nbspbqjb3wAg_0\tperson\nbsv_swJ9_KY_0\tknife\nbs2FVeXKiYQ_0\tperson\nbs3u00S0eu0_0\tperson\nbtI7FYFXsfI_0\tperson\nbtL1Ptjq7pM_0\tmotorcycle\nbtMmnZdL_uQ_0\tperson\nbtO34shZMZo_0\thorse\nbtSyjckocDA_0\tperson\nbtVQJbFp8Dw_0\tcow\nbtdt4lysW6U_0\tdog\nbtihrVidTTg_0\tcat\nbtk27mnJY_A_1\tperson\nbtrdQ6N7QJc_0\ttruck\nbtrdQ6N7QJc_1\ttruck\nbtsT4XRF0nI_2\tcat\nbtul_U3BMKI_0\tbus\nbtvg47tz3Ps_1\tperson\nbtvg47tz3Ps_0\tperson\nbtz7EwI5rYY_0\tperson\nbt75khQG0w8_1\tbird\nbuFiFNHj41w_0\tperson\nbuOqwfPnqkI_0\tcow\nbuRfiT3Mq6Q_0\tbear\nbuSgd-PrRmA_0\telephant\nbuSgd-PrRmA_2\telephant\nbuSgd-PrRmA_6\telephant\nbuSgd-PrRmA_8\telephant\nbuWf8ffXWTs_0\tperson\nbue8SUcqigE_0\tcat\nbugTv6zkE0Q_0\tperson\nbuh8d20UxNw_1\tairplane\nbulc7gZ_YQY_0\tboat\nbuqR3s7EZeQ_0\tperson\nbuq0_IIvQqc_0\tperson\nbusJdrzEeJU_0\ttruck\nbuyJwHRaSYc_0\tperson\nbuyJwHRaSYc_1\tperson\nbuzd3FYmwQQ_0\tbus\nbu6QE_qf8fw_0\tskateboard\nbvLQLfRAI9s_0\tperson\nbvW_ZJYSOLg_0\tperson\nbva98_iD8pI_0\tperson\nbvc6dUfKFpM_0\tskateboard\nbvg-QHsENSc_0\tumbrella\nbvnuyMz5Pk4_1\tperson\nbvnuyMz5Pk4_0\tperson\nbvqPJIDHXHI_0\tperson\nbvqPJIDHXHI_1\tperson\nbvwJ75OkrTk_0\tperson\nbvwJ75OkrTk_1\tperson\nbvwwPOK7lN8_0\tskateboar
d\nbvw4raRDAys_0\tperson\nbvxAWBUG1zk_0\tdog\nbv6ASjMljew_2\tperson\nbv6ASjMljew_0\tperson\nbv6ASjMljew_1\tperson\nbv7NOTxSDhg_0\tperson\nbv7lroHoMyE_0\tperson\nbv8CHN4kwyM_0\tperson\nbv9J7oplKjY_1\tbird\nbv-ps8hofSY_0\tperson\nbv_rrakMnsY_0\telephant\nbwB-cfh8UFY_0\tcat\nbwIBXBulTRg_0\tperson\nbwM3RKdZAd0_1\tairplane\nbwM3RKdZAd0_2\tairplane\nbwSSE1XeKkg_0\tperson\nbwSSE1XeKkg_1\tperson\nbwTJKRhesM4_0\tperson\nbwZEDD10b44_0\tperson\nbwd7bbxG4Kw_1\tperson\nbwjUOg-CI1E_0\thorse\nbwotbTZHoPA_0\thorse\nbwotbTZHoPA_1\thorse\nbwv4Q2VqV5A_0\tbus\nbwv4Q2VqV5A_3\tbus\nbwwud6bxEeY_3\telephant\nbw1HepCVmL8_0\tperson\nbw3c96BQrRU_0\tcar\nbw3c96BQrRU_1\tcar\nbw96DHOgI1I_0\tairplane\nbw_opOTzI6k_0\tdog\nbxRX_05rH9Y_0\tbus\nbxXWi1nvXjI_1\tbird\nbxYeOYlqDPc_0\tcow\nbxaC_opt7IU_0\ttruck\nbxjIDI2ZkO4_0\tcat\nbxnu-AITJt4_0\tperson\nbxoclb4AFb8_0\tperson\nbxsI00qOi6c_0\tperson\nbx0h8tvY6kw_0\tperson\nbx6BVBAcBtM_0\tperson\nbx6BVBAcBtM_1\tperson\nbx7PtvZe6O8_1\tairplane\nbx7-RzWnIe4_1\ttruck\nbyDPGQJdn1s_0\tperson\nbyQIRt1JF9I_2\tdog\nbyQIRt1JF9I_0\tdog\nbyQIRt1JF9I_1\tdog\nbycJD4U6rIs_0\tbird\nbyehVoG0_eg_0\tperson\nbye0FepI8wg_0\tbird\nbyi-4Qx3vx4_0\tperson\nbykN9ap_QTw_0\tbird\nbyvddKaL_kw_0\tperson\nDCRIRGz2xhc_0\tperson\nDCRIRGz2xhc_1\tperson\nDCUcxHDfYiE_1\tcow\nDCUvhnZnRGQ_0\thorse\nDCXrBMEdS4E_1\tperson\nDCrv8CyK9zM_0\tbus\nDCx698xXxjs_0\tperson\nDC0PPRyXlD4_0\tperson\nDC4ZTdVoj2o_0\tboat\nDC5fRZmUZV8_1\tairplane\nDC8lKdla6rE_0\tperson\nDC8lKdla6rE_1\tperson\nDC_Kd2iaw9U_0\tperson\nDDZILIDFFXc_0\telephant\nDDd8CfnxkYM_0\tperson\nDDgtm9B7Yj0_0\ttrain\nDDhlugZ-vro_0\tperson\nDDhlugZ-vro_1\tperson\nDDjUzAM4mLE_0\tbus\nDDjUzAM4mLE_1\tbus\nDDjUzAM4mLE_2\tbus\nDDjUzAM4mLE_4\tbus\nDDoBBLQQ1Mg_0\ttrain\nDDtWIKexWpM_0\tskateboard\nDDw2iF2W4HI_0\tbird\nDD4YGjlBsHc_0\tboat\nDD844YVVMXE_6\tbicycle\nDD844YVVMXE_0\tbicycle\nDD844YVVMXE_1\tbicycle\nDD844YVVMXE_3\tbicycle\nDD844YVVMXE_4\tbicycle\nDD844YVVMXE_5\tbicycle\nDEHHjz2xiz4_0\tperson\nDEI-qJD08Pc_0\tperson\nDELUfY3m37k_0\tperson\nDEVUyfQt_G0_0\tcow\nDEVUyfQt_G0_3\tcow\nDEVUyfQt_G0_1\tcow\nDEXhh5rt_24_0\tmotorcycle\nDEXhh5rt_24_1\tmotorcycle\nDEZHoMWiFBQ_1\tperson\nDEau5L3A9S0_0\tperson\nDEjPKQLASJg_0\tumbrella\nDEtj0Fb-Jbo_0\tskateboard\nDEuYWYNXbw4_0\ttruck\nDE3kl7rbakE_0\tskateboard\nDE6z5oB-0vo_0\telephant\nDFBlkKPYtl0_1\tcow\nDFBlkKPYtl0_0\tcow\nDFI7_dtUb0U_1\tgiraffe\nDFI7_dtUb0U_3\tgiraffe\nDFRmdyjR_Dc_0\tgiraffe\nDFb4KWUX31Y_0\tperson\nDFpZ6f1iWT4_0\tperson\nDFwPVEPK4-Y_0\tcat\nDFzgqOHlnAk_0\tperson\nDGC_pivLAEE_0\tperson\nDGMfSMlhL4w_4\telephant\nDGMfSMlhL4w_6\telephant\nDGMfSMlhL4w_13\telephant\nDGMfSMlhL4w_17\telephant\nDGM9CDF3ks8_2\tmotorcycle\nDGM9CDF3ks8_0\tmotorcycle\nDGM9CDF3ks8_1\tmotorcycle\nDGbZYKPp7XI_0\tperson\nDGc9VSWQUyQ_2\tperson\nDGc9VSWQUyQ_1\tperson\nDGp5vBVf28g_0\tperson\nDGsQAjKXPBw_0\tcat\nDGs0ZHnAtkg_1\tperson\nDGs0ZHnAtkg_0\tperson\nDGvsndSWlBw_0\telephant\nDGx5aC4h8wg_0\thorse\nDGygUuHcJhs_0\tperson\nDGygUuHcJhs_1\tperson\nDG8TJBoerZ0_1\tperson\nDG8TJBoerZ0_0\tperson\nDG93jIsco3E_0\tperson\nDG93jIsco3E_1\tperson\nDHB_RgHOHdo_0\tumbrella\nDHB_RgHOHdo_1\tumbrella\nDHLK8xDGwL0_2\tknife\nDHLg5KzzoOM_2\tcow\nDHLg5KzzoOM_0\tcow\nDHPWnuYI2qA_0\tperson\nDHSGQLguGZ4_0\ttruck\nDHdFVfp7SvM_1\thorse\nDHl_QoiyZ2I_1\tperson\nDHl_QoiyZ2I_2\tperson\nDHl_QoiyZ2I_0\tperson\nDHqrGwHgnAA_0\tperson\nDHr77uGYi-g_0\tdog\nDHsorh6ngMI_0\tumbrella\nDHs1KtWx2n4_0\tperson\nDH0OVsYB2vs_0\tperson\nDH5nSZZ6uJE_0\tumbrella\nDH_wEdP1Glk_2\ttrain\nDIFEQ3rorSw_0\tperson\nDILtO1oyoCY_0\tperson\nDIOuJC_mv_k_0\tperson\nDIO8l6DAJX0_0\tperson\nDIO8l6DAJX0_1\tperson\n
DIP8d1YC6vM_0\tperson\nDISU2i6bJqs_0\tcow\nDIaTXSXAfJM_1\tperson\nDIaTXSXAfJM_0\tperson\nDIpJyhb8gzw_3\tmotorcycle\nDI7rj5AAYEE_0\telephant\nDI801ysby74_0\tknife\nDJD4Xlf0eNg_0\tperson\nDJKFzJe6KAk_1\tskateboard\nDJKokwprK90_2\tskateboard\nDJLSHLPE0po_0\tperson\nDJQ8goQ4xyo_0\tperson\nDJV-ft_10HY_1\tperson\nDJjjrdYts2s_0\telephant\nDJ4oQ03HqyE_0\tbicycle\nDKBIz_MLIpw_2\tknife\nDKC58UBq-0w_1\tairplane\nDKEmSml-t4c_1\tperson\nDKEmSml-t4c_0\tperson\nDKHCjzNZE3U_0\telephant\nDKHCjzNZE3U_4\telephant\nDKICHseWnGQ_0\tperson\nDKJ3As_9Mlw_0\tperson\nDKKsGGUWero_0\tperson\nDKLxBVm3HHk_0\tairplane\nDKMUARFnh2Q_0\tperson\nDKShwn6Xk8w_0\tcat\nDKZ21QA0lBM_1\tperson\nDKcpPg_tEUU_0\tskateboard\nDKj3fFeAaL8_0\tperson\nDKq7d2C6gOI_0\tmotorcycle\nDKxIadOj4D0_0\thorse\nDKyckH3XY8Y_0\tbicycle\nDKydJWySeUw_0\tcar\nDLKE31mt2Qc_0\tbird\nDLLrkv1aF-k_0\ttrain\nDLMDzB4XBPg_0\tperson\nDLPmEX5pwY0_0\tcow\nDLT57E3vm98_2\ttruck\nDLct7_2tyWI_0\tperson\nDLd6kxxgSUM_0\tperson\nDLkx4w5oteM_0\tperson\nDLmCj6q5vD0_0\tperson\nDL3V2mhMX7M_0\tskateboard\nDL3eQSTbZ9Y_0\tskateboard\nDMB6Mr7lTSI_0\tperson\nDMEXGsc-PaU_0\tperson\nDMFEU87_IrU_2\tboat\nDMR4kX1M_zk_2\telephant\nDMR4kX1M_zk_1\telephant\nDMTP7OyjdJ4_4\tbus\nDMT_n1VJG80_2\tbird\nDMbwyGKLF4c_0\tperson\nDMb-AjUXKe8_0\tgiraffe\nDMiFC67o2P0_1\thorse\nDMiFC67o2P0_2\thorse\nDMiFC67o2P0_3\thorse\nDMn1JpU6MBE_0\tperson\nDMn-kaSNd5Q_0\tperson\nDMuLn7wJTcc_0\tperson\nDM7c57qvjgs_0\tperson\nDNAMMWkSfLY_11\tumbrella\nDNAjFU24eK8_0\tboat\nDNB4bgEP-8Y_0\tperson\nDNGlLqzJF6Q_0\tperson\nDNGlLqzJF6Q_1\tperson\nDNOZeC0gZzs_0\ttruck\nDNXuVh_X_qY_1\tperson\nDNXuVh_X_qY_0\tperson\nDNhOrRaOe2M_0\tperson\nDNul7ILzxkQ_0\tperson\nDNul7ILzxkQ_1\tperson\nDN0xWDfCAM0_0\tmotorcycle\nDN1ujoUaAKU_0\tperson\nDN1ujoUaAKU_1\tperson\nDN4TuB3csDg_0\tperson\nDN4e8ljPm1g_0\tbicycle\nDN5mGCGzOOY_0\tperson\nDN7FitWe9k8_0\tperson\nDN8yb60bxNc_0\tperson\nDOAU-JodN0U_1\tairplane\nDOAmtFxCuKA_1\tperson\nDODU9JghuAA_0\tcow\nDORauVZJhAU_1\tperson\nDORauVZJhAU_0\tperson\nDOhLqHOLbQY_0\tperson\nDOiUy3AGiKw_0\tperson\nDOiUy3AGiKw_2\tperson\nDOoTpSSHVho_0\ttruck\nDOoTpSSHVho_1\ttruck\nDOsVwDV787M_0\tbus\nDOuULWa1RKM_0\tperson\nDOvC_-Yrn5k_0\tcat\nDPAEt1AqwbQ_1\tcar\nDPCyQOQdLHE_0\tcat\nDPFO_O_f3hc_0\tcow\nDPIm8x0i2yo_0\tmotorcycle\nDPJ7ZSWY2Qs_0\tskateboard\nDPXJpAVtRfM_0\ttrain\nDPXJpAVtRfM_1\ttrain\nDPZi4DZaTmk_0\tperson\nDPZi4DZaTmk_1\tperson\nDPelBJ73uaU_0\tbicycle\nDPo9M61p8gI_0\tumbrella\nDPvxwOvedrQ_1\tknife\nDPz3CG4lD2Q_5\ttruck\nDPz3CG4lD2Q_6\ttruck\nDP2q1TrqjAE_0\tperson\nDP2q1TrqjAE_1\tperson\nDP6ZB5PxNfc_0\tperson\nDP-JZPR9HFc_2\telephant\nDQDV1Wr7qo8_0\tbear\nDQOglBZHFCs_0\tbear\nDQZiSQmMBnc_0\tbird\nDQcCfbTKP1s_1\tperson\nDQcCfbTKP1s_2\tperson\nDQcCfbTKP1s_0\tperson\nbywgcqNg6RU_2\tcar\nby7PLb7MqM0_0\tmotorcycle\nby_OJvQqlKE_0\tperson\nbzKVRbSQpZE_0\tknife\nbzLdvZQAWgA_0\tperson\nbzO5MBTTrdQ_0\tperson\nbzRELZo9WMU_2\tdog\nbzRELZo9WMU_0\tdog\nbzZgsynjAGk_0\tcow\nbzfE3U02_44_1\tperson\nbzfE3U02_44_0\tperson\nbzimWzymgu0_0\tperson\nbzquVP0NUms_2\ttruck\nbz5Ht4jyT0k_0\tbus\nbz66OedbeoI_0\tperson\nb0C_2T7-IfU_0\tcat\nb0GlXXGkfRQ_0\tperson\nb0GlXXGkfRQ_1\tperson\nb0HXAfyZ7Sk_1\tperson\nb0Q3EfK70fg_2\tairplane\nb0Q3EfK70fg_4\tairplane\nb0Q3EfK70fg_5\tairplane\nb0Q3EfK70fg_6\tairplane\nb0a7ewqE8S4_0\tdog\nb0nOQfZSaUo_0\tperson\nb0nt17hBmDw_0\tboat\nb0qXUUs3-WE_1\tperson\nb0t8uuynzIM_0\ttrain\nb0xQRq8njAI_0\tcat\nb0z1nalEX08_0\ttruck\nb0-UOt-DT1A_0\tperson\nb1ETK4nP9ag_0\tdog\nb1EnXvOZQbQ_0\ttruck\nb1Gd5IWJBRI_0\tperson\nb1R3uk0VLc4_0\tperson\nb1SyeZsSk80_5\telephant\nb1SyeZsSk80_3\telephant\nb1UAPTD4s74_0\tperson\nb
1UpjRRBrTw_0\tcat\nb1cpAYk99_U_0\tperson\nb1cpAYk99_U_2\tperson\nb1cpAYk99_U_3\tperson\nb17OiOMReIs_0\tperson\nb1-WFxZ7Lcs_0\ttruck\nb2DqNP9s4t0_0\tperson\nb2Tm_7DUimQ_0\tperson\nb2Y6KLIX5vE_1\tmotorcycle\nb2Y6KLIX5vE_0\tmotorcycle\nb2azzMxEH84_0\tmotorcycle\nb2fq5Ba1L8M_0\tperson\nb2fsE3wZfWM_1\tperson\nb2m2gaVpjNE_0\tperson\nb2qNS9qjYbE_1\tperson\nb2tlrwd_LIg_0\tperson\nb28pEbOSeUs_0\tdog\nb2_dSc2NxNI_0\tperson\nb3KP0d-WX38_0\tbicycle\nb3KP0d-WX38_1\tbicycle\nb3KP0d-WX38_2\tbicycle\nb3R6fHlRZu4_1\tbicycle\nb3R6fHlRZu4_3\tbicycle\nb3R6fHlRZu4_4\tbicycle\nb3SsKosfjOA_0\ttrain\nb3SsKosfjOA_1\ttrain\nb3SsKosfjOA_2\ttrain\nb3UOZHA5jRI_0\tcat\nb3Z1Ay2o1zQ_0\tknife\nb3bkNCYQbwc_0\tcow\nb3p-fFVYM4E_2\ttrain\nb3p-fFVYM4E_4\ttrain\nb3p-fFVYM4E_6\ttrain\nb3tgGsan2vc_0\ttruck\nb3x6f5xFPTQ_0\thorse\nb3x6f5xFPTQ_1\thorse\nb3x8Gwk4V8o_1\tperson\nb3x8Gwk4V8o_0\tperson\nb323CLKf_vM_0\tperson\nb34Cdm6l5_k_1\tairplane\nb34JUq19S0E_2\tmotorcycle\nb34JUq19S0E_0\tmotorcycle\nb34JUq19S0E_1\tmotorcycle\nb344je6lVYA_0\tairplane\nb35ihWGyz_4_0\tcat\nb37tPdAEkEw_0\tperson\nb39uBVwcm48_0\tmotorcycle\nb4E8uT19QkY_0\tbus\nb4E8uT19QkY_1\tbus\nb4FBbr4Pud8_0\tperson\nb4GXrkSKAdA_0\tcat\nb4HAPQ_xX5E_0\tperson\nb4HAPQ_xX5E_1\tperson\nb4KwBIif5OY_0\tcow\nb4KwBIif5OY_2\tcow\nb4KwBIif5OY_3\tcow\nb4KwBIif5OY_4\tcow\nb4UXSjdnqZ0_0\tperson\nb4Xn8--nfvI_0\tperson\nb4aEJNvYqtU_0\tbear\nb4j8lkkY_lE_0\tzebra\nb4tTUDVt6Gk_0\tperson\nb42WUwHAKPs_0\tboat\nb455pPKgTj4_0\tperson\nb5D9lQq3uf8_0\tbear\nb5IshxZjL7o_0\tmotorcycle\nb5NxbNaAo_8_0\tperson\nb5R1HVvc040_1\ttrain\nb5S8Db1Gu7I_1\tbicycle\nb5S8Db1Gu7I_3\tbicycle\nb5T_VSM7nbg_0\tmotorcycle\nb5nwFyniymA_0\tdog\nb5ud9dsnS1c_1\tperson\nb5ud9dsnS1c_0\tperson\nb51dSWD8MF4_0\telephant\nb59pPUKW_78_0\tcar\nb5-eXPHW4Mg_0\tperson\nb6AoStVIzkw_2\tperson\nb6IE2imnfp4_0\tperson\nb6MtzhRufn4_2\tskateboard\nb6MtzhRufn4_0\tskateboard\nb6RIavVJ660_1\tperson\nb6dVZMAHwro_1\tairplane\nb6gsIu7Pxbc_0\tdog\nb6ndIInoIzU_0\tboat\nb6xUAyNCbdY_0\tperson\nb61MghNCCTI_0\tperson\nb61MghNCCTI_1\tperson\nb65S2P2Pfms_0\tperson\nb66BE9WdQP0_2\tbicycle\nb7HqfhRNtAQ_0\tcow\nb7H_n_w2eFQ_0\tperson\nb7Igw_OO-P4_0\tperson\nb7LHlx86tk0_0\ttrain\nb7RYkf4oXv0_0\tskateboard\nb7WQe48-0NI_1\tgiraffe\nb7WQe48-0NI_0\telephant\nb7WiE1a8IAM_0\tperson\nb7go-l8jA5s_1\tboat\nb7hJ62ORLHc_0\tperson\nb7iLQoOKVrM_1\thorse\nb7ivqvv6s6A_0\tmotorcycle\nb7mawJlPASQ_0\tperson\nb7u0NZEc8OI_1\tperson\nb7ycKg8GLHA_0\tperson\nb71SThzfrDg_0\tbird\nb78PYqyYWZA_0\tperson\nb8LqaxvNRHw_0\tperson\nb8LqaxvNRHw_1\tperson\nb8VoRclgULc_0\tcat\nb8aWJIa4RFI_0\tgiraffe\nb8es8BWiC5c_1\tperson\nb8g4M9Yov8M_11\tbear\nb8g4M9Yov8M_3\tbear\nb8xtOCMwjJM_1\tbird\nb8x1qHT8nvE_2\tboat\nb8yA8bHlrtQ_0\tbus\nb8yqEFXS8Ck_0\thorse\nb82N91HYnUo_0\tknife\nb9O_mJTNj2A_0\ttrain\nb9SLHObDJzQ_0\thorse\nb9Y5tpPv-LQ_0\tcar\nb9iCmG9fIHc_1\tmotorcycle\nb9melHkIeV4_0\tbird\nb9oiO21MJh0_0\thorse\nb9oiO21MJh0_1\thorse\nb9u4WV9ft4s_0\tmotorcycle\nb9wwfAu5DCs_0\tskateboard\nb96WdT0DXKk_2\tbicycle\nb96WdT0DXKk_0\tbicycle\nb96WdT0DXKk_1\tbicycle\nb98Gs0d8AKo_0\tmotorcycle\nb9-xiVm1Xck_0\tskateboard\nb9-2bW13faI_0\tperson\nb-Cp0i6fBOU_0\tperson\nb-Cp0i6fBOU_1\tperson\nb-S7G5A0MNI_0\tperson\nb-T0AS7CuxI_1\tknife\nb-VYy9eEU6w_0\tperson\nb-W1PY33nQg_0\tperson\nb-hT8zKObfM_0\tperson\nb-hqwYjKCH8_0\ttruck\nb-i49sLOjBo_0\tperson\nb-i49sLOjBo_1\tperson\nb-mQajOHUAA_0\tperson\nb-mQajOHUAA_1\tperson\nb-mQajOHUAA_2\tperson\nb-ncxt38EFw_0\tperson\nb-wiIOBccF0_1\tperson\nb-x--HjbnpM_0\tknife\nb-5K7RwiHdw_3\tboat\nb-8ARNgk-Tw_0\tperson\nb-_FeNpM_wI_0\tperson\nb_B3oYiBWi4_1\tskateboard\nb_KBD-NL4Vo_0\ttra
in\nb_ZVDwMrcEU_0\tairplane\nb_exMPY7gnM_0\tperson\nb_fR7aS10Z0_0\tbear\nb_h4xugql44_0\tumbrella\nb_kksCK6cbw_0\tcat\nb_n776bwyJo_0\tboat\nb_n776bwyJo_1\tboat\nb_vDLf3193s_0\tbus\nb_1TwBIgwKE_0\tcar\nb_7EvlxDWFc_0\ttruck\ncAARR6q3Qq8_1\tskateboard\ncAARR6q3Qq8_0\tskateboard\ncAFqK_6ltXw_0\tcat\ncAJsxlkMG_s_0\tdog\ncAJsxlkMG_s_2\tdog\ncAJsxlkMG_s_1\tdog\ncAKfCLDFg34_1\tperson\ncASL6wZ33vA_0\tboat\ncAYIECe6Bvs_0\ttruck\ncAnDryag2FA_0\ttruck\ncAqs3d9KNzk_0\tperson\ncArYvJEUdOg_0\thorse\ncA0HCmGOK84_8\thorse\ncBAG9pjaV70_0\tcow\ncBBDfwkH23A_5\thorse\ncBBDfwkH23A_2\thorse\nDQk3Xvbv57I_0\tcat\nDQqBXfTgqTE_0\ttrain\nDQ04rtHIqHQ_0\telephant\nDQ7GZOJxra8_0\tperson\nDQ-vQygnOx0_0\ttrain\nDQ-vQygnOx0_1\ttrain\nDQ-vQygnOx0_2\ttrain\nDQ-vQygnOx0_5\ttrain\nDQ-vQygnOx0_7\ttrain\nDQ_yyvagS0g_0\ttruck\nDRMoOpmUgn8_0\tperson\nDRO4MalcQFk_0\tperson\nDRSSiSNzV7Y_0\tperson\nDRXxJArWrQA_0\tperson\nDRaIGIiQXd0_1\ttrain\nDRaX3P2ysBk_0\tperson\nDRhRKwI26n8_0\tbear\nDRhRKwI26n8_1\tbear\nDRseWxukwaI_0\tperson\nDRsoi5DxAJk_0\tcar\nDRuDqkZ0zfE_0\tperson\nDRuDqkZ0zfE_2\tperson\nDRuDqkZ0zfE_1\tperson\nDRxLQ6we5YU_0\thorse\nDRybt0Cgr_U_1\tbird\nDR0QGL0n_wM_0\tperson\nDR4mzyMklY8_0\tskateboard\nDR82KhNzs1w_0\tperson\nDR-AMnnLCCQ_0\tcat\nDR_jo4aSqn0_0\tperson\nDR_jo4aSqn0_1\tperson\nDSAbzYpUW5w_0\tcow\nDSB9X3bgG2A_0\tperson\nDSCt67aveiw_0\ttruck\nDSCt67aveiw_2\ttruck\nDSEt02E1kJE_0\tperson\nDSM_BlK-ggg_1\tperson\nDSM_BlK-ggg_2\tperson\nDSRGbK9rPbo_0\ttrain\nDSWlLGL3xj8_0\thorse\nDSZkEwhJEI4_0\tskateboard\nDSaSooZZeAg_2\tbus\nDSn5-dKW_P0_0\tperson\nDSoRmFNRxiE_0\tperson\nDSoRmFNRxiE_1\tperson\nDSqy2MlVOxE_0\tperson\nDSq0q8dCuCw_0\ttruck\nDS5z-K8Cpzs_0\tperson\nDS-V_NKOawo_0\tknife\nDTBhYAFcQ94_0\tskateboard\nDTFg8SeWhbE_3\tskateboard\nDTYiSIRTXW8_0\tknife\nDTZkCYvGZ9E_0\tperson\nDTm5L6IAHC4_0\tperson\nDTnIC_Q8YoY_1\tboat\nDTs2uXh47Xw_0\tperson\nDTtejx1VYBs_0\tperson\nDTvjWj60ixI_0\tperson\nDTvzQwX0KRQ_1\thorse\nDT4KxrhD89E_0\tperson\nDT7TSCbFXek_0\tperson\nDUAhVOWkluQ_0\tperson\nDUAhVOWkluQ_1\tperson\nDUBzIIKht_w_0\tperson\nDUBzIIKht_w_1\tperson\nDUB3OOi7dQc_0\tperson\nDUHEv94Tyno_0\tperson\nDUHEv94Tyno_1\tperson\nDUHEv94Tyno_2\tperson\nDUPQ3fPhomY_0\tperson\nDUQa7q5NTQI_1\thorse\nDUZhPq4FiJM_1\tperson\nDUb6-VQcokc_0\tcat\nDUlYPwiuBrw_0\ttruck\nDUlYPwiuBrw_1\ttruck\nDUmKu-rc7jI_0\tperson\nDUwVOy7IYvA_0\tperson\nDUxGnuYB_GI_0\tcow\nDU1ww3ryP7s_0\tperson\nDU4acd1_vuI_0\tperson\nDU8jvzO9tEA_0\tzebra\nDVFfZw4HW3E_0\ttrain\nDVFfZw4HW3E_1\ttrain\nDVK9BrG_Y_8_0\tperson\nDVOFKTeh9BY_0\tperson\nDVgCgSDZVw0_0\tperson\nDVjOMylPUfU_0\tperson\nDVlEnd5Ra2Y_0\tperson\nDVm_-u6oWwA_0\tcar\nDVqsCPYrMrg_0\tperson\nDVqsCPYrMrg_1\tperson\nDV4GPAloBks_1\tperson\nDV4GPAloBks_0\tperson\nDV79-MpnE1Y_0\tperson\nDWQ0kmCIT0E_0\tperson\nDWZNfCg0W8o_0\tperson\nDWjj9U_lr30_0\tperson\nDWoRZEAFpUI_0\tperson\nDWqyeu4eovM_0\thorse\nDWuaB5j6-CQ_0\tperson\nDWwGWBcxL0k_0\tperson\nDW1iqzQEWkE_0\tperson\nDW4OTTF7Jc4_0\tperson\nDW8G3A0trOk_9\tbear\nDXEqDJWN72E_0\tperson\nDXEqDJWN72E_1\tperson\nDXI2AmrILgw_1\tcat\nDXa15hEKLAc_0\ttruck\nDXgs-pfW-0M_0\ttrain\nDXpyVrXMs1w_0\tperson\nDX5AP4s6u0k_0\tbird\nDX867I2CNRk_0\tairplane\nDX-PbjeeB6o_1\tgiraffe\nDYJJBRoUlnU_0\tknife\nDYUiMLisOzs_0\tperson\nDYbb8_mMeLs_0\thorse\nDYhTdNMuv5g_0\tknife\nDYkV2TPfOBk_0\ttruck\nDYlrCUMDv_g_0\tcat\nDYpBOmbclGY_0\tperson\nDYqIQv97tuE_0\tperson\nDYvHdc4rnxk_2\tperson\nDYvHdc4rnxk_1\tperson\nDY0ggbU0cIk_0\tperson\nDY3h0Y3ijmo_0\telephant\nDY3h0Y3ijmo_2\telephant\nDY6eQdk8jaE_0\tperson\nDZESlirYB3I_1\ttrain\nDZGEjl9U78c_0\tperson\nDZIFKtO6y2Q_0\tperson\nDZIFKtO6y2Q_1\tperson\nDZMd9NPNnLE_0\t
person\nDZRZg1gGn1g_0\tbus\nDZWsGelqCPg_0\tperson\nDZXldsAgY7o_4\tskateboard\nDZYjfZMMVAE_1\tperson\nDZgbeXD-bZg_0\tbear\nDZqs7ie6HPU_0\tperson\nDZ3JlgmRHQ8_0\tperson\nDZ4G9EBImOM_1\tperson\nDaMdWu7CyRE_0\tperson\nDaRYBq6zsmY_2\telephant\nDagKzwyphZY_0\tperson\nDapmUIRDw3o_0\tairplane\nDaqVTidNtg0_1\tperson\nDatNYbTqxlw_0\tperson\nDaz5kZBXn5c_1\telephant\nDa10JheIcaw_0\tperson\nDa25bjhf1WQ_0\tperson\nDbAZPBnTh3U_1\tperson\nDbGX12xMbWM_0\tperson\nDbNOHXsDP5I_1\tboat\nDbSGsjNmQ8A_0\tcat\nDbXz_8anwSM_0\tperson\nDbZGV4ixs2E_0\tbird\nDbdZugU9GWk_0\tbus\nDbeCxvMCD-Q_0\tperson\nDbfJ2s7qQJ8_0\ttruck\nDbivV-It_rM_0\tperson\nDbmwr1_ObHM_0\tperson\nDbnhReILFSs_0\tperson\nDboUAm-F7Rg_0\tperson\nDbpte835xwc_0\tperson\nDbqj1XCvcGw_1\tcow\nDbrGY3BalZ0_0\tskateboard\nDbrGY3BalZ0_3\tskateboard\nDbrGY3BalZ0_2\tskateboard\nDbvkTKJjRj8_0\tperson\nDbwEevYFGrg_0\tperson\nDbzakdG34mg_0\tcar\nDbzakdG34mg_1\tcar\nDb3OG025sz0_0\tperson\nDb74WjMmf-0_0\tbear\nDb74WjMmf-0_1\tbear\nDcAxPsNVe28_0\ttrain\nDcFWetycnqY_0\tperson\nDcKjrocJ8iM_0\tperson\nDcKjrocJ8iM_1\tperson\nDcOl0Ec1kuI_0\tperson\nDca5CTtFQZ8_0\tmotorcycle\nDcexSE28IOA_2\tperson\nDcexSE28IOA_0\tperson\nDcexSE28IOA_1\tperson\nDcfs-bFQcxk_0\tperson\nDcj-1vKe6iI_0\telephant\nDckRd1CpSm0_0\tskateboard\nDckTHE_Pn5Q_0\tperson\nDcknQtmjIDA_0\telephant\nDclr-tDJMO8_0\tperson\nDcpuJSx5z78_0\tperson\nDcpuJSx5z78_1\tperson\nDc3yhv5mfN8_0\tperson\nDc4EXPP0fqU_0\tcat\nDc9dWfPxIEM_0\tbicycle\nDdGvFcujfxo_0\tperson\nDdHWfz7kw4I_0\tperson\nDdJuIi7LexI_0\tbus\nDdKvI-6rMII_1\tperson\nDdNpi-Pmvgc_0\tperson\nDdNpi-Pmvgc_1\tperson\nDdNpi-Pmvgc_2\tperson\nDdOk9lG9b1k_0\tknife\nDdUa-CozM14_0\tperson\nDdUa-CozM14_1\tperson\nDdYyeGgXLKw_0\tperson\nDddB5joJQC4_0\tairplane\nDddRHyvYqFI_0\tperson\nDddRHyvYqFI_1\tperson\nDdf4T9I0sdI_0\tperson\nDdz7VVJXgHs_0\tperson\nDd2qrXASEzk_1\tperson\nDd2qrXASEzk_0\tperson\nDeCtt_QZqjk_0\tperson\nDeCtt_QZqjk_2\tperson\nDeFuoRV0yCw_0\tperson\nDeFuoRV0yCw_1\tperson\nDeHiMvczAD4_0\tperson\nDeIpwOsUzjw_0\tperson\nDeVZ83g93sE_1\tbird\nDeViLrLvD1Y_0\thorse\nDefHSc2VTOo_0\tperson\nDfGzSVv2ELQ_4\thorse\nDfGzSVv2ELQ_1\thorse\nDfGzSVv2ELQ_3\thorse\nDfS7lvAcDQc_0\tumbrella\nDfS7lvAcDQc_12\tumbrella\nDfT_7BUGNQA_0\tperson\ncBI2gZhpA-8_0\tperson\ncBMnKBVcoOE_0\tperson\ncBMnKBVcoOE_1\tperson\ncBQJU95uwwM_0\tperson\ncBQJU95uwwM_1\tperson\ncBSbDKv-Z_o_0\tcar\ncBb6VPKgF1M_0\tknife\ncBeH0xcCCWE_1\tperson\ncBhDn0TkAdc_0\telephant\ncBhDn0TkAdc_2\telephant\ncBhDn0TkAdc_3\telephant\ncBhDn0TkAdc_1\telephant\ncBlqBEElvDI_0\tperson\ncBpFzTn_uOo_0\tperson\ncBvZAwlCN4M_1\thorse\ncBvZAwlCN4M_2\thorse\ncB1RhnpteUg_3\tairplane\ncB9XRu3bb_0_0\tperson\ncB_RQN9IXg8_2\tskateboard\ncCA7llOU4HQ_0\tperson\ncCEUd1IZ6OQ_0\tperson\ncCEUd1IZ6OQ_2\tperson\ncCMe4KdqzeI_0\tperson\ncCaz75u-bCM_0\tmotorcycle\ncCfInBOvqkk_0\tperson\ncCfVriTflG8_0\tperson\ncCnjh5F8dvM_2\tboat\ncCvpQCZ33xQ_0\ttrain\ncCwB7O-yg4Q_1\tairplane\ncCxZRIxh_yk_0\tcow\ncC2UgNbG7Rs_0\tcat\ncC3-bziiNKk_0\tcow\ncC3-bziiNKk_4\tcow\ncC4nZNGoC-g_1\thorse\ncC4nZNGoC-g_2\thorse\ncDGz5cnIzK0_0\ttrain\ncDIc8cs3igI_1\tperson\ncDL0YZ_vXOk_1\tperson\ncDaR5WdXvIo_0\tdog\ncDfSk2g6wRM_0\tdog\ncDg-vYWO3AI_0\tumbrella\ncDvCYN97QYU_0\tdog\ncDvWWER9oeI_0\tperson\ncD_EAISZcwM_0\tperson\ncD_zwwrcvkI_1\tperson\ncEAwCEnfITY_2\thorse\ncEFLP7rdZSU_0\tperson\ncEIAg54WPCs_0\tskateboard\ncEOHFcu3Uic_0\tperson\ncEOqnkbgfMQ_0\tperson\ncEXYVwmcpSg_0\tperson\ncEdeOfPvcQ0_0\tperson\ncEomNeUqQUI_0\tumbrella\ncErRs5qv8mc_0\telephant\ncEyCX-t8Jlo_0\tbird\ncEyCX-t8Jlo_1\tbird\ncEzC3hwdO_o_0\tperson\ncE7AS1hrlYA_1\tperson\ncE7AS1hrlYA_0\tperson\ncFBoLads7vA_0\
tperson\ncFHTt7uFxH4_4\tumbrella\ncFOk-AMS2Aw_0\tmotorcycle\ncFOk-AMS2Aw_1\tmotorcycle\ncFkmNa2nYEk_0\tperson\ncFoUf9UmoZ0_0\tperson\ncFq4fzO00qE_0\tcat\ncFtfKwaxphA_0\tperson\ncFuoJPf6prU_0\tskateboard\ncFzjl_SiNhg_2\tdog\ncFzjl_SiNhg_0\tdog\ncF0SM2Lf82s_0\tperson\ncF7uQwB8sEg_0\tperson\ncF9YklqKEp0_0\tcow\ncGBOBTCgzP8_3\thorse\ncGBOBTCgzP8_4\thorse\ncGCbcyeQqG8_0\tperson\ncGCbcyeQqG8_1\tperson\ncGC4pGWPOUk_0\tperson\ncGC732t-itM_0\tperson\ncGEvxRn1UtQ_0\tperson\ncGNmKg25XMs_0\tboat\ncGUXUioIa4o_0\tperson\ncGVaIIV18ug_0\tperson\ncGcyxMp1ZQc_0\tperson\ncGcyxMp1ZQc_1\tperson\ncGdeftwBWL4_0\tperson\ncGiVzhQI2a0_0\tperson\ncGpNQ9Vk-5E_0\tperson\ncGtaJVgvTJg_0\tperson\ncG1_sZqy7lU_0\tperson\ncG2fL1nRZmE_0\tperson\ncG5TxH-1Sf4_0\tperson\ncG65cBtyj20_0\tcow\ncG7BBtumZnQ_0\tdog\ncHCYX0EqsfE_0\tperson\ncHQLun1YTiM_1\tperson\ncHQLun1YTiM_0\tperson\ncHSjCxvPumA_0\tmotorcycle\ncHWE72lnzZo_0\tperson\ncHYcXW7HAkA_0\tperson\ncHaBQgTFdr4_2\tknife\ncHjKy80ojXM_2\tbear\ncHkm25QAG8A_0\ttruck\ncHnV0yZTha4_0\tcar\ncHpaD5PtHnM_0\tcow\ncHv3ulnF1fo_0\tperson\ncHyjhzLIeO0_0\tperson\ncH2A35uULdc_0\tperson\ncH2g9vV4SyM_0\tbird\ncH27awicc50_0\tperson\ncH8zYhvzdb8_0\tperson\ncICrfFzHoZs_0\tperson\ncIFXOWG5Dd0_1\tperson\ncIF9coXttVs_0\tperson\ncIIlWssV9Sk_0\tperson\ncIJSKwcTQ10_2\tbicycle\ncIJSKwcTQ10_3\tbicycle\ncIPlCULXXHQ_3\telephant\ncIPlCULXXHQ_2\telephant\ncISwax-t_78_0\tperson\ncIVGJQrNkT8_0\tperson\ncIV9T5ZQmdI_0\tperson\ncIh9baL5Hzw_1\tperson\ncIjMwiaApEc_0\tperson\ncIvqOdvwX6w_0\tperson\ncIwDGqmKrfY_0\tperson\ncJH4RK9aVR0_0\telephant\ncJJDfdbopiQ_0\tperson\ncJSjHpF7ILg_0\tairplane\ncJUj9q6wgis_0\tperson\ncJfW0Gfkzrg_0\tknife\ncJjaVdNaUko_0\tbus\ncJnihDxg0wg_1\tdog\ncJtGcHMJlMA_0\tperson\ncJ0hAba-pck_2\tgiraffe\ncJ0_u3Ta6kU_2\tskateboard\ncJ0_u3Ta6kU_0\tskateboard\ncJ2f7qDBm7M_0\thorse\ncJ41GQMsJIA_0\tdog\ncJ6BfbrgwDM_0\tperson\ncJ7Akre7-Sc_1\tcow\ncJ7ZHI-8gU0_0\tperson\ncKO8G1ZXQgo_0\tperson\ncKdank8BDik_0\tperson\ncKgqIdOoBmE_0\tperson\ncK4yj3jgWek_0\tperson\ncK5MabT7iIA_2\ttrain\ncK5MabT7iIA_0\ttrain\ncK5MabT7iIA_1\ttrain\ncK9R8KdVuIE_0\tperson\ncLKgng5yuC4_0\tperson\ncLKgng5yuC4_2\tperson\ncLKgng5yuC4_1\tperson\ncLPSEK3_jEE_2\thorse\ncLPSEK3_jEE_3\thorse\ncLPSTXefj2Y_0\tperson\ncLY_N1jEC8E_0\tperson\ncLg1pn5Oh1k_0\tperson\ncLlL2uHDyBw_0\tbird\ncLnQAhX42Eo_1\thorse\ncLnQAhX42Eo_0\thorse\ncLn0Kz_p2U0_0\ttrain\ncLrXQvFZ-y0_0\tknife\ncLvgs19Vm18_1\tperson\ncL2jFa-Zd_M_0\tperson\ncL4k6bdNmbs_0\tboat\ncL6G_y5LoDo_0\tmotorcycle\ncMGnmOyYWcM_1\tperson\ncMIyGPpW9Xw_0\tperson\ncMJhk7y1Nng_2\tbird\ncMJhk7y1Nng_0\tbird\ncMJhk7y1Nng_1\tbird\ncMOULCqujvs_0\tcat\ncMRhR707ZfA_11\tbear\ncMRhR707ZfA_13\tbear\ncMeXNjQUwe0_0\thorse\ncMg1O__kPFA_0\thorse\ncMwsAfZMG1c_0\tperson\ncMwt7xBZ9i4_1\tperson\ncM6-id-uhMg_0\tperson\ncM6-id-uhMg_1\tperson\ncNLuZxPpWho_9\telephant\ncNLuZxPpWho_14\telephant\ncNLuZxPpWho_1\telephant\ncNLuZxPpWho_4\telephant\ncNLuZxPpWho_8\telephant\ncNLuZxPpWho_11\telephant\ncNLuZxPpWho_13\telephant\ncNalYSGXOkM_0\tperson\ncNnMvF7oiUo_0\thorse\ncNr9rjOJ0ps_0\tperson\ncNxEreBWMRc_0\tperson\ncNxEreBWMRc_1\tperson\ncOD8xhwGfME_0\tperson\ncOD8xhwGfME_1\tperson\ncOYK17trE9k_0\tperson\ncOYK17trE9k_1\tperson\ncOZOzY6XDLU_0\tperson\ncOalncX8fwg_0\tairplane\ncOalncX8fwg_1\tairplane\ncOalncX8fwg_2\tairplane\ncOalncX8fwg_3\tairplane\ncOalncX8fwg_4\tairplane\ncOkVxYbnFRs_0\tperson\ncOkiG4LRtQU_1\ttruck\ncOp33oi4C8E_0\tskateboard\ncOzNmIBhiMY_0\tperson\ncO1F_0l1vSU_0\tperson\ncO1MbnbgUbU_0\tdog\ncO3WA2g_UeM_4\tbear\ncO3WA2g_UeM_2\tbear\ncO5xsG3ud_0_0\ttrain\ncO7nCAZ-uLk_0\tperson\ncPBvSHKPNvk_0\tperson\ncPdRddyxsVA_0\tc
ow\ncPdjr1zTQQ4_0\tperson\ncPeGSXSLepg_0\tperson\ncPkbg5bdpcE_1\tperson\ncPkbg5bdpcE_0\tperson\ncPn5c5t2g6w_3\tskateboard\ncPqAK1E1Ajo_1\tdog\ncPqAK1E1Ajo_0\tdog\ncPsXS3_4zOk_0\tbus\ncPu-riLrt1c_0\tperson\ncPu-riLrt1c_1\tperson\ncP-gl2IN_AI_1\tperson\ncP-gl2IN_AI_0\tperson\ncP_nenKIU4g_2\tbear\nDf70QgKA_Hc_0\tperson\nDf70QgKA_Hc_1\tperson\nDgSwJVCLkYM_0\tperson\nDgcSsQKaX7Q_0\tperson\nDgoFmJFWpUw_0\tbear\nDgtiaphLkMc_0\tperson\nDguiMPx8nn0_2\tperson\nDgvI1azs_0E_0\tairplane\nDgwM5b-eKvc_0\tperson\nDg2sU0bmBho_0\tperson\nDg8r8QlJw80_0\tperson\nDhAkswxLuAs_0\tperson\nDhJZwbql4dc_1\tperson\nDhLD44-KIUU_0\tperson\nDhYbvvwSsEA_1\tperson\nDhYbvvwSsEA_0\tperson\nDhd-0-xOF6I_0\tcow\nDhl-jIQaam0_3\tperson\nDhl-jIQaam0_0\tperson\nDhl-jIQaam0_1\tperson\nDh6APdqkNZ0_0\tperson\nDh_6tF8ndZs_0\tperson\nDiAj24Xsadk_0\tperson\nDiDELcBJWh4_0\tperson\nDiPjO5frbNc_0\tperson\nDiQ-VgXIDMo_0\tperson\nDiVX_-kQv0k_0\tperson\nDiVX_-kQv0k_1\tperson\nDiWi-oWT9EI_0\tboat\nDiXsD6VHEr4_0\tperson\nDiZ4OCT30AM_0\tperson\nDia6QIxORbM_4\tairplane\nDihnxPkojnQ_0\tgiraffe\nDihnxPkojnQ_1\tgiraffe\nDi41WoS7T1M_1\tbear\nDjAQs68BiwA_1\tgiraffe\nDjB4dpC4TVs_0\thorse\nDjD15NlLBYI_1\ttruck\nDjD15NlLBYI_0\ttruck\nDjK1R_LBqgM_0\tperson\nDjMnoAbMiIU_0\tperson\nDjMnoAbMiIU_1\tperson\nDjQF34GUthk_0\tperson\nDjS-0VOep0Y_2\tperson\nDjXtIIwfITI_0\tperson\nDjb2blFeoNM_0\tperson\nDjdAxUWgSdk_0\tknife\nDju4Bl2fx88_0\tbicycle\nDjyldIzPJbA_0\thorse\nDjy5UE0Ofa8_0\tperson\nDjy5UE0Ofa8_1\tperson\nDj7DVsCVqqY_0\tcow\nDj9npayKJqk_0\telephant\nDkAG7dFDk94_0\tperson\nDkC_iJTIrYc_1\tperson\nDkC_iJTIrYc_0\tperson\nDkF-LqA7wSk_0\tbus\nDkNY4yun6ek_0\tboat\nDkPYbKRQBE4_1\tmotorcycle\nDkTfU9q9U_I_0\tcat\nDkTqTY04y30_0\tperson\nDkTqTY04y30_1\tperson\nDkbRBY4ZlFY_0\tbicycle\nDkbRBY4ZlFY_5\tbicycle\nDkbRBY4ZlFY_6\tbicycle\nDkbikYoLycQ_0\tbus\nDkmab-wxSy4_0\tperson\nDkmab-wxSy4_1\tperson\nDknRMqifZFE_0\tskateboard\nDkpZP7RtrJM_1\tbus\nDkqy-okNDVM_0\tperson\nDkrkY6blx3U_1\tperson\nDkrkY6blx3U_0\tperson\nDk0wXCp-USs_0\tboat\nDk1QPiNji5I_0\tskateboard\nDk4V0c6Yzbs_1\tboat\nDk47lOWl3NM_2\tcat\nDlCMYyDhSVY_1\tperson\nDlCMYyDhSVY_0\tperson\nDlDFQ88ui2A_0\tperson\nDlDJpNWKuPM_0\tknife\nDlFJTfO-mc0_0\tcat\nDlG-VsdsPCk_0\tmotorcycle\nDlTE01-45gQ_0\tairplane\nDlX2Yvp20gY_0\tperson\nDldXGda7zfE_0\tperson\nDldXGda7zfE_1\tperson\nDlg5BFm20wI_0\tperson\nDlg5BFm20wI_1\tperson\nDl3fDWG23zU_0\tperson\nDmG9v9xVPbg_0\tperson\nDmIeMGzqZEc_0\tcow\nDmJ9x-DFdqA_0\tperson\nDmJ9x-DFdqA_1\tperson\nDmLGGv6YNEo_1\tbus\nDmL_6_a_54g_0\tbird\nDmNmgatXwU8_1\tknife\nDmSRZp63qTo_1\ttruck\nDme3Rfsqbz8_0\tperson\nDmiucPhqXMg_1\tbus\nDmiucPhqXMg_4\tbus\nDmlMgF-BuRo_0\tperson\nDmt8pgQG3M4_1\tskateboard\nDnLVGRyXAR4_0\tperson\nDnN9tjwPn-0_0\tperson\nDnR4VFNo44s_1\tairplane\nDndaJVRuOoo_0\tperson\nDniy3zze90s_0\tperson\nDniy3zze90s_1\tperson\nDnj_fhGXHC8_1\tbird\nDnkUzsPqjE8_1\tperson\nDnkUzsPqjE8_2\tperson\nDntJ297deXI_1\tperson\nDntJ297deXI_2\tperson\nDntJ297deXI_0\tperson\nDnx6TlTvRfI_0\tperson\nDn80jV69sbs_0\tperson\nDoEWhY2BkZo_0\tperson\nDoOq_FhWze0_0\tperson\nDoPKGr2HJwM_3\tbird\nDoRoLk97UqY_0\ttruck\nDobAdZVysXc_0\tcow\nDohloSZ6YdA_0\tperson\nDomgj6ptFOs_0\tbus\nDpH2eSmcTk4_0\tbus\nDpJA_qYLobk_11\tbicycle\nDpJA_qYLobk_0\tbicycle\nDpJA_qYLobk_2\tbicycle\nDpJA_qYLobk_5\tbicycle\nDpJA_qYLobk_6\tbicycle\nDpJWhFnF2Fo_0\tdog\nDpR63uhHTjo_1\thorse\nDpWw1SaCdTQ_0\tperson\nDpbGsvglx7Q_0\telephant\nDpbGsvglx7Q_1\telephant\nDpimIW1T2Sw_0\tperson\nDpp32dLn0hQ_0\tperson\nDpvuhymOiUM_0\tperson\nDpwjQ_KcYAc_0\tperson\nDpxoJ_GWJA4_0\tgiraffe\nDpxoJ_GWJA4_3\tgiraffe\nDpxoJ_GWJA4_4\tgiraffe\nDpxoJ_GWJA4_1\tgira
ffe\nDpz-s6E9VWg_0\tperson\nDp2pGcutqDQ_0\tperson\nDp2pGcutqDQ_1\tperson\nDp4XaG6247k_0\tperson\nDp5KRKUJBGE_0\tcow\nDp6qJvgV4fQ_0\tperson\nDp71z8eyq7o_0\tbus\nDqBNoutsr4M_0\tperson\nDqBNoutsr4M_1\tperson\nDqDElT9H4Tg_0\tboat\nDqESUtRuhPw_0\tdog\nDqVUeH6XI2Q_0\tperson\nDqegnRXQd5Q_0\tairplane\nDqi5KTmt04s_0\tbus\nDqy6NbRkVPE_2\tskateboard\nDrAnw0S9Pmc_0\tperson\nDrCKp4YB7rI_0\tperson\nDrE7aW7O0eQ_0\tperson\nDrFxlXYC6-o_0\tperson\nDrGCtlmxxVc_0\tperson\nDrPpkd-UxFY_0\tcat\nDrc0Grdb_LU_0\tcat\nDrgjySu3e-c_0\tmotorcycle\nDr9XXUA4UKc_0\tperson\nDr9XXUA4UKc_1\tperson\nDr--We7lD3I_0\tperson\nDsA5QOOIZJw_0\tperson\nDsP87b0IuoU_0\tperson\nDsZ6Cf42EdQ_0\tperson\nDsiAcCUi8iE_2\tbear\nDsm48Msjw6k_0\tbird\nDsxyH6AKBd0_0\ttruck\nDs0GIUe1AFo_2\tperson\nDs0GIUe1AFo_0\tperson\nDs0GIUe1AFo_1\tperson\nDs3E7n1kRQk_0\ttrain\nDs44yYfSEr8_0\tbird\nDs8xwquSVkw_0\tskateboard\nDtKSEQhjq2I_1\tcat\nDtQGDwZ1PIU_0\ttruck\nDtQGDwZ1PIU_2\ttruck\nDtSpyLMbD9o_1\tmotorcycle\nDtU93_s53sI_0\ttrain\nDtc3hZBmn9Q_0\tperson\nDteEg93cINc_0\tperson\nDtf2WRyd4OA_0\tairplane\nDtgUpKmdw_g_0\tperson\nDtuRiD_E6HU_0\tperson\nDtyatJX8J1A_0\tbicycle\nDt1MDqN3TCs_1\telephant\nDt1PLFoRvoM_7\tairplane\nDt1PLFoRvoM_0\tairplane\ncQAr7IVeBrU_0\tperson\ncQC7jBc1pC0_0\tperson\ncQIviFGN-_M_0\ttrain\ncQOFvBNN9to_0\tairplane\ncQOFvBNN9to_1\tairplane\ncQPP6SqX-uk_0\ttruck\ncQbqByuUnW8_1\tcar\ncQgUGmyvkJ8_0\ttrain\ncQttS-GIM5c_0\tperson\ncQttS-GIM5c_1\tperson\ncQw1wXvFnLM_0\tperson\ncQ29m5z8Cnk_1\tcow\ncQ4aR8OLr74_0\tmotorcycle\ncRGrqg7y9tE_0\tboat\ncRVqyVvxjHI_0\ttrain\ncRczdkzrJ-w_0\tcat\ncRnDFinbH-s_0\tbird\ncRrjU515FKg_0\tperson\ncRvAv1Nn-WQ_0\tcat\ncR6qM7wjtDw_0\tknife\ncSDafQMsYwc_0\tcat\ncSJ2ISog6Pw_0\tbird\ncSJ2ISog6Pw_1\tbird\ncSLerMX3IBg_0\tperson\ncSNwXF8OcR8_0\tcow\ncSO-70KCypM_0\tskateboard\ncSVIvCYuDtU_0\tcow\ncSdBaGsGWKk_4\tbird\ncSdBaGsGWKk_9\tbird\ncSdBaGsGWKk_1\tbird\ncSdBaGsGWKk_3\tbird\ncSdBaGsGWKk_6\tbird\ncSdBaGsGWKk_7\tbird\ncSdUwiTGXPc_2\tmotorcycle\ncSor-u6VHHw_1\tdog\ncSqMDH0-sDs_2\tperson\ncS398dAyQ9k_0\tcow\ncS-QgqiUgLQ_0\tperson\ncS-QgqiUgLQ_1\tperson\ncTGOQnmi7bo_0\tperson\ncTLa1dxk76g_0\tperson\ncTUTNgp9rZ4_0\tperson\ncTUTNgp9rZ4_1\tperson\ncTayBCWq6xo_0\tperson\ncTiETDBrGv4_0\tskateboard\ncTiETDBrGv4_1\tskateboard\ncTk8pacLUcc_0\tbus\ncTmv-vp89sY_0\telephant\ncTmv-vp89sY_1\telephant\ncTsipIh7xF8_0\tcow\ncTvxGA-EvvY_1\tperson\ncTzz_ZCUpxc_0\tperson\ncT4Y0HSeBgg_0\telephant\ncT5UlPnc5MQ_0\tperson\ncT5UlPnc5MQ_1\tperson\ncT7LjXG7ByI_0\tairplane\ncT7LjXG7ByI_1\tairplane\ncT7LjXG7ByI_2\tairplane\ncT7kZP5B_2s_0\tbus\ncT_US5II64I_0\tperson\ncUEWtKzcAsM_2\tairplane\ncUEWtKzcAsM_1\tairplane\ncUM5ajI3KJg_3\thorse\ncUNExkBml18_0\tperson\ncUSRVmcbXxI_0\tperson\ncUS9QgCXcPo_0\tperson\ncUWmN_HuZiA_0\tperson\ncUYlfMGqB_8_0\tdog\ncU7JEUo5qdM_1\tperson\ncU7sT9UHs7s_0\tperson\ncVCqOzgt2vI_2\ttrain\ncVCqOzgt2vI_0\ttrain\ncVM2h5qbyUw_0\telephant\ncVXIaONp5o8_2\tperson\ncVYqiMXSh9g_1\tperson\ncVbcrOx7768_0\tperson\ncVfH0tFh5Kc_0\tperson\ncVfWBtl-qK4_0\ttruck\ncVq5VnfZtNw_0\tperson\ncVr16pInr5k_0\tperson\ncVsZMfMaxSM_0\tperson\ncVtyGQKWFcI_0\tmotorcycle\ncV0a2ScBxpE_0\tperson\ncV0a2ScBxpE_1\tperson\ncV1mBGRlLe8_0\tbird\ncV1szYodba0_0\tmotorcycle\ncV8BGLBROa8_0\tperson\ncWBCCAo3pUM_0\tbird\ncWBTkrImlLQ_0\ttrain\ncWBTkrImlLQ_1\ttrain\ncWGCbw5I6cI_0\tskateboard\ncWIDcoPB3Rg_0\tperson\ncWKf_KANUSM_0\tperson\ncWRO27zzxF4_0\tperson\ncWaVXNQ5cvg_0\tperson\ncWb-i8hj8uc_0\tperson\ncWcJrAQuNA4_0\tbird\ncWtIT6V98zc_1\tperson\ncWxELKsh43s_0\tperson\ncW2hQE3lS9k_1\tperson\ncW4fmuV2JuU_0\tskateboard\ncW7OrsSn-m8_0\tperson\ncXP1Lit5Pmk_0\tperson\ncXS9VytLIj
M_0\tcat\ncXT5_AFSI8Q_0\tperson\ncXUdqfIp-Hs_1\tperson\ncXUdqfIp-Hs_2\tperson\ncXWgDE6boPQ_0\tperson\ncXZt2UZe6QQ_0\tmotorcycle\ncXaAcHkHUzU_0\tperson\ncXsRP67GHA0_0\tperson\ncXsRP67GHA0_1\tperson\ncX0yQ5KIAKw_0\tperson\ncX3mnglolLE_2\telephant\ncX3mnglolLE_3\telephant\ncX6lyv1DI80_1\tairplane\ncX-s4BNxb0c_0\tperson\ncYHq8xoYMO4_1\tbus\ncYVLbgGxJMM_1\tperson\ncYnyDXx580I_0\tperson\ncYpas0B5zEo_0\tcow\ncYvyTVEqiEU_0\tgiraffe\ncYwkpA75A8Y_0\tperson\ncY1cmlwRnaE_2\tbicycle\ncY1cmlwRnaE_1\tbicycle\ncY6HDOEiINs_0\tskateboard\ncY_INarfLQ4_0\tperson\ncZA_Yoq3vy8_0\tperson\ncZB5MQY5kVA_0\tskateboard\ncZDoXwn5lv8_1\tperson\ncZPvtKaqRxc_0\tperson\ncZU2LAWtwUM_0\tknife\ncZZT6OJ6xGk_0\thorse\ncZZT6OJ6xGk_1\thorse\ncZe888DWA8M_0\tperson\ncZgt8s4mARc_1\tperson\ncZugy4cYVng_0\tcat\ncZz6eOuSV9Y_0\tperson\ncZ155yARalk_0\tperson\ncZ155yARalk_1\tperson\ncZ7siEIFHlI_0\tcow\ncaAnHYU-Gwk_0\thorse\ncaGQ2b4L930_0\tperson\ncaGzwv3HLKU_0\tskateboard\ncaLKu0yKW0Y_0\tdog\ncacCjMLNpIg_2\tbird\ncarYHHE3y3A_3\tknife\ncavT34ZvciI_0\telephant\nca4_gKs6MN0_0\tbear\nca8aNafTzeY_0\tperson\nca_weHSJH80_1\ttrain\ncbRztq6KZn0_0\thorse\ncbVll1hxlDA_1\tperson\ncbVll1hxlDA_0\tperson\ncbvbRxOMJ-A_0\ttruck\ncb6YFX4CVqc_2\tairplane\nccIWh5JBil8_2\tbear\nccIWh5JBil8_0\tbear\nccQ7JnYrTL8_0\tbird\nccQ7JnYrTL8_1\tbird\nccRdzj5Zi-U_0\tperson\nccR-h9z3bRI_1\tknife\nccR-h9z3bRI_2\tknife\nccVJXErLdOo_0\tdog\nccWTUq_mvsU_0\telephant\nccWTUq_mvsU_1\telephant\nccaCWXJ0jKY_0\tperson\nccaYdn2p4Uk_6\tknife\nccaYdn2p4Uk_10\tknife\nccfTQmE0zsA_0\tperson\nccfTQmE0zsA_1\tperson\nccwFXG9D98w_0\tperson\ncc0S9924O-s_0\tskateboard\ncc76qcSHNMM_1\tdog\ncc76qcSHNMM_0\tdog\ncdBO6xYUmzE_0\tperson\ncdBO6xYUmzE_1\tperson\ncdKEh34fsYk_0\tperson\ncdNWg2zU6bY_0\tperson\ncdOQ7lTQJBw_1\tcow\ncdOQ7lTQJBw_2\tcow\ncdSG1fcxNAA_0\tperson\ncdS-7_Egk88_0\tperson\ncdW8PgwFm6o_0\tmotorcycle\ncdZqtqh5PwE_1\tperson\ncdZqtqh5PwE_0\tperson\ncdZ1ODMJYKM_0\tbird\ncdbmvoa89QU_3\ttrain\ncdbmvoa89QU_4\ttrain\ncdbmvoa89QU_5\ttrain\ncdf-C-P2bW0_0\telephant\ncdkSgKIMQEM_0\ttruck\ncdkSgKIMQEM_1\ttruck\ncdoGDD6m8Og_3\tperson\ncdpYTik8eL4_0\tperson\ncdruQqCvfrI_0\ttruck\ncdxkCeoDX6Y_1\tperson\ncd80Ii4FB1Q_0\tbird\nceH46gqMWak_0\tperson\nceIoRNo5FBk_0\tperson\nceIoRNo5FBk_1\tperson\nceLI06w8-Yo_0\tperson\nceVkcz1wysc_2\tdog\nDt5UnNOUlZA_0\tmotorcycle\nDuMGrFowOWE_0\tairplane\nDuUmKpZym5U_4\tboat\nDuV6ahfZ_yw_5\tknife\nDupWsV-iiys_0\tknife\nDur1W4FemFs_0\tperson\nDu7sKt25RiA_1\tknife\nDu8hVxuK10c_1\tairplane\nDu8hVxuK10c_2\tairplane\nDu8hVxuK10c_3\tairplane\nDu8hVxuK10c_4\tairplane\nDu9r_1zpPkA_0\tperson\nDvEWbWxGJvQ_0\tbus\nDvEykMsNibg_2\tbicycle\nDvIS9FV5pag_0\tperson\nDvIS9FV5pag_1\tperson\nDvKLYYQzmas_0\tperson\nDvNTMqUwwWo_0\tperson\nDvR9Ctfk8lg_0\tperson\nDvWCGbG9LT4_0\tcar\nDvWDBQ9eMNQ_0\telephant\nDvWDBQ9eMNQ_2\telephant\nDvuQOS7UVI0_2\telephant\nDv1e0Y8A8yg_0\tcow\nDv4azGPr4YI_0\ttruck\nDv7eGdF004Y_1\tperson\nDv7eGdF004Y_0\tperson\nDwJntGNV4Gw_0\tperson\nDwWzbtiIs7k_0\tskateboard\nDwhCZK1eUPw_0\tperson\nDwi-kq9Gcsw_0\tzebra\nDwi-kq9Gcsw_1\tzebra\nDwlOBOv0IC8_1\tbicycle\nDwlOBOv0IC8_0\tbicycle\nDwvclcpHQNY_0\thorse\nDwzuhLu_Jew_0\tbicycle\nDw2QHLXWmos_0\ttruck\nDw7BXQFtH60_0\tperson\nDw8lXatl4wE_2\tperson\nDw8lXatl4wE_0\tperson\nDw8lXatl4wE_1\tperson\nDxAMNpw-4qg_0\tperson\nDxB962sZJ_c_0\tairplane\nDxB962sZJ_c_1\tairplane\nDxB962sZJ_c_2\tairplane\nDxFjGsjegtk_0\tperson\nDxHhkA1fVdA_0\tperson\nDxPOOsSCJpc_0\tcat\nDxU9ZTI7KzY_0\tbird\nDxXEapsjhOg_0\tcow\nDxYW3ZMCXUw_0\tperson\nDxegJbsalCo_0\tperson\nDxegJbsalCo_1\tperson\nDxl8-fknJjM_0\tbird\nDxl8-fknJjM_1\tbird\nDxmdjAoDhkE_4\tknife\nD
xpMePWSgjs_0\tperson\nDxsdKCCUvCY_0\tperson\nDxw3Y-UB0jk_0\tairplane\nDx0fgXYBRV0_0\tknife\nDx4a9ZiekrQ_0\telephant\nDx4a9ZiekrQ_1\telephant\nDx5VMmCltKo_0\tperson\nDx8eIjF--eo_0\tperson\nDx8eIjF--eo_2\tperson\nDx8eIjF--eo_1\tperson\nDyFNZgEaw24_1\tbird\nDyZHVNsbZeE_0\tperson\nDyceiTbkpMw_0\tbicycle\nDyd1Aj3RO3I_0\tcat\nDyfyfDI4jqk_0\tperson\nDytAOZD9DLU_1\tperson\nDy1-ch56AMc_0\tboat\nDy5kD11Wnbk_0\tperson\nDy5kD11Wnbk_1\tperson\nDzAi_cumPY4_0\tperson\nDzCPCgkI8XA_0\tmotorcycle\nDzCPCgkI8XA_1\tmotorcycle\nDzFhvnd07Ck_0\ttrain\nDzKdERTAA8U_0\tcat\nDzMXxF7XRaI_0\tperson\nDzW2oC31Gcs_1\tperson\nDzXDPH8p-6Y_0\tmotorcycle\nDziXgWdCrvY_3\thorse\nDzkCtRPiI-Q_0\tcat\nDzlPtZXxtpU_6\telephant\nDzlPtZXxtpU_4\telephant\nDzlfBATujA8_1\thorse\nDzp0BrJSMBU_0\tperson\nDz0d79BMerc_0\tmotorcycle\nDz34hVhjpzA_0\tperson\nDz7kWPDxgbg_1\tbicycle\nDz73CrM7pH8_0\tperson\nDz8_y0iOjLM_0\tskateboard\nD0DtV2eD7cs_0\tknife\nD0HGjOZ5XWU_1\telephant\nD0O-T4E2DVo_0\tcat\nD0R59ANL6o4_0\tperson\nD0TQLmGtPm4_0\tairplane\nD0TTR7qCVXQ_0\tperson\nD0WAC7ByU0M_0\tperson\nD0Yx5cLcrqk_0\tskateboard\nD0mf15dFGhk_0\tperson\nD0pcdPd6hwY_0\tdog\nD0qo2f2Cshk_0\tperson\nD0xc1K3BQnQ_1\tbicycle\nD0zhUpZhZi4_1\tairplane\nD04tMZ7n3YM_0\tskateboard\nD09x5ezi5hU_0\telephant\nD0-sW80X3kI_1\telephant\nD1Ct81qiyT4_0\ttruck\nD1Ct81qiyT4_1\ttruck\nD1DYQay-d_E_0\tcat\nD1IQfkEa2-8_0\ttruck\nD1KUzeiWmUE_1\tcow\nD1XPuPzMvv4_1\tbus\nD1cTj9Fy4yE_0\tdog\nD1dWoFMnKhc_0\tperson\nD1f92BE9HmI_0\tperson\nD1ktXwG0_jM_0\tperson\nD1plKiNFzvI_0\tcat\nD1tZzoBOWfA_0\tperson\nD1yVIEgFGrY_1\tairplane\nD10WSuM8eqU_0\tperson\nD19A7AUqZJ0_0\tperson\nD2CXHzxp1TU_0\tcow\nD2Iqqb3RP6c_0\tperson\nD2Iqqb3RP6c_4\tperson\nD2Iqqb3RP6c_2\tperson\nD2Iqqb3RP6c_3\tperson\nD2KcVzav3YU_2\tairplane\nD2KoBI6R7W8_0\ttrain\nD2Qw63hsi1E_3\tbear\nD2RT-qUSw_U_0\tdog\nD2RZP8Y6VT8_0\tdog\nD2Ri5Wy9XPQ_0\tperson\nD2RkdlTKlsE_0\tperson\nD2VABHjSM6E_0\tbus\nD2VABHjSM6E_2\tbus\nD2co1ZGkwCs_0\tskateboard\nD2rbERtPxNM_0\tperson\nD2t36StaDcc_0\telephant\nD2t36StaDcc_1\telephant\nD2wSgbAelUc_0\tcat\nD2yQaYJDNvs_2\tbicycle\nD2yQaYJDNvs_0\tbicycle\nD24GJS9nKC0_0\tperson\nD3EIh6pBTdQ_0\ttrain\nD3F3xWCoWD8_0\tperson\nD3IDGSQSrFY_3\tgiraffe\nD3IDGSQSrFY_4\telephant\nD3IDGSQSrFY_5\telephant\nD3IDGSQSrFY_7\telephant\nD3IDGSQSrFY_8\telephant\nD3OvvA5jYlM_2\tbird\nD3OxudXglSM_1\tcow\nD3XqhAXefSA_0\tperson\nD3Zg90Ib5GI_0\tcat\nD3b-w5J-wR0_0\tperson\nD3tuGaFbdbE_0\tperson\nD36Pwfuad5E_0\thorse\nD4CWBceBJEk_0\tperson\nD4OMvYw25w0_0\tbus\nD4aL-0UevEY_0\tperson\nD4do8kCWydY_0\tperson\nD4do8kCWydY_1\tperson\nD4goZXgzVC8_0\tperson\nD4oLradsvXE_0\tperson\nD4qq5Olmh24_0\tperson\nD410FuTGoPI_0\tbicycle\nD4_2g_M4CXM_1\tperson\nD5GNIcodIw0_0\tbird\nD5KLVLNs7-0_0\ttrain\nD5KWKhPhqWE_0\tdog\nD5OtHFsiXiI_0\tperson\nD5UGpkiG-CQ_0\tperson\nD5hYrAC2iIg_0\tperson\nD5jUPc4nQO0_0\tperson\nD5kSwHOWPBU_1\tbird\nD5kSwHOWPBU_0\tbird\nD5n4B-O8y8g_0\tperson\nD5tLtHWe0Jk_0\tperson\nD5uTmoMYXDE_0\tcow\nD5x402SaAk8_0\ttruck\nD537kaRoYEk_0\tperson\nD552mK5tfLU_0\tdog\nD59Eb3u0iPs_2\tperson\nD59Eb3u0iPs_0\tperson\nD6EDJA1bO3s_0\tzebra\nD6G1X8WFAA8_0\tperson\nD6LDq6Q1Aic_0\tperson\nD6NzaXWZGEA_1\tperson\nD6UsriFwkjQ_0\tperson\nD6XIhwBoaik_0\tperson\nD6XUUDKA1CA_0\tperson\nD6d20KAVyzk_0\tperson\nD6f2wdAt_Ug_0\tperson\nD6kIRV5rEPk_0\tperson\nD6qXaD6WnVQ_0\tbicycle\nD6zUwxeZ1zU_0\tperson\nD7c2tRlXz5k_0\tskateboard\nD7dAkMkQf4I_5\telephant\nD7kHPyS4Gw0_0\tperson\nD7r_HLTwhWY_0\tperson\nD71B5jrYOig_2\telephant\nD77yNiFrtmw_0\tperson\nD78FDAi2log_0\tskateboard\nD7_S2hp6aKI_1\tairplane\nD7_S2hp6aKI_0\tairplane\nD7_tUVFGy2o_0\tperson\nD7_zjfakeYM_0\td
og\nD7_zjfakeYM_3\tdog\nD7_zjfakeYM_4\tdog\nD8GQWYiVK1U_0\tdog\nceczRgI6HDM_0\tboat\ncev1umQFsVA_2\tperson\ncev1umQFsVA_1\tperson\nce8j1r_CDH8_0\tdog\ncfD9yGF5XmY_0\tcar\ncfFAjaziwn4_0\tperson\ncfWqngaDvvg_0\tperson\ncfWqngaDvvg_1\tperson\ncfex3QJFkTY_0\tdog\ncfex3QJFkTY_1\tdog\ncfpiw6KGB70_0\tdog\ncfyY4mfwN7A_0\tairplane\ncf0a6xp7r9s_0\tbus\ncf3VOLwZdKY_0\tdog\ncf6daxmvx6M_1\tperson\ncf6kCO9JdOM_1\tperson\ncf6kCO9JdOM_0\tperson\ncgAiH_9c5DU_1\tbird\ncgD7Gr2Y-c8_0\tperson\ncgQ_34JYUkU_0\tcar\ncgT26vQK-4A_0\tperson\ncgZo7nUeCNE_0\tbus\ncgjjdvXBsFI_0\tperson\ncgj_bzL4vsQ_0\tskateboard\ncgmkRlhxVQ8_0\tperson\ncgmkRlhxVQ8_2\tperson\ncgmkRlhxVQ8_1\tperson\ncgxIrs3ySiA_0\tskateboard\ncgyRQ1a79c0_0\ttrain\ncgyRQ1a79c0_1\ttrain\ncgzHPxfb-R4_2\tperson\ncg4GIYiUNiI_0\tperson\ncg9Y2DTUiDQ_0\tcow\nchc30sNO6KA_0\tperson\nchl-Wa4_hic_0\tperson\nchrXgx4NWck_0\tperson\nchrXgx4NWck_1\tperson\nchwYzLEqKp4_0\tperson\nchyVy1kdL5M_0\tperson\nch_yUR9RHIM_0\tdog\nciEhviIYSFY_0\tbicycle\nciFKNPdVskg_0\tairplane\nciUZ2LoiaCs_0\tperson\nciZNBF9RdaA_1\tknife\nciZNBF9RdaA_0\tknife\nciZNBF9RdaA_2\tknife\nciZNBF9RdaA_4\tknife\ncifpYBLq6dM_0\tperson\ncit4hdvCIp0_0\tmotorcycle\nci83tdO3GuM_0\thorse\ncjAhjjWOj24_1\tcat\ncjL-hMHdmN8_0\tperson\ncjdImYwFXEI_0\tperson\ncjlPNeNKoSo_0\tcar\ncjmps6UKu_Y_0\tperson\ncjtjQu1YoTc_0\tperson\ncjuRQJf1_qs_0\thorse\ncjvMLM_Uzbw_0\tperson\ncjye6t7P2XY_0\tperson\nckIaNsLDst8_0\tperson\nckJHbJCefVc_0\tbear\nckY7Izfnggc_0\tperson\nckfgZsmJEbs_1\telephant\nckyL1lkCzU8_0\tperson\nckzaUAcrtY4_0\tperson\nck6hJJVJfvQ_1\tperson\nck6hJJVJfvQ_2\tperson\nck6hJJVJfvQ_0\tperson\nclCQhmV8nf8_0\tperson\nclL4lyl6J7I_0\tperson\nclO2SRgOzAk_0\tperson\nclQ98CON1pE_0\tperson\nclUGOwaYaPg_0\tcat\nclaqhrkmhPg_0\tperson\nclmsmTFOSLo_1\tdog\ncl410aCQA8k_0\ttrain\ncl6C5KiOEHQ_0\ttrain\ncmAN1SqRkDM_0\tperson\ncmGz-63gi5Q_0\ttrain\ncmHjbUBM4q8_0\telephant\ncmKnHqPGlTw_0\tperson\ncmV1BLuEvpU_0\tcow\ncmeGuaSUg34_1\tcar\ncmqxX05lPiI_0\tperson\ncmtruoCpSec_0\tperson\ncmwRk4-z_BQ_0\tperson\ncmwzhxa6Kd8_0\tboat\ncm7Xd_WXZAs_0\tperson\ncnAC9g_fYUY_0\ttrain\ncnAC9g_fYUY_6\ttrain\ncnAC9g_fYUY_1\ttrain\ncnAC9g_fYUY_3\ttrain\ncnAC9g_fYUY_7\ttrain\ncnAC9g_fYUY_8\ttrain\ncnAC9g_fYUY_9\ttrain\ncnJKH5dTKyI_0\tskateboard\ncne8MAKWcjo_2\tperson\ncne8MAKWcjo_1\tperson\ncnoIwn3cQ7Q_0\tbird\ncnplEeb8Iuk_0\tmotorcycle\ncnp30cLXzq8_0\tskateboard\ncnrSdMSCW6w_0\ttruck\ncnrSdMSCW6w_1\ttruck\ncnrSdMSCW6w_3\ttruck\ncnryAbqs0sM_0\thorse\ncnryAbqs0sM_2\thorse\ncnt7MyeNlHA_0\tperson\ncnvzLGyGalU_0\tcow\ncoBLne1vSV0_0\tperson\ncoDrWV3qbQE_1\tcar\ncoIhjdND3yY_0\tperson\ncoVT-MPjIsc_1\tcat\ncobC6BjJahk_0\tperson\ncodE_-LtIRY_0\tboat\ncofwfK4F5ac_0\tperson\ncohdkT2S_oA_0\tskateboard\ncoh6clK_Q6A_0\tperson\ncomEv_WJ4Uc_0\tperson\ncousEghehEo_1\tperson\ncousEghehEo_0\tperson\nco17Vvf3bag_0\tknife\nco17rRdOvwc_1\tmotorcycle\nco5rBTsE2i0_0\tknife\nco7SR4bgOM4_0\tknife\nco9DJtEU4eg_0\tperson\ncpEYJnyJ9XM_0\ttrain\ncpLmgivniko_3\tknife\ncpLmgivniko_2\tknife\ncpO5pHTOelo_0\tcow\ncpQ9HawKR-Q_0\tairplane\ncpQ9HawKR-Q_1\tairplane\ncpUTjBksgdA_0\tperson\ncpmMEngbDHE_3\tperson\ncpmMEngbDHE_0\tperson\ncpmMEngbDHE_1\tperson\ncpnZFfnjGYs_0\tcar\ncpre_wIt0hs_0\ttrain\ncpre_wIt0hs_1\ttrain\ncptcOzotQ0E_0\tperson\ncpuYK9y7zu8_1\tboat\ncpxkLEREnwo_0\tcow\ncp4ttild7EA_0\ttrain\ncqEdqz5F7tg_0\tcat\ncqOLpxxqIBw_1\tperson\ncqOLpxxqIBw_2\tperson\ncqOclzkqkVg_0\tperson\ncqO2VRSBGGg_0\tbus\ncqRNPM3jgNs_0\tcow\ncqS_ZvZF4Kk_0\tperson\ncqS_ZvZF4Kk_1\tperson\ncqez5FuSf44_0\tperson\ncqf4Vh7Vy9M_0\tperson\ncqkZZqtr3z8_0\tperson\ncqkZZqtr3z8_1\tperson\ncq3TwUTSBFA_0\thorse\ncq84vJ
oKj0A_0\tperson\ncrXlnYSuCuw_0\tperson\ncrgSyPjbLBw_0\tperson\ncrh-ncEjMd8_0\tumbrella\ncriMO4N0K5E_0\tperson\ncrmw_2KCRlY_1\thorse\ncrmw_2KCRlY_0\thorse\ncruWABLWvD0_0\tperson\ncrzo7x07GTs_1\telephant\ncr02TlSWnkI_6\telephant\ncr5ddm3njdQ_1\tbird\ncsGJS_sNJx4_0\tperson\ncsKSGFZyk04_0\thorse\ncsTChnltOdg_0\tcow\ncsiWQna-zcg_0\tskateboard\ncsl1NFlhS0I_0\tperson\ncswk8vZ6th8_0\tperson\ncs16RhEpmu4_1\tperson\ncs16RhEpmu4_2\tperson\ncs3PfcpDro8_0\tcow\ncs_yLDexfXk_0\tperson\nctAtCH6V1Dw_1\tperson\nctAtCH6V1Dw_0\tperson\nctCQsTBheHg_1\tperson\nctJATSvGLTo_0\telephant\nctJATSvGLTo_4\telephant\nctJATSvGLTo_1\telephant\nctJATSvGLTo_2\telephant\nctK8CQu6Nvg_2\tboat\nctLUri8cnqU_0\tbear\nctNE8tj4Z18_0\ttruck\nctOTsI_RZps_1\tperson\nctOTsI_RZps_0\tperson\nctPfu5shFA0_0\tperson\nctPfu5shFA0_1\tperson\nctRpeLVhC50_0\tbicycle\nctWUEkluOFo_0\ttruck\nctWrHmTAoxw_4\tdog\nct24BXc-tWg_0\tperson\nct8_KhvMuHo_0\tmotorcycle\nct_TbfWVBQc_0\tperson\nct_TbfWVBQc_1\tperson\nct_TbfWVBQc_2\tperson\nct_TbfWVBQc_3\tperson\nct_vznHYblc_0\tairplane\ncuHFcWEuUNo_0\tskateboard\ncuQ5swAtzfk_0\tperson\ncuRuiFR7bNY_0\tperson\ncuU3htRHPgM_0\tperson\ncuWjLEIrs8k_5\tbus\nD8btdwmdRNU_0\tknife\nD8sBFUu104g_1\tknife\nD8urBZQXl6o_0\tperson\nD8wVRKGVcLw_0\tdog\nD804JptI7_4_0\tmotorcycle\nD8-J5NgmOQg_0\tperson\nD9J-SuKzTU4_0\tbicycle\nD9RlyV_QhoQ_0\tbear\nD9WsxKDzM80_1\thorse\nD9WsxKDzM80_3\thorse\nD9WsxKDzM80_5\thorse\nD9XDsr6tkug_0\tdog\nD9XDsr6tkug_1\tdog\nD9XwHuLUv_E_0\tcar\nD9ixoNe1mQ8_0\tperson\nD94_XdBnfjQ_0\thorse\nD97nupvam-4_0\tperson\nD97wkVsbfJk_0\tperson\nD97wkVsbfJk_1\tperson\nD98TSSeEEXc_0\tperson\nD9-PVz9eRtA_0\tperson\nD9-PVz9eRtA_1\tperson\nD-DNyYPMTvE_0\tcar\nD-EA0oKq0qI_0\tcat\nD-UToJ9lT9w_0\tperson\nD-YgpB48Efg_0\tperson\nD-YtknfK7cQ_0\tperson\nD-a0sdpLGlI_0\tumbrella\nD-gTVzHdFAE_0\tbus\nD-gxEOUdm98_0\tperson\nD-jl7sUktcE_1\tperson\nD-pfJT6Nyfo_0\tperson\nD-pfJT6Nyfo_1\tperson\nD-u2wEUntuI_0\tperson\nD--GMbo7meg_0\tperson\nD_FozyNGP_g_0\tperson\nD_OvU_wvmsg_0\tskateboard\nD_QDxlwnenM_0\tbird\nD_TbGwH_U4I_0\tperson\nD_XHitiDPXI_0\tperson\nD_XwOiOHuZU_1\tperson\nD_XwOiOHuZU_0\tperson\nD_g7kf5F2CE_0\tmotorcycle\nD_kMPno6xDw_1\tperson\nD_r43ev6HHs_0\tairplane\nD_uO4kxnCwM_0\ttrain\nD_vXQa4wYoY_0\tperson\nD_vxl0ffX4U_6\tbicycle\nD__WGD95lSY_0\tcat\nEABbbYMrVPo_0\tperson\nEABxiYRLhro_1\tknife\nEANBKNPscIU_1\tdog\nEANBKNPscIU_0\tdog\nEATgn3uQFCc_0\ttruck\nEAecqVilQ60_0\tairplane\nEAh-eJriiEM_0\tcat\nEAlTNLBenas_0\telephant\nEAmeB0UClfE_0\tperson\nEAoS9E3JQM0_0\tknife\nEApLpwcDY04_0\tcow\nEApLpwcDY04_1\tcow\nEAvGskBbSsI_0\tperson\nEAvUn45orps_0\tperson\nEAvhz7EUrHs_1\tperson\nEAvhz7EUrHs_0\tperson\nEA2Zq7j78Zw_2\thorse\nEA33eNV3TsM_0\tbus\nEA4Pppxm9q8_2\tairplane\nEA9IwJGPZFo_0\tperson\nEBBWzGDSfhQ_0\ttrain\nEBCEcy1RAZU_0\tbear\nEBDSyGzaeVM_0\tperson\nEBDSyGzaeVM_1\tperson\nEBGwUwk8_KI_0\tmotorcycle\nEBL5WSEhHwQ_0\tcow\nEBTH0ShVz5s_1\thorse\nEBYJEkaJizQ_0\ttruck\nEBmABlnU3Ns_0\tperson\nEBpvJEz7GAs_0\tcow\nEBqxBh52uek_1\tperson\nEBrNePUYA80_0\tcat\nEB0XdJ6nl5Q_1\tbear\nEB5sThk9G-k_0\tperson\nEB7yZ9myXmo_2\thorse\nEB7yZ9myXmo_1\thorse\nEB-GUW188Kc_0\tperson\nECDxDS-R1ZU_0\ttrain\nECEv0inW5Cs_1\tdog\nECKwTK9kBHk_0\tcat\nECLYb63wsdY_0\tperson\nECT7_2qKJJw_0\tperson\nECUpMJzxafs_0\tperson\nECXdLGCGSRU_1\tperson\nECdvMn526ho_0\tskateboard\nEChWuqD2kxc_0\tperson\nECofUr-jIIU_0\tperson\nECpmJNOAfZU_0\tperson\nECuo32_WqfU_0\tperson\nEC0Q7uMrJh0_0\tcow\nEC1pupdSC2Y_0\tperson\nEC-RADUn0SA_0\tskateboard\nEC-RADUn0SA_1\tskateboard\nEDBYWaa97hs_0\tperson\nEDUY2xl1Jkw_0\tcat\nEDYGYkJTUAw_0\tperson\nEDZ9Cu6WUAU_1\thorse\nEDcpyGbwAVs_1\tt
rain\nEDcpyGbwAVs_2\ttrain\nEDqFOrLwfpE_0\telephant\nEDqFOrLwfpE_1\telephant\nEDrX2_SzLF8_0\telephant\nEDtN3eOjUXg_1\tmotorcycle\nEDvdnYUw9b0_0\tperson\nEDxj4RwQr7k_0\ttruck\nEDxj4RwQr7k_1\ttruck\nEDxj4RwQr7k_2\ttruck\nEDxj4RwQr7k_3\ttruck\nEDxj4RwQr7k_5\ttruck\nED-QWlNA_QI_1\tperson\nED-QWlNA_QI_0\tperson\nEEFgTj2V6IY_0\tperson\nEEMkBuPFopc_0\tperson\nEENkey7gvFA_0\tcat\nEENyo-VOtiA_0\tperson\nEEQVWkmTS6A_0\tperson\nEEQVWkmTS6A_1\tperson\nEEfWTq58rX0_0\tmotorcycle\nEEfWTq58rX0_1\tmotorcycle\nEEiUwF9ID5k_1\telephant\nEEiUwF9ID5k_0\telephant\nEEiUwF9ID5k_2\telephant\nEEiUwF9ID5k_5\telephant\nEEnpnVNwpgk_0\tperson\nEEnpnVNwpgk_1\tperson\nEEn1JwzcH7Y_0\tperson\nEEtv5FqPqG0_0\tmotorcycle\nEEx5nPfhJdI_0\tbear\nEE5owiH92Io_0\tbird\nEFHnwo5U2Bc_0\tbird\nEFHnwo5U2Bc_1\tbird\nEFTcDwwNw_M_0\tperson\nEFd6XVMNdEk_0\tumbrella\nEFpWVH06Tf4_3\tmotorcycle\nEFpWVH06Tf4_1\tmotorcycle\nEFpWVH06Tf4_2\tmotorcycle\nEFryCLs5aWc_0\tperson\nEFwar_GkK6Q_0\tcow\nEF0hPkNXnoA_0\tskateboard\nEF1htFUPo80_1\tbus\nEF23dhLqzKk_1\tperson\nEF23dhLqzKk_0\tperson\nEF4KGrH7s08_1\ttrain\nEF4KGrH7s08_2\ttrain\nEF4KGrH7s08_0\ttrain\nEF8PHVKHaq8_0\tperson\nEF9VafNyS20_0\tperson\nEGCQIKdLkIU_1\ttrain\nEGHYSrxI1Ek_1\tperson\nEGIhtnFv2f4_0\tperson\nEGI5Yk7IU8s_0\tboat\nEGOtOZyUpk4_0\ttrain\nEGOtOZyUpk4_1\ttrain\nEGOtOZyUpk4_2\ttrain\nEGZ7-ChFJQI_2\tknife\nEGd19Lwe3vM_0\tperson\nEGgvoXoby8c_0\tperson\nEGgvoXoby8c_1\tperson\nEGiEfcahLzY_0\tperson\nEGsRldGZ4Bc_4\ttruck\nEGsRldGZ4Bc_5\ttruck\nEGsRldGZ4Bc_0\ttruck\nEGsRldGZ4Bc_1\ttruck\nEGsRldGZ4Bc_2\ttruck\nEGvzZJ10zwQ_0\ttrain\nEG7cF7KMqs8_0\tmotorcycle\nEG-A5-_1i-o_0\tcar\nEHD613XdEQc_0\tperson\nEHMEQV26qfk_0\tboat\nEHUgk_5vbps_0\thorse\nEHafuO8IcpI_3\tbird\nEHcP0uDfEyE_0\tumbrella\nEHft6kH6siE_0\tperson\nEHft6kH6siE_1\tskateboard\nEHtU4jYmFWw_0\telephant\nEHvP9Bwmq7M_2\tperson\nEHvP9Bwmq7M_0\tperson\nEHvP9Bwmq7M_1\tperson\nEHv9RwkIPXM_0\tskateboard\nEIIC6lIbxO4_0\tcat\nEIRbrmP8N9U_1\telephant\nEISmAs76j_g_0\ttrain\nEIUHtk1IdtA_0\tcow\nEIcGpS1nsXk_6\telephant\nEIcGpS1nsXk_4\telephant\nEIdaSifBFgk_0\tperson\nEIdaSifBFgk_1\tperson\nEIe7fhZxKpQ_0\tperson\nEIe7fhZxKpQ_1\tperson\nEInkqD_T5Os_0\ttrain\nEIwa8hvMQ9g_2\tbicycle\nEIwa8hvMQ9g_0\tbicycle\nEI8OMIBxEOo_0\tperson\nEI-G2_K6zus_0\tperson\nEJE1AAlhjcQ_0\tperson\nEJE2EqHSaLA_0\tairplane\nEJJefx2O7lo_0\tperson\nEJJ0aK1Mefo_1\tbird\nEJMke8tdD9c_0\tperson\nEJMp6Gszq8M_0\tperson\nEJM15lQ1nds_0\tbus\nEJM15lQ1nds_1\tbus\nEJNv-W_Wh3s_0\tairplane\nEJNv-W_Wh3s_1\tairplane\nEJOO-gnqZOQ_0\tperson\nEJQZBc87T7Q_0\tperson\nEJTbpxYS19w_0\tperson\nEJdJUArfCgA_0\tperson\nEJdJUArfCgA_1\tbicycle\nEJ2XL046J4A_0\tperson\nEJ3IJ7_jx0s_0\tknife\nEJ3IJ7_jx0s_1\tknife\nEKDm7Y7dQ-g_1\tbird\nEKETFVqhfZI_0\tperson\nEKOgJfGpWw8_0\thorse\nEKPKBwGLkg0_0\tperson\nEKR2BQWkMTI_1\tperson\nEKf-TzUsoG8_0\tperson\nEKsbh9eVG0w_1\tairplane\nEKv1nvgLQLc_0\tmotorcycle\nEK2VY_FFN04_0\tperson\nEK56Obpu5ME_0\telephant\nEK56Obpu5ME_4\telephant\nEK5-ZuOavbM_0\ttrain\nEK5-ZuOavbM_1\ttrain\nEK5-ZuOavbM_2\ttrain\nEK7wRGel2vk_0\tperson\ncuXky9bc80o_1\telephant\ncuXky9bc80o_3\telephant\ncuXky9bc80o_0\telephant\ncuYker921kg_0\tperson\ncuZPt_f2GfE_0\tperson\ncusvncJOcwQ_0\thorse\ncu0Z8d-ioZA_0\tairplane\ncu_YsyYcbL0_0\tcat\ncvBKWYZidIs_0\tperson\ncvFAAQuXQR8_0\tperson\ncvJuXsDfcUY_0\ttrain\ncvUktXqTBBA_0\tcar\ncvUktXqTBBA_1\tcar\ncveuhB6Z_D8_1\tbicycle\ncveuhB6Z_D8_6\tbicycle\ncvfI6ccn-J4_0\tperson\ncvgZ-1Uaigk_0\tperson\ncviAzkIEA00_0\tskateboard\ncvlOlYpovm8_0\tperson\ncvyLalOdUEY_0\tperson\ncvyTQ9oFD8s_0\telephant\ncv9PMwKXLoA_0\tperson\ncwBgT8f3504_0\tperson\ncwB99KCLazI_3\tperson\ncwEuIwe
cOZA_0\tcar\ncwHQZi15U3s_1\tbear\ncwHQZi15U3s_2\tbear\ncwKndGwjXho_0\tperson\ncwKndGwjXho_1\tperson\ncwPtR7LsWag_1\tperson\ncwPtR7LsWag_0\tperson\ncwTq-wB6R3U_0\tskateboard\ncwe2t4eoAs0_0\tperson\ncwf1OksNfQ0_1\thorse\ncwjK5oxoq5Y_1\tperson\ncwjK5oxoq5Y_2\tperson\ncwmY9UYaukc_0\tperson\ncwnltT3Eelo_2\tbicycle\ncwp0G17bk0I_0\ttruck\ncwp8Oe0F6y0_0\ttruck\ncwsLz_ppMx8_0\ttruck\ncwsx0Rs732s_0\tperson\ncwyDOlWxH00_0\tbus\ncwzHLMKmpWM_0\thorse\ncw054hU6MdM_0\tperson\ncw4vlk-0siU_1\tboat\ncw45Y0beNG4_0\tbus\ncw55i8mKHnE_0\tperson\ncw55i8mKHnE_2\tperson\ncw57dOs_v5A_0\tbear\ncxAcLoLkk2g_0\tperson\ncxJp5-r_mjQ_0\tperson\ncxLrrWl89wo_0\tperson\ncxMcoeT1INo_0\tperson\ncxQENdEkIVQ_0\tskateboard\ncxSj2n8O4Vk_0\tperson\ncxUXpTWO4iY_0\ttrain\ncxbTIQtmtLs_0\tperson\ncxiI7jApblc_0\tboat\ncxkH0GxPEqU_0\tmotorcycle\ncxm8wGi_pl4_0\tperson\ncxsitsK8l9w_0\thorse\ncxsitsK8l9w_1\thorse\ncx0cCIp1KeU_0\tperson\ncx0cCIp1KeU_1\tperson\ncx0tj_0g0-k_0\tperson\ncx2bUajKTrw_0\tperson\ncx4EC6uXkkY_3\tboat\ncyBgPXda4lw_0\tperson\ncydwQgvjXlk_1\tperson\ncyd0m3k4Iv8_0\tcat\ncynwjNSXfDs_0\telephant\ncyz45rMhH9E_0\tperson\ncy4xwLUwDN4_0\tperson\ncy4xwLUwDN4_1\thorse\ncy4xwLUwDN4_3\thorse\ncy5IjIQ0UNQ_0\tmotorcycle\ncy58Sr7mA_Q_0\tknife\ncy6woAEQ0aU_0\tknife\ncy8BRHRLKa4_0\ttrain\ncy9CeQwHsws_0\tbird\ncy9kq-lD2Q8_0\tskateboard\nczD_BiifXv4_0\tknife\nczLen_XZrRo_0\thorse\nczUjYoRVVYw_1\thorse\nczec9DaQ1sQ_0\tperson\ncze3sm-N48s_0\tperson\nczjU6Q4s1jc_1\tperson\ncznO_APZ6xQ_0\tbear\nczpxbOFiY_Q_1\tperson\nczpxbOFiY_Q_0\tperson\ncztHS4laeBQ_1\tbicycle\nczto2OaEIww_0\tperson\ncz0dXFpjC6o_6\tbear\ncz0dXFpjC6o_4\tbear\ncz0dXFpjC6o_5\tbear\ncz5kAZB6n0k_3\tbear\ncz6eGvs1xNE_1\tmotorcycle\ncz8sE1Vn4Gw_0\tperson\ncz83QPHVLnk_0\tumbrella\nc0GrJULqad0_0\tperson\nc0GstZDjoNM_0\tperson\nc0IYOMYovRo_1\tperson\nc0J3zJ8n3SI_0\thorse\nc0LibLcues0_0\tbear\nc0MEfCeuV5U_0\tbird\nc0MdSWVdmqY_0\tbus\nc0PyfX2HFqE_0\tperson\nc0TJZWOz78g_1\tdog\nc0XKBQNwSlg_0\ttruck\nc0aHKGTYgeo_0\tperson\nc0bZsiE4obs_0\thorse\nc0jq_aReY5M_0\tmotorcycle\nc0kH2qIyG7E_6\thorse\nc0kH2qIyG7E_4\thorse\nc0lBfqi79wQ_0\tcow\nc0lDR6ABjTE_2\tperson\nc0nRMc9KiiQ_0\tdog\nc0nXpd7yJsk_0\tperson\nc0o_nv0BL6Y_1\tbear\nc0pzN4lVApI_1\tperson\nc0qkbu5wLF8_0\telephant\nc0qkbu5wLF8_2\telephant\nc0wve_629pA_0\tperson\nc0yrclVs1YA_0\tcat\nc02KdAN0Hwg_0\tbird\nc04Vd9VQao8_0\tperson\nc04ixznYflE_1\tgiraffe\nc07Yqknz4KI_0\ttrain\nc07k0EtqcVs_1\tcar\nc08cFHAOc7I_0\ttrain\nc0_M6VhGXOo_0\tperson\nc0_M6VhGXOo_1\tperson\nc1JGF-ltiJ8_1\tbicycle\nc1JGF-ltiJ8_2\tbicycle\nc1PUETYl8Lk_0\tairplane\nc1QAgByBiYE_0\tperson\nc1WZ6dEz6kw_0\tairplane\nc1XMeGkSwJQ_0\tperson\nc1XfiRiOTb0_0\thorse\nc1a_E7CZsVk_0\tperson\nc1djg96PnM0_1\tperson\nc1djg96PnM0_0\tperson\nc1hBqL_LWE0_3\tbird\nc1j8TlZsEmQ_0\tboat\nc1laLoj4fM8_0\tperson\nc10eOkpL080_0\tperson\nc10eOkpL080_1\tperson\nc2B7cQwr4Pk_0\tperson\nc2EIdJJnku0_0\tmotorcycle\nc2E5_n_bZKc_0\ttrain\nc2Kh-3yj9Ak_0\tperson\nc2MTwEvPGGk_0\tperson\nc2MUYY-qPhA_1\tbus\nc2MqPrUNXQ4_0\ttrain\nc2UDI136z20_0\telephant\nc2UDI136z20_4\telephant\nc2UDI136z20_5\telephant\nc2UDI136z20_7\telephant\nc2YlmT-aFE4_0\tcat\nc2a9uwUCFK8_0\tcow\nc2dk3AjUcYs_0\tperson\nc2gJYqYcsZg_0\tperson\nc2luSxdPZ6A_0\tperson\nc2m_PmRSEmw_0\telephant\nc2qJhOvlIUU_0\tairplane\nc2xTBZttrzA_0\tperson\nc22HGSTHjBA_2\tknife\nc22HGSTHjBA_1\tknife\nc22yvcXZcM0_0\tbird\nc2_qHguvZbI_2\tbear\nc2_qHguvZbI_0\tbear\nc3E9z6F-Txk_0\ttrain\nc3J2U0kR6Hg_0\tperson\nc3TisGCbmoU_1\tperson\nc3Ur6j05SgQ_1\tbicycle\nc3YFgnDBuXw_0\tperson\nc3bCGnwqGxc_0\tcar\nc3eo0_ftrn4_0\tcow\nc3pP__Uybq8_0\tperson\nc3wt1MUbgD4_0\tperson\nc3wt1
MUbgD4_1\tperson\nc37EOoRHd7E_2\ttruck\nc4A01X82TfI_0\ttrain\nc4FmSUmvYbo_0\tperson\nc4FmSUmvYbo_1\tperson\nc4Hh2XdTBGY_0\tcow\nc4ICOFVvcTs_0\tperson\nc4e-qA4esVY_1\tperson\nc4iCXPdqm6c_0\telephant\nc4jbOCZyGsQ_0\tperson\nc4k8Yk1x3H8_1\tperson\nc4k8Yk1x3H8_0\tperson\nc4xRJS9_5Fk_0\ttrain\nc4xRJS9_5Fk_1\ttrain\nc40Mwg88VJI_0\tperson\nc43ihGsR1eA_1\tperson\nc5AKIs1XUhc_1\tbicycle\nc5AKIs1XUhc_2\tbicycle\nc5AKIs1XUhc_3\tbicycle\nc5BYdZTaBgc_0\tperson\nc5CmxgLHcxA_0\tbus\nc5Fw-Fi4daE_0\tcow\nc5GANV8PlSM_0\tperson\nc5GIQcIJ9Tc_0\ttruck\nc5GOwfkZXFk_0\tperson\nc5GOwfkZXFk_1\tperson\nc5Q2ZeMDx3o_0\ttrain\nc5TlkWtFymE_3\tdog\nc5WT0W8SfGg_0\tcow\nc5WT0W8SfGg_5\tcow\nc5WT0W8SfGg_1\tcow\nc5WT0W8SfGg_2\tcow\nc5WT0W8SfGg_3\tcow\nc5WT0W8SfGg_4\tcow\nc5cooFy7-SM_1\telephant\nc5hEygqOXOU_0\tperson\nc5oiA5xy15M_0\tperson\nc56nid2YSes_6\tbird\nc56nid2YSes_0\tbird\nc56nid2YSes_1\tbird\nc56nid2YSes_2\tbird\nc56nid2YSes_5\tbird\nc56nid2YSes_8\tbird\nc56nid2YSes_9\tbird\nc5_dNG2vWXg_0\tcar\nc6EIognIYWs_0\tbird\nc6ZQRNXfcZA_1\tperson\nc6a4xySAJ0o_0\ttruck\nc6niMRNXDeo_0\tperson\nc6qKbpvd-iw_0\tperson\nc6rbqnU4LXs_2\tmotorcycle\nc6rbqnU4LXs_0\tmotorcycle\nc6s839WnVhE_0\ttruck\nc6yBOD3Wo5A_0\tperson\nc7B-3x-3V34_0\tperson\nc7ILC5wYs8A_0\tperson\nc7KoGv5Ha7k_0\tperson\nc7PMPnuPjp8_0\tperson\nc7RFexe2Ba4_1\tbicycle\nc7RFexe2Ba4_3\tbicycle\nc7RFexe2Ba4_0\tbicycle\nc7RFexe2Ba4_2\tbicycle\nc7SMRurbkY4_0\tbus\nc7bKlPVR5pI_0\tboat\nc7hVbIhp0Wc_0\tperson\nc7jWXqWoMz0_4\tbicycle\nc7s8weR8lEY_0\tperson\nc7v4ZFCK-A4_0\tperson\nc70kaPblMLU_0\tcow\nc74hYNtpwdA_0\tdog\nc75cllxWxZE_0\tperson\nc7_op6G05l0_0\tairplane\nc8B4ZVLv364_0\tperson\nc8Cl-5olqWk_0\tmotorcycle\nc8Gaja-xUeQ_1\tperson\nc8I3JAxoLTs_0\tbicycle\nc8I3JAxoLTs_1\tbicycle\nc8I3JAxoLTs_3\tbicycle\nc8LHqWmKrJU_1\tairplane\nc8LHqWmKrJU_2\tairplane\nc8Mo16hH7qs_0\tperson\nc8UrmdREAO8_0\tperson\nc8Y7MJRWFqE_0\tcat\nc8Y8y9BsPHw_0\tcow\nc8b9qqF9Xvw_0\tperson\nc8b9qqF9Xvw_1\tperson\nc8ezNTNUXqc_0\tcat\nc8wbvQnndJc_1\tbicycle\nc8wdGQw1jB4_1\tbus\nc8wdGQw1jB4_2\tbus\nc8y3bmW0X9s_1\tcow\nc8zphqgYcJM_0\tperson\nc80SYyKXCCw_0\tperson\nc8_fHVnrzZ8_2\telephant\nc9EDbgCRGP0_0\tperson\nc9GKsfyRkmE_0\tperson\nc9IdrMV-Y_Y_0\tperson\nc9Q9LPaqyug_0\tumbrella\nc9SbfXgAoO8_1\tairplane\nc9Somjq2gLs_0\tumbrella\nc9WDXLFtYLU_0\tbus\nc9XaEHVxu4M_0\tperson\nc9Y9a6KVWRE_0\tbird\nc9Y9a6KVWRE_1\tbird\nc9ZWCwVv6Q0_0\tperson\nc9dPiEkCwR4_0\tmotorcycle\nc9gCDztKasg_0\telephant\nc9pYz2lTh3I_1\tperson\nc90ldeMSfL0_0\tcat\nc94gzpjmj24_0\tperson\nc9_87BKOW1I_0\tcow\nc-CCw_cyicE_0\tcow\nc-G0LV4kyY0_0\tcar\nc-T9ITcEW9c_0\tperson\nc-T9ITcEW9c_1\tperson\nc-ZnwBvVFGE_0\tperson\nc-gH6T1q-sk_0\tperson\nc-pKAy_3arM_0\tperson\nc-uOjPSq-10_0\tcow\nc-vwn6zqogs_0\tperson\nc-vwn6zqogs_1\tperson\nc-4uPwFKBdY_0\tperson\nc-_iMD-ihnE_0\tmotorcycle\nc-_94CuEo_M_1\tperson\nc_SQI7NirwY_0\tperson\nc_THUYYi_-k_0\tairplane\nc_YojhaB5pI_0\tmotorcycle\nc_jNM33kJuA_0\tperson\nc_rUQgBtHY4_0\tperson\nc_rUQgBtHY4_2\tperson\nc_rUQgBtHY4_1\tperson\nc_wkIYzEEDk_0\tdog\nc_6OcDyZ93k_0\tbus\nc_9GO2BbPz4_0\thorse\ndAQu2GQSyrY_0\tcat\ndAS6SqC7TCw_1\telephant\ndAVIZQJ5Af4_0\tperson\ndAqurx13i7I_0\tknife\ndAynVVxxb_o_0\tperson\ndA7mx3mrJeA_0\ttrain\ndA_ZtitJeMA_0\tperson\ndBDSqZ8rirA_0\tperson\ndBGKqrEvsIE_0\tboat\ndBGKqrEvsIE_4\tboat\ndBKexOUQSQA_0\tcow\ndBKexOUQSQA_1\tcow\ndBKexOUQSQA_2\tcow\ndBKexOUQSQA_4\tcow\ndBKexOUQSQA_5\tcow\ndBKexOUQSQA_6\tcow\ndBOrrvJDv54_1\tskateboard\ndBPu5iVlw1Y_2\thorse\ndBSryinfjiI_0\tperson\ndBS9maEElcw_0\tperson\ndBUpfcdFDUQ_0\tbicycle\ndBWeUQd06l4_0\tperson\ndBWeUQd06l4_1\tperson\ndBiGneGqmh0_0\tcow\ndBk2FwZ
grtk_0\tcow\ndBq77lvujCk_0\tbird\ndBuvGegR_vA_0\tperson\ndByVvpTlwL4_1\tknife\ndB29dsCcN9s_0\ttrain\ndB43vSgLY2M_0\tperson\ndCG24UL_NeM_0\tperson\ndCSF80Y6lso_0\tperson\ndCSF80Y6lso_1\tperson\ndCZ9suBocXk_0\tperson\ndCgz-7OgwMQ_1\tperson\ndCl8hSleXYQ_0\tcow\ndCoi3rXWgbM_0\tperson\ndCqdvmS1jts_0\tperson\ndCqdvmS1jts_1\tperson\ndC9rTC3kzsI_0\tcow\ndDADJZV4i74_0\thorse\ndDA5p5TJ03g_0\tperson\ndDB84W_zVOI_0\tskateboard\ndDB84W_zVOI_1\tskateboard\ndDE3p8Gs878_0\telephant\ndDGiQLFJtPs_0\tbicycle\ndDIbBZtEJ2w_0\tknife\ndDLgQQ2XRc8_5\thorse\ndDLgQQ2XRc8_3\thorse\ndDLgQQ2XRc8_6\thorse\ndDO-RlSt3Gw_0\tperson\ndDQ58wciink_0\tcow\ndDZYTPEd9KE_1\tairplane\ndDacKPH4sOw_0\tcar\ndDacKPH4sOw_1\tcar\ndDcBtNpmCeU_0\tperson\ndDgcHWpKMeo_0\tperson\ndDkaPLEvAwM_0\thorse\ndDkaPLEvAwM_1\thorse\ndDkaPLEvAwM_2\thorse\ndDqe9sBGR24_0\tbird\ndDx0MqaKT2w_0\tperson\ndDx0MqaKT2w_2\tmotorcycle\ndD-AlVwxf-g_1\tcow\ndD_Ew85jXzk_1\ttrain\ndD_PbxvCBcA_1\tperson\ndECTTSpEUKg_0\tperson\ndEW9ZwvMsDE_0\tcat\ndEc5fHlEXCo_0\ttruck\ndEuzpQL0tNo_7\telephant\ndEuzpQL0tNo_1\telephant\ndEuzpQL0tNo_2\telephant\ndE7OwbOHsu8_0\tperson\ndE7WsfeVkI8_0\tperson\ndE7X93gdVPQ_0\tcat\ndFCUyBTrvNM_0\thorse\ndFCu7E6aYM4_0\tperson\ndFCu7E6aYM4_1\tperson\ndFEo5YKHAcA_2\tskateboard\ndFEo5YKHAcA_0\tskateboard\ndFMPz16FOzE_0\tmotorcycle\ndFZSSPvMBqE_0\tzebra\ndFZSSPvMBqE_1\tzebra\ndFa7TcQRCUU_1\tbird\ndFbZxetmjCQ_0\tskateboard\ndFkNDweVNFU_0\tcat\ndFpJq9s5fec_1\tbicycle\ndFpJq9s5fec_2\tbicycle\ndFsDjjWW00Q_0\tknife\ndFth5-8MEhM_0\tperson\ndF7OkxFt3I8_0\tperson\ndF_aGgW1jcM_0\tperson\ndGE7t6KgXHc_0\tperson\ndGFrWX61Zk0_0\tperson\ndGS01inQU1U_2\tperson\ndGS01inQU1U_0\tperson\ndGS01inQU1U_1\tperson\ndGZBUkIXMpo_0\tperson\ndGZ_pzDrl70_0\tperson\ndGdh_BHleU4_0\tboat\ndGh51ZQ9QAg_0\tbird\ndGk8D_De-2E_0\tperson\ndGk8D_De-2E_1\tperson\ndGpbPaorWys_1\tbear\ndGq1bpRxbiA_0\tperson\ndGyR5TWO-p4_1\tperson\ndG0CtnphYzg_0\tperson\ndG5mjfvTY7c_0\tboat\ndG7DSOtetMY_0\tknife\ndG9J5UpxeyY_0\tperson\ndG9J5UpxeyY_1\tperson\ndHCgtjlT_Lg_4\thorse\ndHCpH8dTwfw_0\thorse\ndHF9NIqrx6Q_0\tcar\ndHGIXivupi4_0\tperson\ndHGIXivupi4_1\tperson\ndHGIXivupi4_2\tperson\ndHJkOetpjQw_0\tbus\ndHO6vTrB66w_0\tperson\ndHO6vTrB66w_1\tperson\ndHVDjpivOKw_1\tperson\ndHVDjpivOKw_0\tperson\ndHVgQCO07SU_1\tperson\ndHVgQCO07SU_2\tperson\ndHfs5GT-YpY_0\tcow\ndHg1Xorklm0_0\tperson\ndHimuOjriUc_0\tcow\ndHnk6ulSNSo_0\tperson\ndHnsZs2Riqk_0\tperson\ndHnsZs2Riqk_1\tperson\ndHsD3F8dTpc_0\tbird\ndHvlIrb2Q-k_0\tperson\ndHwR5d4xGEk_0\tknife\ndHwR5d4xGEk_1\tknife\ndHwR5d4xGEk_2\tknife\ndHwR5d4xGEk_3\tknife\ndHwR5d4xGEk_4\tknife\ndHxmY1bGbNc_4\tbird\ndH89qyunr6s_0\tperson\ndH94i4xFlZU_1\telephant\ndH94i4xFlZU_6\telephant\ndH94i4xFlZU_0\telephant\ndH94i4xFlZU_5\telephant\ndH94i4xFlZU_7\telephant\ndICl73jYZ3M_0\tperson\ndICrafh45_I_3\tairplane\ndIDxqrhmBE4_0\ttruck\ndIDxqrhmBE4_2\ttruck\ndIEZ2kfTzzY_0\tboat\ndIJk0w4SnH8_0\tbird\ndIVtaleUNWI_0\tperson\ndIVtaleUNWI_1\tperson\ndIX81Ov0fUY_0\tperson\ndIZM-9d8bSQ_0\tperson\ndIZM-9d8bSQ_1\tperson\ndIm0Sv_iE2E_0\tmotorcycle\ndIqYGVVgYsU_0\tperson\ndIzMmAGaF6U_1\tskateboard\ndI93uXfSaRM_0\tbird\ndJB-DXpgq2U_1\tbird\ndJKAhixNM9Y_1\ttruck\ndJYNs94fv_0_0\tperson\ndJgqX3uy6z4_0\tperson\ndJg4R9cpbjI_0\tperson\ndJisrPH71tE_0\tperson\ndJi_dOrUZnw_0\tperson\ndJjrFTy9H3c_0\tperson\ndJkzzYh6BkY_1\tcat\ndJnRg-1zO1g_3\tknife\ndJqGj0FeC9I_0\tcat\ndJvoaqZjIDw_0\tperson\ndJ2B9A0mYl0_1\tdog\ndJ2kWscI-tc_1\tdog\ndJ4PR9zme-s_0\tperson\ndJ6S9bSEYDc_0\tcow\ndJ8J7WOLZtk_0\tskateboard\nELDxjZXMtCg_0\tperson\nELLTxQ47f90_1\tperson\nELLTxQ47f90_0\tperson\nELNgTt9Jswc_0\ttrain\nELOZutiZKMM_0\tperson
\nELOZutiZKMM_1\tperson\nELPpy9ABb3s_1\telephant\nELTeW4X2mGY_1\tcow\nELbg8i93W8I_0\tperson\nELbjX2Ya0_o_0\tdog\nELmktutrkDk_0\tperson\nELqA6fb0un8_0\tperson\nEL8H94Lycf8_0\tperson\nEMAVfcO6JFE_0\tperson\nEMKcTJp7ehY_0\tperson\nEMOpCv3vVfE_1\tskateboard\nEMP7p3FNxZU_0\tperson\nEMU8vGL7ZFQ_0\tperson\nEMb28oLn66k_0\tairplane\nEMgh3pwtnXg_0\tperson\nEMiRla730lM_1\tperson\nEMiRla730lM_0\tperson\nEMmg9OKgyBE_1\tboat\nEMmmZ6ADzfI_0\tskateboard\nEMngQ4YMTv0_0\tmotorcycle\nEMorunu9Ik8_0\ttruck\nEMqd3lVNUxg_7\tbus\nEMuGAIADn3s_0\tperson\nEMwcDTRPPMw_0\tairplane\nEMyQWQ_Yobc_0\tdog\nEM0yGxKJWqY_0\telephant\nEM1R3HXt7DY_0\tperson\nEM1z9o601v4_0\tknife\nEM3tBaIyR0o_0\tmotorcycle\nEM5e1snhsCs_0\tperson\nEM-k8ZAva6k_0\tperson\nEM-zjCQyGAc_0\tdog\nENAr6j6fcWU_0\tbird\nENCHiWUV4dk_0\tperson\nENI-JuSPNQA_0\tmotorcycle\nENSEWig-4ZM_0\tknife\nENXXFcrrxGM_0\tcar\nENc0uxXKsaI_0\tperson\nENkqstdLKl4_0\tperson\nENk4JRIbEaE_1\tperson\nENnPjtPjU6c_0\tperson\nENtoAci6OwQ_0\tcow\nENvdCzm4whM_0\ttruck\nENvdCzm4whM_1\ttruck\nENvdCzm4whM_2\ttruck\nEN0Klsi-AKY_0\tbicycle\nEN4IIJjhBeI_0\tzebra\nEN-QCSvtEd0_3\telephant\nEN-4SsZnn-k_0\tperson\nEOEXVXG1TDk_0\tperson\nEOVNlasJhIo_1\tperson\nEOdHjLYopi0_1\tbird\nEOedzXaVI4U_2\tbird\nEOe3CfOT53g_0\tperson\nEOmVKXeoKBc_1\tairplane\nEOq-3ZRn0SQ_0\tskateboard\nEOt6j5ecODw_0\ttrain\nEO7NccQDQyM_0\tcat\nEO8Dpvy4oXs_0\tzebra\nEO8mQrkIZuY_0\tperson\nEO_DwtyWh0s_3\tperson\nEO_DwtyWh0s_0\tperson\nEO_DwtyWh0s_1\tperson\nEO_DwtyWh0s_2\tperson\nEPOXqdKNjKg_2\tgiraffe\nEPU630RSI5c_2\tperson\nEPU630RSI5c_0\tperson\nEPWmdYKJaXk_0\tbird\nEPycDWf2vY4_0\tskateboard\nEP_ezteElzk_0\tperson\nEQBFPIdI8gY_0\tperson\nEQC8eEghvs8_0\tperson\nEQNSjjkyRBg_0\tperson\nEQNSjjkyRBg_1\tperson\nEQTee9qqTZs_0\tperson\nEQVCizuJQFY_0\tumbrella\nEQdEm5HuPG4_5\ttrain\nEQx1XHc0mRM_1\tmotorcycle\nEQzXCoQRbas_1\ttrain\nEQ5rBLoiT78_0\tbus\nEQ9-lbsee1s_0\tperson\nERCvzMzkDhg_0\tskateboard\nERGwo6vIXdQ_0\tperson\nERJR-zQYyH4_0\tperson\nERR-qjVJ3lY_0\tperson\nERVp_cX1juc_0\tperson\nERev6rrd5XA_3\tmotorcycle\nERyyYMb2fFk_0\tcow\nERzh41uuxUE_3\tbicycle\nER0IdSeymeI_0\tperson\nER0IdSeymeI_1\tperson\nER03PLUBt4c_0\ttrain\nER03PLUBt4c_1\ttrain\nER03PLUBt4c_2\ttrain\nER03PLUBt4c_3\ttrain\nER53sUYwz1I_0\tzebra\nER6vMbAyQ6E_1\tskateboard\nER6vMbAyQ6E_0\tskateboard\nESDQMC_70Pk_0\tbear\nESInVf3ioiA_1\tdog\nESMdbpGXk4I_0\tperson\nEST4CUX19Eg_0\tperson\nESokfN84OYk_0\telephant\nESokfN84OYk_3\telephant\nESokfN84OYk_4\telephant\nESpwZsbwQGA_1\telephant\nESpylyha7g0_0\thorse\nESt5TEXuGIM_0\tperson\nESt5TEXuGIM_1\tperson\nESwsyjITYGM_0\tskateboard\nETBia7K3ZHw_0\tmotorcycle\nETBia7K3ZHw_2\tmotorcycle\nETQTZgnfRK4_1\tperson\nETQi93bP3YQ_8\telephant\nETQi93bP3YQ_2\telephant\nETTgj1pxvME_2\tperson\nETWI4nXFANg_0\tperson\nETcmjY7Jigo_1\tmotorcycle\nETgN7EcVVQI_1\tperson\nETmYIq5CF2k_0\tmotorcycle\nET4xC8Wl_CA_0\tperson\nET4yAsJTvlk_0\tcow\nEUH3oSBX950_0\tperson\nEUH3oSBX950_1\tperson\nEULIYiiV-O0_0\tperson\nEULIYiiV-O0_1\tperson\nEULchAlLDfM_0\ttrain\nEURUU5P5flo_0\tperson\nEUcHraiUCjA_0\tbicycle\nEUcWvzarnb0_0\tumbrella\nEUdNEi4myuA_0\tperson\nEUtfoblvHn0_0\tperson\nEUuCDfb8lf4_2\tperson\nEUuCDfb8lf4_1\tperson\nEU93Mw9WGkc_0\tskateboard\nEVBHY1qGVos_0\tperson\nEVBHY1qGVos_3\thorse\nEVElggpPSCM_0\telephant\nEVE2SBJ-2S8_0\tperson\nEVH8Ql7_pYE_0\tperson\nEVTW6Ka7-NU_0\tperson\nEViJ_JQcv5c_0\ttrain\nEVmGPGaP6bY_0\tperson\nEVnnSfmb4go_0\tgiraffe\nEVn52FBjG9E_0\tperson\nEVn52FBjG9E_1\tperson\nEVxEEc26TWg_1\tgiraffe\nEWLiwu56oQc_1\tperson\nEWNd02yWiYw_0\tperson\nEWP0Hhxsf58_0\tperson\nEWQo_1YXfYM_1\tperson\nEWQo_1YXfYM_0\tperson\nEWTvjjpAUm0_0\tai
rplane\nEWXyQ1tS3jI_0\telephant\nEWdNgXvr54s_0\tdog\nEWfPRTjQO9k_0\tdog\nEWgsivaLhl0_6\telephant\nEWgsivaLhl0_1\telephant\nEWgsivaLhl0_2\telephant\nEWi25l2D0cw_0\tcat\nEWkndzLXvLc_0\tbicycle\nEWuOSRFWTzg_1\telephant\nEW0Mgele6Gc_0\tperson\nEW0Mgele6Gc_1\tperson\nEW6FHYagN0Y_0\tperson\nEW98OEvTxM8_0\tperson\nEW-Zuo7ArI4_0\tdog\nEXDDO7gLoL4_1\tperson\nEXDDO7gLoL4_2\tperson\nEXDDO7gLoL4_3\tperson\nEXDDO7gLoL4_4\tperson\nEXGwKMtyR1M_0\tperson\nEXHZgqkcXG8_1\tcow\nEXJITC62tU4_0\tumbrella\nEXSMz4HnWfg_0\tdog\nEXaiYiUQrMI_1\tdog\nEXfiGeKWKTk_7\tairplane\nEXfiGeKWKTk_1\tairplane\nEXiGyq1TD80_0\tperson\nEXiGyq1TD80_1\tperson\nEXkbZbo1n5U_2\telephant\nEXkbZbo1n5U_0\telephant\nEX817S50E5U_0\tperson\nEX-dqihLUwY_0\tmotorcycle\nEX-dqihLUwY_2\tmotorcycle\nEYCaJR9md8k_0\tairplane\nEYEWPdaJuL0_4\tbird\nEYEWPdaJuL0_5\tbird\nEYEwLM8YTwc_0\tperson\nEYFMOBeF9UE_0\tknife\nEYHtNGztiRQ_1\tcar\nEYKrEDelAdU_1\tbear\nEYM1oXAmBq0_1\tbus\nEYRf00qGMVU_0\ttrain\nEYV6D6G6t2c_1\tperson\nEYZsYCSedGw_0\tperson\nEYd9lSK7Bbk_0\tperson\nEYhtY59whvs_0\tperson\nEYmWVBDEutA_0\thorse\nEYnEMtlMaPY_0\tperson\nEYoj8D64YLA_0\tskateboard\nEYuLodJTgYs_0\ttrain\nEY2pZ9A48ng_0\ttruck\nEY2pZ9A48ng_1\ttruck\nEY2pZ9A48ng_3\ttruck\nEY25PJWD2j4_0\tperson\nEY36YeIgOYI_0\tperson\nEY36YeIgOYI_1\tperson\nEZWcsRlXIA8_0\tperson\nEZbOH9yEe-A_0\tdog\nEZh1lf4yfCg_0\tperson\nEZ5Wa2duCoM_0\tperson\nEZ5Wa2duCoM_1\tperson\nEZ7d9ab31ys_0\tgiraffe\nEZ9-_7o9Vds_0\tbird\nEZ9-_7o9Vds_1\tbird\nEZ_xC5EBwvk_0\tbus\nEaBdeSUjDYs_0\tdog\nEaFSd7_S8kc_0\thorse\nEaQ1P4QyRsY_0\tperson\ndKEVBoMMD2w_0\tboat\ndKJz_EakSc4_0\tperson\ndKMb2S2SSfI_0\tskateboard\ndKTgMjbnNPQ_0\tskateboard\ndKiwficH2d4_0\tperson\ndKi4xI4vB-k_0\tumbrella\ndKlCFQxk5Dc_3\tperson\ndKlCFQxk5Dc_5\tperson\ndKlCFQxk5Dc_0\tperson\ndKlCFQxk5Dc_1\tperson\ndKlCFQxk5Dc_2\tperson\ndKq4S1IVjlA_0\tperson\ndLFWcgSewxs_0\ttruck\ndLH8fBNk89Y_0\tcat\ndLIld9ux7p4_0\tairplane\ndLT61O_htwI_0\tcat\ndLUCKkji5wo_0\tperson\ndLUCKkji5wo_1\tperson\ndLV2VJkpyMI_0\tairplane\ndLbhzrFtNC0_0\tperson\ndLhVV7DMXkw_0\tperson\ndLoxdmLuphk_0\tdog\ndLq5OW1xY54_0\telephant\ndLq5OW1xY54_3\telephant\ndLq5OW1xY54_2\telephant\ndLtQB9P_7BU_2\tbear\ndLty27VgJcc_0\ttrain\ndLvr7BjgsHg_0\tperson\ndLwXzYr8beg_0\tcar\ndL3dSZMnBko_0\tperson\ndL3vGWsRVCg_0\tknife\ndMDGwTdSHIo_0\tmotorcycle\ndMJQi7oYiqQ_1\tperson\ndMS5hB4uWdk_0\tbird\ndMWgiVqknaE_2\tperson\ndMWgiVqknaE_0\tperson\ndMZONdbNFbk_4\tbicycle\ndMZONdbNFbk_2\tbicycle\ndMdUZi9lxrU_0\tcat\ndMiwR-DS6UE_0\tcar\ndMsIDwHkWNE_0\tperson\ndMulBz-N8oA_0\thorse\ndM7lOj89YZE_0\tperson\ndM7-xh2kSmc_0\tperson\ndM7-xh2kSmc_1\tperson\ndM9u0c0qSV0_0\tcow\ndNCm5MtFcp0_0\tperson\ndNEAY77it7o_0\tperson\ndNShS9OdIoA_1\tperson\ndNShS9OdIoA_0\tperson\ndNSlL572gMU_0\ttruck\ndNSlL572gMU_1\ttruck\ndNVvIPWEH1Q_0\tperson\ndNVvIPWEH1Q_1\tperson\ndNdTs9Qa1A0_0\ttruck\ndNeF_3qppZQ_0\tskateboard\ndNj_77jiPcs_1\tcow\ndNknNwahiv4_0\tgiraffe\ndNoz32bgN0U_0\tcar\ndNpQfDg_dIg_0\tperson\ndNqdMh44imM_0\ttrain\ndNs2JO9SgGo_1\tairplane\ndNs2JO9SgGo_2\tairplane\ndNyMDstraS0_0\tperson\ndN1cn1CPEa8_0\tperson\ndODPVlzMR1A_0\tperson\ndOHuuTREVQk_0\tperson\ndOHuuTREVQk_1\tperson\ndOHuuTREVQk_2\tperson\ndOHuuTREVQk_3\tperson\ndOMW6BLHI2s_0\telephant\ndOMW6BLHI2s_1\telephant\ndOOQ32tmk14_0\telephant\ndORLSKDLr1w_0\tcat\ndOUVBpTWHzc_0\tperson\ndOVzO5pkY2o_0\thorse\ndOWhuaTBmr8_0\ttruck\ndOdX5nkOBoQ_1\tperson\ndOdYYCqd6i0_0\tperson\ndOdYYCqd6i0_1\tperson\ndOd-8kfbjz4_0\ttrain\ndOd-8kfbjz4_1\ttrain\ndOfNevz8wlc_0\tbus\ndO2CbXVpSl0_0\telephant\ndPA7g60qlnk_1\tboat\ndPJk57_DSuI_0\ttruck\ndPJ7_mdmjJo_4\ttruck\ndPJ7_mdmjJo_1\ttruck\ndPTnDrK0jl0_0
\tknife\ndPZPjPwJCnA_0\tperson\ndPiOaLH0K4Y_0\tbear\ndPiOaLH0K4Y_2\tbear\ndPma_hb-MR8_0\tskateboard\ndPnxUa8yPbw_0\ttrain\ndPpwBkl-F9k_3\tbicycle\ndPpwBkl-F9k_0\tbicycle\ndPp0no_eYOQ_0\tdog\ndPqheqisvs8_0\tperson\ndPvgWsIPDr0_0\thorse\ndP0jXsi0KUw_0\tskateboard\ndP_-3SJLP1Y_0\tperson\ndQB4GI0Bgus_0\ttruck\ndQCFCRTz2rc_1\tgiraffe\ndQCFCRTz2rc_4\tgiraffe\ndQCFCRTz2rc_5\tgiraffe\ndQCFCRTz2rc_0\tgiraffe\ndQIQv4YkBaM_0\ttruck\ndQI-ReUS1hk_0\tperson\ndQM_-V4jSpM_0\tcat\ndQNG1syFdKQ_0\tperson\ndQPdAoRj8vw_0\tdog\ndQWw3losmfA_1\tbicycle\ndQY2wbSJyOQ_0\tperson\ndQh9dmaqW3s_0\tperson\ndQh9dmaqW3s_1\tperson\ndQlybGW3tbw_1\tcat\ndQnNTlCD_AQ_0\telephant\ndQnNTlCD_AQ_1\telephant\ndQoX3OkaI4M_0\tperson\ndQzWZhDVLYk_1\tperson\ndQ4hJadqL_w_0\tperson\ndQ62PlC9Frc_0\tzebra\ndRBb5v_Fv3g_0\telephant\ndRDdBvl4olg_0\tperson\ndRHTO6H764g_0\tperson\ndRHYGXImEBk_2\tperson\ndRHYGXImEBk_0\tperson\ndRInM_HaQZs_0\tbus\ndRVEs1099F8_0\thorse\ndRcLZtR6KFs_0\tperson\ndRcrvTR9xIY_0\tperson\ndRiBVua-2Ck_0\tperson\ndRjzvcGshbA_1\tperson\ndRjzvcGshbA_0\tperson\ndRs8FcKuu6w_0\tboat\ndRt8H1uQ5Og_0\tumbrella\ndRt8H1uQ5Og_1\tumbrella\ndR7jBT3cxr8_0\tperson\ndR8kCc9XNJs_0\tboat\ndR-8FlykNZ0_0\tperson\ndSAODa472ys_0\tbird\ndSAYK4yUlDs_4\tperson\ndSAYK4yUlDs_0\tperson\ndSAYK4yUlDs_1\tperson\ndSAYK4yUlDs_2\tperson\ndSAYK4yUlDs_3\tperson\ndSEv_R8nQik_0\tzebra\ndSFMrnh2szI_0\tcat\ndSLakvIEH9o_0\tbear\ndSLmBYdUku8_0\tperson\ndSQTVC-RyAU_0\tperson\ndSWhe4RgQ_w_0\tcat\ndSZBg-Vcr7E_0\tmotorcycle\ndSojBtCOkqQ_0\tperson\ndSx4IloBWZs_0\tperson\ndSzAX5l_fs0_0\tperson\ndSzAX5l_fs0_1\tperson\ndS0mBDDgP_A_0\tperson\ndS0mBDDgP_A_1\tperson\ndS8x0l5I7f0_0\tboat\ndTDxzi0o_Qg_1\tairplane\ndTMe2Vse97w_0\tcat\ndTVBSXs5Me8_0\tperson\ndTVKs9m3eZU_0\tcat\ndTm_DRCtjCo_0\telephant\ndTm_DRCtjCo_1\telephant\ndTrt1C_90H0_0\tknife\ndTurjz-gJek_0\tperson\ndT6A3DwqZb0_0\tboat\ndT8wudfW9gg_1\thorse\ndT-INB6puFM_0\tskateboard\ndT-INB6puFM_1\tskateboard\ndUAtLBDfmBo_0\tairplane\ndUAtLBDfmBo_2\tairplane\ndUC_SF_mN_E_3\thorse\ndUC_SF_mN_E_1\thorse\ndUInMUIPtTs_0\tperson\ndUJH8d3CMU8_0\tbear\ndUMLWt99A7o_0\tperson\ndUP4OTLrOA0_0\tperson\ndUW_G_--wI8_0\ttrain\ndUXFUWivXPA_0\thorse\ndUXFUWivXPA_1\thorse\ndUbP54CBYd0_0\tairplane\ndUm9A-1AoMU_0\tperson\ndUqrowFcbD0_0\tperson\ndUx_UfS9cQI_1\tdog\ndUx_UfS9cQI_0\tdog\ndU-bQRDInro_2\tbird\ndU-bQRDInro_4\tbird\ndVAMoKYgrwE_0\tperson\ndVKQhCF8o8w_0\tperson\ndVTHVxh6Tro_1\tknife\ndVWAD4gOu-8_1\tperson\ndVd7OzbhOq0_0\tperson\ndViVbA7N_AE_0\tairplane\ndVqPo7-p71Y_0\tperson\ndVtqTTZTFDQ_0\tperson\ndWCqnck4Um0_0\tperson\ndWFVX1psRZI_0\tbird\ndWGkW13rQBY_3\thorse\ndWGkW13rQBY_5\thorse\ndWGkW13rQBY_8\thorse\ndWVJFIzIKEc_2\tbicycle\ndWVJFIzIKEc_0\tbicycle\ndWVJFIzIKEc_1\tbicycle\ndWXSWEaCId8_1\tperson\ndWdOl13DwwY_0\tairplane\ndWdl9RdXrHo_0\tperson\ndWdl9RdXrHo_2\tperson\ndWd0sszZOXc_0\tperson\ndWesodD0ff4_0\tairplane\ndWgfwKBrSiE_0\tperson\ndWgpYitSv0c_0\tperson\ndWkrnxWB1CU_0\tperson\ndWlDN9Hozgg_0\tdog\ndWtqRwEurDU_0\tperson\ndW1oE_LHALo_0\telephant\ndW4DX7lQoGg_0\telephant\ndW5aU0U7K28_0\tperson\ndW53l1sR_zM_0\tperson\ndXEH9QiCyHk_0\ttrain\ndXEH9QiCyHk_1\ttrain\ndXKi3ZHjgWM_1\tumbrella\ndXLyWGJxHnI_0\tperson\ndXOsaszlVY0_0\thorse\ndXSuppGXFeI_0\telephant\ndXSuppGXFeI_1\telephant\ndXdFEix8vu4_0\ttrain\ndXjUZeuzgaw_0\ttrain\ndXkmG8AR82Q_2\tairplane\ndXkmG8AR82Q_5\tairplane\ndX6W4-sxsX0_0\tcat\ndX9J6yDM5Q8_0\tperson\ndX-4XwYWv48_0\tperson\ndYGOSaGjHQU_0\tperson\ndYQMrQe1pSk_0\tperson\ndYRIEDyD9Qs_0\tairplane\ndYRKwU2TJYI_0\telephant\ndYVcalOS1SE_0\tdog\nEacR2o35-kc_0\tbicycle\nEaeD7utPpTQ_0\tperson\nEakGzU5UgWI_0\tperson\nEakGzU5UgWI_1\tp
erson\nEakGzU5UgWI_3\tperson\nEamZ8De_WFE_6\telephant\nEamZ8De_WFE_0\telephant\nEamZ8De_WFE_2\telephant\nEamZ8De_WFE_3\telephant\nEamZ8De_WFE_4\telephant\nEavqjWy5gag_0\tperson\nEaxszmfn7WA_1\tperson\nEaxszmfn7WA_0\tperson\nEay0MFBCdqY_1\thorse\nEazzsVK1-pM_2\tumbrella\nEbJV0e75xtk_1\tperson\nEbJV0e75xtk_0\tperson\nEbWt1hAb3LQ_0\tperson\nEbXzlcsBsfA_0\tperson\nEbYJAv5c_G8_0\tperson\nEblX3oKGsBA_0\tskateboard\nEb1n2o0YpOM_0\tcow\nEb3sGSIWtCw_0\tperson\nEb7juFDG3Dw_0\tcar\nEcMh5TIKmzY_0\tperson\nEcNpsheyrIU_0\tperson\nEcNpsheyrIU_1\tperson\nEcWrNFz5J-o_1\tdog\nEcpsBV2FEBE_3\thorse\nEcsiLHpIvL4_0\tperson\nEcu8VEIC2y8_2\telephant\nEcu8VEIC2y8_1\telephant\nEcvYBldDm_U_0\tperson\nEdE8zCwJ56g_0\tperson\nEdE8zCwJ56g_1\tperson\nEdIfx7lQxEw_1\tdog\nEdIfx7lQxEw_0\tdog\nEdOvSD40Tb0_0\tcow\nEdTkeITBkvY_0\tperson\nEdTkeITBkvY_1\tperson\nEdaY0DFamDc_1\tskateboard\nEdfKMOIOHtI_0\tperson\nEds-fi9s-O4_0\tperson\nEd486SKW0kM_0\ttrain\nEd-ENhlS7Dg_1\tboat\nEeCjxMzh5_A_0\tperson\nEeDhzR9I-Tc_0\tmotorcycle\nEeLllq2Zim4_0\tdog\nEeMUemitsFU_0\tperson\nEeRqVkQ1Z7Q_0\tcar\nEeRqVkQ1Z7Q_1\tcar\nEeTRT4j5GcQ_0\tperson\nEeYRHJuK3wo_0\tboat\nEeYqy9QZZTU_0\tairplane\nEeb2vPJsaN0_0\tperson\nEee6rmiMYKY_1\tcar\nEesk8VSxpIU_0\tcat\nEetKMgVh0Pk_0\tperson\nEexaBL5jDL4_0\tknife\nEexaBL5jDL4_3\tknife\nEeyjjk9-BvY_0\thorse\nEe7CW7lZfXA_1\tperson\nEe7CW7lZfXA_0\tperson\nEfE6r-Iq5CM_0\tperson\nEfG_eBrAjdI_0\tmotorcycle\nEfHCZUHt0d8_0\tperson\nEfMCesQKyoE_3\tairplane\nEfNSTkpl6dQ_0\tperson\nEfSMsLkasg8_1\tperson\nEfjC0VVD2Do_0\tperson\nEfvRGCuPoF4_0\tperson\nEf1Tm3dKzbY_0\tmotorcycle\nEf2GKdopP_A_0\tperson\nEf7-yzJqZto_0\tperson\nEf9YiYODEbg_0\tcat\nEf9q8mAPYZA_0\tperson\nEf_N7JmICUU_10\tbicycle\nEf_5u21WLbs_0\tcat\nEgDOCraAd64_2\ttrain\nEgHVReOnDpM_0\tperson\nEgPKMlxhH0A_0\tperson\nEgPxUnCFS10_3\tknife\nEgYCBIlDm98_0\thorse\nEgf4iNTfanU_0\tairplane\nEgf4iNTfanU_2\tairplane\nEghxGvj6pTs_0\tperson\nEgl_1FgGUyE_2\tbird\nEgpSSMkQOEE_0\tbicycle\nEgxlP5S15uQ_1\tmotorcycle\nEg6YUwqAQqM_0\tperson\nEg7bJ46L4Cg_0\tairplane\nEg7bJ46L4Cg_1\tairplane\nEg7bJ46L4Cg_2\tairplane\nEg82FN1vC3A_0\tknife\nEg9-5uBMrpc_0\tcat\nEg-cp7jgFA0_0\tperson\nEhF73HJvEWo_1\ttrain\nEhKAs4Z1JE0_0\tperson\nEhSaOGOPUns_0\tskateboard\nEhbaW6F3U6I_1\tperson\nEhbuzBK5bes_3\tgiraffe\nEhbuzBK5bes_2\tgiraffe\nEhcmJOG2Jws_0\tperson\nEhfmC9Wa8xs_0\tperson\nEho09eptX7c_0\tperson\nEhpwK0_8UJA_0\tboat\nEhpz_gcdCcY_0\tknife\nEhpz_gcdCcY_1\tknife\nEhpz_gcdCcY_2\tknife\nEh6FARrS1VY_0\tskateboard\nEh7f9wgtUns_0\tbus\nEh88_JdkWWs_0\tperson\nEh-x-OzZxGo_0\tperson\nEiE9eIJ-Rv4_0\tcar\nEiLWN5T6wko_1\tperson\nEiNTdTOmvDU_0\tperson\nEiUbGE2f6fU_0\ttrain\nEiUbGE2f6fU_1\ttrain\nEiZG3M9_EMc_0\tbird\nEiaYgqLcbqM_2\telephant\nEibdBvTND-I_0\tperson\nEibdBvTND-I_1\tperson\nEine_0RExlI_0\tperson\nEi1XBJFaUeI_0\tperson\nEi1XBJFaUeI_1\tperson\nEi6ZitRjwdA_0\tperson\nEi7n3944Ovs_0\tumbrella\nEi9d8OX0ui0_1\tairplane\nEi9d8OX0ui0_0\tairplane\nEi9724H_wUs_1\tperson\nEi9724H_wUs_0\tperson\nEjcMZ8Y0Oeg_0\tboat\nEjgxtJaNIH8_0\tskateboard\nEj2wn6JRBzA_0\tskateboard\nEj7xV32Trwc_0\tperson\nEj8UwQiT5jk_1\tknife\nEj8UwQiT5jk_3\tknife\nEj_zFc5qxRw_0\tcat\nEkMGStKSilE_0\tperson\nEkMdmPclE3k_1\tdog\nEkTrskvsL5c_1\thorse\nEkWd3wPBEyg_0\tairplane\nEkawSvsvh3g_0\tperson\nEkdP_pWa9s0_1\tairplane\nEke0rATHhX4_0\tperson\nEkh_cm7q1y8_0\tcow\nEklOuZWH-8Q_0\tmotorcycle\nEkyydrsMSkY_0\tperson\nEk1DlGGsUdY_0\tumbrella\nEk4323MkRYo_0\tbicycle\nElJtz3uv-AQ_0\tperson\nElLiin7Cda4_1\tperson\nElLiin7Cda4_0\tperson\nElNzy4USrLA_0\ttruck\nElR4MuOUYKM_0\tbird\nElgmQr70py4_5\ttrain\nElrxptn-Zqo_0\tperson\nEluRnlB_s6c_0\ttrain\nEluRn
lB_s6c_3\ttrain\nElwZ1M6McHo_0\tskateboard\nEl2nzuCxrGk_1\thorse\nEl5fRl-4vko_0\tknife\nEl9Efl32L8w_0\tperson\nEmDjVcaznIA_0\tzebra\nEmDjVcaznIA_1\tzebra\nEmDjVcaznIA_2\tzebra\nEmJeLKaG_hE_2\tbird\nEmJk7hDSzaM_0\tperson\nEmJk7hDSzaM_1\tperson\nEmJk7hDSzaM_3\tcow\nEmWzmxDjjOs_0\tperson\nEmkwHglcEKA_1\tmotorcycle\nEmlvoH2AxWs_0\tperson\nEmqEntvqLw0_0\tairplane\nEmsMjm0VXJc_0\tskateboard\nEm44RLa7Qp4_0\tperson\nEm_UT-f7q0E_1\ttrain\nEnJkvPAMuaM_0\ttrain\nEnJkvPAMuaM_3\ttrain\nEnJkvPAMuaM_1\ttrain\nEnL2FiVIuJg_0\telephant\nEnL2FiVIuJg_1\telephant\nEnS1Yte0Xzw_5\tknife\nEnS1Yte0Xzw_2\tknife\nEnUW7YSmli0_0\thorse\nEnVtYzkXwjM_0\tperson\nEnbXP2xywwk_0\tperson\nEnmwKpZJTQc_0\tperson\nEnoNrjMNAC0_0\tperson\nEnrcDrbyUxY_0\tperson\nEnrcDrbyUxY_1\tperson\nEoaeqRc88HU_0\tperson\nEoallCLchmo_0\tcow\nEodtHMtH9zw_0\tperson\nEojPQY8nQ2Y_0\ttrain\nEouV6Ut4NP8_1\tperson\nEouV6Ut4NP8_0\tperson\nEouZIHzCFq8_0\tairplane\ndYVtJPfJmf4_0\tperson\ndYgPc190feM_0\tperson\ndYgxCdKNrIo_1\tairplane\ndYjCbeBAgYs_0\tperson\ndYmREF5dDkw_0\tdog\ndYosdOz5mZo_0\tperson\ndYr1OKT1lCA_0\tperson\ndYyHudM-fQc_0\tperson\ndYyHudM-fQc_1\tperson\ndYzh49Wr9bQ_0\tairplane\ndY9dlzr4w0Y_0\tperson\ndZFiRqMkFPc_0\tperson\ndZHJc_1os9Q_1\tperson\ndZHJc_1os9Q_0\tperson\ndZHJc_1os9Q_2\tperson\ndZMQgxFHQPA_0\ttrain\ndZQ2o-4a5tU_0\tperson\ndZSQXDQcafc_0\tknife\ndZUOCWwr2xs_0\tknife\ndZaFo3C_1ts_0\tperson\ndZdvK41DxLI_3\tcar\ndZio0uN6DHY_0\thorse\ndZio0uN6DHY_1\thorse\ndZjnkqYO2lE_0\ttruck\ndZmG64W2CtM_2\tumbrella\ndZmG64W2CtM_0\tumbrella\ndZsXB4o-wdE_0\tairplane\ndZzfVDrmMj0_0\tbird\ndZzfVDrmMj0_1\tbird\ndZ1vVETiQAQ_0\tperson\ndZ6ub2CEvbg_1\tbicycle\ndZ6ub2CEvbg_2\tbicycle\ndZ6ub2CEvbg_3\tbicycle\ndaBl0Q92zLE_4\tbear\ndaBl0Q92zLE_0\tbear\ndaIJjuHo2EQ_0\tcow\ndaMcE2oorrE_1\tperson\ndaWo89I2Tuo_0\tskateboard\ndaWo89I2Tuo_1\tskateboard\ndaWywQD6R4g_8\telephant\ndaWywQD6R4g_0\telephant\ndaWywQD6R4g_2\telephant\ndaWywQD6R4g_4\telephant\ndaWywQD6R4g_5\telephant\ndaWywQD6R4g_6\telephant\ndaXEykL8UQ0_0\thorse\ndaZHZXfmY7k_0\tcat\ndaaHTdFcx5o_0\tboat\ndaaX2TXbYmo_2\tairplane\ndadAGYt0vS0_1\thorse\ndalHUNR5yAA_1\tperson\ndan-4YoB-Vw_0\tperson\ndaoysu5sfUQ_0\tperson\ndapxBMe8Wz8_1\tperson\ndaqWFFdK8Ck_0\tperson\ndawGJDtHlcs_0\tperson\nda4jNzO5wL0_0\tperson\nda61HPBGEwo_0\tbicycle\ndbU6Fn_5bHI_0\tbus\ndbXr-9m66-U_0\tperson\ndbdhdeVMuL0_0\tbird\ndbhGB6XW3fM_0\thorse\ndbxb42TzQ_g_0\tskateboard\ndbysY1V2TwI_0\tperson\ndby-fBGIPRU_1\tboat\ndby-fBGIPRU_4\tboat\ndb9i2fI8dv4_0\thorse\ndcADt99ndxg_0\tperson\ndcADt99ndxg_1\tperson\ndcBMrHLTvPo_0\tperson\ndcEW4y5AI1E_1\telephant\ndcHcm85hd5s_2\tbear\ndcH304rxwLY_0\tperson\ndcJN3WJZLOE_0\ttrain\ndcLR55c41rg_1\thorse\ndcLoVk60Gkg_0\tcow\ndcLoVk60Gkg_1\tcow\ndcLp5mtSkPA_0\tcow\ndcO5id4LTVE_0\tperson\ndcO5id4LTVE_1\tperson\ndcO5id4LTVE_2\tperson\ndcO5id4LTVE_3\tperson\ndcO5id4LTVE_4\tperson\ndcUA_Wf8vrc_2\tskateboard\ndcXdmOY1YCw_0\tcar\ndcXdmOY1YCw_1\tcar\ndcblbU5lyQU_0\tperson\ndcdXiEQkghM_0\tperson\ndcdXiEQkghM_1\tperson\ndcf4zn9wOjM_1\tperson\ndcj9u89LAu8_0\tumbrella\ndcoFS0-09xc_0\tperson\ndcoFS0-09xc_1\tperson\ndcwbXzJsVDw_1\tcar\ndcxhSnf9sg0_1\thorse\ndc1_WHDpL3w_0\tperson\ndc-BpV5fuQM_2\tcow\nddK4WXTyoWw_0\tcow\nddPN4QZuLBE_0\ttrain\nddPxOsA2Cro_0\tperson\nddPxOsA2Cro_1\tperson\nddW0MYEUWlc_0\tperson\nddaqR7COVYo_0\tperson\ndddKAnk7-hQ_0\tumbrella\nddlPux88liU_0\tperson\nddruq0KhCxM_1\tskateboard\nddsTE3NwHyM_0\tperson\nddtNIDCxqCk_0\tperson\nddw0wDJgJwM_0\tperson\nddxQR-NB6E4_0\tperson\nddzrzJEogWQ_4\tmotorcycle\nddzrzJEogWQ_6\tmotorcycle\nddzrzJEogWQ_0\tmotorcycle\nddzrzJEogWQ_1\tmotorcycle\nddzrzJEogWQ_2\tmotorc
ycle\nddzrzJEogWQ_3\tmotorcycle\nddzrzJEogWQ_5\tmotorcycle\ndd0CsqY6Fbo_0\tairplane\ndd8a6btF_B4_0\tperson\ndeDEnw72hQk_0\tperson\ndeNoMwyFOO4_0\tperson\nded6WOfO9O8_1\tperson\ndeep6EOo6ds_0\tperson\ndeihMrgBXEc_0\tperson\ndelKGPVRJsY_0\tperson\ndemxgFkqGxA_0\tbus\ndeqo50gGTBo_1\tairplane\ndew_lb_L9hE_0\tperson\ndezAUC4KbJI_0\tperson\nde1f8qTDYUI_0\tperson\nde2HZ6DBOuM_0\tperson\nde4mcJTPj48_0\tperson\nde4mcJTPj48_1\tperson\nde7-gbLffxs_0\tcow\nde8KeV2waGY_1\tperson\nde8V1ovs5eM_0\tperson\nde_fGa7Zxus_0\tperson\ndfAvID4lRsE_0\tperson\ndfAvID4lRsE_1\tperson\ndfDTR9mCUZI_2\tdog\ndfEF6SMFbGM_0\tskateboard\ndfKBB3-VicU_0\tbus\ndfK1HsVc2B0_0\tperson\ndfh2lETTLZI_0\tskateboard\ndfp4iVaXCpg_0\tskateboard\ndfqLJxxdinA_0\ttrain\ndfsTKKT5-UU_0\tperson\ndfseA2X5Cow_0\tperson\ndf_PzyC0gTw_0\tcat\ndf_SYY4pb3I_2\tcow\ndgGYa05XpYo_0\tskateboard\ndgIsZXSKACE_0\tperson\ndgOQKwvhLpE_1\tdog\ndgTYRveHMjM_0\tcat\ndgYN1OH5oc0_0\tzebra\ndgl2b2bRpq0_0\tperson\ndgtaJOOOtKg_0\tperson\ndgweyIjmmDY_0\tcat\ndgyGZqXgvag_0\tperson\ndg6u7R87Gh4_0\tperson\ndhFII58PWhI_0\tperson\ndhIL9wRZMm0_1\thorse\ndhIt9lg6Sbw_1\tboat\ndhUG1gnTlso_0\tdog\ndhZ-JmFNyak_0\tperson\ndhcVp1GmJyI_0\telephant\ndhcVp1GmJyI_1\telephant\ndhgs2glg_N8_1\tperson\ndhgs2glg_N8_0\tperson\ndhiYTV7DJLY_0\tcow\ndhjeKi58cuU_0\tcow\ndhkFVTvJ6ZU_0\tcat\ndhy85XNJT3c_0\thorse\ndh03d5vq1B0_2\tdog\ndh1XFXciUf4_3\tbus\ndh1XFXciUf4_2\tbus\ndh6zZFXD0_c_0\telephant\ndiDDNe-MVfs_1\telephant\ndiMmgSNBO8k_2\tperson\ndiRn1fE6zMg_0\tperson\ndiSTaGHORrc_0\tperson\ndiSZzd4jM0E_0\tperson\ndiUCxWmV084_0\tperson\ndiZ-mRLPpqI_0\tperson\ndidB6Es7Des_0\tperson\ndidTjworKXg_2\tumbrella\ndif0t09rdZg_1\tcow\ndioELry6bbk_0\tairplane\ndix7GRytfcw_0\tperson\ndix7GRytfcw_1\tperson\ndi1KJ0Mb5M8_0\tdog\ndi2TPYyIeWc_0\tdog\ndjIw9AQoU3o_2\tperson\ndjLPrNtPSY8_0\tperson\ndjLUJy1sWMg_0\tcat\ndjNzrBpqnnY_0\tcar\ndjSxYfG99k8_0\tcar\ndjaGBINLXTQ_0\telephant\ndjh9QeYLg7M_0\tairplane\ndjiTvgkjTW4_0\ttrain\ndjiTvgkjTW4_3\ttrain\ndjiTvgkjTW4_4\ttrain\ndjiTvgkjTW4_5\ttrain\ndjiTvgkjTW4_7\ttrain\ndjlet5--ZW0_0\tperson\ndjlet5--ZW0_1\tperson\ndjpCG2oprrA_1\tperson\ndjvQyzGNp7o_0\tperson\ndj2Qk--KIkk_0\tperson\ndj6yGGCBFWc_0\tperson\ndj8d91U-F_0_0\tperson\ndkQWD9hv4fo_1\ttrain\ndkQbDCav3eM_1\tperson\ndkSetHNXnNY_0\tcow\ndkb-6x7zo5E_0\tperson\ndkdCTCL5imo_2\ttruck\ndkdCTCL5imo_3\ttruck\ndkdCTCL5imo_4\ttruck\ndkiOcFZwrA0_0\tbear\ndknj-Sv4HUs_1\tperson\ndkpsViIYlsI_0\tcow\ndkw4aWG9l6E_0\tbear\ndkw4aWG9l6E_4\tbear\ndkw4aWG9l6E_5\tbear\ndkxLcr2kvIM_1\thorse\ndk3Nf8K3RzI_0\tboat\ndk4gT0vHgeU_0\tperson\ndk6h_GL9OZo_0\tperson\ndk7QISqnWZc_0\tbird\ndk7juEuA2is_0\tbear\ndk7juEuA2is_2\tbear\ndlAMvsjssrY_0\tperson\ndlDsSVM3JJ8_0\tperson\ndlG7MtSpAK4_0\tperson\ndlIG99k9Hoo_0\telephant\ndlIkYaty1Uw_0\tcar\ndlNMnGKJJjU_0\tcow\ndlQ1Gr54T74_11\tbicycle\ndlQ1Gr54T74_14\tbicycle\ndlQ1Gr54T74_5\tbicycle\ndlVOuZK_1bY_1\tperson\ndlVTSnDsl38_1\tknife\ndlW_HPbVriI_1\ttruck\ndlW_HPbVriI_3\ttruck\ndlW_HPbVriI_0\ttruck\ndlW_HPbVriI_2\ttruck\nEpH59JsxI3w_0\tcar\nEpIb8r7uBqM_0\tperson\nEpJ_M6rB_PA_1\tbird\nEpOaQjhIh_M_0\tairplane\nEpP_TLXxb7Y_0\tcow\nEpSURaF1BfY_0\ttruck\nEpT8zxDFPf8_0\tcow\nEpVdzlk5GYU_0\ttruck\nEpd3r6iiqVk_0\tbicycle\nEpeIZCFbjw0_0\tskateboard\nEpnttpyYTAo_0\tperson\nEpoqtu0Pqe4_0\tcow\nEp8bd1STWKw_0\tmotorcycle\nEp81Lk66O50_0\tperson\nEp84L7WDoyE_0\tperson\nEqBJeYu5f_E_0\telephant\nEqBJeYu5f_E_3\telephant\nEqHBjvHkvf0_0\tperson\nEqJR5UZAlSg_1\tcar\nEqLYPeo9ZC0_0\tperson\nEqMqvcHp8Ko_0\tcar\nEqMqvcHp8Ko_1\tcar\nEqSYKCxmeDA_0\tdog\nEqSYKCxmeDA_1\tdog\nEqh7XqsYl5M_2\tperson\nEqmnFPweBmk_1\tboat\nEquATbp9uL0_0\tperso
n\nEquATbp9uL0_1\tperson\nEqvMMBAZP2o_0\tperson\nErUllSQJNgI_4\telephant\nErUllSQJNgI_5\telephant\nErWUOje4g8Q_0\tmotorcycle\nErX04vJ-JcU_0\tcat\nErf0FkqYsTE_0\tperson\nEro36xFQKS4_0\ttruck\nEro36xFQKS4_1\ttruck\nEr4yJXTWNNo_3\tbicycle\nEr4yJXTWNNo_4\tbicycle\nEr4yJXTWNNo_5\tbicycle\nEr5D0fXZsjk_0\tperson\nEr9tboOA5k8_0\tperson\nEsEreMKZP7Q_1\tperson\nEsQ05q5ZZVM_3\tskateboard\nEsQ05q5ZZVM_5\tskateboard\nEsQ05q5ZZVM_2\tskateboard\nEsYZbF7hCTE_0\tperson\nEsZV26-jxX8_0\tmotorcycle\nEsbWwOYbT8Q_0\ttrain\nEskqA8x8mX4_2\tairplane\nEsrUSkNrqWs_1\tperson\nEs0O5wtTZ2Q_0\tperson\nEs9GOUryI0U_0\tperson\nEs9Yq8uZ4fA_0\tperson\nEs-W0AxQ5Us_1\tcow\nEtebDuK3fUY_0\tperson\nEtlKR9-Q2dk_0\tperson\nEtx8YkcrSF8_0\tperson\nEt0RRuaW-Rg_1\tdog\nEt1PKq61KAk_0\tperson\nEuETmswYRrs_0\tcow\nEuHJB5UXmZg_0\tumbrella\nEuHvelij5ao_0\tcat\nEuIGG3PoslE_0\tperson\nEuInxfWuqqA_0\tperson\nEuZnOeXR020_0\tperson\nEua2VIbXEMs_0\tboat\nEufXUqphYVw_0\tperson\nEumfsHXsVGk_0\tperson\nEunz2V1RXXo_0\ttrain\nEurWaA7qCDw_0\tbear\nEuwjSGtSYlY_0\tperson\nEuwjSGtSYlY_1\tperson\nEuzDIk8ag30_0\tperson\nEuzVaAXsy4o_0\tmotorcycle\nEu0nzh2HQNk_0\tperson\nEvDZK2cFYVE_0\tmotorcycle\nEvGoGf-YCA8_0\tbicycle\nEvKPt0vynKY_0\ttruck\nEvN8x67_EQ0_0\tperson\nEvZF9DagIoQ_4\thorse\nEvZF9DagIoQ_0\thorse\nEvZF9DagIoQ_1\thorse\nEvmcyDEPnoA_1\tskateboard\nEvvbUe6FBSM_0\tbird\nEvvij-hmE4A_0\tperson\nEwBKceBTBbo_0\tdog\nEwBwIUrHR3o_0\tperson\nEwBwIUrHR3o_1\tperson\nEwDyryqt94g_4\tairplane\nEwDyryqt94g_5\tairplane\nEwKIz0qAvKQ_0\tperson\nEwSJeylFWsY_0\tperson\nEwUGFtWeyMA_0\tperson\nEwUeAvO5mrE_0\tcow\nEwU8puKxN8Y_0\tperson\nEwU8puKxN8Y_1\tperson\nEwWCc9whfDI_0\tcow\nEwYNowdS57c_0\tperson\nEwet2EA1xX8_1\telephant\nEwet2EA1xX8_2\telephant\nEwet2EA1xX8_0\telephant\nEwozH_35SDg_0\tperson\nEwq-V9jATzg_0\tperson\nEw8lEc8Ufi8_1\tbus\nExCPGilpuMM_0\tperson\nExCPGilpuMM_1\tperson\nExCjkt_zXuw_0\tperson\nExCjkt_zXuw_1\tperson\nExJjWM_rAnI_3\tairplane\nExJjWM_rAnI_1\tairplane\nExPBVcERfwY_1\tperson\nExPBVcERfwY_3\tperson\nExPBVcERfwY_4\tperson\nExT3xg9phtQ_0\tperson\nExVHmko3jfY_0\thorse\nExW1ju88BW8_0\tcat\nExb1TjMi76I_0\tboat\nExc3W9o5-04_1\thorse\nExe2EizU9VQ_0\tcow\nExe2EizU9VQ_1\tcow\nExfZl3DY8JM_0\tperson\nExfZl3DY8JM_1\tperson\nExl9alp64lE_1\tperson\nExqpcHBGBlw_1\tperson\nExvcP05yrS0_0\tperson\nExvcP05yrS0_1\tperson\nExxZODpPkQQ_1\ttrain\nExz2WL2-kR0_0\tgiraffe\nEx4__JMKkqI_0\tperson\nEyMzZV5iTEA_0\thorse\nEyP_0uEuXVs_1\tbear\nEybT7tq6XGk_0\tperson\nEymmgPoUyuM_0\tperson\nEymmgPoUyuM_1\tperson\nEymmgPoUyuM_2\tperson\nEyn7IfnWm4o_0\tairplane\nEyn7IfnWm4o_3\tairplane\nEyn7IfnWm4o_1\tairplane\nEyn7IfnWm4o_2\tairplane\nEyp8nornJW0_0\tbear\nEyp8nornJW0_1\tbear\nEyrfi9lGdoo_1\tairplane\nEyuKu6qMB6g_0\tperson\nEywYZ3Gjwuc_0\tperson\nEywnxH68jDU_0\tcow\nEyzwbz1ZxmU_0\tcat\nEy2TgrQ30Z0_1\tbicycle\nEy2TgrQ30Z0_2\tbicycle\nEy36TlCS4rQ_0\tperson\nEy4BLGQL2Bg_0\tbear\nEy7eosaz0zU_0\tperson\nEy7us0SSVAs_0\tairplane\nEy7us0SSVAs_2\tairplane\nEy7wIzCkFU4_0\tperson\nEy7wIzCkFU4_1\tperson\nEzC0tuKaVGA_0\tperson\nEzEX4OUEHHQ_1\tskateboard\nEzGa4SSPsbI_0\tbicycle\nEzYjRjhff20_0\tperson\nEzZEWp1cluc_0\tperson\nEzeDITt3y5I_0\tperson\nEzeDITt3y5I_1\tperson\nEzlyx_EudUQ_0\tperson\nEzlyx_EudUQ_1\tperson\nEzuizVcVbSA_0\tperson\nEz6I4TpzC5I_0\tperson\nE0K5Ll7wHUw_0\tbird\nE0YZDyUoHTM_0\tknife\nE00cOMpNw3o_0\tmotorcycle\nE01EgIBFxRk_0\tperson\nE038teDC3EM_0\tperson\nE0-Z0KM1UB4_1\tperson\nE1AwHXQ00ns_0\tperson\nE1MTmF3FAN0_0\tbicycle\nE1NfSTmGCRE_0\tknife\nE1ZhuBRYvKY_0\tcow\nE1bNSKg9iv8_0\thorse\nE1oEO09-bAw_0\tdog\nE1pmsS_ufrs_0\tperson\nE1xPwEvYymk_1\tperson\nE1xPwEvYymk_2\tperson\nE1xPwE
vYymk_0\tperson\nE1zxNG3Fglo_0\tbird\nE17S76lXHfI_0\tperson\nE1_ETAQHwcM_0\tperson\nE2O5Y6VAhIc_0\tperson\nE2O5Y6VAhIc_1\tperson\nE2Pobz5qoAE_0\tperson\nE2Pobz5qoAE_1\tperson\nE2Vqlq1BQYs_0\tairplane\nE2WWQOKGeb4_0\tskateboard\nE2aiCls-clY_0\tperson\nE2lj1iRVceA_0\tskateboard\nE22IW-PgLfU_0\tperson\nE28Cad7vBrw_0\tperson\nE28Cad7vBrw_1\tperson\nE29-bZY3lEo_0\tairplane\nE3NmlH6taDs_0\ttruck\nE3SKOBDl6u0_0\tperson\nE3enDSeq6P0_0\tperson\nE3tmvYSpQSQ_0\tperson\nE35M5UWMXeE_0\thorse\nE35M5UWMXeE_2\thorse\nE4Bl9c7JbYs_0\tperson\nE4DFW1SxJfY_0\tdog\nE4DFW1SxJfY_2\tdog\nE4TfSUdVt8U_1\ttruck\nE4pulnGY9X8_1\tperson\nE43SZ65LnfY_0\tcow\nE45LqepDuqg_1\tperson\nE5BtXla2lCQ_0\tbicycle\nE5CQkNJct6Q_0\tmotorcycle\nE5HB-EDNtE8_0\tperson\ndlZZzrMO6yY_0\tperson\ndlbAWAuByWk_0\tperson\ndlcovhFKigE_0\tperson\ndlh5RGS5Bzw_0\tbird\ndlkVXsIhcZg_1\tperson\ndlo83yH621I_1\tcow\ndl2g71ftw9A_3\ttrain\ndl2g71ftw9A_4\ttrain\ndl2g71ftw9A_5\ttrain\ndl6ogvuxF78_0\tperson\ndl_fuQYhAP8_0\tperson\ndmDdRd6wULk_2\tdog\ndmJ1DuWiAdM_0\tperson\ndmMz5FhGOCc_1\tperson\ndmVAi4WMi3M_0\tperson\ndmVAi4WMi3M_1\tmotorcycle\ndmVAi4WMi3M_3\tmotorcycle\ndmW77KHtuCQ_0\thorse\ndmYSNG-7VCg_0\telephant\ndmfX7DsSS1k_0\tbicycle\ndmuWxnAfMn4_0\telephant\ndm4rFNN7FZQ_0\ttruck\ndm-lOmiP2d8_0\tcow\ndnAQ7q60f_g_0\telephant\ndnB0we4_DrY_0\tcow\ndnB6auv8PBk_0\tperson\ndnFZkG7_E1w_0\tperson\ndnNh07bnI_s_0\tcat\ndnUXo5nstys_0\tperson\ndnVV1s-LcAY_0\tperson\ndnY-4hOzYts_1\tperson\ndncQtuB_6qA_0\tmotorcycle\ndncxd1B2sLk_0\tgiraffe\ndnwqVE3lPyY_1\ttrain\ndnwqVE3lPyY_2\ttrain\ndn_r7u_5apk_0\tskateboard\ndoHOuG6wqXY_0\tmotorcycle\ndoSDuIGLFXY_0\tcat\ndoTj5H8Uf1I_0\tcow\ndoUwj_z1x5o_0\telephant\ndoX3oiADm_s_1\tperson\ndomu9ia2Vo8_0\tperson\ndorx67yK7WU_0\tbird\ndovn1QHCR7o_0\tperson\ndowbL0CZ5do_0\tbicycle\ndo1QIWrYeW8_0\tperson\ndo5o5Dw0vPc_1\telephant\ndo5o5Dw0vPc_4\telephant\ndo7abiC5aZk_1\tcar\ndo82ENX9cOc_0\tperson\ndo-LmSJTPj4_0\tskateboard\ndpDG64ULlUg_0\tboat\ndpGCSoTITrw_2\telephant\ndpJWbIaQYoI_0\tperson\ndpJWbIaQYoI_1\tperson\ndpQP5r61_GQ_0\tperson\ndpUorqkSYZE_0\tdog\ndpYYMgh5TS0_0\ttruck\ndpcwUs5srlc_0\thorse\ndpi0u6pfCTM_0\tperson\ndpjLyHb9AyI_0\tperson\ndpkF3SwOunc_0\tdog\ndpn6vUVXBuM_2\tumbrella\ndptZbHZQYPM_1\tdog\ndptZbHZQYPM_2\tdog\ndpxGzRQqAaU_0\tmotorcycle\ndpxGzRQqAaU_1\tmotorcycle\ndpxGzRQqAaU_2\tmotorcycle\ndpxVPiv62SY_0\tperson\ndp2cUWhnP0A_0\tknife\ndp3Q_aTYeJ4_0\tperson\ndp_JQh45a50_0\tperson\ndp_1VrEUWbU_0\tperson\ndqCFYWRf9g8_1\telephant\ndqDLl7BlAAA_0\tskateboard\ndqFRS9o1CSU_1\tperson\ndqOoL5LiXc8_0\tboat\ndqQPbKE4UhQ_0\tperson\ndqTlCZzLk6A_1\tcow\ndqWEwvhVNiI_0\tperson\ndqavRiIA-38_0\ttruck\ndqj-msAUvnc_0\tcat\ndqzc4W6f-x4_1\tperson\ndrAhAL_F38Y_0\tperson\ndrAh2lmjDs4_0\tskateboard\ndrJGoPHMunk_0\tperson\ndreDU-1isrI_0\tperson\ndre_PgfS8yw_0\telephant\ndrf5ijiEkUo_0\tperson\ndrm2oJ3X1HM_0\tperson\ndrqFwF60pgE_0\tairplane\ndrqFwF60pgE_1\tairplane\ndrqFwF60pgE_2\tairplane\ndrqe2hP0PKI_0\tperson\ndr3TumG_tlI_1\tcow\ndr4dU5UDF-Q_0\tperson\ndr8s5VC9Fxg_1\tperson\ndsLbM2wZHrc_0\tdog\ndsPwJ3J1ZKA_0\tperson\ndsTR1vv9XLE_0\tperson\ndsTR1vv9XLE_1\tperson\ndsUuAVsJSi4_1\tmotorcycle\ndstcI7MYsZ0_0\tperson\ndsyBSejpe-k_0\tperson\nds1BJMsasQI_0\tperson\nds6FmQYwgYw_0\tskateboard\ndtDGbuCwBuY_0\tbicycle\ndtHgnX0NtxE_0\tperson\ndtMbzXL9wO4_2\tbear\ndtOFqz41TJ0_0\tbird\ndtR2UeJbIvg_0\tperson\ndtWfbusf4Es_0\thorse\ndtYdUj-d8fA_0\ttrain\ndtZrB9iDzgQ_0\thorse\ndtlUL4D7_NM_0\tbird\ndtvZaXxNgKQ_0\tperson\ndtwUG12h74g_0\tperson\ndt8Tngmse50_0\tbicycle\nduOX3z4IJSY_0\tperson\nduTvmDpj0sI_3\tboat\nduTvmDpj0sI_2\tboat\nduV82Wn9rXk_0\tcar\nduZYUVeDXEM_0\tca
t\nduaO7S-EH1A_1\tperson\nducdg4KXQsg_0\tperson\nduoFWPZbeNc_0\tperson\ndupnmzaPsWA_6\telephant\ndutp3txJPTY_0\tperson\nduvuNqufLjs_0\tcow\ndu5hbB5w3UU_0\thorse\ndu96VR7vtOk_0\tbird\ndvKKmu56UkE_0\tperson\ndvS2DSYGOGg_0\tperson\ndvbVbBosw38_1\tperson\ndvgf3R9k0uY_0\tbird\ndvur4MZD_yc_0\tperson\ndvur4MZD_yc_1\tperson\ndv0ptUC-DIE_0\tperson\ndv6ymk8duso_0\tbird\ndv_KURooPDU_0\tperson\ndwbRsYPV7Ag_0\tperson\ndwpopXTeeGc_0\tperson\ndwrYJ92znpw_0\tcat\ndwy7k_gtEco_1\tboat\ndw8kejnR7L4_0\tperson\ndw-2_KqGeYY_1\tbird\ndxFrLHoW9jI_0\tmotorcycle\ndxGlDl4IukI_0\thorse\ndxGlDl4IukI_1\thorse\ndxViI6VXh6Y_0\tdog\ndxn8VDPNvJM_0\tperson\ndxq9r-qrJ2A_0\tperson\ndxsQn1MuZRA_0\tbus\ndx0z7DYxGSw_0\tperson\ndx4rtOOz7tA_1\tumbrella\ndx6ucdpKZP0_0\tperson\ndx8nEHWD1xc_0\tperson\ndyAC2ey1DQU_4\tbird\ndyJ83t1zgkU_0\tskateboard\ndyPMbIsTtFs_0\tperson\ndyPt3VKGZPo_1\tperson\ndyR4vnjF5do_0\ttruck\ndyZixtbxEE4_1\tperson\ndym-lDsiSTM_0\tboat\ndyt8LtUqIMU_0\tboat\ndyy3oxsiErU_19\ttruck\ndyy3oxsiErU_14\ttruck\ndy2J0aeX5eQ_0\ttruck\ndy3nkqKOjbk_0\tperson\ndy6zETD5NFo_0\tcow\ndzEKq7fsVnQ_0\ttrain\ndzNRDfnNbeE_1\tperson\ndzS2ClyakEg_0\ttruck\ndzXv_YFLPqg_0\tperson\ndzahMuEcbCM_0\tcow\ndzeNnQOePGs_0\tperson\ndzhSVb26d7Q_0\tumbrella\ndzoQb8C3vxE_0\tperson\ndzsHYOJpBbY_0\tbear\ndzv-u3s_YtI_0\tperson\ndzyVndvBofo_1\thorse\ndz3SP1rd9zE_0\tcar\ndz_ATSJBx6k_0\tairplane\ndz_ATSJBx6k_1\tairplane\nd0J7uodSxF8_2\tmotorcycle\nd0NY8eqs19s_0\tmotorcycle\nd0NtMMBjQp0_1\ttruck\nd0NtMMBjQp0_2\ttruck\nd0NtMMBjQp0_0\ttruck\nd0RIwZfoGNg_0\tperson\nd0ZEYzyD9Vg_0\tperson\nd0b8-K_6D68_0\tumbrella\nd0hJditcWj4_0\tperson\nd0hQQC2i1Y0_0\tperson\nd0hQQC2i1Y0_1\tperson\nd0hdtlKidzs_0\tperson\nd0h9QWelhII_0\tboat\nd0lVKBOzOQ0_0\tskateboard\nd0qGN1A7XJA_0\tperson\nd0vHpkvShqg_0\tgiraffe\nd0vUARlHvjc_1\tcow\nd0v47QFRyvg_0\tperson\nd0v47QFRyvg_1\tperson\nd00UKAQHK2A_0\tperson\nd02xOzIVP-s_0\tperson\nd04Dr38addQ_0\tairplane\nd09H7U6x-Fc_0\tcat\nE5OHeMbBp9s_1\tgiraffe\nE5RbbN1bPN8_0\tperson\nE5YibOn90Co_0\tskateboard\nE5b9Yug5vbk_0\tbicycle\nE5b9Yug5vbk_4\tbicycle\nE5b9Yug5vbk_1\tbicycle\nE5b9Yug5vbk_2\tbicycle\nE5b9Yug5vbk_3\tbicycle\nE5dBaFyBYX0_0\tairplane\nE5me_giHEOE_1\tperson\nE5trQkGM3Wk_0\tcat\nE5wZ4pk5X0I_1\ttrain\nE59OnpOGBLU_0\tskateboard\nE6Am4hIuXvk_0\tperson\nE6Avey2AVRM_1\tperson\nE6A8vfHTdOQ_0\tperson\nE6EtoMfo384_0\ttrain\nE6EtoMfo384_1\ttrain\nE6GvpwdOQrw_2\ttrain\nE6GvpwdOQrw_3\ttrain\nE6GvpwdOQrw_8\ttrain\nE6JLxU918TE_0\tbicycle\nE6XGO0hx4N8_0\tperson\nE6Y2QsetU0M_1\tperson\nE6s0XT5G7Eo_0\tbird\nE6s0XT5G7Eo_1\tbird\nE6uGh-cPDjI_0\tbicycle\nE62w4NFSm5E_1\tdog\nE64d0EH39M4_0\ttrain\nE67ceZopcqQ_0\tperson\nE67ceZopcqQ_1\tperson\nE68IhhK04s0_1\tgiraffe\nE68IhhK04s0_2\tgiraffe\nE7BIM8cnCrc_2\ttrain\nE7BIM8cnCrc_0\ttrain\nE7F0Gt3Rea4_1\tperson\nE7F0Gt3Rea4_0\tperson\nE7LY2yKO0Jg_0\tknife\nE7MvCesCxNk_0\tknife\nE7dG4qPI_QY_1\tknife\nE7eYGQjaVYs_0\tbird\nE7hXPqOOiqo_1\tboat\nE7hXPqOOiqo_0\tboat\nE7qoCZ2e-vQ_0\tdog\nE7rhwzBxMqY_0\tperson\nE7zwjNToyao_0\tperson\nE70FO7I2AQ0_1\tperson\nE70FO7I2AQ0_0\tperson\nE76moy2SQhA_0\tperson\nE8JYTxKfqmQ_0\tboat\nE8OzYJ2gVAs_1\tbicycle\nE8OzYJ2gVAs_2\tbicycle\nE8OzYJ2gVAs_3\tbicycle\nE8RSSepY8tk_0\tperson\nE8R5lzlo5qw_0\tmotorcycle\nE8Xxr8SUaEY_0\thorse\nE8h4YnZbJg4_1\tperson\nE8n_eTUwyhc_0\tperson\nE8pbsHhMGOw_0\tperson\nE842T5CgJfk_0\tperson\nE854nPMWssI_0\thorse\nE8-Z9saoTjk_0\tperson\nE8_NjWtQtgI_1\tcar\nE9J2Brm4LSg_0\ttruck\nE9J2Brm4LSg_1\ttruck\nE9N59GTZ8uE_0\tperson\nE9R_qLxcZdY_0\tbird\nE9S5Tk5r2wU_0\tperson\nE9ZjM9SY__o_1\tperson\nE9ZjM9SY__o_0\tperson\nE9sCn_XaSHw_1\tbicycle\nE9sHGoiMmXc_0\tperson\nE
9zmtafFrCo_0\tdog\nE9-1FSPKZ7k_0\tperson\nE-DE7HZ04WY_1\tperson\nE-OdBMMpwlo_0\tumbrella\nE-VRMpgKXIE_0\telephant\nE-VRMpgKXIE_7\telephant\nE-VRMpgKXIE_1\telephant\nE-VRMpgKXIE_2\telephant\nE-VRMpgKXIE_4\telephant\nE-YDPyDXtR8_0\tcow\nE-h1XNBlqsE_0\tperson\nE-pnZZeRFyQ_0\tperson\nE-q9j7xipsA_0\tcat\nE-seUZ3B-Ts_0\tmotorcycle\nE-zFmY_9LWk_0\thorse\nE-0FMMDuLw8_0\tperson\nE-3jsRP7KHc_0\telephant\nE_En6n1IyBw_0\telephant\nE_GC0IeKtu4_0\tperson\nE_K6zdkr0mo_1\tperson\nE_Xi5uEIiec_2\tbicycle\nE_e6E8T7on0_0\tdog\nE_02tA9RLyw_0\tumbrella\nE_7qbAkVDYE_0\telephant\nFAKE4Rfwdik_0\tperson\nFATjlgllzBU_0\tperson\nFATjlgllzBU_1\tperson\nFAdlwBJZk78_0\telephant\nFAeK9y98GL8_0\tzebra\nFAiIhoJh5uQ_0\tcat\nFAm6HgSzPTA_0\tcow\nFAn11rZ-gsU_0\tperson\nFAqiar6B2U8_2\tbird\nFAu0yvyjW-Q_4\tboat\nFAu0yvyjW-Q_9\tboat\nFAu0yvyjW-Q_1\tboat\nFAu0yvyjW-Q_5\tboat\nFAx0CsAigS4_0\tmotorcycle\nFA_K15dKk6k_0\tbear\nFBAcUphtxR4_0\tperson\nFBIVWWIbq-8_1\tcow\nFBIawPqElJ8_0\tbus\nFBKIUCHqUQk_0\tskateboard\nFBKIUCHqUQk_1\tskateboard\nFBNFSYoMCNM_0\tbird\nFBOWbksU5pI_0\tperson\nFBOWbksU5pI_1\tperson\nFBjp-C_Sbug_2\tbicycle\nFBjp-C_Sbug_11\tbicycle\nFBnFn5mY2R0_0\tperson\nFBwWw9c4KdY_0\tperson\nFBz0aAYDBFI_1\tperson\nFB8F1ku1XkU_0\tperson\nFCBsCwjCPWU_0\tperson\nFCQB6p_GcDY_0\tperson\nFCRAvY0glAI_0\tairplane\nFCd1d_7Hfpg_0\tumbrella\nFCkT11nk468_0\tairplane\nFClLRpdDi9A_0\tperson\nFCnE02wQQk4_0\tbird\nFCp7AKKYViY_0\tperson\nFC-ONjCL7tM_0\tperson\nFC_gwQU4yrs_0\thorse\nFDJyHtHix-0_0\ttrain\nFDYS2AyPJhc_0\tperson\nFDZBIlbFrk0_0\tperson\nFDej1TTCjP0_0\tumbrella\nFDfaLuM3y5A_0\tperson\nFDkiv1x0OGQ_0\tcar\nFDq3yKNo4Qs_1\tperson\nFDvTPzckQKc_0\tmotorcycle\nFD3pT-lj2tc_0\tcat\nFEM7OGFO_BI_0\telephant\nFEN0F0V1nhg_0\tdog\nFEOAvRWKb-k_0\tairplane\nFEU4yHFzkZs_1\tperson\nFEU4yHFzkZs_0\tperson\nFEWZolQuMv0_1\tperson\nFEfYdrS3kFc_0\tbird\nFEjcdYO4xPo_0\tperson\nFEoFDmI0pxI_0\tairplane\nFEzBza78J4w_0\tperson\nFEzBza78J4w_1\tperson\nFE0DpZ9GXoM_2\tperson\nFE0DpZ9GXoM_0\tperson\nFE0DpZ9GXoM_1\tperson\nFE0Q5phKq3c_0\tperson\nFE0Q5phKq3c_1\tperson\nFE4gj8EYF9k_0\tperson\nFE51Dml-nZY_0\tperson\nFE7iv_llNT4_2\tbicycle\nFE-JTPLk3fI_0\ttruck\nFFCtm1GZH_s_1\tbird\nFFHJUeZ_KKE_1\ttruck\nFFHJUeZ_KKE_2\ttruck\nFFLxkwDj1b0_2\tbus\nFFME8B_6LNA_1\tmotorcycle\nFFQl2DLyjdk_0\tcat\nFFQl2DLyjdk_1\tcat\nFFantnd2gLY_0\tperson\nFFantnd2gLY_1\tperson\nFFd_4DPNyRI_0\tcar\nFFijp_s0YwA_0\tdog\nFFi3nSvA0WY_0\tperson\nFFjqbw4R9l0_0\tcow\nFFm26XU-R7c_2\tperson\nFFm26XU-R7c_0\tperson\nFFm26XU-R7c_1\tperson\nFFndlV1rKas_0\tairplane\nFFpyQ_5PU7M_0\tbus\nFF9eHa3K8fM_1\tbird\nFGO6y3WssIg_0\tperson\nFGQCxd5EAx0_2\tairplane\nFGQCxd5EAx0_3\tairplane\nFGQCxd5EAx0_1\tairplane\nFGcS28ri5uY_0\tperson\nFGdEufjjhtg_0\tperson\nFGicL13npRI_0\tperson\nFGkNC4hzcfM_0\tperson\nFGkx6qk4oDk_0\tperson\nFGmjmDC1RoU_1\tskateboard\nFGmjmDC1RoU_0\tskateboard\nFGoutavzP5Y_0\tperson\nFGqrkJ3h0DA_0\tskateboard\nFG0PrdHReB0_2\tperson\nFG5l2wX8ccA_0\thorse\nFHAj71IwE7E_0\tskateboard\nFHA6nVCnv28_0\tperson\nFHB5eraeYEw_1\tknife\nFHJupOaUmtQ_1\ttrain\nFHOLOunv9Ec_0\thorse\nFHTc_V_05W0_1\tbird\nFHT1DAZpJVY_0\tcow\nFHZ-3pbJQrY_0\tbird\nFHgO4zu5RGA_0\tperson\nFHu50D73Fzo_0\tperson\nFIA67WzAuNs_0\tbear\nFIA67WzAuNs_1\tbear\nFIB12MYkANg_1\tbear\nFIDI0sZMPVU_0\tperson\nFIGhnuJWX5M_0\tperson\nFIGhnuJWX5M_2\tperson\nFIHYnB8Jrh4_0\tperson\nFIMbYQASgkk_0\thorse\nFIQ1iL3jVkM_0\tgiraffe\nFIV4OFmfS_s_0\tperson\nFIV4OFmfS_s_1\tperson\nFInOWVIV_go_0\tmotorcycle\nd1N4NJqa_8E_0\tperson\nd1PqtOyYTY0_0\ttrain\nd1PqtOyYTY0_1\ttrain\nd1PqtOyYTY0_2\ttrain\nd1Quy8k5O88_0\tcow\nd1UWs3bPTsc_0\tperson\nd1UWs3bPTsc_1\tperson\nd1YY
giXq3tw_0\tperson\nd1YYgiXq3tw_1\tperson\nd1bzn92PO0c_0\tperson\nd1eo2OWc45Q_0\tcow\nd1tf08A41eo_0\tperson\nd1ukwE8h4f8_0\thorse\nd1wbMXvcgNc_0\tperson\nd1wlubAM1-k_0\tperson\nd10K79pdybE_3\ttrain\nd14rOFFvTg4_0\tperson\nd14rOFFvTg4_2\tperson\nd14rOFFvTg4_1\tperson\nd165nDy63o8_0\tbicycle\nd165nDy63o8_1\tbicycle\nd17kaiZ5Ztc_0\tperson\nd2DRRd9l3TI_0\tperson\nd2RD5tyZt6c_0\tperson\nd2TxcbWHoBM_0\tcat\nd2WfBDEMf40_0\ttruck\nd2ZGi2fOtPY_0\tperson\nd2cDVorBK8s_0\tairplane\nd2cDVorBK8s_1\tairplane\nd2e49A9MnF4_0\tperson\nd2lSueNvuG4_0\thorse\nd2ns5iCGj78_0\telephant\nd2sn_b1z1Vw_0\tperson\nd2wHwCwQymw_0\tperson\nd2zgNRFDpSw_1\tbird\nd203fSHLzv8_0\ttrain\nd21TfucuHss_0\tumbrella\nd217pENbZVs_0\tperson\nd28DHw2okF8_0\tperson\nd3F_Gm514J0_2\telephant\nd3G8COtsJco_0\tperson\nd3MN8Sm5tiY_0\tperson\nd3MVAijPTjY_0\tmotorcycle\nd3P2bH2t8IQ_0\tperson\nd3Wdg9MPgLA_1\tskateboard\nd3Wdg9MPgLA_0\tskateboard\nd3duKA35FEI_0\tperson\nd3jP_YP-6EQ_0\tperson\nd3ro5gubiaQ_0\tperson\nd3ro5gubiaQ_1\tperson\nd3rzFaWiWwA_5\ttruck\nd3sHFgbvhIU_0\tcar\nd33yoN6QyYg_0\tbus\nd36tDEgs-IA_0\tperson\nd4A2uUrnVWI_0\tperson\nd4Cumy6qZPY_0\ttruck\nd4DbIWORtjY_0\tperson\nd4DbIWORtjY_1\tperson\nd4GvMFc_Vqg_0\tknife\nd4Le0GuzhaY_0\tskateboard\nd4QkJdQwkCo_0\tmotorcycle\nd4VJot5IZek_0\tperson\nd4VJot5IZek_1\tperson\nd4WRTfC57h0_0\thorse\nd4b9-LX5V1s_1\tcow\nd4hB6abJCs8_0\tperson\nd4mhHPSo7C8_0\tskateboard\nd4q-0AcOs78_0\tperson\nd4vhL4dar5s_0\tgiraffe\nd4vhL4dar5s_1\tgiraffe\nd45YTUkd_9M_0\tperson\nd47DPSbvftI_0\tperson\nd484zxSSkJM_1\tperson\nd4_lDGwny4k_0\tskateboard\nd5Ao3JBz7WM_0\tperson\nd5B0EMjLeZE_0\tperson\nd5PBtpn_6JQ_0\tperson\nd5gDBPwofbs_0\tperson\nd5gDqlNLGmw_0\tperson\nd5hj8eaC5fQ_0\tperson\nd5jIlHa1Y6o_0\tcow\nd5m8giMORSk_0\tperson\nd53_McJDtt4_0\tperson\nd55FAEl6kfM_0\tcat\nd55rz05ynyg_0\tairplane\nd6AkvjKCaE0_0\tperson\nd6AkvjKCaE0_1\tperson\nd6TWHVESLa8_6\tcow\nd6TWHVESLa8_5\tcow\nd6VCXnnHXGQ_0\tperson\nd6VCXnnHXGQ_1\tperson\nd6YTAD3T2i8_0\tperson\nd6a2EN1cB-4_0\tperson\nd6cgbxc35Ms_0\tperson\nd6mM21E4x-4_0\tumbrella\nd6m3DUG5E7Y_0\tperson\nd6uLbEhrIvw_0\tairplane\nd65wDJoMyA8_0\tperson\nd67YXl13SSo_0\tperson\nd6-bn34gHFc_0\tperson\nd7H5qLPNFz0_1\telephant\nd7cwZ3G7xSU_0\tbird\nd7kWNGqyvRk_0\tperson\nd7mQdSSoZ2E_0\tperson\nd7m0BF65qro_1\tperson\nd7m0BF65qro_2\tperson\nd7n5m9UuhP4_0\tperson\nd7n5m9UuhP4_1\tperson\nd7yxmt8AvOM_0\tperson\nd7yxmt8AvOM_1\tperson\nd71rdGKeKkE_0\tperson\nd74EhPMCxb0_0\tperson\nd7-3m4Nz8fk_0\thorse\nd8CJ5urtRlk_0\ttrain\nd8HIJN0pULI_0\tperson\nd8XcNMVXCD8_0\tbicycle\nd8b-SN3JEvk_0\tperson\nd8dPRbquLuM_1\tperson\nd8dPRbquLuM_0\tperson\nd8t8y3kLzgc_0\tperson\nd84iekZaJHc_4\tknife\nd9JyT5Kko5c_0\tperson\nd9LvxSh5P-Q_0\tperson\nd9OaiymMq0w_0\tperson\nd9PCSJzZTy8_0\tperson\nd9Pj3WrvXXc_1\tperson\nd9S0dKjWhNU_0\tperson\nd9S0dKjWhNU_1\tperson\nd9S0dKjWhNU_2\tperson\nd9YlucRFs0U_0\tperson\nd9cSZXEb_5E_1\tperson\nd9dysX9rdmA_0\tskateboard\nd9hh6urZ5FU_0\ttrain\nd9kzobAaimY_0\tmotorcycle\nd9lIw5maa3M_0\tperson\nd9qijNyVVmU_0\tperson\nd95k-74VSVE_0\tcow\nd-JD-mAXyIA_0\tdog\nd-Mnc38YAmw_8\ttruck\nd-OQw6tKhuM_0\tknife\nd-S3AmiMI1s_0\tcar\nd-e8mKtYWjk_0\tperson\nd-e8mKtYWjk_2\tperson\nd-e8mKtYWjk_1\tperson\nd-fv8fmGSlY_0\tperson\nd-hMPjLP2WE_0\tbicycle\nd-hMPjLP2WE_1\tbicycle\nd-hgDDQ3kwg_0\tperson\nd-h6ncywZ58_1\tperson\nd-h6ncywZ58_0\tperson\nd-oFe9Z0Obs_0\tdog\nd-oFe9Z0Obs_1\tdog\nd-rpsQgR8sw_0\tperson\nd-22m5Sq5OU_0\telephant\nd-5xdAZSjX8_2\tskateboard\nd--9RMf5LCA_1\tboat\nd_AudyfCYzg_0\tcar\nd_EP2nM4YMw_0\tbus\nd_ElAbuvxGQ_0\tdog\nd_ElAbuvxGQ_8\tdog\nd_SB-LVXyi0_1\thorse\nd_SmnRMWLD8_0\tdog\nd_S0JC
KcFCg_0\tcow\nd_hsQ2L-klo_0\tperson\nd_nTA-SKHNM_2\tknife\nd_nTA-SKHNM_6\tknife\nd_ocJQiPpn0_1\tskateboard\nd_vnePeLmwI_0\tperson\nd_2HhXHP8fg_1\tcow\nd__UUbvo2t4_0\tperson\neADPEBi8wWs_0\tcar\neADPEBi8wWs_1\tcar\neADqJI9JKq8_0\tperson\neAFdLVF01GU_0\tbicycle\neANH6WnEpPs_1\tperson\neAPcJi7CaBw_2\thorse\neAPcJi7CaBw_1\thorse\neARl2H_FaEU_0\tcat\neAXN0KAt66I_0\tperson\neAXN0KAt66I_1\tperson\neAYoRncVO74_1\tperson\neAZbke5Perk_0\tcar\neAfmOFI5jUM_0\thorse\neAsHKktPNSo_1\thorse\neAvDt4p-AvA_1\tknife\neA3lmhfjTuM_0\tcow\neA5hiUXY2_Q_4\tairplane\neA5hiUXY2_Q_6\tairplane\neA7FV9uQbYw_0\tbus\neA8fIAfGi5k_0\tperson\neBB5vRA9JPE_0\tknife\neBHEKUkaBcI_0\tbird\neBLisw9b8i8_0\tcow\neBLisw9b8i8_1\tcow\neBLisw9b8i8_2\tcow\neBMqhmQr7vI_0\tbicycle\neBRcZ5KDeEA_0\tknife\neBgLKDW3lH4_0\tperson\neBmdALv9WEE_0\tperson\neBwWJ_geg4Q_0\tknife\neBy554vRg9M_0\tperson\neB83_xIotrw_0\tbicycle\neB83_xIotrw_3\tbicycle\neB_ZHbAvx-c_1\tperson\neCInOWr32gc_0\tdog\neCNG8qj36vs_0\tcow\neCSzfVb87kI_0\tperson\neCUuH2vPeDI_0\tperson\neCWhtTVetLA_0\tumbrella\neCeVtq40bcM_4\tbus\neCf8h359-j0_0\tbus\neClBvJnyYa4_0\ttruck\neCmgHa6ThE4_1\tperson\neC3Fwv7Uows_0\tperson\neC-5SEhAGvo_0\tcow\neC_fRVwxsiI_0\tperson\neDJamx945Ao_0\telephant\neDJamx945Ao_1\telephant\neDSAGlcfwKA_0\tperson\neDSmePW-Vrg_0\tperson\neDXqzj7vKFI_0\tmotorcycle\neDX2HUt9ttU_0\tperson\neDX2HUt9ttU_1\tperson\neDuzDDESzU0_0\tperson\neDwjZL3IGqM_0\tperson\nFIrviDrZriY_0\tbus\nFI2T176uKi4_0\tperson\nFI4oF175yHo_0\tcow\nFJCE3uzu0i4_0\tdog\nFJL5lb3wBKI_0\tairplane\nFJPRJ0A8BII_0\tboat\nFJVcRzA_pdI_0\tairplane\nFJdcStnbgU0_0\tperson\nFJl_FwYbg8s_1\tknife\nFJmyu27Omwk_0\tperson\nFJsMdQrRgFs_0\ttrain\nFJvHbRGgbXM_0\tgiraffe\nFJvSXVq8PPk_0\tbicycle\nFJxbfz8q8Qw_0\tperson\nFJzU4eC5GiI_0\tperson\nFJ5jeLsVXys_0\telephant\nFJ5jeLsVXys_1\telephant\nFJ7oeGn4dBM_0\tcat\nFKGFVLnchKE_0\tskateboard\nFKGFVLnchKE_2\tskateboard\nFKGFVLnchKE_4\tskateboard\nFKGFVLnchKE_5\tskateboard\nFKKoXDLhFjo_0\tperson\nFKMCYA2_RMs_0\thorse\nFKMCYA2_RMs_2\thorse\nFKMsbMSiqrQ_0\tperson\nFKTETXdoJjk_0\tperson\nFKVxjU1kTMM_0\tperson\nFKWzB37H8-E_0\tcow\nFKdcZ0D4-K8_4\thorse\nFKdcZ0D4-K8_1\thorse\nFKhBf2FcrKE_1\tperson\nFKhBf2FcrKE_0\tperson\nFKnj73Wv84c_0\tumbrella\nFKsZiccYt_g_0\tperson\nFKwKsWjLhiI_0\tperson\nFKzvgRVfOjM_1\thorse\nFKzvgRVfOjM_5\thorse\nFK0ezSvbg7o_0\tdog\nFK37T3KvNUU_0\tcow\nFK8OxK802HI_0\tperson\nFLF92L3WRrs_0\tperson\nFLF92L3WRrs_1\tperson\nFLQzeGFBo2I_1\tbird\nFLQzeGFBo2I_2\tbird\nFLTewjXG6Wc_1\tperson\nFLTewjXG6Wc_2\tperson\nFLTewjXG6Wc_0\tperson\nFLWAw0tGOo8_2\tbicycle\nFLWAw0tGOo8_3\tbicycle\nFLZeutEdtzU_1\thorse\nFLqZVv798FE_1\tperson\nFLqZVv798FE_0\tperson\nFLq3zU7UtgQ_0\tskateboard\nFLr23Hv4LfE_0\ttrain\nFLr23Hv4LfE_2\ttrain\nFLskMa3WD7M_0\tperson\nFLyV4pkEHUg_0\tperson\nFL1q74zVLvo_1\ttruck\nFL8ulwhcOho_1\tcar\nFL-QttmKDc0_0\tairplane\nFL-73OGqifE_0\tcat\nFL_DeYOGkaU_2\thorse\nFL_DeYOGkaU_0\thorse\nFMHc-oH_rOE_0\tperson\nFMTZga_deFY_0\tdog\nFMig7WOUQyU_1\tbear\nFMig7WOUQyU_2\tbear\nFMv3NfETfq4_0\tbicycle\nFMv3NfETfq4_1\tbicycle\nFNCMx4Aum_M_1\tmotorcycle\nFNJmejn3KNQ_0\ttruck\nFNJmejn3KNQ_3\ttruck\nFNJmejn3KNQ_5\ttruck\nFNKJAi0Xbz0_0\tperson\nFNNdAL0qtWM_0\thorse\nFNSpSfZSQfE_0\tperson\nFNbjJJgHt6c_1\tperson\nFNgfcu9JUHA_0\tcow\nFNjDy-du_gs_0\ttruck\nFNv5k4sCs5k_0\tperson\nFNxfPhr1AZk_0\tperson\nFN1B1veyxCQ_0\tcow\nFOAmP97Gboo_0\telephant\nFOAmP97Gboo_2\telephant\nFOL80Pq_HSs_0\tcat\nFOXwGm4ddCk_1\tperson\nFOacAsl9vUM_1\tbird\nFOnRpTgHAdI_0\tperson\nFOyA2uyFS0s_0\tcar\nFO-yhRhInHQ_0\tmotorcycle\nFO_sYJabdgQ_1\tbird\nFPBkLbjkE0I_1\tperson\nFPC9a1ebnRk_0\tperson\nFPFEZjz68RM_0\
tperson\nFPHxPqZ9of4_0\telephant\nFPIVRAQI9Ao_1\tairplane\nFPS-rWu8sfw_0\ttruck\nFPdj2aDA2Is_0\tperson\nFPd8NgysFbw_0\tperson\nFPhiHYzZrc8_2\tbird\nFPmbKUp9Apc_0\tperson\nFPoBK2S6-kE_0\telephant\nFPpdaMeuTPM_0\tperson\nFP-joReSPjM_1\ttrain\nFP-joReSPjM_4\ttrain\nFQBe4ewvq3k_0\tbus\nFQDYCsUTzLU_0\tperson\nFQIKRtrwRJU_0\tperson\nFQKMItJWON8_4\tbicycle\nFQNa7v1nuHs_0\tbird\nFQNa7v1nuHs_1\tdog\nFQPeEa0PIhY_0\tperson\nFQQ5mFLQS_8_0\tairplane\nFQTA_Rs2r4k_0\tairplane\nFQa2-poPUOQ_0\tperson\nFQiI3CA-HsU_2\tperson\nFQiI3CA-HsU_0\tperson\nFQiI3CA-HsU_1\tperson\nFQnnRHyzLcE_0\tboat\nFQyvUPmvsSo_0\tbus\nFQ0G5VjpRO8_0\tcat\nFQ09pTeRKXM_0\tperson\nFQ8nNpJodyM_0\tperson\nFQ_PnAPHimg_0\ttrain\nFQ_YvOmwGng_1\tskateboard\nFQ_YvOmwGng_2\tskateboard\nFQ_YvOmwGng_0\tskateboard\nFRBmAObAjLg_0\tumbrella\nFRCsksZQW0g_0\tmotorcycle\nFRFZtNbUMfU_0\tperson\nFRFZtNbUMfU_1\tperson\nFRKbwt_HIJY_0\tcat\nFRUF5D_Bg4I_0\tboat\nFRZeTLb7R70_0\tperson\nFRcpw1KTh4w_0\tskateboard\nFRh68K9peM8_0\tknife\nFRs6gVga80M_2\tairplane\nFR0IeE_jWVE_1\tperson\nFSCpm1kxTIE_0\tumbrella\nFSJSVNwlHck_0\tperson\nFSSrkLtKRBk_0\tperson\nFSchPfgxMmk_0\tperson\nFSmTDuGYKRo_0\tperson\nFSrvVBrHdIY_0\tperson\nFSrvVBrHdIY_1\tperson\nFSs-_cK-4DE_1\tbird\nFS8ZnDA42Xg_0\tzebra\nFTHxfldxSrg_0\tperson\nFTlLAXuBE2M_1\tperson\nFTlLAXuBE2M_2\tperson\nFTlLAXuBE2M_0\tperson\nFTr8b641J_g_2\tzebra\nFTr_sg-tAYA_0\tperson\nFTr_sg-tAYA_1\tperson\nFT7LfULOrmU_0\tperson\nFUNI1-oxWb0_0\tperson\nFUPer2xPyRM_0\tperson\nFUQokq7Dm_0_0\tbird\nFUWPXNKt90g_0\tskateboard\nFUcLObUwigo_1\tperson\nFUcQGevNVQs_0\tperson\nFUp8cy7p6kc_0\tperson\nFUt-f-8QJmk_0\tboat\nFUzb9oSwhq4_2\thorse\nFU63gEB5T14_0\tperson\nFU63gEB5T14_1\tperson\nFU-Gyo-nX8w_0\tperson\nFU-Gyo-nX8w_1\tperson\nFVGYeJ_eKRY_0\tperson\nFVGYeJ_eKRY_1\tperson\nFVSihamjW0c_0\tperson\nFVcaEg-4Saw_0\tairplane\nFVm133076uE_0\tperson\nFVxqyMXxbTg_0\tperson\nFVxqyMXxbTg_1\tperson\nFVyZRq7FJUM_2\tperson\nFVyZRq7FJUM_0\tperson\nFVyZRq7FJUM_1\tperson\nFWAdovzWBpk_0\tperson\nFWCxpF5CAAo_0\tperson\nFWH6qzGM4Ko_0\tcat\nFWTx-_C46YA_0\tdog\nFWVW97tTSiI_2\tskateboard\nFWZANVS2JwI_0\tbird\nFWbVfjbC570_0\ttrain\nFWd_KJNB1hY_0\tperson\nFWeJwZsAuq4_3\tknife\nFWiwkCVxsvU_0\tairplane\nFWpcgznz11Q_0\tknife\nFWqFrwl7d-g_0\tairplane\nFWqFrwl7d-g_2\tairplane\nFWuSKVVP9Gw_0\tairplane\nFXPnVqm98h8_2\tcar\nFXbqlcQOm4U_1\tcar\nFXcjcGBH8uA_0\tairplane\nFXdP8V2Fyag_0\tbus\nFXdevKY06to_0\tbus\nFXjUPTGnrIk_1\tperson\nFXjUPTGnrIk_0\tperson\nFXrzFKXFtUE_0\tskateboard\nFXvqDQa0_pw_0\tbus\nFXz3PiouB_s_0\ttruck\nFX7DATABx3o_0\tperson\nFYPRZ3A5Wug_1\thorse\nFYQxEw6enVw_0\tknife\nFYR_8E37mhY_1\tboat\nFZJlwJ_5CIY_0\tperson\nFZJ0L36775Q_0\tbear\nFZOwW_igs2Q_0\tperson\nFZUo3m0w40U_1\tboat\nFZXz9ivLbZE_1\tperson\nFZfD0ASOr-0_0\tperson\neD5a0lOEA4c_0\tperson\neD5_C8Rnll0_1\tcow\neD9mxZpbjpo_3\tknife\neEBoNITml_U_2\tairplane\neEBoNITml_U_5\tairplane\neEKY2ZIJ7cw_0\tperson\neEKY2ZIJ7cw_1\tperson\neEUzIzmFpmg_0\tdog\neEZirBqUuUc_0\tcow\neErb9l8tm9Q_1\tperson\neEwALO20qQs_0\tmotorcycle\neEzaprIjPOA_1\thorse\neE7zgmIkklg_0\tperson\neE_bJ6JguBg_0\tperson\neE_bJ6JguBg_1\tperson\neFDTDuBtPdg_1\telephant\neFIUN94eOFY_0\tskateboard\neFKWB3vWXzM_0\tperson\neFNnJotKCuE_0\tdog\neFQAqsrxJIk_1\tcow\neFQAqsrxJIk_0\tcow\neFYXRQfFBFk_0\tperson\neFYi8GYHOwc_0\tbus\neFYi8GYHOwc_2\tbus\neFYi8GYHOwc_1\tbus\neFbHzEjDjsQ_0\tperson\neFbHzEjDjsQ_1\tperson\neFbOmylKLps_0\tbicycle\neFbOmylKLps_1\tbicycle\neFbOmylKLps_2\tbicycle\neFbOmylKLps_3\tbicycle\neFbOmylKLps_5\tbicycle\neFbmkhM4yvA_1\tskateboard\neFeLxXgEWb4_9\tairplane\neFeLxXgEWb4_10\tairplane\neFeLxXgEWb4_19\tairplane\neFkMiDqxNNg_0
\tperson\neFn7qz_Ik-g_1\tbicycle\neFsEtWFKOCE_0\tperson\neFsEtWFKOCE_1\tperson\neFsJVO58dOk_0\tmotorcycle\neFsJVO58dOk_1\tperson\neFtXO4KQyP0_0\tperson\neF6vo2K3X7Y_1\thorse\neGANqnJQvcA_0\tperson\neGEeIkSKn9I_0\tperson\neGFxLRdHt9o_0\tperson\neGIMcDTDuZI_2\tgiraffe\neGKe_SHbpew_0\tdog\neGLaqISw-ZU_0\tcow\neGXX9n0KkAw_0\ttrain\neGavpqx_a-Y_1\tperson\neGeSgNqD64Q_0\tcat\neGp90l6AeQM_3\thorse\neGp90l6AeQM_4\thorse\neGp90l6AeQM_6\thorse\neGp90l6AeQM_7\thorse\neGp90l6AeQM_1\thorse\neGp90l6AeQM_2\thorse\neGsO1ybeNmw_0\tperson\neGulNc3Hz6E_1\tperson\neGw-BT7HLw0_0\tperson\neGx11vRzfMI_0\tperson\neG420j0UncU_0\tcat\neG9ay7ouawQ_0\tboat\neG_gCk-NdFc_0\tbicycle\neHFxA8eOkKo_1\tdog\neHJOSAF8Ksc_0\tboat\neHMokGJS_8k_0\tbird\neHPZiFRZgH8_0\tperson\neHS3e7Drwlw_0\thorse\neHYl5vL9urI_0\tperson\neHYl5vL9urI_1\tperson\neHZGFVBiNbM_0\tperson\neHhu8cP6sYY_1\ttruck\neHlKAc_jO3w_0\thorse\neHlKAc_jO3w_1\thorse\neHmn6jMH470_0\tbicycle\neHo7GgOz-4M_0\tbicycle\neHo7GgOz-4M_1\tbicycle\neHpMDoo4x9o_0\tperson\neHpMDoo4x9o_1\tperson\neHrYu8_xQuI_3\tairplane\neHuFhF5mn60_2\tdog\neHuHorwvDFE_0\tperson\neH-lfDuzZRU_0\tperson\neIbRJYX77lM_1\tperson\neIceWO1K4hg_1\tknife\neIlLo4L0TBY_0\tperson\neIm2mZqCQIU_0\tperson\neItSvz_9tc8_1\thorse\neI5A6Q8wsk8_0\tperson\neJGswWs5a_U_0\tperson\neJJBtIMsces_0\tcat\neJNeGPvJZBs_0\tperson\neJN7jtqxGc0_1\tperson\neJO3ahTuQlg_2\tknife\neJTzEdYt2KA_0\tperson\neJTzEdYt2KA_1\tmotorcycle\neJZyuG0FB0M_1\tperson\neJg7Dq1HzW8_1\tperson\neJi66YisQnM_0\tcat\neJnTGfqwSKw_0\tperson\neJntPRQdD6A_0\tcow\neJntPRQdD6A_3\tcow\neJxFV3LV_-o_1\telephant\neJzkkZWgmiM_0\tperson\neJ2omVOUJv4_0\tperson\neJ4AprAxRh4_0\tairplane\neJ4AprAxRh4_7\tairplane\neJ4AprAxRh4_5\tairplane\neJ9q5sR4oiE_1\ttrain\neJ9q5sR4oiE_3\ttrain\neKBgCy3izjg_0\tperson\neKCONra70xU_1\tperson\neKGFKx5vbJw_1\tbird\neKGFKx5vbJw_2\tbird\neKJMggclbAI_0\ttruck\neKYCRb3cMSc_0\tcat\neKcN648xBxg_0\tcow\neKdNbqJsxIY_1\tcar\neKirxEVv1N4_1\tgiraffe\neKpHpiZZSOY_0\tmotorcycle\neKsu0SXh0Cg_0\tgiraffe\neK5wkhSqhQg_0\tperson\neLAIclbgwtw_1\tmotorcycle\neLAIclbgwtw_2\tmotorcycle\neLCZ9U490do_0\tperson\neLK_O-E6TXY_0\tcow\neLLFV2_GBOs_1\tcow\neLLFV2_GBOs_4\tcow\neLLFV2_GBOs_5\tcow\neLLFV2_GBOs_0\tcow\neLLFV2_GBOs_3\tcow\neLRLhwJpaKE_0\tperson\neLXWvZhL6g4_0\tcat\neLfUxNIWQn8_0\tcat\neLsJ-MoKt-c_0\tmotorcycle\neLzEA8IlB5E_0\tcat\neL2OKu4DhkM_1\tbear\neL-v_R-bG30_0\tskateboard\neMJ8eEFu7lo_1\tcar\neMJ8eEFu7lo_3\tcar\neMN980Fn4Kc_1\thorse\neMQEyMimXFU_0\tcat\neMWM---NOF0_0\tperson\neMcgmNHMY_g_0\tperson\neMdVb5oIUWc_0\tperson\neMgUOtsKC0w_0\ttrain\neMsSwXfIf7o_0\tperson\neMv2h_s0LpQ_1\tskateboard\neMwSfQmonxM_0\tbird\neM5e2PBO5hY_0\tgiraffe\neM-1RwyzQpI_1\ttruck\neM-1RwyzQpI_4\ttruck\neM-1RwyzQpI_5\ttruck\neNDHGq_Vm3A_0\tperson\neNEaC09BQF8_0\tperson\neNG3je3HCHI_0\tperson\neNG3je3HCHI_1\tperson\neNIXfUjWW10_0\tbus\neNSkFxbG_L0_0\tskateboard\neNTeTVBDq8U_0\tperson\neNVGmOIKNII_0\tskateboard\neNVGmOIKNII_2\tskateboard\neNYeXwUr7rY_0\tskateboard\neNbwp7DEy6A_0\tdog\neNbwp7DEy6A_1\tdog\neNlXrdcWYPA_0\tperson\neNllsU_utBs_0\tgiraffe\neN0ufEmLTDM_0\tperson\neN3a3uFzNxw_0\tperson\neOJorgJNcl4_1\tcar\neOMSAOLQMc0_0\tperson\neOMro57lp5o_0\tbicycle\neON5oS1ddkA_2\tknife\neOXMKiuur7c_0\tperson\neOZ2mMo0l60_0\tperson\neOe9DskHw1g_4\tairplane\neOe9DskHw1g_3\tairplane\neOhLZkf2gyQ_0\tperson\neOj2KctQDKQ_1\tbear\neO0M1RCeWaA_0\tdog\neO9s3APOXdI_0\tbear\nePDBmIR0Mnk_1\tbear\nePEoVXrSERQ_0\tperson\nePPnXOa8FII_0\tmotorcycle\nePWPPUSuctk_0\thorse\nePWPPUSuctk_2\thorse\nePWPPUSuctk_3\thorse\nePaqZZz_gtY_1\thorse\nePgL4a_1DcI_0\tperson\nePgqzaxKKo8_0\tperson\nePhchRaBs-k_1\tair
plane\nePhchRaBs-k_2\tairplane\nePjAF53eBSA_0\tperson\nePkzyffCJhs_0\tperson\nePli_zXbgF4_5\tbear\nePli_zXbgF4_1\tbear\nePli_zXbgF4_2\tbear\nePli_zXbgF4_3\tbear\nePli_zXbgF4_4\tbear\nePoC0Pj8xLA_2\tperson\nePo6J3guHBw_0\tperson\neQA0KwcbJlQ_0\tperson\neQI72zFfl34_0\tcow\neQI72zFfl34_2\tcow\neQMmOyBJUaA_0\tperson\neQOqA8LeUOU_1\ttruck\neQOqA8LeUOU_2\ttruck\neQOqA8LeUOU_8\ttruck\neQS3V0HV61g_0\tperson\neQTlUSSbOyY_0\tperson\neQWRQaVSPT8_0\tskateboard\neQXSsw2MJGk_0\thorse\neQZEFoxVGuY_2\tperson\neQZOAGlSYBc_0\tperson\neQcocP3auyk_0\tcar\neQfbBM_c96I_0\tknife\neQfbBM_c96I_1\tknife\neQi8AZ4DQO4_0\tairplane\neQjFi5iBL-c_0\tskateboard\neQl0Q82jNOY_0\tcat\neQmSzg2ZEpw_0\tperson\neQmSzg2ZEpw_1\tperson\neQoRdZR8_q8_0\tperson\neQpbjnMSNLE_1\tbus\neQ1R5EruVgo_0\tbird\neQ1R5EruVgo_1\tbird\neQ2eWzgVggo_0\tperson\neRAZ8LnDRN4_0\tperson\neRBc8OmROx4_0\tcat\neRCMzS-dM8o_0\tperson\neREzhoz4UA8_0\tbicycle\nFZieBxFsZO4_4\tbird\nFZieBxFsZO4_7\tbird\nFZieBxFsZO4_8\tbird\nFZieBxFsZO4_11\tbird\nFZsDQUdCBiE_0\tperson\nFaINra3PYko_0\tbus\nFaINra3PYko_2\tbus\nFaINra3PYko_1\tbus\nFanmFyCIvSc_1\tskateboard\nFaxr0F1n4lk_0\tperson\nFa8JS9CCs60_0\tperson\nFbC6M7cRN1k_0\tperson\nFbLE0CqDZ_I_0\tperson\nFbN-_RdBAoA_0\tperson\nFbRfH2tJCZg_0\ttrain\nFbUasHXeVXg_1\tperson\nFbVrmfwHLD8_1\tcar\nFba1mHso_c8_0\tperson\nFbcl3O89qPI_0\tperson\nFbryy4ItyRo_0\tmotorcycle\nFbsxP5HIH-w_0\tperson\nFbtbQbo3w6A_0\tperson\nFbtbQbo3w6A_1\tperson\nFbtbQbo3w6A_2\tperson\nFbzdX2M1spw_0\tperson\nFb9GVgZUQkk_1\tbird\nFb-bT-5HFvo_1\tperson\nFb-bT-5HFvo_0\tperson\nFcAKq2q6WuI_0\tperson\nFcGoc7P1MnA_0\tairplane\nFcHZFDzsW6U_0\tperson\nFcI2xE1s0tE_0\tperson\nFcJofbjqKR0_0\tperson\nFcNTnULQ914_0\ttrain\nFcPxUMks1f8_0\tairplane\nFcQ9ypCnsnM_3\telephant\nFcdE5l-9Cl4_0\tperson\nFcfkxe_EegE_3\tskateboard\nFckxSGw75TA_8\telephant\nFckxSGw75TA_1\telephant\nFckxSGw75TA_3\telephant\nFckxSGw75TA_4\telephant\nFckxSGw75TA_6\telephant\nFcmq6FVlPrs_2\thorse\nFcyT7NFOtOU_1\ttruck\nFczLlZB8PPQ_0\thorse\nFdBcdDQa2Yc_0\tperson\nFdG3QrZtdYo_0\tperson\nFdM1BVOZnpc_0\tperson\nFdM1BVOZnpc_1\tperson\nFdYZH48B1gQ_0\tgiraffe\nFdYpikKc6Rk_0\tperson\nFdcxQx4sFow_0\tperson\nFdgWx-kasEQ_0\tcar\nFdgw87Au0kg_0\tperson\nFdp1t1Kk42s_0\tperson\nFdvgBe0Ix0A_0\tperson\nFdvgBe0Ix0A_1\tperson\nFdviMb1gxkI_0\telephant\nFdyA9CQ40Xo_0\tcat\nFd1Rn6HvibQ_0\tbus\nFd1ZmuLPSNA_2\ttruck\nFd1ZmuLPSNA_0\ttruck\nFd1ZmuLPSNA_1\ttruck\nFd1ySlMqOEk_0\thorse\nFd6kpMD00LI_0\tperson\nFeAmji-BcLE_0\tskateboard\nFeHGwC6UYlQ_1\tperson\nFeHGwC6UYlQ_0\tperson\nFefqZU-M3NQ_0\tdog\nFeioRbELmKY_0\tcat\nFel-MqoIa98_0\tperson\nFenJI9gPekk_0\tperson\nFevOpclGxX8_0\tperson\nFe0XVxKTD10_0\tperson\nFe1ne3adKqs_0\tperson\nFe1o0fdRyjk_0\tperson\nFe1o0fdRyjk_1\tperson\nFe1o0fdRyjk_2\tperson\nFe_r1BcuOm8_0\tairplane\nFfCfKve9svg_0\telephant\nFfGzM6IRg6I_0\tperson\nFfTyXxo_JLY_0\thorse\nFfWtRI5MlvQ_1\tperson\nFfWtRI5MlvQ_0\tperson\nFfddIx2fdDE_0\tperson\nFfkcxMLN90Q_1\tperson\nFfkcxMLN90Q_0\tperson\nFfpScNxcfaE_0\tperson\nFfpuED53W2w_0\tperson\nFf3kCsp4dss_0\thorse\nFf37VadXulw_0\tperson\nFf37VadXulw_1\tperson\nFf-s3k4nzl0_0\tcow\nFgAW1wm55t4_0\tumbrella\nFgBAfHhZDtY_0\tcow\nFgCkJ9L956k_2\thorse\nFgHkoen3Fbs_0\tperson\nFgHkoen3Fbs_1\tperson\nFgHkoen3Fbs_2\tperson\nFgK205YdiNI_0\tzebra\nFgaH6B8Im-s_1\tperson\nFgh-oweWR10_1\ttruck\nFgh-oweWR10_2\ttruck\nFgh-oweWR10_5\ttruck\nFgkgjnYWuvc_0\tmotorcycle\nFglWoBFeCGs_0\tboat\nFgqe5FVDM7w_1\tbus\nFgqe5FVDM7w_3\tbus\nFgtxhgrL-1s_0\tbicycle\nFhAkQ-D6j7M_1\tperson\nFhNe0p3NvAk_0\tperson\nFhS2OrbfOqA_0\thorse\nFhTIUIB4MQk_0\tperson\nFhdb7UXlKgw_0\tperson\nFhhQQi3XBRs_0\tperson\nFhim9z
q_3dc_0\tdog\nFhtl-JSkWvY_1\tskateboard\nFh1QSbERb_I_0\tperson\nFh1jlYGKYy8_0\tcow\nFh2wm1SuBlM_0\tperson\nFh5hapK4iY0_0\thorse\nFh-e1BaovqE_0\tperson\nFiAj5FRP_QI_0\tbear\nFiAj5FRP_QI_1\tbear\nFiAj5FRP_QI_2\tbear\nFiGZEZ8BFeg_0\tperson\nFiLeL7fMtKI_0\tperson\nFiMl9o33Uaw_0\tperson\nFiQbZpev_LA_0\tperson\nFim4ZNdANXI_0\thorse\nFipIgAA0lFk_0\tbicycle\nFirrKl6H41c_0\tperson\nFirrKl6H41c_1\tperson\nFivrGIBKDvo_1\telephant\nFiz1rnLi2OM_0\tperson\nFi4kJfnwDFc_1\tbicycle\nFi4kJfnwDFc_0\tbicycle\nFi7LPQxqu14_0\tperson\nFi9uLLmtWaQ_0\tperson\nFi_IAiAUqaU_1\thorse\nFi_IAiAUqaU_0\thorse\nFjBRf4S85bg_0\telephant\nFjBRf4S85bg_1\telephant\nFjCz86a5wp4_0\tperson\nFjF5nRRKjKc_0\tperson\nFjMslXNPmHo_0\tairplane\nFjRDB5KtmZk_0\tcow\nFjUvDc65QJo_0\tperson\nFjZltjNG2NU_0\tskateboard\nFjfP5wdsmM0_0\tcat\nFjo3Q6r1Unc_0\tcow\nFjsVcnD_MIg_0\tmotorcycle\nFjvoIjZBqfU_1\tperson\nFjvoIjZBqfU_0\tperson\nFj98ZrblH1g_0\tumbrella\nFkAQLLdAAbk_0\telephant\nFkAQLLdAAbk_1\telephant\nFkFAVoUYxPc_1\tskateboard\nFkOkAlvY34U_0\tcow\nFkSrQgrkwxM_0\tperson\nFkSrQgrkwxM_1\tperson\nFkZy3LGoN9I_0\tdog\nFkkUslZGIbg_0\tbear\nFkvcJknwKuY_0\tperson\nFkzewHxki8o_0\tskateboard\nFk4XzK5XI6A_0\tbus\nFk4XzK5XI6A_1\tbus\nFlD1RAiVpek_0\tperson\nFlD1RAiVpek_2\tperson\nFlD1RAiVpek_1\tperson\nFlEhS-F3ygQ_0\tumbrella\nFlGO6UYJUzE_0\thorse\nFlNEteNmUhc_0\tperson\nFlR1fAhH2Xo_0\tdog\nFlYY0RaMPNY_0\tperson\nFlgN1oA45yM_0\tbear\nFl2yqFTps4E_0\tperson\nFl6OhW0-1w0_0\tperson\nFl9EhNo7Keg_0\tperson\nFmDFcSMFeno_0\tperson\nFmDFcSMFeno_1\tperson\nFmDOHRJspxI_0\tperson\nFmMYoani5Vg_0\tperson\nFmOLwdbHDxQ_0\tperson\nFmOfXWRFoXQ_2\tbird\nFmUhkvEy_7s_0\tperson\nFmUhkvEy_7s_1\tperson\nFmVDxGIS5zk_5\ttrain\nFmVDxGIS5zk_7\ttrain\nFmVDxGIS5zk_8\ttrain\nFmVDxGIS5zk_9\ttrain\nFmVDxGIS5zk_10\ttrain\nFmVDxGIS5zk_1\ttrain\nFmVDxGIS5zk_2\ttrain\nFmc6udEpldU_0\tcat\nFme4Abd5nUA_2\tbird\nFme4Abd5nUA_1\tbird\nFmoAxj0I_HE_0\tperson\nFmqOvCWa7zg_0\tperson\nFmrozJZpKR8_0\ttrain\nFmsAY671mqQ_7\tknife\nFmuPNtoqS2E_0\telephant\nFm1Depfmi_k_1\tperson\nFm5EMiek6AE_0\tperson\nFm6Hq8f2Qxk_1\tairplane\nFm6Hq8f2Qxk_2\tairplane\nFnEnQ8PP_eE_0\tskateboard\nFnEnQ8PP_eE_1\tskateboard\nFnGScEGhwDA_0\tperson\nFnKvuj-emb4_0\tperson\nFnKvuj-emb4_1\tperson\nFnMl1BAE_jc_0\tbear\nFnMl1BAE_jc_4\tbear\nFnNceIdqZ3w_0\tperson\nFnNceIdqZ3w_1\tperson\nFnTofG0IZf0_0\tperson\nFnb6xihA7ck_0\tperson\nFncXKaqIxJo_0\tperson\nFncXKaqIxJo_3\tperson\nFniMTwzxRZQ_0\tperson\nFnv6GlZeZ98_2\tairplane\nFnwZm6-uVkU_0\tperson\nFn6j8CspFw4_5\thorse\nFn6j8CspFw4_2\thorse\nFn6j8CspFw4_3\thorse\nFn6j8CspFw4_4\thorse\nFn7CPx1Df1I_0\tdog\neRGlFEYZ74g_0\tperson\neRQQ8fY6DVA_0\tperson\neRToPN2xDdI_1\thorse\neRToPN2xDdI_2\thorse\neRVbBhT_bcs_0\tperson\neRXcoQINrwY_0\tcow\neRa3aIGemkw_0\tperson\neRiOVczmKs0_0\tperson\neRk0k7ru0C0_0\tperson\neRlVo64o3EE_0\thorse\neRn_VZZAhDc_0\tbird\neRpQzm5PYXw_0\tperson\neRvRu0q-GoE_0\tdog\neR2L8Yeikhc_0\tperson\neR2s4XgNo7o_2\tdog\neR6IwGLaa1M_0\tbicycle\neR7y-Ei3DLg_0\tperson\neSGBtfzFobI_0\ttrain\neSId-3VXvKk_3\tdog\neSIwAUMyFgU_0\tperson\neSKH9cYOKk8_0\thorse\neSPrJOSU8AM_0\ttrain\neSa1vsOaz1c_0\tknife\neSiLV8rS59E_0\tperson\neSiLV8rS59E_1\tperson\neSljhVPS-Ik_2\tperson\neSljhVPS-Ik_0\tperson\neSpAsKZSmiA_0\tairplane\neTDKrXMMrQ0_0\tcow\neTKPoRwNChU_0\tperson\neTKPoRwNChU_1\tperson\neTKSWSWvAyw_1\tperson\neTNf-Cqbbro_1\tperson\neTQF3UDg8qc_0\ttruck\neTTKvmF97nI_0\telephant\neTUWLCcJU2k_2\tbus\neTU8LeMW9qA_0\tperson\neTc1z6mbb50_0\ttruck\neTdIp3O6Gdc_0\tbear\neTkYJ5e2d6g_0\tperson\neTkbZ2QtHvw_0\ttrain\neTkbZ2QtHvw_1\ttrain\neTpyN9lx8_4_0\thorse\neTsE0jLxU3w_0\ttruck\neTsE0jLxU3w_2\ttruck\
neT3B8Dicg34_1\tperson\neT5K9fPU-0g_0\tperson\neUGoobFpS4s_0\tperson\neUKe6XaWIfA_0\tmotorcycle\neUQjLdCSTbY_0\tperson\neUQ4P2JG1yg_0\tbus\neURPg0TbtFI_0\tperson\neUU0KJ-w2bc_0\tperson\neUVgOxQT_-8_0\tcow\neUbEHnOzRA8_0\tperson\neUbEHnOzRA8_1\tperson\neUbEHnOzRA8_2\tperson\neUe_Rayk8X8_0\tperson\neUyzGl0--ms_1\tperson\neU6G8jITD_Y_0\tairplane\neVJOOrHqc34_1\tskateboard\neVL1UQ_nteE_0\tcar\neVNGBAn5Oxc_0\tcat\neVPABDrI9js_0\tbird\neVYydWvg5Go_1\tperson\neVcLRosJZew_0\tperson\neVhB8QJJogM_1\tknife\neVn8akHyS64_0\tairplane\neVn8akHyS64_2\tairplane\neVn8akHyS64_3\tairplane\neVn8akHyS64_6\tairplane\neVuy4uctm28_0\tperson\neVu1gME4-Qs_0\telephant\neVu1gME4-Qs_1\telephant\neVywFyCLwko_0\tperson\neVzfhyg8qFU_0\tperson\neV2KIbTSnH4_1\ttrain\neV4pA62ABv8_1\ttrain\neV6nRsgY8PQ_0\tperson\neV64Qw4Zebk_0\tperson\neV-VIypuuNY_1\tbird\neWHnCpVoKhw_0\ttruck\neWbvhqFVvXk_0\tboat\neWlQOgHQT7g_0\tairplane\neWpIepmfRus_0\tperson\neWpIepmfRus_1\tperson\neWsle8FxRvY_0\tperson\neWyDiulNMGo_0\tmotorcycle\neW6l7xJBq-Q_1\tboat\neW6o2X8qAtQ_0\tcar\neXDegroOl34_0\tperson\neXECAC_iXPc_0\tperson\neXLLe0Z-fJk_0\tperson\neXUIt5B2NQc_0\tperson\neXYniqUW4z8_0\tbicycle\neXYniqUW4z8_2\tbicycle\neXaCA1qL7uY_0\tperson\neXeifN6Jv8c_0\telephant\neXeifN6Jv8c_1\telephant\neXeifN6Jv8c_3\telephant\neXeifN6Jv8c_4\telephant\neXeifN6Jv8c_7\telephant\neXfkthdw2L4_0\tperson\neXixQXmPyYw_0\telephant\neXoF6xS_5u4_3\tknife\neXuelMqu_1M_0\tknife\neXveKyc2TQg_0\thorse\neXxAlPRFiqs_0\tperson\neXxAlPRFiqs_1\tperson\neXxAlPRFiqs_2\tperson\neX3bd4kHxuc_9\tairplane\neYDpQFJpz7k_0\tperson\neYJe2k1E0XQ_0\tbus\neYY-Mz3L_Ac_1\telephant\neYeHu-IftM0_0\tperson\neYnlQEvgHVc_0\tcat\neYqlHj6MSc0_7\tbicycle\neYyGqoW9Q3c_0\tbus\neYyri5GAJDE_0\tperson\neZEN_5rnTLM_0\tperson\neZL3Ew4O7YI_0\tperson\neZXS_3nTpdo_1\tmotorcycle\neZXS_3nTpdo_2\tmotorcycle\neZZb5rnc1iA_0\tbus\neZf-Rsr1aNs_1\ttrain\neZgo_XfmmO0_0\tperson\neZgo_XfmmO0_1\tperson\neZl_FRsZx3o_0\tperson\neZym_LkJnpY_1\tknife\neZ2Y_Qtg0VU_0\thorse\neZ2Y_Qtg0VU_1\thorse\neZ4N2Y737ss_0\tperson\neZ_peGgPSDE_0\tperson\neaHXGY8ImzY_0\tperson\neaOqHSeEVG0_0\telephant\neaR-dFaZRGc_2\tgiraffe\neaTX3J2X23g_1\tperson\neaTX3J2X23g_0\tperson\neaalMrdHsQ0_0\thorse\nearUgdES0lk_0\tperson\neaxPmkwGK5U_0\tbird\nea1EeKBBjxk_0\tumbrella\nea1YcZPjbxU_2\ttruck\nea4saeRZ0_M_0\tperson\nea8mbQn2kv0_2\tdog\nea8mbQn2kv0_1\tdog\nebFgEyNciRc_0\tcow\nebMZJ-lUhbw_2\tbicycle\nebMZJ-lUhbw_3\tbicycle\nebMZJ-lUhbw_1\tbicycle\nebOubiwIUC0_0\tperson\nebV9mcxICDs_0\tdog\nebY5nNOPdN0_0\tperson\nebY52fJyTPs_1\tperson\nebY52fJyTPs_0\tperson\nebagV2pOV20_0\tboat\nebhnTUXh7Pc_0\tcat\nebh7xOXlO7Y_1\tperson\neboXP28MlOE_0\tairplane\nebt0_AWnuyM_3\tbear\nebyMEAOqPhQ_0\tskateboard\nebz4umtEYag_1\tmotorcycle\neb0UO8Y5r5A_0\tcar\neb1-qD5D7Us_0\tperson\neb5d4XIDSqs_0\tcar\necDEmZdWz8Q_0\tperson\necGOS5ZO0Tw_0\tskateboard\necGOS5ZO0Tw_1\tskateboard\necJIf9dcDHk_0\tperson\necKMZLATsNg_0\tperson\necKst7suEZo_1\tmotorcycle\necPynengjhg_0\tperson\necUmR_974l4_0\tbear\neccbjuLjCr0_0\tbicycle\necex13DrS00_1\tbus\necgqb4spDo0_0\tcow\neclnV3fwFVg_0\tcar\necndV9N-b9M_0\tboat\necrgwn6gB7c_2\tperson\necrgwn6gB7c_0\tperson\necrgwn6gB7c_1\tperson\nec0L5W9HzYQ_0\tperson\nec0zPF4t8jM_0\tperson\nec10-YUa1PE_0\tperson\nec4Mjwm2hyQ_0\tperson\nec4ya7ogbFU_0\tperson\nec59VG2krTI_0\tknife\nec7hzm4ZgOM_0\tbus\nec8daVdUMW8_0\telephant\nedErePLiFl4_0\ttruck\nedFb7FxjVPc_1\tperson\nedOvHaEGfM0_0\ttrain\nedO7Q7znUJA_0\tcat\nedPmPMqUt4c_1\tperson\nedPmPMqUt4c_2\tperson\nedS79MnRXwE_1\tperson\nedYcGdD4UGI_0\tperson\nedd8R4oDMdg_0\tperson\nedlAlkitTfg_0\tbird\nedlAlkitTfg_1\tbird\ned
q1Zw1FWGY_0\tperson\nedrtSs6UdCI_0\tboat\nedtqJ_N0258_0\tperson\ned0O35MjM6Q_0\tcow\ned5jfyH6JyI_0\tperson\neeEjRmROBZs_0\ttrain\neeEjRmROBZs_1\ttrain\neeJDVUC0bio_0\tbird\neeV0a3p0uz8_1\tdog\neeYr-ujfh4Y_1\tperson\neeYtwUSuQzY_1\tairplane\neeYtwUSuQzY_2\tairplane\neeYtwUSuQzY_0\tairplane\neeZyIsjtgj0_0\ttrain\neeahFaPbx5M_0\tskateboard\neea6uRdJLL4_1\tbird\neee-1I8uLeU_0\tcow\neefTfPIGkq4_1\tperson\neef-qkyU0jY_0\tboat\neepn_UxMI5o_0\tskateboard\nFoFA-VOPhV8_0\tzebra\nFoIc9MjzbBk_0\tperson\nFoSynLz7aJ8_0\thorse\nFoSynLz7aJ8_1\thorse\nFoUqmWxXlNU_0\tperson\nFoUqmWxXlNU_1\tperson\nFobAHnW_q6s_0\tperson\nFog-McdMlO0_0\tperson\nFomH9b8uRKs_2\tknife\nFot4m5WU4Aw_1\tperson\nFot4m5WU4Aw_0\tperson\nFouVJvkYyPs_0\tperson\nFpCdNHknwMQ_3\tcar\nFpCdNHknwMQ_5\tcar\nFpEzn8x46OE_0\tbird\nFpGO4RTCIuk_6\tbicycle\nFpGO4RTCIuk_0\tbicycle\nFpGO4RTCIuk_2\tbicycle\nFpGyjKY-NIk_0\tmotorcycle\nFpGzMvzCvKo_0\tperson\nFpI0Do5LaU8_0\tperson\nFpTdRnuOS8M_0\tperson\nFpTdRnuOS8M_1\tperson\nFpaob2f1sqE_1\tperson\nFpaob2f1sqE_0\tperson\nFpev0w7vGO4_0\tperson\nFprxIVYXUL4_0\thorse\nFpzpuYeDf6M_0\tbus\nFp1vbL5guA0_0\tperson\nFp2HgWZlr2k_0\tperson\nFp7RJqXwz6c_0\tperson\nFp-TG2XDrC4_4\tcar\nFp-TG2XDrC4_0\tcar\nFp-TG2XDrC4_1\tcar\nFp-TG2XDrC4_2\tcar\nFp-TG2XDrC4_3\tcar\nFp_5yBxyvR4_0\tumbrella\nFqFhpogmR2s_0\tcat\nFqHStgmNnKA_0\tbicycle\nFqHStgmNnKA_1\tbicycle\nFqTHQ5KBbaY_0\telephant\nFqjhuAhttZw_2\ttrain\nFqjhuAhttZw_1\ttrain\nFquAMi_ikSA_0\ttruck\nFqxWiT-6dLM_0\tperson\nFqxZmvVkHIA_0\tgiraffe\nFqxZmvVkHIA_2\tgiraffe\nFqx-wOpqzZo_0\tairplane\nFqzYUW3X9pc_0\tperson\nFqzYUW3X9pc_1\tperson\nFq_esHSu_sk_0\tperson\nFrC2HuRBsYA_0\tperson\nFrC-Gp1GmVw_0\tcow\nFrC-Gp1GmVw_1\tcow\nFrIO6gNGeao_0\tperson\nFrUCytgm6sM_2\thorse\nFrViqM6fVR0_0\tdog\nFrVxG6x7tj0_0\tknife\nFrVxG6x7tj0_2\tknife\nFrgvokGeeds_0\tperson\nFrk0tcM1o_w_0\tperson\nFrm5N8YRz_E_0\tperson\nFrpsbU7nO00_0\tperson\nFrxIGKawDiA_0\tperson\nFrzgyfVukw4_0\tperson\nFrz8huGrR4M_2\tmotorcycle\nFr0K__Q_Kv4_1\tbird\nFr2qdnHURF4_0\tboat\nFsHjWJUILr4_0\tperson\nFsScYp1HNk0_0\tperson\nFsXYM3nf7O4_0\thorse\nFsZyoaRLGfw_1\tperson\nFskWl7cTGUU_4\tmotorcycle\nFslFjbzL4rY_0\ttrain\nFsuA_2-7e1w_0\telephant\nFsuA_2-7e1w_1\telephant\nFsvwyL1hLDU_0\tbus\nFswGt3qhUXE_1\thorse\nFs6Lk0xDsWk_0\tdog\nFs6Vua80iU4_1\tbus\nFs-DmOC6Ksw_0\tperson\nFtAgz58w2vs_0\tperson\nFtC0Y3Dca60_0\tdog\nFtD8uBgTi3E_0\tcat\nFtJ8y0gIpKg_0\tdog\nFtJ8y0gIpKg_3\tdog\nFtJ8y0gIpKg_5\tdog\nFtJ8y0gIpKg_1\tdog\nFtJ8y0gIpKg_4\tdog\nFtMshKheG8Q_0\telephant\nFtet3EW_gR0_0\tskateboard\nFtet3EW_gR0_1\tskateboard\nFtj_1qTEwE8_0\tcow\nFtqLCjhRQgQ_1\tperson\nFtqLCjhRQgQ_0\tperson\nFtwMaVMlLbM_0\tperson\nFtwZasadNWo_1\tperson\nFtwZasadNWo_0\tperson\nFt3Xr78g1jg_0\tdog\nFt4RUB75d64_0\thorse\nFt5ZV3L5LV4_0\tperson\nFt8VPp_VNJs_0\tknife\nFuCuNV5vL-8_0\tperson\nFuIIvsD7qyY_1\tperson\nFuMf00RPDmg_0\tbear\nFuNvDTe7cAM_0\tbird\nFuR3p7f2R30_0\tperson\nFuTf8iiIHWI_0\tmotorcycle\nFuVQuZfX71w_1\telephant\nFuc49AUfyaA_1\ttrain\nFufG8eRehvk_0\tperson\nFuoKMOMcl0I_0\tbus\nFu3A7S4V26Q_0\tperson\nFu4p4U9AqY4_0\ttrain\nFu5TDXXdHyc_3\ttrain\nFu5TDXXdHyc_0\ttrain\nFu5TDXXdHyc_1\ttrain\nFu5TDXXdHyc_2\ttrain\nFvB0FA24g0c_0\tmotorcycle\nFvD-5pXN6B4_0\tperson\nFvF8CGSAVBw_0\thorse\nFvIqBpjD4A4_0\tperson\nFvKJQTsxS6o_0\tbus\nFvNiWF5wWJA_0\ttruck\nFvN6HD0c3I8_1\tbicycle\nFvQ8wYSFAhA_0\tbird\nFvZ_lMA5MYE_0\tperson\nFvZ_lMA5MYE_2\tperson\nFvZ_lMA5MYE_1\tperson\nFvcxD9PJ1-g_0\tbear\nFvcxD9PJ1-g_1\tbear\nFviKCn2JGbY_0\tperson\nFvksDxENves_0\tbird\nFvksDxENves_1\tbird\nFvmW4A9wN1c_0\tperson\nFvslrkU6Ii8_1\tskateboard\nFvslrkU6Ii8_5\tskateboard\nFvslrkU6Ii8_4\ts
kateboard\nFvuJoToFsZ0_0\tskateboard\nFv2LjW2C5SU_0\tknife\nFv2LjW2C5SU_2\tknife\nFv2SAN8CNlg_0\thorse\nFv6OQz_y5V0_0\tperson\nFv80QjBLyXw_3\ttrain\nFv80QjBLyXw_4\ttrain\nFwBCZ90I_aw_0\tcat\nFwIN5LlmnSA_0\tperson\nFwMy9UR3xJA_0\tperson\nFwMy9UR3xJA_1\tperson\nFwNHDlUxkVE_0\tperson\nFwSQA6A_bWE_0\tperson\nFwZzzptQg0s_0\tperson\nFwZzzptQg0s_1\tperson\nFwf5SGfOguQ_0\tbird\nFwf_1L-RQB4_0\tcow\nFwhmGtqpt5s_1\tskateboard\nFwrkNuHACuE_0\tperson\nFwtyj6Ut62E_2\tdog\nFw8NHywJSJw_7\tairplane\nFw8NHywJSJw_8\tairplane\nFxHZCFGlLk8_0\ttruck\nFxI0-u_zPQQ_1\tskateboard\nFxI0-u_zPQQ_0\tskateboard\nFxJg66y6Vj4_0\tperson\nFxJ0douRc4s_0\tperson\nFxMnA-aNvVI_0\tknife\nFxXVgnAjOCs_0\tperson\nFxXVgnAjOCs_1\tperson\nFxitbyLzBbw_0\tperson\nFxmfshFrhyg_0\tperson\nFxmfshFrhyg_1\tperson\nFxp_EDLEylo_1\tbear\nFxxuVRsJiCQ_7\tbird\nFxxuVRsJiCQ_9\tbird\nFxxuVRsJiCQ_11\tbird\nFx74SXbZiUI_0\tboat\nFx-8EgSEaDg_0\tperson\nFyFea2NifCo_0\telephant\nFyKB3iEKNlg_0\tperson\nFyO1UliwWNQ_0\tskateboard\nFyQulDaVp8I_0\tperson\nFyTFrxalrzY_2\tbicycle\nFyb5_PxuzrI_1\tairplane\nFyjgIZnRT0A_0\tperson\nFylDI9Ssx18_0\tdog\nFyqooE73pSs_0\ttrain\nFyuLo6pvAxk_0\tperson\nFyuLo6pvAxk_1\tperson\nFy6UODQTxBw_0\tdog\nFy8cULzM424_0\tperson\nFzJOOqEWb48_0\tperson\nFzP8vDH_ynM_0\tbicycle\nFzV_56qru4c_1\tperson\nFzV_56qru4c_0\tperson\nFzaaAJ_dGjI_1\tdog\nFzc4L1eWvQ0_0\tknife\nFzeiG746wec_0\tperson\nFzoJlCfL5bc_0\tperson\nFzpV3zrU7w0_0\tcat\nFzufL9SIDZ4_2\tperson\nFzvLoCiUbCU_0\tperson\nFz4RMW4ONrQ_0\tskateboard\nF0Ekv-HAlnk_0\tairplane\nF0G64yaBMBM_0\tperson\nF0G64yaBMBM_1\tperson\nF0I59IAm-vo_0\tperson\nF0Qk5fG3X-M_0\tperson\nF0Q9zBIa4vg_0\tknife\nF0Q9zBIa4vg_1\tknife\nF0Q_-7qxWws_3\telephant\nF0UBtRxGNhA_5\tbird\nF0XjqeFLlgU_1\tbird\nF0ZAshDVPxg_0\tperson\nF0c4qnJQtDU_0\tbear\nF0gFV3Zl1ew_0\ttrain\nF0gFV3Zl1ew_2\ttrain\nF0hx5kgZ3go_0\telephant\nF0mBUyvb90Y_0\tperson\nF0qXU9y4p-Q_0\tperson\nF0z1cmfnPsQ_1\tbicycle\nF1B_Y1twDK0_0\tcow\nF1CZ2DPXJ9M_4\tbicycle\nF1CZ2DPXJ9M_1\tbicycle\nF1KHVI6XeVo_0\tperson\nF1eNAhwM5Pc_0\thorse\nF1jGg9828BI_0\tperson\nF1j27LEBSpI_1\tcar\nF1qXLHQywDc_1\telephant\nF1sQlUVWZLM_0\tperson\nF15XLgp6ED4_0\tskateboard\nF2Bb2pFQRyU_1\tperson\nF2EV6W4vdT8_0\tbus\nF2GhztG-3ZM_0\tcat\nF2HupbPd4Rc_0\tperson\nF2JDbaIJXuM_0\tperson\nF2JeBrL43Kg_0\tperson\nF2JnnpLll3c_1\thorse\nF2Kd_wTgfHc_0\tbird\nF2N-fmDDyCs_0\ttrain\nF2an_w-D4WM_0\tdog\nF2bbT3y10lk_0\tperson\nF2dx02YK1MY_0\tcat\nF2kBHcrY7Ck_0\tperson\nF2nvlBMOvGc_0\tboat\nF2nvlBMOvGc_2\tboat\nF2nvlBMOvGc_3\tboat\nF2nvlBMOvGc_4\tboat\nF2yvXHbr1Us_6\tbird\nF2yvXHbr1Us_7\tbird\nF20W1m4x2Ys_0\tperson\nF20_Ihwr_1Y_0\telephant\nF21R2kQ-je4_0\tperson\nF2244CO9Fuo_0\tairplane\nF250PqK5Gb4_1\tairplane\nF3AMItpIJlI_0\tdog\nF3AMItpIJlI_4\tdog\nF3FUBdTgY7c_0\tcar\nF3FUBdTgY7c_1\tcar\nF3Lz3rnQ-7A_0\tperson\nF3Lz3rnQ-7A_1\tperson\nF3NneLgyZiU_0\tperson\nF3RkQzIQjeU_2\tbicycle\nF3XFJeSjPDU_0\tbird\nF3XFJeSjPDU_3\tbird\nF3gY7oCc-j8_0\tcat\nF3j318NP2P0_0\tperson\nF3j318NP2P0_1\tperson\nF3oP1Se_HdQ_0\tmotorcycle\nF35JtGCIiCo_0\tdog\nF377W3trtdg_2\tdog\nF4DJmxH-fuw_0\tskateboard\nF4FXVb3DdJE_0\tperson\nF4FXVb3DdJE_1\tperson\nF4HgVMHEiVQ_1\tbird\nF4Ja9TDp5eg_0\tperson\nF4R1rt0I4Ik_1\tperson\nF4R1rt0I4Ik_0\tperson\nF4WWEXEO6Cw_0\tairplane\nF4hUo05eI2s_0\tperson\nF4hVb1AsJ9M_0\tumbrella\nF4hVb1AsJ9M_1\tumbrella\nF4hp-2UBFcI_0\tperson\nF4l8U4NGPMU_1\telephant\nF4rQJlBkGa8_0\tperson\nF4tzOjT91r0_2\telephant\nF41NWCYabpM_0\tperson\nF44j0JHVdfU_2\tbicycle\nF44z7XXoIZk_0\tcow\nF4-R6x6hSno_0\tairplane\nF4-R6x6hSno_3\tairplane\nF5IEcbmSBiU_0\tperson\nF5UiBt9FiQ4_1\ttruck\nF5brWxznDYA_0\tbicycle\nF5drV0qDFvU_0\tp
erson\nF5pSgana5Ds_0\tperson\nF5pwABHMaZM_1\tskateboard\nF5y_lQCCiYk_0\tperson\nF51aHL_AuQ8_2\tperson\nF51aHL_AuQ8_0\tperson\nF51aHL_AuQ8_1\tperson\nF54NzXjey4Q_1\tperson\nF6AkwJu9acQ_0\tperson\nF6BUhbvKAY0_3\tbear\nF6I3hGIdHBM_0\tairplane\nF6L1DckOdFs_0\tperson\nF6L1DckOdFs_1\tperson\nF6L1DckOdFs_2\tperson\nF6UTU1zVfY0_0\tperson\nF6X-PDReV8U_0\tskateboard\nF6uVxnnSkQg_0\tcat\nF63FWqs6n6A_1\tperson\nF63OB46zw20_0\tperson\nF66U-dCKTVs_5\telephant\nF67kQb83GEo_0\tperson\nF7Aw74QT7I8_0\tmotorcycle\nF7D1ccHfWQM_0\ttrain\nF7GYFMuRxr8_0\tperson\nF7MruF3gqRk_0\tperson\nF7MruF3gqRk_1\tperson\nF7M2n9Irv10_0\tperson\nF7adrDrejOI_0\tbicycle\nF7adrDrejOI_3\tbicycle\nF7adrDrejOI_7\tbicycle\nF7iFGXShjIg_1\tknife\nF7lmwAhsTVE_1\tcat\nF7lmwAhsTVE_0\tcat\nF7wyUoc1ELM_1\tperson\nF7wyUoc1ELM_0\tperson\nF72e40LPG8g_2\tairplane\nF72e40LPG8g_3\tairplane\nF72yH9hRoS0_0\tperson\nF77I6mkMOmM_0\tperson\nF77I6mkMOmM_1\tperson\nF77WzfDD-Ac_0\tperson\nF77WzfDD-Ac_1\tperson\nF8VZcw3-DMg_0\tperson\nF8XbiaxQYFA_0\tcar\nF8kTGPYH29o_0\tairplane\nF8sVrU5FfZw_0\tperson\nF8vyo42LQM0_0\tairplane\nF9KIXBo3lNI_0\tbird\nF9KIXBo3lNI_1\tbird\nF9WnfUhb8A4_0\tboat\nF9hhOJk3fdY_0\tperson\nF9jiY40SX4g_0\tperson\nF9kDOaogdPA_0\ttrain\nF9kDOaogdPA_1\ttrain\nF9nirQJj4wc_0\tmotorcycle\nF9qYvrO4nMM_1\tperson\nF9qYvrO4nMM_0\tperson\nF942FTRne2Q_0\tperson\nF95fIsG0A7U_0\thorse\nF98XVAomn1s_0\tperson\nF-AROt5V1zQ_0\tairplane\nF-L2byRMMEI_0\ttruck\nF-QpXlvCAdw_0\tgiraffe\nF-RVugkjZ1k_0\tperson\nF-RVugkjZ1k_1\tperson\nF-dxzMmjOT0_0\tperson\nF-dxzMmjOT0_3\tperson\nF-dxzMmjOT0_1\tperson\nF-poowwxrxU_0\tperson\nF-3G1FhnsdY_2\tcow\nF-3G1FhnsdY_3\tcow\nF-7EAK7rTI8_0\tbird\nF_AoZsBu8j8_0\tperson\nF_AoZsBu8j8_1\tperson\nF_BBB0J-9tQ_0\tmotorcycle\nF_CsG_jIxC8_1\ttruck\nF_I4rwh1mtE_0\tperson\nF_JJmqKJBnY_0\tperson\nF_Kw8qyfgjU_0\tperson\nF_WtOi2ZeSE_0\tumbrella\nF_oxJfyCUrw_0\tperson\nF_wVAS7hR9E_0\tcat\nF_5NdFCcCrQ_0\tairplane\nF_59LD9YnAU_2\tperson\nF_8qVC7MHM0_0\tperson\nGABXImD8qwM_3\tdog\nGADBGhd7Hbc_0\thorse\nGAF3BbJqKos_0\tperson\nGAF3BbJqKos_1\tperson\nGAGFuwQyn2A_1\tperson\nGAVdXzEftIU_1\tperson\nGAaPJd_iVeU_0\ttrain\nGAb6ZqG64o4_0\tperson\nGAb9NG_JnoU_0\tcow\nGAe7SnwoPQQ_1\tairplane\nGAg-aVsz7AI_1\tperson\nGAinaDnPPO0_0\telephant\nGAnYrNhN90c_1\tperson\nGAoDRtFNSeQ_0\tbird\nGAoaBt8kfHQ_0\ttrain\nGApyoyRTlPk_0\tperson\nGArUrBTpgzk_4\tairplane\nGArUrBTpgzk_1\tairplane\nGArUrBTpgzk_3\tairplane\nGAzsUwyCRAI_0\tcow\nGBF7wVda328_0\tdog\nGBLwQswYGpQ_0\tdog\nGBUiAfFHr8o_0\tperson\nGBYAc4swbr8_0\tperson\nGBYFzcFWKtI_0\tskateboard\nGBYeOSgHxaw_1\tperson\nGBhV-vm_cDs_0\tmotorcycle\nGBhV-vm_cDs_1\tmotorcycle\nGBhV-vm_cDs_2\tmotorcycle\nGBhV-vm_cDs_3\tmotorcycle\nGBjWoHEvi24_0\ttruck\nGBnf-AAsQts_0\tperson\nGBvWcmiB_zQ_0\tperson\nGBv60Rpf6hA_0\tperson\nGBwqR6gIUJk_0\tperson\nGBwqR6gIUJk_1\tperson\nGB0RUQ72TDU_1\tmotorcycle\nGB0RUQ72TDU_2\tmotorcycle\nGB0RUQ72TDU_4\tmotorcycle\nGB1A1gXLxF8_1\tumbrella\nGB1A1gXLxF8_0\tumbrella\nGB2Z9Zd9kCM_0\tcow\nGB3M7jlJvZo_0\tumbrella\nGB3dD_Sz5yA_0\tcow\nGCECUCM275I_0\ttruck\nGCECUCM275I_3\ttruck\nGCECUCM275I_4\ttruck\nGCECUCM275I_1\ttruck\nGCECUCM275I_2\ttruck\nGCHyhn505e4_0\tperson\nGCL5aSCyDAQ_1\thorse\nGCR8piyI8to_0\tperson\nGCdYlCKelqg_2\tbird\nGCf79ImcoV4_0\ttruck\nGCiR2DBKEUo_3\tumbrella\nGCiR2DBKEUo_0\tumbrella\nGCyZCLCX4jI_1\tbus\nGC5X3-Zi5fo_0\tbear\nGC_4PRhWwy0_1\tperson\nGDBvvswiioY_0\thorse\nGDErDO6sQxg_0\tperson\nGDHukw9i8AE_0\tbear\nGDPBufHJ6pE_0\tperson\nGDVxjq335kg_0\tperson\nGDVxjq335kg_1\tperson\nGDW_ebhUmXg_0\tperson\nGDeoeNk-jj8_1\ttrain\nGDgRHR5rt5g_0\tdog\nGDhVskUd-i0_0\ttruck\nGDkTfXax1EI_1\tperson\
nGDr1CfMsWCo_0\tknife\nGDyR3j6e9uU_0\tbear\nGD0qZhFYMtE_1\tbear\nGD5H2vUIQUM_0\tbird\nGD7nVz18opA_0\tcow\nGEC16HE9LPs_0\tskateboard\nGEK0W7Soe5I_0\tperson\nGEOILdSs_m4_0\tperson\nGEXtPkuLXV4_0\tperson\nGElPgxFGsYM_0\tperson\nGEmM96O2bm0_0\tperson\nGEoAqEILC5I_0\tbicycle\nee4MHg5K9xo_0\tperson\nee4MHg5K9xo_1\tperson\nefANTTg0s7E_0\tperson\nefD7irKhsjg_1\tzebra\nefFDVTrJnI0_0\tperson\nefQ-zUFNN-U_2\tairplane\nefQ-zUFNN-U_3\tairplane\nefQ-zUFNN-U_0\tairplane\nefQ-zUFNN-U_1\tairplane\nefUVmXxR3pI_0\tperson\nefXikRhGmrs_0\tperson\nefdHHLZ3g1Q_0\tmotorcycle\neffHbT0DhsY_1\thorse\neffHbT0DhsY_2\thorse\neffHbT0DhsY_3\thorse\nefj0ZypW97U_0\tperson\nefl9qpSfN9o_0\tskateboard\nefo_cgnnucQ_4\tknife\nefqCl5PWA5Y_2\tbear\nef6fQWU1KdY_0\tperson\nef9zPCUJ5uQ_0\tboat\negByT16s_54_0\tperson\negByT16s_54_1\tperson\negHnmalt3d8_0\thorse\negQiifLgKHE_0\tperson\negVsaW3pIR8_0\tbus\negotrU2sxIs_1\tcow\negotrU2sxIs_0\tcow\negymuz3YUjw_0\tperson\neg0xHA2KO2M_0\tcar\neg0xHA2KO2M_1\tcar\nehAg6V-5Puk_0\tairplane\nehB-VoBE8As_0\tperson\nehFoBFIrRho_0\tperson\nehFvz7g6tcc_1\tperson\nehFvz7g6tcc_0\tperson\nehF--LpGjPU_0\tperson\nehI3hX4P2gg_0\tbus\nehSU0TuduDM_9\tboat\nehSU0TuduDM_0\tboat\nehSU0TuduDM_3\tboat\nehSU0TuduDM_7\tboat\nehSU0TuduDM_8\tboat\nehTOHuz8De4_0\thorse\nehhoOXi21uc_0\tperson\nehhzn87_kyY_0\tknife\nehpsJCYWhMo_0\tdog\neh0-hoyeQv4_0\tperson\neh383O3j2o8_0\ttrain\neh8ClQx55Pk_0\telephant\neh8ClQx55Pk_3\telephant\neh8ClQx55Pk_1\telephant\neh-Hpgj7SPM_0\tbird\neiIxHOvvvog_0\tperson\neiKfZPTeN-M_0\tperson\neiMVAVfFk50_1\tgiraffe\neiNlPbSqaQM_2\tbear\neiOC7H2_I7E_0\tmotorcycle\neiYV7UFe9_4_0\tperson\neiZm5CglnLc_0\tperson\neiirsESzuHs_0\tbicycle\neim8NPBqZXg_1\tperson\neis2vlxPtf4_1\tperson\neivFKGFBySc_0\tperson\neivMnaQyUKU_0\tperson\nei0PFx0qNIQ_1\tperson\nei0PFx0qNIQ_0\tperson\nei4Yn0KXnAM_0\tperson\nejDpzIUHAMk_0\tperson\nejD4KjqrkFo_0\tcat\nejIMw0_a1Zo_0\tperson\nejIMw0_a1Zo_1\tperson\nejVKT8cDDTY_2\tmotorcycle\nejoDQZqi4DU_0\tperson\nejsflVtvinE_0\tdog\nejzqfqBU2XY_2\thorse\nejzqfqBU2XY_0\thorse\nejzqfqBU2XY_1\thorse\nej5D22-gpzY_0\tperson\nekBhYo1n09M_0\tperson\nekGn7Al_5S0_0\tperson\nekOQkNLi9gA_0\tperson\nekPQmhXqsJs_0\tcow\nekQPPxQDQrA_0\tbird\nekYErFjRBcY_0\tperson\nekaQzIhIz6U_0\tperson\nekhId7QWajE_0\tperson\nekw22HGT0TY_0\tperson\nek6F1Yy6r4g_1\tperson\nek6F1Yy6r4g_0\tperson\nek9m3wFRD78_0\tmotorcycle\nelAJmgZ3uV8_1\tperson\nelIopJ6sLS8_0\tmotorcycle\nelS7CV83kDQ_0\tcat\nelbH9USSXbU_1\tperson\nele_x5If5RM_0\tcat\nelfDIDNaxO8_0\tbicycle\nelfDIDNaxO8_1\tbicycle\nelk9Eg_zAzA_0\thorse\nelwOqTHVPb4_0\tcar\nel_1tnvsCAY_0\telephant\nemAlGe0D2Ro_0\tcar\nemBk5WfF9MA_0\tperson\nemFvwwYH0Dk_0\tperson\nemLp02HobE4_0\tperson\nemO2DsNKmTw_0\ttrain\nemVjapACNME_0\tperson\nemWHcaPL5H0_0\tperson\nemXkTzHEyT4_0\tboat\nemhCPyXIbNk_0\tperson\nemqrQO4JZsU_1\tskateboard\nemxIavKneZw_0\tperson\nemzfRpng4hM_0\tbicycle\nem3XyVBpKCc_0\ttrain\nenA3HVeW4MM_1\tperson\nenCpXewY40c_0\ttruck\nenCpXewY40c_1\ttruck\nenR0OQhVBwE_0\tperson\nenWAeU6n9LQ_0\tperson\nenXS9AGUoow_0\tmotorcycle\nenY96p1ZALE_0\tknife\nenfPrTim6AU_0\tcow\nengcDIwacLg_1\tperson\nengcDIwacLg_0\tperson\nen06DIx0cz0_1\tperson\nen06DIx0cz0_0\tperson\nen6AOaqCY1s_0\ttruck\nen9gUgAJoek_0\tperson\neoFFf1yMhOg_0\tperson\neoauVNDdle8_0\tperson\neodvToXk2OQ_1\tcow\neodvToXk2OQ_0\tcow\neohpHQHPoXo_0\tdog\neovUEztTVZ4_0\tperson\neoyj6UfwM1c_0\tairplane\nepIcFi7yUZg_3\tcow\nepK_YUgNzQI_0\tcat\nepUTWEmTW1o_0\tbus\nepXYWAgJeJM_0\tperson\nepZSAxAzWRs_0\tperson\nepeLK68bI3k_0\tperson\nepeLK68bI3k_1\tperson\neph8ACa_bv4_0\tperson\neph8ACa_bv4_2\tperson\nepis0oQPudE_1\tper
son\nepu8oDLyhBw_0\tcow\nepxbwMupoU0_0\ttruck\nepxxfkiUpVQ_0\tperson\nep15pnX1AxU_0\ttruck\nep4od2aZYv8_0\tdog\neqAMk_GzwUg_0\ttruck\neqMRouLMQI0_0\tperson\neqPXFnE2SxE_0\tperson\neqTdm4-YomY_0\ttrain\neqWb0eTMl98_0\tcow\neqiPG6XAei8_1\tperson\neqiVR6aa8XA_0\tperson\neqnF1_Lwa94_0\tmotorcycle\neqswu7XtVeE_0\tboat\neqswu7XtVeE_1\tboat\neqvu61eQ-D0_0\tperson\neqwZeHPEjT0_0\tbus\neq2VUeTEEGM_0\telephant\neq2VUeTEEGM_1\telephant\neq2-yJIiWyA_0\tskateboard\neq7fzAhOZEo_0\tperson\neq8-99wqpC4_0\tmotorcycle\neq-XVpUOFlQ_0\tcow\nerDb15O0GYM_0\tperson\nerIMuEor6gc_0\tperson\nerJzcEpQ-sA_0\tperson\nerKEWcCPgjU_0\tperson\nerKRZXMcCzQ_0\tbus\nerLW6pBgIrE_0\tperson\nerLW6pBgIrE_1\tperson\nerWerfoGejo_1\tdog\nerZ0-WmkPj8_0\tperson\nerfJrdfPp8M_0\ttruck\neri-jOmjJ5U_0\tperson\nerprzr0GCa0_0\tperson\nerrX-c_luf8_0\thorse\nerwHbfRwbDc_0\ttrain\neryYeuoNAdw_0\tperson\nesEKixC0bi0_0\tmotorcycle\nesFUx8MS7FU_0\tperson\nesFUx8MS7FU_1\tperson\nesHEHZv3XAw_0\tperson\nesdMTvdz7G8_0\tperson\nesd9prHEDmY_0\tcat\nesnr6cTpfQI_0\tskateboard\nesnr6cTpfQI_1\tskateboard\nesrkVh27SSg_0\tgiraffe\nesr3dKZtZ9I_1\tperson\nestRADheTso_0\tperson\nesxEV1BYf8g_0\tdog\nes0lurDiGrM_0\ttruck\netCrz_vcvJI_0\tzebra\netFtHhL2hac_6\tbicycle\netHjccaFHjw_0\tperson\netZXvy6wqZM_0\tcat\netZjkcz1NXE_1\tperson\netfOefeQ0NA_2\tknife\netgjVXNON5k_0\tperson\nethiyhktDW0_0\ttrain\netrQY3yeg8M_1\tperson\netrQY3yeg8M_0\tperson\netu6chaT_o0_0\tmotorcycle\netu6chaT_o0_1\tmotorcycle\netu6chaT_o0_2\tmotorcycle\neuNO4mGjpL4_0\tperson\neuS2rEsG-jA_0\tperson\neuaiFpmh6SU_1\tperson\nGEuy-JvOFBM_0\thorse\nGEwLV10zHSM_0\tperson\nGEwYE_QVNHE_0\tboat\nGE061if8j60_0\thorse\nGE8D0jEjasg_1\tbird\nGFCN_4akSi4_0\tperson\nGFMwf7Ly_Sc_2\tperson\nGFMwf7Ly_Sc_0\tperson\nGFN08ryY-U0_2\tknife\nGFTwQgse_Lk_4\tknife\nGFXh14V5BN0_0\tcow\nGFkCQFowcfs_0\tperson\nGFkCQFowcfs_1\tperson\nGFlTNatYs1E_2\thorse\nGFlTNatYs1E_0\thorse\nGFmBVLxS0W4_0\tperson\nGFsVA4Rxqv0_0\tcow\nGFtZEmPze30_0\tperson\nGFytNaOS7eE_0\tboat\nGF28RuK9Mio_0\tperson\nGF28RuK9Mio_1\tperson\nGF29WU5hVFU_1\tumbrella\nGF29WU5hVFU_2\tumbrella\nGF4b86WLzWE_0\tperson\nGF-zdmzb4zY_0\tbus\nGGBhXIkXN-U_0\tdog\nGGCSOyr8iNg_0\tcat\nGGNkUcwxgU0_1\tairplane\nGGX2r0RT9h4_0\tbird\nGGY5BDDn5LE_0\tperson\nGGtf7t-SVb0_0\tperson\nGGytoCC23B4_0\tdog\nGG2kiaUm9pg_0\tperson\nGG_CxOFs69U_0\tbicycle\nGHAR-041e4w_0\tperson\nGHF_00q4fw0_0\tperson\nGHN9eBe1Bp8_0\tknife\nGHWPuquucrM_0\tcow\nGHZjWHKMwyw_1\ttruck\nGHqedSEAQ9k_1\tperson\nGHqmzbJnjVg_0\tperson\nGHu-Q-Jbh6E_0\tumbrella\nGH_-l0dCs1A_0\ttruck\nGINmKyxk55E_0\tperson\nGIOByl4-GaE_0\tperson\nGIQcZHeI0rA_1\tknife\nGIRWosek2kk_0\tperson\nGIesL1NmKrU_0\tairplane\nGIiKoRSDN-Q_0\tskateboard\nGIiKoRSDN-Q_1\tskateboard\nGItE5rGj_-g_0\tperson\nGI0iwCtSgJY_0\tperson\nGI7YeWGyVRM_0\thorse\nGJAe8ctAWb0_0\tperson\nGJHbNDEY178_1\tperson\nGJHbNDEY178_0\tperson\nGJIPOsnsWAg_0\tperson\nGJIPOsnsWAg_1\tperson\nGJL8p4_PeKo_0\tperson\nGJMk0Meedm0_0\tperson\nGJbtzWK_dYk_0\tperson\nGJpkQJ1A6Gw_1\tcow\nGJy5Zhvk6lE_0\tperson\nGJ1O_aGTN94_0\tmotorcycle\nGJ4kWS7SklQ_0\tperson\nGJ7mp6eUiPg_0\tcar\nGJ9641JuJGs_1\tperson\nGJ9641JuJGs_0\tperson\nGKCr5DPt-O4_0\tcar\nGKC9zObtOMM_0\tperson\nGKEhy910De4_0\ttrain\nGKWJ0lgaDCg_0\tumbrella\nGKWJ0lgaDCg_2\tumbrella\nGKewJtAM0mQ_1\tperson\nGKewJtAM0mQ_0\tperson\nGKhEkZ-cdNQ_0\ttrain\nGKlP0uncbyg_0\tperson\nGKlP0uncbyg_1\tperson\nGKlP0uncbyg_2\tperson\nGKlP0uncbyg_4\tperson\nGKmEvD6kEV0_0\tbicycle\nGKn-IcumftE_0\tperson\nGKpcLh6EzTI_0\ttruck\nGKs6SswOMow_0\tskateboard\nGKyR_cV3NzE_0\tbird\nGK1HKUicpqc_0\tperson\nGK7khWET2AA_0\tperson\nGLBHzmRhRXw_0\tperson\nGLCLinUtVW
M_0\tperson\nGLJJdMPYSaY_0\tperson\nGLLgtpj5VIc_2\telephant\nGLLkz3ew2Cw_0\tperson\nGLN48vyNNE8_0\tperson\nGLOfyCC7cpg_1\tperson\nGLOfyCC7cpg_0\tperson\nGLTbuhg3c9c_0\tcow\nGLTcmtEP3PQ_6\tperson\nGLTcmtEP3PQ_0\tperson\nGLTcmtEP3PQ_1\tperson\nGLTcmtEP3PQ_2\tperson\nGLTcmtEP3PQ_4\tperson\nGLT0qdbJFmE_0\tperson\nGLYc7lsUKvQ_0\tcow\nGLemLQ7Taz4_0\tdog\nGLiiNf5XBGw_1\tperson\nGLnBX7vZMds_0\tcar\nGLncyVpSovs_0\tperson\nGLonpYW6Yi8_0\tperson\nGLsxpYW-07A_0\tperson\nGLy3RuBdLZ4_0\tgiraffe\nGL2K160VZnM_0\tairplane\nGL5i6mrfwJQ_0\tperson\nGL6eTReYh8E_0\tgiraffe\nGL7g579uon4_0\tbus\nGL_EwiiBm1A_1\tperson\nGL_EwiiBm1A_0\tperson\nGMCQFxoF1UE_0\tbear\nGMJi6djWGYg_0\telephant\nGMLP7F_Da2w_0\tperson\nGMVqWicQ2d4_0\tmotorcycle\nGMeN9Z1A9X4_0\tcar\nGMj9b1A2R98_0\tbus\nGM3BiiUS2Xw_0\tcat\nGM31sVP8NMA_0\telephant\nGNJ088XwXpI_2\tskateboard\nGNLzZ4OPnHc_0\tboat\nGNLzZ4OPnHc_1\tboat\nGNN-BevC79g_0\tknife\nGNRZ4AjoiSE_0\tairplane\nGNawMpiTEFs_0\tperson\nGNnrNuC9zGU_2\tperson\nGNnrNuC9zGU_1\tperson\nGNqCvE7d9mE_0\tperson\nGNr1nF-F-40_2\tboat\nGNvEs3KBgRw_0\tperson\nGN97F0ERx8k_1\tperson\nGN97F0ERx8k_0\tperson\nGOE3QOj97xk_0\tperson\nGOLZ7CWDXjk_1\tperson\nGON778LYTqk_0\tperson\nGOQICMUoGL8_2\tperson\nGOWRiwkZo2U_0\tperson\nGOW84-_w-LQ_0\tbicycle\nGOZwEuPDmzc_0\tperson\nGOb0e4ojb3c_0\tairplane\nGOkeNGfFi8Q_0\tperson\nGOkeNGfFi8Q_1\tperson\nGOpAs6aca30_1\tperson\nGOpAs6aca30_2\tperson\nGOrO-A4yd5c_0\tperson\nGO0RyAWdVQA_0\tperson\nGO1tmJmOjZU_0\tcow\nGO9YRVC_2SA_3\telephant\nGO9YRVC_2SA_4\telephant\nGO98cqZbP2o_0\tcar\nGO98cqZbP2o_1\tcar\nGPABD8HFpQU_0\tskateboard\nGPCArlk4udc_0\tbird\nGPHwY1J1u04_1\tcat\nGPLKI0foxxc_0\tperson\nGPUUqd1IyNA_0\tdog\nGPUdCDtaGOQ_2\tboat\nGPViSMkz1ds_1\thorse\nGPViSMkz1ds_0\thorse\nGPZznxc87vA_0\tcow\nGPlHiCxNeIU_0\tperson\nGPnO7jt_-JI_0\tperson\nGPn2JSguaBI_4\tumbrella\nGPn2JSguaBI_0\tumbrella\nGPn2JSguaBI_1\tumbrella\nGPtN0Kb9qZs_0\ttrain\nGPzwYc908OM_0\tbicycle\nGP2YaQXsf0s_0\tumbrella\nGQJu2FlmC0A_0\tknife\nGQRDl6gw-n8_2\tbear\nGQRDl6gw-n8_3\tbear\nGQV1QfplpXU_0\tperson\nGQ6mrqpELDs_0\tperson\nGQ99sfZjwTo_0\tperson\nGRMv9irLuQw_0\tmotorcycle\nGRQUwn0jA8Q_0\tperson\nGRRXv9O7hNk_0\tmotorcycle\nGRRullNXQUY_3\tskateboard\nGRTcBPmHWPU_0\tmotorcycle\nGRjf8G-WDvc_0\tperson\nGRk94EZiwO8_0\tskateboard\nGRo9Bmi4ghA_0\tcat\nGRwCcOF0NyI_0\ttrain\nGRwCcOF0NyI_3\ttrain\nGRwCcOF0NyI_1\ttrain\nGRwCcOF0NyI_2\ttrain\nGRwvd8Xl-l0_0\tbird\nGR5qTAjCnB4_0\tcow\nGSD3hdUWKNg_0\tperson\nGSD_Asi3tsA_0\telephant\nGSD_Asi3tsA_6\telephant\nGSIFRlloCGA_0\tcow\nGSMYNBUuI74_1\tmotorcycle\nGSb8ilGRCd8_0\tumbrella\nGSkpDZZFQd4_0\tboat\nGSmR-G7zCN0_0\tairplane\nGSqatXKKzUU_1\tboat\nGS1El_XLryU_3\tbird\nGTaW87cQCZk_0\tbird\nGTegSO4BiDY_0\tperson\nGTgztSxvdzw_0\thorse\nGTg35QGB0bQ_1\tperson\nGTg35QGB0bQ_0\tperson\nGTjqtTiUFFA_0\tperson\nGTkZ7eZIV5I_0\tskateboard\nGTpF9CW8Kyo_2\tcow\nGTpF9CW8Kyo_3\tcow\nGTpF9CW8Kyo_0\tcow\nGTpF9CW8Kyo_1\tcow\nGTt9sqczKqg_0\tperson\nGTuP3gwjf70_0\tperson\nGT4askC-EmE_0\tskateboard\nGT4askC-EmE_2\tskateboard\nGT6Ta63CfGc_0\tbus\nGT7pB1SoSWQ_0\thorse\nGUA64cJx_1s_0\tperson\nGUG7toTLyt4_0\tbear\nGURTVjQ25hM_0\tairplane\neufhHTT-6cc_0\tperson\neujtr13Kbtg_0\tcow\neutsycO_2Zw_0\tumbrella\neu0WWqOzPNI_1\tboat\neu07YiPAVxk_0\ttruck\neu6zY6HpY1M_0\tperson\nevA7SzcjAkU_2\tknife\nevA7SzcjAkU_3\tknife\nevA7SzcjAkU_0\tknife\nevDr0RJRRV8_0\thorse\nevMMyqn2S94_0\tperson\nevRaMSC7xlI_0\ttrain\nevVOgDU7DsE_6\ttruck\nevcE8ru07G8_0\tumbrella\nevcWn6cN50A_0\tumbrella\nevhP2M5P0rM_1\tperson\nevksM4sehcQ_0\tcat\nevtk4IiqjkM_1\tperson\nevw-tqTTtQ8_0\thorse\nev1ATOeJPxY_0\tperson\nev1ATOeJPxY_1\tperson\nev53NA
Ljp3I_0\tperson\nev7a6Z-ZOv4_0\tperson\nev-fVsUuvfA_0\tperson\newB46nb-ZFI_0\tbird\newFZmQCCZm0_0\ttruck\newFZmQCCZm0_2\ttruck\newOgoCimrdA_0\telephant\newUWpmdjLHA_0\tbicycle\newUWpmdjLHA_2\tbicycle\newgdEY7GtsQ_1\tairplane\newkBRzmoZzo_1\ttrain\newkBRzmoZzo_2\ttrain\newkeB8zzSVE_2\tdog\newkeB8zzSVE_3\tdog\newkeB8zzSVE_1\tdog\newoUjWEEJS4_0\tdog\new9rbdv73TA_0\tumbrella\nexR3lT_G3Yk_0\tknife\nexZF88kJoP8_0\tperson\nexjWaQ0ssbM_3\tairplane\nexjWaQ0ssbM_0\tairplane\nexjWaQ0ssbM_1\tairplane\nexn-_MfEP6Q_0\tperson\nexoNfV0vU_Q_1\tperson\nexoNfV0vU_Q_0\tperson\nexw_qJh1qp8_0\tcat\nex6Il_1Ielw_0\tmotorcycle\nex7mPB9cYwc_0\tperson\nex7mPB9cYwc_1\tperson\nex-yo1W_s34_0\tskateboard\neyAxkbxVdHA_0\tperson\neyAxkbxVdHA_1\tperson\neyNJXyldIhM_0\tperson\neySeJsY8tZU_0\thorse\neyZeTi4-udw_0\tboat\neycvZhhuzOI_0\tperson\neyd3cO1cRyw_0\tperson\neyg_dFAAJ_c_0\tumbrella\neyi_kSPelbM_0\tperson\neyo2iTfyALs_0\tcat\ney49lNbkqdQ_0\tperson\ney7evH7qmFA_1\tperson\ney9CIllx21w_2\ttruck\ney9CIllx21w_5\ttruck\ney9CIllx21w_8\ttruck\nezOxb6H18Dk_0\tperson\nezX_8NsARn4_1\tperson\nezYCeDV1Aew_0\tbicycle\nezam_iANUkY_0\tmotorcycle\nezdehi1wmW4_0\tcow\nezktd-PtOQo_2\thorse\nezktd-PtOQo_3\thorse\nezrNhnjWp-s_0\tperson\nezrNhnjWp-s_1\tperson\nezu6OcJjjLk_1\tperson\nezvAmpvi364_1\tperson\nezyLlrEVZRU_1\ttrain\nez4u6-2yh8U_1\tperson\nez7mJtg4aoU_0\tcow\ne0Al-yQwL8w_1\tbear\ne0C174hEUpI_0\tperson\ne0HCj6FnKMo_0\tperson\ne0HrgDMAL5c_0\tboat\ne0K-Wc2SGSk_0\tperson\ne0V--elE2Dc_3\tboat\ne0V--elE2Dc_0\tboat\ne0XejLvBbTw_0\tmotorcycle\ne0dXS2okSxo_0\ttrain\ne0jUh6hQykw_0\tperson\ne0jUh6hQykw_2\tperson\ne0kJTvItoXc_1\tperson\ne0kJTvItoXc_0\tperson\ne0qJxStHuGA_1\tskateboard\ne0rXPv5Q8ac_0\tperson\ne1KQ3rXcBVg_0\tairplane\ne1KQ3rXcBVg_2\tairplane\ne1KQ3rXcBVg_1\tairplane\ne1S7tY6zlBs_0\tbus\ne1ZNGYPt280_0\tcow\ne1a0tLtZdm8_0\tperson\ne1dAdTW0-s8_0\tperson\ne1guDr5Lq88_0\tperson\ne1iYijyYnIc_0\tperson\ne1iYijyYnIc_1\tperson\ne1v5-Vy3ikU_0\tmotorcycle\ne11u2SRsMQk_0\tumbrella\ne110Ssoc3rc_0\thorse\ne2Biqc_Y8fI_0\tboat\ne2Biqc_Y8fI_1\tboat\ne2C6vpxx1BQ_1\tperson\ne2C6vpxx1BQ_0\tperson\ne2DeceLJ4QU_1\telephant\ne2DeceLJ4QU_0\telephant\ne2DmJ2nN-bM_0\tperson\ne2DmJ2nN-bM_1\tperson\ne2IXk3LUK0k_1\ttruck\ne2Jc499uBac_0\tbus\ne2MbvKCUxBQ_0\tskateboard\ne2oWEimFUeM_0\tboat\ne2oWEimFUeM_6\tboat\ne26M0NUTUcs_0\tperson\ne29Si0sk8Vs_0\tperson\ne3Ep8F-TVbQ_1\tbicycle\ne3Ep8F-TVbQ_0\tbicycle\ne3MrKt1yh3E_0\tairplane\ne3ezeG4Gm80_1\tknife\ne3fz03vzrmQ_0\tperson\ne3pGW6uqeQA_0\tcat\ne3tP581aZ0Q_0\tperson\ne34jQApS9Bw_0\tperson\ne3_zIH1Jrf0_0\tperson\ne4R8Aj-X5iA_1\thorse\ne4ZrrwoRRXc_0\tbear\ne4c8OdRhAyA_0\tknife\ne4c8OdRhAyA_3\tknife\ne4iZ27N3agg_0\tperson\ne4rO9AJXQzY_1\tperson\ne4yT58KhTcs_1\tairplane\ne4yT58KhTcs_2\tairplane\ne4zdJYlc4z8_0\tperson\ne47QRGUx_Hs_0\ttruck\ne47QRGUx_Hs_1\ttruck\ne48A0CBQct8_0\tperson\ne5CFfGS4B1s_0\tperson\ne5DZWu7GqG4_3\tbicycle\ne5MbNYLt7wU_0\tperson\ne5MbNYLt7wU_1\tperson\ne5RlRpaBXnE_0\tdog\ne5UjJAZHaBc_0\tperson\ne5VUEXqXFTM_0\tumbrella\ne5kfPy-MIGw_0\telephant\ne5lFDgi4EIs_0\tcow\ne5-Pz_Q8VUA_0\tperson\ne6F88LQJoLc_0\tperson\ne6G0gHixPGE_0\tboat\ne6IQ-jfygns_0\tperson\ne6IQ-jfygns_1\tperson\ne6T5hbKQwAs_0\tperson\ne6aWxOF189s_0\tperson\ne6hz-jEGxsg_0\tperson\ne6muu75RFmg_0\tbus\ne6s13mZyuYY_0\tskateboard\ne6s13mZyuYY_2\tskateboard\ne6s13mZyuYY_3\tskateboard\ne6xT3S6wuwE_0\tperson\ne64lVlYKNYs_0\thorse\ne7IeNjbA7ms_0\tmotorcycle\ne7JZ2C-e9_w_1\tskateboard\ne7Q3z9gbUw8_0\tskateboard\ne7TKWwysO8Q_0\telephant\ne7W79Xp4qxI_0\tperson\ne7aF0fG2O2U_0\tbear\ne7aF0fG2O2U_1\tbear\ne7eZQb8WjmQ_0\tperson\ne7xAzZCvd_Y_0\ttru
ck\ne70XtlB-Au8_0\ttruck\ne70XtlB-Au8_1\ttruck\ne70XtlB-Au8_2\ttruck\ne70XtlB-Au8_3\ttruck\ne70XtlB-Au8_7\ttruck\ne70jqVThihE_3\tknife\ne70jqVThihE_1\tknife\ne72VJJ7jkoI_2\tairplane\ne76gr0pJMLg_0\tboat\ne8BQbcBgcjc_0\tperson\ne8VeeESy9Xc_0\thorse\ne8XzpXJnucs_0\tmotorcycle\ne8XzpXJnucs_1\tmotorcycle\ne8XzpXJnucs_2\tmotorcycle\ne8Y4hXyFPDY_0\tperson\ne8ZFu6n4mg8_0\tperson\ne8b7eo56B5Y_1\tperson\ne8b7eo56B5Y_0\tperson\ne8mSJe1G9U4_0\thorse\ne8mSJe1G9U4_1\thorse\ne8mSJe1G9U4_3\thorse\ne8mSJe1G9U4_4\thorse\ne804z6ehgWE_0\ttrain\ne836XbTclWA_0\tperson\ne86xkdgTdTA_0\tperson\ne873uWjeaPU_0\tperson\ne88X3OKvqTI_0\tcow\ne9Ceg407V2o_1\tbird\ne9GSzFiQj8I_0\tperson\ne9GoxfmycMQ_0\tperson\ne9MugXot7JI_0\telephant\ne9MugXot7JI_2\telephant\ne9MugXot7JI_1\telephant\ne9Y8BHEdYpg_1\tperson\ne9Y8BHEdYpg_0\tperson\ne9Z237Wup_E_0\tboat\ne9aADbJBMmQ_1\tboat\nGUY72Rg_9g4_3\tairplane\nGUY72Rg_9g4_0\tairplane\nGUY72Rg_9g4_1\tairplane\nGUY72Rg_9g4_2\tairplane\nGUcZWh6tol4_0\tcow\nGUq5xrqphew_0\tcow\nGVCJZzVnGUQ_2\tperson\nGVCJZzVnGUQ_0\tperson\nGVCJZzVnGUQ_1\tperson\nGVG_dHMt7eA_0\ttruck\nGVRLfBtpGgA_0\tperson\nGVeNt6hXwK4_0\tperson\nGWCwYIRE8YU_0\tperson\nGWIAU4GsgZM_0\tperson\nGWQD6FxWwpk_0\tboat\nGWckuI3sTHA_0\tbear\nGWmOpSmpGmg_0\tcar\nGWmOpSmpGmg_1\tcar\nGWmOpSmpGmg_2\tcar\nGWsXKIAM9yY_1\tcat\nGWsXKIAM9yY_0\tcat\nGWygvbszdUs_1\ttrain\nGXS6axKBr7A_0\tperson\nGXX1pJeR1HE_0\telephant\nGXX1pJeR1HE_1\telephant\nGXZ3IXi7YXk_0\tperson\nGXcbgDsx_Zc_0\tperson\nGXfsYdVEMeA_10\telephant\nGXfsYdVEMeA_0\telephant\nGXfsYdVEMeA_5\telephant\nGXfsYdVEMeA_6\telephant\nGXfsYdVEMeA_8\telephant\nGXgoAnrkdVg_0\tperson\nGXiDQ52vcoY_0\tperson\nGXoA1zfvnOA_0\tcar\nGXrzW-OHh_Q_0\tcow\nGXtA9dxzvII_0\tperson\nGXyeuhOYX2k_0\ttruck\nGXyeuhOYX2k_1\ttruck\nGX1v3ymtHtc_0\tperson\nGX-3aTTy4lM_0\tperson\nGX-3aTTy4lM_1\tperson\nGX-3aTTy4lM_2\tperson\nGYA-3PblNaU_0\tperson\nGYHWtVM2x6c_0\tperson\nGYTD79P3b8w_1\tperson\nGYT5Cq1tl2Q_0\tcat\nGYWNYnWPaeE_0\tperson\nGYY-ElZl7ZM_0\tdog\nGYldHkVSD_A_3\tairplane\nGYmeM7epDjY_0\tperson\nGYmeM7epDjY_1\tperson\nGYoXwAkvJns_0\tperson\nGYsx_49_O1U_0\ttruck\nGYuIsHEGV6o_0\tperson\nGYuMuXQgLPI_0\tperson\nGY0HVEiAPvo_0\tperson\nGY3D9bb9kLY_0\tairplane\nGY65ShkktrM_1\tperson\nGY9iCFFBA20_0\tperson\nGY-carc6vxw_2\thorse\nGY-carc6vxw_3\thorse\nGY-carc6vxw_4\thorse\nGY-dmOLQNH4_0\ttruck\nGZIpKCyb0bU_0\tairplane\nGZLsv-Y_aRw_0\tperson\nGZM5nvvMeNo_1\tairplane\nGZOUGcF_xaM_2\ttrain\nGZThnpa-8Ak_0\ttrain\nGZUk3BlrK7k_0\tperson\nGZWH1bUqm9U_0\tperson\nGZYSkuRZwGE_2\tskateboard\nGZb9G8sVRz4_0\tperson\nGZb9G8sVRz4_1\tperson\nGZgL3ZQI9nM_0\tcow\nGZhuCclpFuk_0\telephant\nGZq8tIKR9b4_5\tbus\nGZsP_n7aFMo_0\tperson\nGZxvpxqvHFs_1\tairplane\nGZ0bYvVD_us_1\tbird\nGZ1aL_iE5a8_1\tperson\nGZ6PRvVVeZk_0\tperson\nGaAL3IYDUgM_0\tskateboard\nGaD4QsNCcik_0\tperson\nGaF_t9Af1hg_3\tumbrella\nGaJvFxg_lFY_0\tperson\nGaJ7Bu5UrgQ_1\tbus\nGaJ7Bu5UrgQ_2\tbus\nGaVmURUD-i8_0\tperson\nGaYAyNs2FDI_1\tperson\nGad1St-JBls_0\tdog\nGaeWhfSP3EA_2\tknife\nGagCDetg0dg_0\tbicycle\nGai7qgVSFc8_1\tcat\nGangZBQawtQ_0\tperson\nGax9nZtMs7M_0\tperson\nGayl2EVJTkw_0\tdog\nGa3YHyqOqYY_1\tperson\nGa3YHyqOqYY_0\tperson\nGa_Oju23T9s_0\tperson\nGbBl5CcJgeE_14\telephant\nGbBl5CcJgeE_6\telephant\nGbBl5CcJgeE_8\telephant\nGbBl5CcJgeE_9\telephant\nGbBl5CcJgeE_10\telephant\nGbC0DAAn-XU_3\tbear\nGbC0DAAn-XU_12\tbear\nGbC0DAAn-XU_14\tbear\nGbE-oXaNVBA_0\telephant\nGbE-oXaNVBA_3\telephant\nGbE-oXaNVBA_5\telephant\nGbE-oXaNVBA_6\telephant\nGbE-oXaNVBA_7\telephant\nGbE-oXaNVBA_8\telephant\nGbE-oXaNVBA_9\telephant\nGbE-oXaNVBA_12\telephant\nGbGEC5pQ9f8_1\tcow\nGbHLET097K8_0\tbo
at\nGbN_zMz1D6o_0\tperson\nGbOK07Tq7mA_0\tboat\nGbVDftpuPMo_1\tperson\nGbW-55xLUnQ_0\tairplane\nGbY3uHcC3ys_0\ttruck\nGbbhlv2Obsc_0\tperson\nGbbhlv2Obsc_1\tperson\nGbd1-rm9Oyw_0\ttruck\nGbmEMxbMtCI_0\tbicycle\nGbs4s3pX3H0_5\tknife\nGbs4s3pX3H0_0\tknife\nGbs4s3pX3H0_1\tknife\nGbs4s3pX3H0_2\tknife\nGbs4s3pX3H0_3\tknife\nGbulfCx1hwo_0\tperson\nGb_YkJHLgns_0\ttrain\nGb_YkJHLgns_1\ttrain\nGcCQF52Ok14_5\tperson\nGcCQF52Ok14_1\tperson\nGcCQF52Ok14_3\tperson\nGcCQF52Ok14_4\tperson\nGcEgsdqMiBg_1\tperson\nGcEsDxUkr00_5\telephant\nGcEsDxUkr00_1\telephant\nGcRRhnk4ynk_0\tperson\nGcnVDv6bIAk_0\tperson\nGctFFbsebBs_0\tperson\nGcwS7IyeG5Y_0\tmotorcycle\nGc0lgXRlxGE_1\tperson\nGc0lgXRlxGE_0\tperson\nGc3iNFz3s-o_0\tcow\nGc5OyOM0VxI_1\tperson\nGc5OyOM0VxI_0\tperson\nGdI2CnryrFQ_2\tcar\nGdNJ-VDNc3k_1\tperson\nGdQuxx_RXvs_2\tbear\nGdbphRsxpKU_5\thorse\nGdbphRsxpKU_3\thorse\nGdfyxcmHHOQ_0\tperson\nGdiGBeJ9m_k_0\tperson\nGdiGBeJ9m_k_1\tperson\nGdsJ0QHb83w_1\tperson\nGdsJ0QHb83w_2\tperson\nGduwjeptozQ_0\tperson\nGd5qUjEeqZ4_0\tmotorcycle\nGeHV-tf-ZGA_0\tbus\nGeUECF6hDkg_0\tairplane\nGeb74PkjTYY_1\tperson\nGehgPYVYwDs_0\tperson\nGek3IJfBaU0_0\ttrain\nGeuYAXldbbg_4\tairplane\nGeuYAXldbbg_1\tairplane\nGeuYAXldbbg_2\tairplane\nGeuYAXldbbg_3\tairplane\nGewTJtB97l8_2\tknife\nGe2suMLyOTY_0\tcow\nGe4SjOnEYWs_1\tperson\nGe4SjOnEYWs_0\tperson\nGe8RWLzmrE0_0\tperson\nGe8RWLzmrE0_2\thorse\nGe9uJatNWuw_0\tperson\nGe9uJatNWuw_1\tperson\nGe-VfDpriPY_1\tperson\nGe-VfDpriPY_0\tperson\nGfCjURNr9T4_0\tperson\nGfLxzlZxHic_0\tperson\nGfbcHsH3DKI_0\tperson\nGfeXUZVyvL4_0\tperson\nGfefENTSQOI_0\tperson\nGfkX7I9bclY_0\tcow\nGfqA0SZPeXU_2\thorse\nGfqA0SZPeXU_3\thorse\nGfxwasnA0Ao_0\tbird\nGfxwasnA0Ao_3\tbird\nGfyBiJNU7bY_0\tcar\nGf50aWojLhk_1\tairplane\nGgV4eSmNyaA_1\telephant\nGgV4eSmNyaA_0\telephant\nGgcoCmlTlbc_0\tperson\nGgfESlKFIkU_0\tdog\nGgkncqtrgPI_0\tperson\nGgsFohIKlpw_0\tdog\nGgyOGY2q9xE_0\tskateboard\nGg9uDi7KjJ0_0\tperson\nGhBPvHC15BE_0\tperson\nGhHPtGuUtRY_0\tperson\nGhI4uqxOQpc_0\thorse\nGhLdswZDYMs_0\tbicycle\nGhLdswZDYMs_1\tbicycle\nGhMC34aeHnU_2\tperson\nGhMC34aeHnU_0\tperson\nGhMC34aeHnU_1\tperson\nGhQRZOseJfY_0\ttruck\nGhbtO__NASs_0\tperson\nGhbtO__NASs_1\tperson\nGhbt5lVT3dk_0\ttruck\nGhiVm-6oFyg_0\ttrain\nGhwtPgHjLvg_0\tdog\nGhxWr3HvvXA_1\tperson\nGiRzA3Fe1-s_0\tperson\nGijruln92tk_0\ttruck\nGik59IGJFLo_0\tbird\nGioAI9XlGGg_0\tbird\nGioEMsI07Jw_0\tperson\ne9ihaIQuVMU_0\tknife\ne9ihaIQuVMU_2\tknife\ne9iolRKSwBw_0\tperson\ne9mOqKDBOVg_0\tperson\ne9nH--aGWDM_0\tperson\ne90GV6rl3NE_0\tperson\ne9-w67QSEBs_0\tperson\ne9-w67QSEBs_1\tperson\ne9_LqDqVkGs_0\tperson\ne9_LqDqVkGs_1\tperson\ne9_LqDqVkGs_2\tperson\ne-PcZyfAPZ4_0\tperson\ne-R-FxrDQao_0\tperson\ne-dVHSE1qXI_0\tperson\ne-gU8I2kZyY_1\tbicycle\ne-n0pRU6uSk_0\tbus\ne-n0pRU6uSk_1\tbus\ne-qbVMLqnEw_0\tperson\ne-siUblegSA_0\tdog\ne-siUblegSA_1\tdog\ne-v2yWUGKiU_1\tboat\ne-zbkYroVUk_0\tperson\ne-43rdp3psc_0\tperson\ne--Qr92yhBo_2\thorse\ne--vN-5QX-E_0\tperson\ne-_nLPye6sc_0\tperson\ne_APlM8VSiw_1\tperson\ne_APlM8VSiw_0\tperson\ne_FyX6iUBZk_1\tperson\ne_GD2rN9Jcg_0\tperson\ne_SYVD0TY14_0\tairplane\ne_UwPkRMD74_0\tperson\ne_aHtRh2PpI_0\tcat\ne_b_4zlKmdo_0\tgiraffe\ne_qdDAeerKQ_1\tbird\ne_-SOM0hufo_0\ttruck\nfAHFZWyNZQ4_0\tbird\nfAHFZWyNZQ4_2\tbird\nfAJAQb5tzFA_0\tdog\nfAJ939SI_YI_0\tperson\nfAKXvHREf8E_0\tbird\nfAMkbedQ0GI_1\tperson\nfAQoNDLgds4_0\tbear\nfAUG8-TdflE_0\tperson\nfAjj5137yKM_0\tbicycle\nfAm_6grpTOI_0\tperson\nfAyBUKM7898_0\tperson\nfAz2ecihxEU_0\tperson\nfA5ArJS7ScI_0\tcar\nfA6XfSl7pqY_0\tperson\nfA_OWAI_8kc_0\tperson\nfBH6rLEukMU_0\tperson\nfBIh-CAYfy0_0\tper
son\nfBLrr2zYnRw_1\tperson\nfBLvIU3Q7Rw_0\thorse\nfBPjBSdwz1o_0\telephant\nfBPjBSdwz1o_1\telephant\nfBP3dZYp3sM_0\tperson\nfBT1cNog4Lw_0\tperson\nfBkDTXhVYCs_0\tgiraffe\nfBmp8URVoB4_0\tcar\nfBsQegHOF8Y_0\tperson\nfBtfkn4uDKE_0\tcow\nfBvAf66603Q_0\tperson\nfBwrgO05rqo_0\ttruck\nfByljFegqK4_0\tperson\nfCADagfWgSU_1\telephant\nfCK_OirKTO4_0\tperson\nfCMJnkyFS5c_0\tperson\nfCMJnkyFS5c_1\tperson\nfCPVsi1S2jM_0\tcat\nfCTNp-hiUkQ_0\tperson\nfCTNp-hiUkQ_1\tperson\nfCT0UeuTcQk_0\tperson\nfCUZclkgF-c_3\tcar\nfCUZclkgF-c_4\tcar\nfCUZclkgF-c_5\tcar\nfCVoLETgca4_0\tbicycle\nfCW56GByDs0_1\tperson\nfCW56GByDs0_0\tperson\nfCX_8Q_OAos_1\tdog\nfCZXrHFimHM_0\tperson\nfCbvdNQUcRE_0\tcat\nfCdlrWXZ7kY_0\tperson\nfCiWi1Dk-yE_1\tperson\nfCkgtao7rJk_0\tmotorcycle\nfCmwPCLYVXE_0\tskateboard\nfCmwPCLYVXE_1\tskateboard\nfCm-8YmQfoY_1\tgiraffe\nfCoXLMBzqTc_0\tcat\nfCohGx6PWyM_0\tperson\nfCr-fmsVVWE_0\tperson\nfCsSoErwvfw_2\tskateboard\nfCsSoErwvfw_0\tskateboard\nfCsSoErwvfw_1\tskateboard\nfCtyUxRaSdQ_0\tskateboard\nfCwicNYDKmo_0\tperson\nfCzWVcZvGuk_1\tmotorcycle\nfC6O_2ljm_c_1\tperson\nfC6O_2ljm_c_2\tperson\nfC6O_2ljm_c_0\tperson\nfC8FUnipL3M_0\tbird\nfDBgRd9yK8Q_5\tairplane\nfDBgRd9yK8Q_1\tairplane\nfDBgRd9yK8Q_4\tairplane\nfDCK-s1gX18_0\tskateboard\nfDCadv28EEo_1\tperson\nfDCadv28EEo_0\tperson\nfDFpsal4hHo_0\tperson\nfDIVkvMCQ9I_1\tcow\nfDJjIhw4XBI_2\tperson\nfDJjIhw4XBI_1\tperson\nfDLBxom0wgI_1\tcat\nfDVesIz_ON0_1\tperson\nfDe30IPiQ0Y_1\thorse\nfDuiW9_sHcQ_1\tperson\nfDyXAhF761Q_0\tperson\nfD89z8ycv7U_0\tperson\nfD89z8ycv7U_1\tperson\nfD89z8ycv7U_2\tperson\nfEDj20Gce80_0\tboat\nfEK6hdzjG5E_0\tcow\nfESV3o1vc1A_1\tbird\nfES_1kR2d8o_0\tperson\nfEVLKYBuE7k_0\ttruck\nfEXq69B6L0s_0\tgiraffe\nfEZ5cqJWg0A_0\tbicycle\nfEdlpwoza6o_0\tperson\nfEdlpwoza6o_1\tperson\nfEdlpwoza6o_2\tperson\nfEgqRE0XOMM_0\tperson\nfEh5hyz4LCU_0\tskateboard\nfEiWI60P4XI_0\tbicycle\nfElOryAiN0s_0\tperson\nfEmh4mfGsCA_0\tperson\nfEupHSTMXLk_0\tknife\nfE0raHY_nY8_0\tcat\nfE_sSvVFvZU_0\tdog\nfFBkKrJlobs_0\tcow\nfFEDu-fiUUM_0\tperson\nfFGmvl4E9QI_0\tbird\nfFImZECw1c0_0\tskateboard\nfFImZECw1c0_1\tskateboard\nfFOTZMvg0n0_0\thorse\nfFRp0dBucFA_0\tbus\nfFTJuANVr2I_0\tperson\nfFWU4PNTKDo_0\tperson\nfFWU4PNTKDo_1\tperson\nfFaJ5epORzQ_0\tperson\nfFd91uPKDVA_0\tperson\nfFksYDaR-NI_1\telephant\nfFmCHQgzMRc_1\tperson\nfFmCHQgzMRc_2\tperson\nfFmhW2ygNKw_0\tperson\nfFncU3kR5qw_0\tcar\nfFogpyIr-Ic_0\tperson\nfFq0hnzgGSw_2\tbicycle\nfF0RlMrKBFo_0\tbicycle\nfF1S-952IOU_0\thorse\nfF3WOuwnvrA_3\telephant\nfF3WOuwnvrA_5\telephant\nfF3pBoS7xFg_1\tperson\nfF3pBoS7xFg_0\tperson\nfF34g3sNiHo_0\tperson\nfF7snD5S5Q4_0\tcar\nfF_BanWRtKo_1\tskateboard\nfF_BanWRtKo_0\tskateboard\nfGGJnSDPzUI_0\tperson\nfGI6_U9U_zc_1\tperson\nfGPsR0YiVaE_0\ttrain\nfGgJ0VACAo4_0\tumbrella\nfGlnCmVPzIs_0\tperson\nfGrC6VCXVL4_0\tperson\nfG1NOqIRoLA_0\tperson\nfG6uSVeocMo_0\tperson\nfG-4n3Gy1fk_0\tperson\nfHO3g6Q_bNE_0\tperson\nfHUjlWalvJQ_0\tperson\nfHVJzD_AvV8_0\tperson\nfHepRAiQQ04_0\tcow\nfHlfVMMfXNg_0\tperson\nfHm5WgSYk2Y_0\tbus\nfHoBjwC8H50_0\tdog\nfHoBjwC8H50_3\tdog\nfHsaxiTw0dI_0\tmotorcycle\nfHzSK8AEv5U_0\tperson\nfHzzixV1xyg_1\tcow\nfH5U2jXbkEg_1\tknife\nfH8PS8Fjvbg_1\tcow\nfH8PS8Fjvbg_2\tcow\nfIABVBcluZ0_0\tskateboard\nfIABVBcluZ0_1\tskateboard\nfIFMCt78hmI_0\ttruck\nfILyoB3Pgrg_1\tdog\nfIM7jmsq_FE_0\tperson\nfIN8z4lkdyA_0\tcar\nfIN8z4lkdyA_2\tcar\nfIN8z4lkdyA_3\tcar\nfIPXE6MOZp0_0\tairplane\nfIT1bTlW3UQ_0\tperson\nfIVT3rTMptI_1\ttruck\nfIXFrPFEL0w_0\tgiraffe\nfIlXSJxnKD8_0\tperson\nfInEVgREyyY_0\tdog\nfInYB8sD7tM_0\tperson\nfIrb5Y93wjw_0\ttrain\nfIvUwaa2ziY_0\tperson\nfIyrHecb8SQ_0\telep
hant\nfI0VoDDN2lE_2\tperson\nfI0VoDDN2lE_0\tperson\nfI0VoDDN2lE_1\tperson\nfI5fnVs_kWg_0\tmotorcycle\nfI8DySScPWU_0\tskateboard\nfJGPTgv8EUs_0\tperson\nfJJBGybbnH4_1\tknife\nfJJX9D4siG4_0\tcat\nfJTeqi3aqRc_0\tcar\nfJYGkMT9c6U_0\ttruck\nfJY5zGaYs8s_0\tperson\nfJdWgbIMXZ0_5\ttrain\nfJdWgbIMXZ0_0\ttrain\nfJdWgbIMXZ0_2\ttrain\nfJpRqXhL3wE_0\tskateboard\nfJp4DAu46Yg_1\tperson\nfJxbRDMY46o_0\tperson\nfJyBgU7rZvE_0\tperson\nfJ71o3Q-oVE_1\tcat\nfKDRpRcSnrw_0\tcat\nfKHs2FNZk6M_0\tperson\nfKLJqhEdsTY_0\tcow\nfKLJqhEdsTY_1\tcow\nfKLS0DAexvw_1\tboat\nfKLS0DAexvw_2\tboat\nfKLS0DAexvw_3\tboat\nfKRZ4PPWgg8_1\tperson\nfKcOtlmf6r0_3\tboat\nfKcOtlmf6r0_2\tboat\nfKgpRiyDlvc_0\tperson\nfKhENDvpnmA_0\tboat\nfKhe37bCgeA_1\thorse\nfKp-Lvw2bUM_2\telephant\nfKp-Lvw2bUM_3\telephant\nfKp-Lvw2bUM_4\telephant\nfKrxRvMxZqM_0\tperson\nfKxBpYS29uM_0\tdog\nfKyPRwF5y6s_0\tperson\nfKzFEc6hR-c_2\tperson\nfK89Z2AwlCg_3\tbus\nGiuUBGsdiqI_0\tperson\nGizeLrnWRmk_1\tperson\nGizeLrnWRmk_0\tperson\nGi--TM8Xz3I_0\tperson\nGjCs_s2EnpE_0\tperson\nGjFr4qO_LX4_0\tdog\nGjJFQButa0w_0\tbear\nGjJk6U2crcw_0\tskateboard\nGjJp-yqt7xk_0\tairplane\nGjZDPTKpIdE_0\tperson\nGjZP-buSAG8_0\tperson\nGjdyi0kf79Y_0\ttruck\nGjfhgZMeHAA_0\tperson\nGjgu3OFbWKI_0\tbear\nGjkrI0adkJk_0\tperson\nGjmNPrYyCwg_0\tperson\nGj87GZKvhdo_0\thorse\nGkCXvg93pAA_0\tcow\nGkGG1F5by14_0\tperson\nGkddmkbGSAc_0\tcat\nGkfp-yV9e94_0\tperson\nGklwzbjOzYQ_0\tperson\nGkmRFBuktnQ_0\tperson\nGkxkfi_wHeA_1\tmotorcycle\nGkxkfi_wHeA_0\tmotorcycle\nGk6IzYQADXg_1\tskateboard\nGk6IzYQADXg_0\tskateboard\nGk9v8ABOPNw_1\telephant\nGlLzIn-6ouU_1\tbicycle\nGlLzIn-6ouU_2\tbicycle\nGlPdixjfu44_0\tcat\nGletqIQ8irw_0\tmotorcycle\nGlsMcq1cM2c_1\tbird\nGlxEVs7z_7Y_0\tperson\nGl7S2JNezLg_0\tboat\nGl7S2JNezLg_3\tboat\nGl9cy66E4FQ_2\tknife\nGl_UMssuTWU_0\tperson\nGmI47tbiNQ0_0\tperson\nGmKT2rhDILU_1\tknife\nGmQX3sIhhqo_0\tcow\nGmS0yrU3Hcw_0\tperson\nGmUFocQWPTo_1\tboat\nGmdxq1glmKY_1\tdog\nGmeGRg8XZ5M_0\tperson\nGmvKmbIHKHM_1\tperson\nGmvKmbIHKHM_0\tperson\nGmww9V50JtU_0\tdog\nGm9BnQSZlxk_1\tperson\nGm9kb3zHsLA_0\tcat\nGnFoElm_rrw_0\tdog\nGnGd8Q_cSHU_0\tperson\nGnGd8Q_cSHU_1\tperson\nGnO2sxJNWjk_0\telephant\nGnRp7QHoAr4_0\ttrain\nGnkSrEpnmRo_1\tperson\nGnmgLr5p-r8_0\tbus\nGno0JyFsjGk_5\tknife\nGn0av9LV5FU_0\telephant\nGn3AqY6vUyU_0\telephant\nGn7B_MiLuhA_0\tskateboard\nGoEBr-GbeCk_0\telephant\nGoEcYxqxcZ8_1\tbus\nGoEy1J3s8Xs_0\tcow\nGoRGaOgttBU_0\thorse\nGoUjZ5wJ2do_0\tcar\nGoWyqQorqOY_0\tcat\nGoXlqK766lk_0\tperson\nGolDzhH16vg_0\ttrain\nGorfZ7y-Jw8_0\tskateboard\nGosFitiV7as_0\tperson\nGotzQ9ecvkM_0\tperson\nGoubTEJzKUI_0\tperson\nGo16BKYvDSs_0\thorse\nGo5M-oyC28A_0\telephant\nGo8BM-B0ML4_0\tskateboard\nGpCjTjkSw3k_0\ttrain\nGpCjTjkSw3k_5\ttrain\nGpCjTjkSw3k_3\ttrain\nGpCjTjkSw3k_4\ttrain\nGpCjTjkSw3k_2\ttrain\nGpDilZGSveI_0\tperson\nGpJmJforKzo_0\tperson\nGpPbMduP_3Y_0\tcow\nGpProJiVxa4_0\tbear\nGpTPDl3MzZw_0\tcat\nGpVy_gD1slw_0\tdog\nGpY4Nw8LLy4_0\tbird\nGpkftB3rq5g_0\tdog\nGpn_kF1lXuc_0\tbicycle\nGpn_kF1lXuc_8\tbicycle\nGpn_kF1lXuc_13\tbicycle\nGpn_kF1lXuc_14\tbicycle\nGpzE4RQTM1Y_0\tairplane\nGp3g6UYBBzw_0\tperson\nGp3g6UYBBzw_1\tperson\nGp70TnjZRfU_1\ttrain\nGp70TnjZRfU_2\ttrain\nGp70TnjZRfU_0\ttrain\nGqZeX-EEEL8_0\tperson\nGqc_LkQvKak_2\thorse\nGqjVd_dRiB8_0\tperson\nGqjVd_dRiB8_1\tperson\nGqjoBpwsgUc_0\tperson\nGqjoBpwsgUc_1\tperson\nGqntj1GoicU_0\tbus\nGqzN0dyl5p4_4\ttruck\nGq-mMFeLCyo_0\tperson\nGrG-ipHg_4w_0\tperson\nGrK4qEJjeKE_0\tairplane\nGrNDwiO4kdI_0\tairplane\nGrQ0zJbkeXE_0\tperson\nGrXOOtPiIGw_0\tzebra\nGrYsw9-Skqg_0\tperson\nGrZvWtxffXE_0\tperson\nGrpvM1_CRqI_0\ttrain\nGruxXrzWzjk_0\t
airplane\nGruxXrzWzjk_2\tairplane\nGruxXrzWzjk_3\tairplane\nGruxXrzWzjk_5\tairplane\nGrzyUDtV-Ug_0\tperson\nGr6be_D6d9Q_2\tskateboard\nGsFDHyoPppk_0\tperson\nGsGHB19iuE4_0\tperson\nGsKJMkVSeV4_2\tairplane\nGsL7VYYWhu0_0\tperson\nGsOgw9XtlWc_0\tairplane\nGsOgw9XtlWc_1\tairplane\nGsTlT_7Zb1Y_0\ttrain\nGsVvc55IHn0_0\tskateboard\nGshXL9V-lrM_1\tperson\nGsj4aXqBPHM_0\ttruck\nGsn06D15nmk_0\tmotorcycle\nGsrSyK5ymQo_0\tboat\nGsrenPacLW0_1\tperson\nGs67R7prarI_1\tmotorcycle\nGs7J9Yo-uF0_0\tcow\nGs7J9Yo-uF0_1\tcow\nGs79ZsyWm74_0\tperson\nGtAKWYvc9kY_0\telephant\nGtCbEqqQgqY_0\tperson\nGtCbEqqQgqY_1\tperson\nGtD2m1EXxjc_1\tbicycle\nGtKaIcQJZcc_1\tperson\nGtLYNeredOY_0\tboat\nGtVrmoeEcMM_0\tknife\nGtZPw5ftw88_0\tperson\nGtZSRodviU8_0\tperson\nGta1hcIAAE0_0\telephant\nGtiiYqVQ2Kw_0\tperson\nGtmp8y8APfQ_1\tskateboard\nGtnqm4SnEXo_0\thorse\nGtnqm4SnEXo_1\thorse\nGtnqm4SnEXo_2\thorse\nGtnqm4SnEXo_3\thorse\nGtnqm4SnEXo_4\thorse\nGtqcx01NTTw_0\tknife\nGtsvc9lA7hs_0\tairplane\nGt33VfmFDWw_0\tperson\nGt6q9b3QUvE_0\tbicycle\nGt6q9b3QUvE_2\tbicycle\nGt7thmVY6aQ_0\tperson\nGuQvGMFuhu4_1\tcar\nGuQvGMFuhu4_3\tcar\nGuXelRN3wMo_4\tbear\nGuaD24NfCe0_0\tperson\nGuawwNMbfBI_0\tperson\nGue43DvNTGc_1\ttrain\nGuf15LHosg8_0\tperson\nGugU0nZdPJU_0\tbus\nGuhfGduN9v0_0\tperson\nGulmsZq-VsU_6\tboat\nGulmsZq-VsU_0\tboat\nGulmsZq-VsU_3\tboat\nGulmsZq-VsU_4\tboat\nGulmsZq-VsU_5\tboat\nGusEs8RA4_o_0\tmotorcycle\nGuwTG6RtcFI_0\tperson\nGu4MWCc2Wws_0\tbicycle\nGu-vFv_w9Vo_0\tperson\nGvFmkdxnKyI_0\thorse\nGvIj2sMkJwM_0\tperson\nGvNhgCGtUOQ_0\ttruck\nGvQvyfTNykM_0\ttruck\nGvRM_UnjJoE_2\thorse\nGvdMRPX4KR4_0\ttrain\nGvdMRPX4KR4_1\ttrain\nGvdMRPX4KR4_5\ttrain\nGvoIcT-hFek_0\tperson\nGv9mTaerVLc_0\tperson\nGwFrSa-YwfI_0\tbear\nGwFrSa-YwfI_1\tbear\nGwIn1NaaEwE_0\tbus\nGwbpMG2B14Y_0\ttruck\nGwgaNLd1f7s_0\ttruck\nGwlNXPuUvXM_0\tperson\nGwnBP9a07RE_0\tperson\nGwnBP9a07RE_3\tperson\nGwnBP9a07RE_4\tperson\nGwnBP9a07RE_1\tperson\nGwnBP9a07RE_2\tperson\nGwx1ad4lW1Q_2\tperson\nGwyl7djxZkg_0\tcow\nGwy4ODXAAU8_0\tperson\nGw5YyHT1Nt8_0\tperson\nGw9Vi_Io9DM_0\tperson\nGw_Tiv72jms_1\thorse\nGxANCkxq7Ng_0\tmotorcycle\nfLCd0DDhfBk_0\tperson\nfLEUT0rTkv0_0\tbird\nfLJniCJFPTg_3\telephant\nfLPHwVvk6K4_0\tperson\nfLPHwVvk6K4_1\tperson\nfLWW1YWO26Y_0\tbird\nfLdMmSIfseM_2\tperson\nfLdMmSIfseM_0\tperson\nfLe279fKywo_0\tdog\nfLsDTJxlsW8_0\tperson\nfLwrxElzLZs_0\tperson\nfLyNbq9v6kg_0\tperson\nfL1w15qwbqE_0\tperson\nfMOnb4P7tww_1\tperson\nfMOnb4P7tww_0\tperson\nfMO1J7ojQqk_0\tdog\nfMTosfHKy2I_0\tdog\nfMi6lVyCOHw_0\tboat\nfMwCpOTv9RY_0\tbus\nfM-puV4uyzs_0\tperson\nfNAZ9IDLZy0_0\tperson\nfND_OguW0MM_1\telephant\nfNIdPhAsjiM_0\tcat\nfNJSPU5r3sc_0\tperson\nfNO_o1D0kvY_0\tperson\nfNdRm3HWQmo_1\tmotorcycle\nfNgr2EBEDCQ_0\tcar\nfNgr2EBEDCQ_1\tcar\nfNg3y0FHjgg_0\tperson\nfNhDT1fwzKM_0\tperson\nfNhDT1fwzKM_1\tperson\nfNh54BNEJBQ_0\tcat\nfNw9dDcM4ms_0\tbear\nfN-FYknWOSk_1\tperson\nfN-FYknWOSk_2\tperson\nfN-43XPvLwg_0\tmotorcycle\nfOLR2dvBtqo_0\tcow\nfOO1pHvrPWQ_0\tperson\nfOatLQK_AyQ_3\tbicycle\nfOcPVX4sAxg_0\thorse\nfOjKgQf86dk_0\thorse\nfOkrLuGKDvk_0\tperson\nfOkrLuGKDvk_1\tperson\nfOkrLuGKDvk_2\tperson\nfOsd2aWzfBo_0\tcow\nfOtnatCU7_Q_0\tperson\nfOuV2101nEo_0\tbear\nfOv8ocd2xhA_2\tknife\nfO30fgQYdT4_0\tbus\nfO8Do_0RQXU_0\tperson\nfO9GgD7GqE0_2\tbus\nfPBIIZV6fuU_0\tperson\nfPMNtuJztSA_0\tperson\nfPVn9Wxf_HQ_0\tperson\nfPVn9Wxf_HQ_1\tperson\nfPrhiYslRjA_0\tperson\nfPzDDdztZNk_0\thorse\nfPzQyo7caqU_0\tperson\nfPzqpL90owQ_6\tbear\nfP5AyxuGIS8_0\tperson\nfP8x_x2_k5g_0\tperson\nfP-DMm3u5n4_0\tcat\nfQEGEb4W3IE_0\tperson\nfQNyLEXwnn0_0\tperson\nfQOjoYB5hPQ_0\tperson\nfQOjoYB5hP
Q_1\tperson\nfQOymYsdTtU_0\tperson\nfQdA_-549Dk_0\tdog\nfQh5RtZzYzo_0\tbicycle\nfQlChBB42M0_0\tperson\nfQoJWcmQmsU_1\tperson\nfQo0G2i1QjY_0\tperson\nfQt3g_9u1RQ_0\tairplane\nfQyE_yIAu_0_1\tskateboard\nfQ26oO2Y5NM_0\tbicycle\nfQ4H6UmTepU_5\tgiraffe\nfREDiuJlBf8_0\tperson\nfREDiuJlBf8_1\tperson\nfRFF0xtrWhI_0\telephant\nfROdeQpu88o_1\tknife\nfRS5rhYP7LM_0\tperson\nfRXDSh8gr0c_1\tperson\nfRZ7Wze7ATs_3\tknife\nfRcegyxH0Is_0\tcar\nfRhNtVu6anA_0\tdog\nfRjCbO3MyU8_0\tperson\nfRmnBvuwZlU_0\tdog\nfRmnBvuwZlU_1\tdog\nfRrLguORoeU_1\tumbrella\nfRrLguORoeU_2\tumbrella\nfRrd-Z2R-Gs_0\tperson\nfRtzYh_gGgI_1\tcow\nfRwzMPH6Kvw_0\tperson\nfR1zDIeBHFg_0\tperson\nfR6FrFNXUxY_0\tperson\nfR-JNy5hccc_0\tumbrella\nfSA7T5svJ-o_0\tbus\nfSBe_a8ZkZU_0\tcat\nfSey4VJgLM0_0\tperson\nfSfKYTVt7V8_2\tbird\nfSfX4Z6SR2U_0\thorse\nfSj-h8lAhWw_0\tcat\nfSoqM6oq2AA_0\ttrain\nfSoqM6oq2AA_2\ttrain\nfS0098HnnhM_0\tperson\nfS3KL3nj7FY_0\tperson\nfS73PiHaNi8_0\tperson\nfS8_byjM-1M_3\tzebra\nfS8_byjM-1M_0\tzebra\nfS_6fgFOiPU_3\ttrain\nfTFLfGUcgMs_0\telephant\nfTFLfGUcgMs_3\telephant\nfTFVwPKxUHE_2\telephant\nfTP9YgSJZg8_2\tknife\nfTVb5uxWnsI_0\tperson\nfTVb5uxWnsI_1\tperson\nfTgirzB_QLU_0\tperson\nfThV1JtaTJg_0\tperson\nfTkIm1nb6qg_1\tbird\nfTkIm1nb6qg_2\tbird\nfTnnG_WcLYY_3\tknife\nfTnnG_WcLYY_4\tknife\nfTwiavhNzxs_0\tperson\nfUB-cH8rjW4_1\tperson\nfUB-cH8rjW4_0\tperson\nfUF__EdDFVs_0\tskateboard\nfUISEtXSRYM_0\tperson\nfUU4R6RP4ek_0\tmotorcycle\nfUXpqgf4jUA_0\tbus\nfUd8LjmonBM_0\tperson\nfUetaCH3tZk_0\tperson\nfUg6JULdTnU_0\tperson\nfUonzpmV18o_3\tbird\nfUqVKgWVVNY_1\tperson\nfUqVKgWVVNY_2\tperson\nfUwzXH9i0yQ_0\tperson\nfUx60fl9UkU_0\tperson\nfUzsVWD48bA_0\tperson\nfU3o6Frqdww_0\ttruck\nfU4DzirdCVE_1\tairplane\nfVAmI93Yb6E_0\tcat\nfVAsOuag4vY_1\tgiraffe\nfVHZEHosow0_2\tperson\nfVH3n0aghP4_1\tperson\nfVH3n0aghP4_0\tperson\nfVH7PpDqlPE_0\tboat\nfVIVas1R1tk_0\tcow\nfVOy449KQlY_0\tperson\nfVX7qR-o-9I_0\tcat\nfVZfWzDBb-c_0\tperson\nfVZ_9hWIGpA_2\ttruck\nfVdrMKHN9WY_1\tcow\nfVq7Of0Tr-s_0\tperson\nfVr3XVUzJaA_0\ttrain\nfVv5EqFYsAY_0\tperson\nfV80H_L3AN8_1\tmotorcycle\nfWLqbV7Z7Go_1\tperson\nfWLqbV7Z7Go_0\tperson\nfWb_-8hhubg_0\tperson\nfWmJ9tUUCwg_0\tperson\nfWpdcmgr5r4_0\thorse\nfWxgjNDC4OQ_0\tcar\nfWxgjNDC4OQ_1\tcar\nfWxsOgW3P6U_0\tperson\nfW1Z_Mx1RaA_0\tperson\nfW4fh_WBiMY_0\ttrain\nfW7yPljMFRc_0\tperson\nfW7yPljMFRc_1\tperson\nfW_HPaNBsDE_0\tcat\nfXCFktk2xdc_0\tperson\nfXLB02IH0G4_0\tperson\nfXLB02IH0G4_1\tperson\nfXOdZ0uKuBc_1\tdog\nfXWqvRfBWto_0\tperson\nfXX7K6CQfBw_0\tairplane\nfXYn01Cgmqs_0\tdog\nfXY7h0cc6tw_0\tcow\nfXbnEKMaIoM_1\tboat\nfXbnEKMaIoM_0\tboat\nfXka5y708fI_1\tperson\nfXowuJDXhhU_0\tperson\nfXyBm7_EDVc_0\tskateboard\nfXzIQASqygY_0\tbird\nfX-kSrf_K8w_0\thorse\nfYDgPdRtmjU_0\ttrain\nfYLtnvuW_VI_0\tmotorcycle\nfYMA0fLN8sI_0\thorse\nfYN5ZIicl_k_0\tcar\nfYmfHE2mONE_1\tperson\nfYnsIFGQfT8_0\tperson\nfYql4FiApLQ_0\thorse\nfYtm_pGBWkU_0\tperson\nfYu5ChRgapY_0\tmotorcycle\nfYw5KVCsg_4_0\tperson\nfYyI8x0tNAA_1\tbear\nfY4-6vsjmD8_0\tperson\nfY82KLfOpbk_0\tperson\nfY82KLfOpbk_1\tperson\nfZCdkf9VQzU_2\tcow\nfZEFEAYBlGE_0\tcat\nfZFYdgZbSBg_0\tperson\nfZFYdgZbSBg_1\tperson\nfZJOS8BlA-w_0\tperson\nfZOtury_J_w_0\tperson\nfZTIKbSjOhk_0\tairplane\nfZTJH_9Pqvg_0\tperson\nfZTJH_9Pqvg_1\tperson\nfZWP75nltcM_0\tbird\nfZXzEYFmZ_8_0\tperson\nfZXzEYFmZ_8_1\tperson\nfZiiYH3WfD8_0\tskateboard\nfZnbOFaSEQc_0\tperson\nfZnbOFaSEQc_1\tperson\nfZp_UgW_xZU_1\tmotorcycle\nfZp_UgW_xZU_0\tperson\nfZu7wEVEuX8_0\tperson\nGxHmm60dKvc_0\tskateboard\nGxLI4BFLrps_0\tperson\nGxPYf4SAQvE_0\tperson\nGxPYf4SAQvE_1\tperson\nGxWuAfBV300_0\tperson\nGxg0Pt_9bIE_0\tperso
n\nGxwwTXW-DdQ_2\ttrain\nGx1zPI3b2oc_0\tperson\nGx3xtKPwlz0_1\thorse\nGx4ryd6AGl4_1\ttrain\nGx4ryd6AGl4_2\ttrain\nGx4ryd6AGl4_3\ttrain\nGx4ryd6AGl4_0\ttrain\nGyGdlCtDdJc_0\tperson\nGyIKdb5KDHk_1\ttrain\nGyPRnKI78iA_0\tperson\nGyU8x9urAxE_0\tmotorcycle\nGyVDsnuS5jU_0\tperson\nGyXlgRxQ1jo_0\ttrain\nGyXlgRxQ1jo_1\ttrain\nGyZHiIEOBos_0\tcat\nGya_TrOGXpo_0\tperson\nGyhjyC5aJ8U_0\tbus\nGyjb_P1W7TA_2\tbus\nGyn_wSuRB3w_1\ttruck\nGyzaf_gaIYY_0\tmotorcycle\nGy9JueTT4XU_0\tperson\nGy_XuBCvbUc_1\tdog\nGy_XuBCvbUc_2\tdog\nGzB9OTV44PA_0\tperson\nGzHy2xjKB_8_0\tperson\nGzLmftr6tl8_0\tperson\nGzRkvFxVlx0_0\tperson\nGzTDLPCsgSM_0\tperson\nGzVj8bI0bSk_0\tskateboard\nGzVj8bI0bSk_1\tskateboard\nGzcgYGEqOlY_1\thorse\nGzesZ0laH2w_0\tmotorcycle\nGzizYdL25ZY_0\tperson\nGzjkTrnmEnU_0\tairplane\nGzjkTrnmEnU_1\tairplane\nGznFDBDT2c0_0\ttruck\nGznFDBDT2c0_2\ttruck\nGzrgq_nWH_Q_0\thorse\nGzujCDTak_4_0\thorse\nGzujCDTak_4_2\thorse\nGzy_PnFtEpM_0\tperson\nGz3Np50b9q4_0\ttruck\nG0DQ6VdMp-U_7\tcar\nG0DQ6VdMp-U_0\tcar\nG0DQ6VdMp-U_1\tcar\nG0DQ6VdMp-U_2\tcar\nG0DQ6VdMp-U_4\tcar\nG0DQ6VdMp-U_5\tcar\nG0DQ6VdMp-U_6\tcar\nG0FSe53KN-w_0\tperson\nG0WsFATo9RQ_0\tperson\nG0dXxEbeJnM_1\tperson\nG0d44YoKXX4_0\tperson\nG0kDhLojiI4_0\tgiraffe\nG0leBoTgEx4_0\tperson\nG0rwWyFSsYE_0\ttrain\nG0r2tR6EcF8_1\tperson\nG0urH-9ytbc_0\thorse\nG01Xi8VMxgQ_0\tperson\nG03JTuHY_RM_0\tknife\nG1AIHF-KITc_0\tperson\nG1AtN7CvCXw_0\tperson\nG1EnmuHlxig_0\tperson\nG1P_XnEL4dc_1\tperson\nG1P_XnEL4dc_0\tperson\nG1TS-PvdREA_0\tperson\nG1TS-PvdREA_1\tperson\nG1ThERK4a8E_4\tairplane\nG1ThERK4a8E_0\tairplane\nG1UoN56m5DM_0\tperson\nG1YNrrT9-z8_0\tbird\nG1YNrrT9-z8_1\tbird\nG1cY71JK5_E_0\tmotorcycle\nG1c0-CTyZ3I_0\tperson\nG1dKhZZARDk_0\tairplane\nG1z6RMtKkbM_0\tbird\nG1z6RMtKkbM_1\tbird\nG11cHAnx17E_0\thorse\nG13ARgckI9w_0\tperson\nG17Kpx1bgXM_0\thorse\nG1_R_EJpLZU_0\tcow\nG2FXcVDezv4_0\ttruck\nG2HOmWxj5gg_0\tperson\nG2LNQIwbLHE_0\tperson\nG2S4rwP6qJY_0\tbicycle\nG2V6wliL2AA_0\tknife\nG2g4Z-Syzi8_1\tdog\nG2lFYYEolz4_0\ttrain\nG2lFYYEolz4_2\ttrain\nG2x5gACWSwA_0\tcow\nG2z7yjdCUuI_0\tairplane\nG23Q_C35Uqs_0\tbear\nG24yJOgl9t0_1\tperson\nG25iisvOYhA_0\tcat\nG2-v9IBlnTs_0\tperson\nG3AuCS7s68w_0\tbird\nG3IID08lWos_0\tperson\nG3P-Vvra2GU_0\thorse\nG3SowFCFa0g_0\tperson\nG3VeVH6pbdE_1\tperson\nG3a0EYtnqHA_0\tperson\nG3cazaory7w_0\tperson\nG3f8bIoGGZ0_0\tdog\nG3kNB0zhHQc_0\tperson\nG3pT4MJrpDI_5\tumbrella\nG3pT4MJrpDI_6\tumbrella\nG3pT4MJrpDI_4\tumbrella\nG3vP7_U6yXU_1\tcow\nG37Dm4oy794_0\tbicycle\nG38EbyEOITE_0\thorse\nG38SrxcVYWs_1\tperson\nG39ryVtNnhQ_3\telephant\nG39ryVtNnhQ_8\telephant\nG39ryVtNnhQ_9\telephant\nG39ryVtNnhQ_11\telephant\nG4PD_RAK48Y_0\tperson\nG4VPBDOgq54_1\tskateboard\nG4VpcUuXgRs_0\tperson\nG4VpcUuXgRs_1\tperson\nG4ckSGXUGts_0\tperson\nG4fbkcKiZVg_0\tperson\nG4nRZ4PHvC4_0\tdog\nG4rJejZ9FIM_0\tcar\nG4r0UJvtDXs_0\tcow\nG4xFWKKoN0M_0\tmotorcycle\nG47wnMA6RVE_0\tbus\nG4_xR7lZIPo_3\tbear\nG5D1cAo2D6s_1\tperson\nG5JwolS0D1M_5\telephant\nG5QgL60_yfc_0\tknife\nG5SlrQeATlc_0\tbus\nG5SlrQeATlc_2\tbus\nG5hG8j0KxBI_0\tperson\nG5ixkqq66VA_0\tperson\nG5rBbx_kODY_0\tperson\nG5ztukDN_Qg_0\tzebra\nG51fdi_hG_0_0\ttrain\nG52uuPWcC3M_0\tumbrella\nG553b8ZAd3Q_0\tperson\nG58FuwBYL-0_0\tskateboard\nG5_UJ1wEKh4_0\tperson\nG6OttGznP9E_0\tperson\nG6OttGznP9E_1\tperson\nG6QMME1QbK8_2\tcar\nG6Qmm4T-cd0_0\tbus\nG6WiR4W4WWk_0\tperson\nG6b9lySVCCY_0\tperson\nG6eAvUHoDkc_0\tperson\nG6fvYSH13nI_2\ttrain\nG6iVTjyPM04_1\thorse\nG6sFOs8MgGU_0\tbird\nG6sFOs8MgGU_3\tbird\nG6sFOs8MgGU_6\tbird\nG66e5ltBFoI_0\tperson\nG7DhRPK7pwc_1\tbicycle\nG7F-ufxEXPY_0\tknife\nG7H7fQ_Q1Ec_0\tperson\nG7
H7fQ_Q1Ec_1\tperson\nG7ID9RdMSkE_0\tperson\nG7MvPG8Qv84_0\tgiraffe\nG7TezoE9Cmo_0\tperson\nG7WblvVQPF0_0\tperson\nG7Z01jmMzlI_0\tbird\nG7krBQa_KLc_0\tperson\nG7p90FBQk_0_0\ttruck\nG7slUshqPvY_0\telephant\nG74HXSqYO-A_0\tmotorcycle\nG75uQAEuUkE_0\tperson\nG766vinfuBw_5\tbicycle\nG766vinfuBw_9\tbicycle\nG77KKnCpwWY_3\tskateboard\nG8EC6svgwKU_0\tperson\nG8NIqmq7YdE_2\tbear\nG8V2UsTc1Ik_0\tcat\nG8V33bTVNII_14\tbicycle\nG8V33bTVNII_1\tbicycle\nG8V33bTVNII_2\tbicycle\nG8V33bTVNII_6\tbicycle\nG8V33bTVNII_9\tbicycle\nG8XX8bkx6Ek_0\tperson\nG8hStuDYwH0_2\tairplane\nG8kDZAPbUe8_0\tperson\nG8kDZAPbUe8_1\tperson\nG8k84FwnW2k_0\tmotorcycle\nG8lDrK3u3r0_2\telephant\nG8lfwRN3Iew_12\tboat\nG8lfwRN3Iew_0\tboat\nG8lfwRN3Iew_8\tboat\nG8lfwRN3Iew_9\tboat\nG8lfwRN3Iew_11\tboat\nG8sDCWad2Bg_0\tcat\nG8s2n3jAKW8_0\tcow\nG8tbj2R0iso_0\tperson\nG80DOuBBH_Y_3\tairplane\nG8--2JpJa6g_0\tperson\nG9DdsOO1mZo_0\thorse\nG9FQJdIxjsk_0\tbird\nG9YPEOrV5UU_0\tperson\nG9YPEOrV5UU_1\tperson\nG9YPEOrV5UU_2\tperson\nG9ZKH_DS9DU_0\tperson\nG9gsnqhd_Sw_0\tcat\nG9hPaEx7Ci0_1\tknife\nG9i66tUOspc_0\tdog\nG9juxPad3zY_0\tperson\nG9nlPUwJQB0_0\tperson\nG9nvXjuig6s_0\tperson\nG9qCl1NZelo_0\tcow\nG9rxIfeUWVo_0\tairplane\nG9vDsElCKAY_0\tdog\nG9zd0G8dIt0_0\tperson\nG93PAKTtVpM_0\thorse\nG97UC0qtVDw_0\tperson\nG97YtHMd2hw_0\tperson\nG99rEXOdlC8_0\thorse\nG9_TgGWQQi8_0\tperson\nG-Sr-qmWZNo_0\tcow\nG-YYtvCU7qY_0\tdog\nG-d6o3nTBFA_0\tzebra\nG-nFiFb0Xos_1\tknife\nG-nbiqZuFdc_2\thorse\nG-qCe2DK3Tk_0\tmotorcycle\nG-u_ThqhoJE_0\ttrain\nG-yCRlVSs6w_0\tperson\nG-3kOsn1fPY_1\tperson\nG_ADLUKVq8Y_0\tboat\nG_LtPKO6be4_0\thorse\nfZ1GVGZmTRA_0\tperson\nfaJuqm4umTQ_0\tperson\nfaSv8ijeKeE_0\tperson\nfaVBgge6xkE_0\tperson\nfaW2tWwuCMg_1\tperson\nfaW2tWwuCMg_0\tperson\nfahs60oGhLU_0\ttrain\nfatTPMeG5Pc_1\tbear\nfa-rHhFEloA_1\ttruck\nfa--elcQpd4_0\telephant\nfbDYKST2P-I_0\tmotorcycle\nfbFVM0UM5V0_0\tperson\nfbM5MhIve5s_0\tdog\nfbM5MhIve5s_1\tdog\nfbiXTCkCkqY_0\tskateboard\nfbmZZXaRkak_5\thorse\nfbmZZXaRkak_6\thorse\nfbmnWcE_64U_0\tskateboard\nfbsyvHQPZZk_1\tdog\nfb3Iq9yQ1VY_0\tperson\nfb3WxEfe8l8_0\tmotorcycle\nfcCb2W4HMLk_0\tperson\nfcD6n99azfw_0\tperson\nfcGNPf6n7Ws_0\tbear\nfcWegrm8wCE_0\tperson\nfcbcnvGoWLs_0\tcar\nfchtQi7-OD4_0\thorse\nfclxNO1L-rY_0\tcow\nfcpGNeDgpDI_0\tperson\nfc1qNL5u2wg_0\tperson\nfdCTLMd6wEY_0\tcat\nfdQaoSZKA_s_0\tperson\nfdRULl8YSnU_0\tcow\nfdYvCuft5zQ_4\telephant\nfdYvCuft5zQ_5\telephant\nfdYvCuft5zQ_1\telephant\nfdYvCuft5zQ_2\telephant\nfdZBeWyKON0_0\tperson\nfdbvWvUoFW8_1\tbird\nfdbvWvUoFW8_2\tbird\nfdbvWvUoFW8_3\tbird\nfdkrZ9uL854_0\tperson\nfdlDkbbDniw_1\telephant\nfdmV18YEDKM_0\tcat\nfdnBDcIwPBA_0\tperson\nfd3ea86gmJI_0\tmotorcycle\nfd3ea86gmJI_1\tmotorcycle\nfd8Ba2cZgxI_2\tbear\nfeAexE1IYq8_0\tperson\nfePU3BlF4Zc_0\tperson\nfePU3BlF4Zc_1\tperson\nfeQX_1dqh9g_9\tbicycle\nfeQX_1dqh9g_1\tbicycle\nfeQX_1dqh9g_3\tbicycle\nfeZfxIunWHo_0\tperson\nfeZoXB7I6wE_0\tperson\nfedmeW-WImw_0\ttrain\nfegJtwcNo5c_0\tbicycle\nfeh4XVzjQdI_0\tcat\nfelt48AIbIs_1\tperson\nfenYF-k-y4c_0\tskateboard\nfeqLG8n4nDE_1\tperson\nfe05wKXl2cI_0\tperson\nfe05wKXl2cI_1\tskateboard\nfe5_49oxMwc_0\tperson\nffIQZZ_P3ck_0\tcat\nffOeGlw8_C8_1\tcow\nffZoY75S_-k_1\tbird\nffZoY75S_-k_0\tbird\nffbSaNikNF4_1\telephant\nffeYBfcgF3s_0\tperson\nfftSD6UfvEA_1\tperson\nffttXyArNGc_1\tknife\nffvXiSjPp6c_0\thorse\nffwk_8ycQiA_0\tperson\nff1PHzfARZk_0\tperson\nff5MH6QQuJk_6\tknife\nff5MH6QQuJk_2\tknife\nff5SaJnQg5M_0\tperson\nfgEpQHGYIjc_0\tperson\nfgFy8l-b1iI_0\tmotorcycle\nfgJJxPEHVZQ_0\tperson\nfgPShysxuQM_0\tcat\nfgQE-9shdmQ_0\telephant\nfgUjCKe_e_Y_0\tperson\nfgWtwTKCtMQ_0\tp
erson\nfgfizI4AnVs_0\tperson\nfggT4HM2Uy4_0\tperson\nfgsaC375d38_1\tbird\nfgvUj1mCqio_0\ttrain\nfg1ISXcyb10_1\tdog\nfg5mCaScLE4_10\tumbrella\nfg5mCaScLE4_0\tumbrella\nfg5mCaScLE4_3\tumbrella\nfg5mCaScLE4_4\tumbrella\nfg5mCaScLE4_6\tumbrella\nfg5mCaScLE4_7\tumbrella\nfhHLCLuQAdE_0\tbird\nfhHLCLuQAdE_3\tbird\nfhHLCLuQAdE_4\tbird\nfhHLCLuQAdE_1\tbird\nfhHLCLuQAdE_2\tbird\nfhQN_vhNmgo_0\tcow\nfhan95LbdqQ_1\tknife\nfhmsHcZfBC4_0\tperson\nfhutr5rLQN0_0\tperson\nfh5lB6U-7Wk_0\tperson\nfiGa0nIEYbw_0\tperson\nfiKecNhAgFU_0\tmotorcycle\nfiS0pY80kkU_0\tdog\nfiWtkuDUFvM_0\telephant\nfiZAhg2twZs_0\tperson\nfigjWJDEn1c_0\tperson\nfijO0rB1rfY_0\tairplane\nfinRU64JVRU_1\tbus\nfi2s2k_aamk_0\tperson\nfi46OpYa89I_3\tbicycle\nfi46OpYa89I_10\tbicycle\nfi46OpYa89I_2\tbicycle\nfi6gdEVUAUc_0\tcat\nfi8YGUm_6x0_0\tperson\nfi9GleMDHIc_0\tperson\nfjF31Mh-tNQ_0\tperson\nfjKXALm76kI_0\tbus\nfjXufPzimEQ_0\tperson\nfjZ4J-BZX2U_0\tperson\nfjaHYcaE7-w_0\tperson\nfjaHYcaE7-w_1\tperson\nfjnR81fSTeI_0\tumbrella\nfjnxqBnMZzs_0\tperson\nfjtn0lRVX_4_0\ttruck\nfjwgdNBSCFc_0\tperson\nfjwgdNBSCFc_1\tperson\nfj29rB34ea8_0\tperson\nfkERi_ma2UE_0\tperson\nfkERi_ma2UE_1\tperson\nfkHiDyuUaWA_0\tperson\nfkIfLHGu_CQ_0\tperson\nfkQEEtG6Tbg_0\tperson\nfkSf5a3q6oY_0\tboat\nfkSf5a3q6oY_3\tboat\nfkUDB0V3UXc_0\thorse\nfkUDB0V3UXc_1\thorse\nfkVSILZPyXg_0\tbear\nfkaKyYrWPpQ_0\tperson\nfkfnbZ2MSXk_4\tbicycle\nfkfnbZ2MSXk_0\tbicycle\nfkfnbZ2MSXk_6\tbicycle\nfkx0e2gvPYA_0\ttruck\nfkyM4LNUCck_0\tperson\nfk0v7vZDpgU_0\tperson\nfk10mtIF_Hs_0\thorse\nfk8yMMO1gRA_0\tperson\nfk8yMMO1gRA_1\tperson\nflADy--Uwx8_0\ttruck\nflERyzHjhzQ_0\tskateboard\nflMijcdhRAU_0\tperson\nflgTyT4DB7E_0\tbear\nflgaLcoSjb4_0\tbear\nfluEronPyZk_0\tcow\nfl6-NRwVy10_0\tperson\nfl7Q9yxFoOs_2\tperson\nfl95IAyDN-s_0\tskateboard\nfmERtylbqN4_0\tperson\nfmGJj0qYc6g_1\tperson\nfmGJj0qYc6g_2\tperson\nfmLKgz4DQhQ_0\tairplane\nfmL66yeOiI8_0\tperson\nfmRfUvIIvT8_0\tperson\nfmYELQL9Cs0_0\tbus\nfmbEAdugI3Q_0\tperson\nfmbb6SQ6qiI_0\tperson\nfmbb6SQ6qiI_1\tperson\nfmbu89zGN4Y_0\tperson\nfmdem4Z9BHI_0\tbird\nfmfg5yyhjkA_1\tperson\nfmiq_EhaURY_1\tperson\nfmiq_EhaURY_0\tperson\nfmtIa6nxUd4_0\ttrain\nfmuzrZHZYis_0\tskateboard\nfmwC1khd3BU_2\tperson\nfm3zFVlJw4k_1\tperson\nfm-ScTLdSL8_1\tbus\nfm_bcsJYhu4_0\tdog\nfnAGderLxPg_0\telephant\nfnAGderLxPg_3\telephant\nfnDP4B5jpSY_0\tperson\nfnFMQ2VFlEc_0\tperson\nfnOL3ZL61u0_0\tperson\nfnOkwsmzdaI_0\thorse\nfnRq5X91IV0_0\tperson\nfnZR6FD_eZ8_0\tboat\nfnZR6FD_eZ8_1\tboat\nfnbSgwO8v0c_1\tboat\nfnbsAmTQJOs_0\tbicycle\nfnbsAmTQJOs_1\tbicycle\nfniJ36z0_Pc_0\tcow\nfnj1YtAaztU_0\tperson\nfnkHdQf9H3w_0\tknife\nfnmuFbydHek_0\tperson\nfnpjkwiPkSY_0\tskateboard\nfntRlkYDiD0_1\tperson\nfntZVzkwhz4_1\tperson\nfnvst-Sk4MU_0\tumbrella\nfnvst-Sk4MU_1\tumbrella\nfnz6gTPuInQ_0\tdog\nfnz6gTPuInQ_1\tdog\nfoAoOCF4rE4_0\tcar\nfoI1jEbg9uA_0\ttrain\nfoJs0wXX1O8_0\ttruck\nfoaFgrzsPOY_0\tperson\nfobJTCY7ifQ_0\tbus\nfodsoLtLzqI_1\tcat\nfojRgMUsu3c_0\tperson\nG_RgJ0t0Cbo_0\tperson\nG_aU-_2ZiSw_0\tdog\nG_lOQAV6xWs_0\tcat\nG_poofS7HD0_1\tperson\nG_poofS7HD0_0\tperson\nG__VTazZtp0_0\telephant\nHARRnedV05U_0\tcar\nHAVUursfTOI_1\tzebra\nHAtu6frOH1k_0\tperson\nHA1TDbNot8E_0\tperson\nHA-iE7bcfT0_0\tcar\nHA-iE7bcfT0_1\tcar\nHBI13CpuAmI_0\tknife\nHBLJbCs1mSg_0\ttruck\nHBMah_r3E1g_0\tperson\nHBOqQBe7rhE_0\tperson\nHBO6G57uhXA_0\tperson\nHBY4_6b_sRY_0\tcat\nHBiSuZWtb4E_0\tboat\nHBmaJJ0nTAo_0\tperson\nHBwjWdXrpPA_0\tdog\nHBzYVphfmRQ_0\tperson\nHCA4jkg9HTY_1\tperson\nHCA4jkg9HTY_0\tperson\nHCEjNJewxbw_0\tperson\nHCJ1EYfF8qg_0\telephant\nHCKZ7kihdaM_2\tairplane\nHCMBgpQ2z18_0\tcow\nHCSbzHGXxmA_0\tc
at\nHCczjWUmlW0_1\ttruck\nHCczjWUmlW0_0\ttruck\nHCg0k7LnfkY_1\tcow\nHCg0k7LnfkY_0\tcow\nHCiRQdh20qg_0\tdog\nHCm-B3JjzhY_0\tcow\nHCpxRBja8lE_0\tperson\nHCp6gYC9NFE_0\tcow\nHC72_Yrigik_0\tperson\nHDN4DqO_KLg_0\tdog\nHDQEWwETuU4_0\tperson\nHDRKiYaoEnA_0\tperson\nHDSw0KM8cSs_0\tperson\nHDkI156rPRA_0\tperson\nHDmK6y86kYM_0\tperson\nHDmK6y86kYM_1\tperson\nHDnYEdh7xG8_0\tperson\nHDqUvaFm_R0_0\tskateboard\nHDr5if6Mb_4_0\tperson\nHDziFGwpXmg_1\tcar\nHDziFGwpXmg_2\tcar\nHDziFGwpXmg_3\tcar\nHDziFGwpXmg_7\tcar\nHD1tKnKT1Dc_0\tmotorcycle\nHD7QKzuFNas_1\tperson\nHD7QKzuFNas_0\tperson\nHD_alEnCVhM_0\ttruck\nHD_alEnCVhM_1\ttruck\nHD_wYO2_O8k_0\tperson\nHD_4ZJr68p8_1\thorse\nHEIjtOJze90_0\tperson\nHEfIJ3wMKRI_1\tperson\nHEmv-biWoEA_0\tairplane\nHErkHysJd-M_0\tperson\nHEr_leMW1zE_0\tbear\nHEr_leMW1zE_3\tbear\nHEr_leMW1zE_1\tbear\nHEyY4zEX-no_0\tperson\nHE-4YEdBwuw_0\tdog\nHE-4YEdBwuw_1\tdog\nHFDK_y7kibQ_0\tknife\nHFE9ujNILoA_0\tcat\nHFQFlm1jWiE_0\tperson\nHFQFlm1jWiE_1\tperson\nHFRCZSouOn4_0\tbird\nHFWQl2JJfic_2\tperson\nHFa18pRSsXU_0\ttrain\nHFlanXHBGHg_0\tperson\nHFuw8C2bQ6g_0\tperson\nHF07qDRPgrw_0\thorse\nHF1xhyTtWLk_0\tmotorcycle\nHF3Nn3KqXOk_0\tperson\nHF3Nn3KqXOk_1\tperson\nHF4PefI86r0_0\tperson\nHGFcsJmjWHs_0\telephant\nHGFcsJmjWHs_9\telephant\nHGFcsJmjWHs_4\telephant\nHGFcsJmjWHs_5\telephant\nHGFcsJmjWHs_7\telephant\nHGLC_YFRxPY_0\tskateboard\nHGLLnmQiCU0_0\tperson\nHGLLnmQiCU0_2\tperson\nHGLLnmQiCU0_1\tperson\nHGLdrgf2e2c_0\tperson\nHGVNoha70iA_0\ttruck\nHGZDROOjAY4_1\tperson\nHGZDROOjAY4_0\tperson\nHGeCBN48g9o_0\tperson\nHGm4OftDlT8_2\thorse\nHGnIxotAPOU_0\tperson\nHGnegc2CRTM_0\tperson\nHGvXva6SUvE_0\tperson\nHGw4URr4QUs_0\tperson\nHG1zQzSX2rU_0\tperson\nHG8oY2Ac4-M_0\tperson\nHG_JAnXBzJQ_0\tskateboard\nHHGq5gd6w1g_0\tskateboard\nHHPW65GVeoA_0\tperson\nHHRUnCEVnAo_0\tcat\nHHc5mD1TxGQ_1\tknife\nHHe9m9BOi3A_0\tperson\nHHgC0pkNiIA_0\tperson\nHHgC0pkNiIA_1\tperson\nHHi26rWtC38_0\tperson\nHHx5E8VfnkY_0\tperson\nHH0OILx6PKY_0\tperson\nHH1JApHMx2I_0\tdog\nHH148v63a5o_0\tperson\nHH9wMNMJ2sE_0\telephant\nHIBd79qG-XQ_0\tperson\nHICJGOFvwoc_2\tbird\nHIHX1rpDx_I_0\tcat\nHIIQ917jPqg_0\ttrain\nHIJGcmgyEcg_0\tknife\nHIJGcmgyEcg_1\tknife\nHIKyhRtWQ4c_2\thorse\nHIK-Z8wXFug_0\tperson\nHISWMgqg80E_0\tskateboard\nHITf8extnnk_0\tperson\nHIXuU8Z0N9o_1\tmotorcycle\nHIgiF2bkOys_0\tperson\nHIgiF2bkOys_1\tperson\nHIiu2EVu5H8_0\tperson\nHIqhXDkhHsc_0\tperson\nHIqr0-BB8Xo_1\tknife\nHIrcAjP1fDs_2\tbird\nHIz27dqnl20_0\tbus\nHI3L38NCy0A_1\tboat\nHI3L38NCy0A_0\tboat\nHI_h7HfFDVw_0\tboat\nHJGPBeom3y4_1\tumbrella\nHJSiTzkFpHk_0\tperson\nHJVpMFJT2LU_0\tperson\nHJVpMFJT2LU_1\tperson\nHJg7wtoy2vk_0\tperson\nHJhZhn0zf1s_0\tperson\nHJi1L5HxuLo_0\tskateboard\nHJi1L5HxuLo_1\tskateboard\nHJi1L5HxuLo_2\tskateboard\nHJq4kVvdeRg_1\tskateboard\nHJrd3kpvjh0_0\tperson\nHJr5BOgO9XY_0\tperson\nHJ6BZjeSHTY_0\tboat\nHKFJzdCsRfA_0\tperson\nHKGK0FLN9vA_2\tzebra\nHKGK0FLN9vA_3\tzebra\nHKIwynmyQp4_0\tperson\nHKWELXwIVvI_0\tperson\nHKqHmDjxF6Y_1\tperson\nHKsVn1IWaas_0\tperson\nHK28Vb__IfY_0\tperson\nHLAEqFEcR90_4\thorse\nHLAEqFEcR90_0\thorse\nHLAEqFEcR90_2\thorse\nHLAEqFEcR90_3\thorse\nHLBgSJD-3lg_0\tbicycle\nHLL_j-CQKqQ_0\tumbrella\nHLaiRkL4gFA_0\tmotorcycle\nHLhbGKVR4mE_3\tdog\nHLy3UUDhaJY_4\tgiraffe\nHL06bx_HNg0_0\tcat\nHL6dNcrAEoM_0\tperson\nHL8fh6O6iUA_1\ttrain\nHL9F68y-0kY_0\thorse\nHL9F68y-0kY_1\tperson\nHL9o2Vs9d8s_1\tperson\nHMF0KrAf0iI_0\tperson\nHMIGIwIcNq8_0\tperson\nHMJerOjZn4I_0\tperson\nHMQQrRvzwiM_0\tboat\nHMUBbUP6Ko8_2\tboat\nHMV7H81wz84_0\ttrain\nHMb-pPTMZ5I_0\tumbrella\nHMxMledcSVE_0\tperson\nHMyUpcpZGdM_1\tbird\nHM4hJE0Db2Q_0\tperso
n\nHM4zY3uzwOQ_0\tperson\nHM7sD8YClkI_0\tperson\nHM_3ck6yooo_0\tperson\nHNGh3Rvn6Sw_2\tknife\nHNGh3Rvn6Sw_3\tknife\nHNRwM8zXMTM_0\tperson\nHNXQ_dkhX-Y_0\ttruck\nHNdRITK9TGE_0\tperson\nHNeVOXPyunw_2\tperson\nfo9SmkQa35Y_0\tmotorcycle\nfo9SmkQa35Y_1\tmotorcycle\nfpM1eiK3iok_0\ttruck\nfpNLFTgOciY_1\tumbrella\nfpRq9BsaPzs_1\thorse\nfpRq9BsaPzs_2\thorse\nfpVZYKlsFsU_0\tboat\nfpdUwZ8Gnd8_1\tcow\nfpeYfCUzvDY_0\tcat\nfpkxYBJDTtI_0\tperson\nfpkxYBJDTtI_1\tperson\nfpmtNez1u0o_0\tbus\nfpnTZF4bvk8_0\tperson\nfpomSxrdTyE_0\tperson\nfpo2kf1idyo_0\tperson\nfpp_41AxRNI_5\tgiraffe\nfpp_41AxRNI_1\tgiraffe\nfpp_41AxRNI_4\tgiraffe\nfqQL3QPq-lo_0\ttrain\nfqXvzEGxSak_0\tbus\nfqcie5yyOxA_0\tcat\nfqfHWT5hjkY_0\tcat\nfqkVB4qZbgw_0\tperson\nfqlWb2OJg3Y_0\tbus\nfqnioIm10xY_1\ttrain\nfqpMhE5qOKk_1\tperson\nfqxGN6r9oIY_0\tzebra\nfq5Zh2Lo9GQ_0\telephant\nfq959dAMasM_0\ttruck\nfrFSlwby-0k_0\ttrain\nfrFrggXiJZY_1\tperson\nfrItg4I9oEQ_0\tperson\nfrItg4I9oEQ_1\tperson\nfrJtciauQQw_0\tperson\nfrRHj0FPzVQ_1\tperson\nfrW5BpQ3-Fw_0\tperson\nfrXxZevI11c_0\tperson\nfrXxZevI11c_1\tperson\nfrY6tIPR-Co_0\tbicycle\nfreW9Vk3GhU_1\tperson\nfrfLZ70XIXI_1\tdog\nfrgCmAtYao4_1\tboat\nfrh4LMyWaQw_0\tperson\nfrn-rfqmGVs_0\tperson\nfrx5Uv7-1zw_0\tperson\nfr3S3gEtDS0_1\tperson\nfr616yExbeg_0\tknife\nfsD7pYdfrpg_0\tperson\nfsE0DlVODpY_1\tperson\nfsFtKjirvM4_1\tperson\nfsFtKjirvM4_0\tperson\nfsOoFz6I_js_1\tperson\nfsOoFz6I_js_0\tperson\nfsVlTdh13Lk_0\tperson\nfsXVGaRpUNg_0\tperson\nfsd-DhcH5gE_0\tperson\nfsd-DhcH5gE_1\tperson\nfsh-wcyuPM0_0\tperson\nfs3oXXx75XA_0\tperson\nfs6L5bmf4pQ_1\tperson\nfs6Rgfl4CtI_0\tboat\nfs6p-qaLswQ_0\tcow\nfs7RdtNY3Ck_0\telephant\nfs9uDpde9ig_1\telephant\nftG2YflDq_E_0\tknife\nftH3_awR5ZA_0\tperson\nftIp5PyaGNc_1\tknife\nftNSK_rSs98_1\tairplane\nftSUBEOhdck_0\tcat\nftX9ErOmiAE_0\tcar\nftX9ErOmiAE_1\tcar\nftcnCvd4yeU_0\tperson\nftlmGO0CnHk_0\ttruck\nfuHAM8D3ros_3\tbicycle\nfuO2QMXiDMU_0\tmotorcycle\nfuPtCtdvowQ_0\tperson\nfuSxdcdxe70_1\tperson\nfuSxdcdxe70_0\tperson\nfuh4-mC5fvg_0\tcar\nfuklviv_MRE_0\ttruck\nfunKReksXEQ_4\thorse\nfur41mRCURs_0\tcow\nfutBuKCP9zw_0\tumbrella\nfu5d7x7pORY_0\thorse\nfu_f4n_bYPU_0\tperson\nfvAislzoQVU_0\tperson\nfvDUF-aukF4_0\tperson\nfvH1bolPY2U_0\tperson\nfvKg6ReEigA_14\tbicycle\nfvKg6ReEigA_2\tbicycle\nfvKg6ReEigA_3\tbicycle\nfvKg6ReEigA_4\tbicycle\nfvKg6ReEigA_5\tbicycle\nfvKg6ReEigA_8\tbicycle\nfvKg6ReEigA_11\tbicycle\nfvKg6ReEigA_15\tbicycle\nfvKg6ReEigA_16\tbicycle\nfvKg6ReEigA_17\tbicycle\nfvKg6ReEigA_19\tbicycle\nfvLauezWx5g_1\tskateboard\nfvLkNgA4N0k_1\tperson\nfvZYmQ6SJrQ_0\tperson\nfvcIpyJFuQA_0\tperson\nfvdoipKMj4g_0\tperson\nfvfb_kQCs-I_0\thorse\nfvhVuqonUHg_0\tperson\nfvhVuqonUHg_1\tperson\nfvlGWjjirUQ_0\tperson\nfvqWMyJJqog_0\tperson\nfvqWMyJJqog_1\tperson\nfvtTggVCkFk_0\tperson\nfvzbC9c98ik_0\tdog\nfv42-nzlEsY_0\ttrain\nfv8F7gjL7Js_0\tairplane\nfwCUjUa0cHQ_0\tperson\nfwG8C9CEISw_0\tperson\nfwLL8mlHf0I_0\tbicycle\nfwL9zu2j3rk_0\tperson\nfwQMFtFdERs_0\thorse\nfwQMFtFdERs_1\thorse\nfwTB5tDP4cU_0\tperson\nfwT-VIjQCa8_0\tperson\nfwop4msktdA_0\tcow\nfwv2gGVEi6g_0\tperson\nfwwOICMutXc_0\tdog\nfxFzCD192K4_1\tbird\nfxHZn2FXRGk_0\thorse\nfxHZn2FXRGk_1\thorse\nfxQYhMoNR9I_0\tperson\nfxQY5tnybxQ_0\tskateboard\nfxWwYiT8yXk_0\tperson\nfxWyDyUmxuY_0\thorse\nfxbNI1vTtq0_0\ttrain\nfxbjh88g3Vw_0\tperson\nfxcDLsblNhs_1\tbird\nfxdVSYuYJOE_0\tperson\nfxhuSOpUuGs_0\tperson\nfxr4HpTRNS0_0\tdog\nfxxjK3mjCF0_1\tperson\nfxyg5GQk8H8_0\tairplane\nfxyg5GQk8H8_2\tairplane\nfxyg5GQk8H8_3\tairplane\nfxyg5GQk8H8_4\tairplane\nfx07mGL1WQY_1\ttrain\nfx2_nahpAfE_0\tperson\nfx4HT1nuEg4_1\tperson\nfx4HT1nuEg4_0
\tperson\nfx9TwmuIYCY_0\tskateboard\nfx9fckiExps_0\tperson\nfx_zN3FWeJ0_1\tbus\nfx_zN3FWeJ0_3\tbus\nfx_zN3FWeJ0_0\tbus\nfyE4_usnxHc_0\tperson\nfyE4_usnxHc_2\tperson\nfyOZZ_u9Jm0_0\tperson\nfyOxr6iISdI_0\telephant\nfyRO8_b4wJU_0\tperson\nfyTzI2wuC0M_0\tperson\nfybHaZZmAzE_1\ttrain\nfydZoAN9JpI_0\tperson\nfydZoAN9JpI_1\tperson\nfydZoAN9JpI_3\tperson\nfyhSoeveW3I_0\ttrain\nfyyLjISjzvM_0\tperson\nfyztN8okJkU_0\tperson\nfyztN8okJkU_1\tperson\nfy5GdRFHsLs_0\tcat\nfzFR54WdDEU_0\tperson\nfzV_Z79golE_1\ttruck\nfzaNjkWQtW0_1\tskateboard\nfze3woUbt0w_0\tdog\nfzh-lO5lQhQ_1\tbird\nfzoZsW3AMTU_0\tbird\nfzp3cT3c5Wg_0\tperson\nfzp3cT3c5Wg_1\tperson\nfzp3cT3c5Wg_2\tperson\nfzqX7N7ICQw_1\tperson\nfzqX7N7ICQw_0\tperson\nfzrGdIi_J9k_0\tperson\nfzr9mWLJM6E_1\tperson\nfzr9mWLJM6E_0\tperson\nfzvrWQX908c_0\tperson\nfz1PTzziIcg_0\tperson\nfz1kPSLo_p8_1\ttrain\nfz8emqnbleQ_1\tboat\nf0BJ56Dn3D0_0\tcat\nf0E5mPnVSSU_1\tperson\nf0JOvKbLwTQ_0\tperson\nf0LbneUbWUk_0\tcow\nf0TYLMAZLpA_0\tperson\nf0XZTHcpmZY_4\telephant\nf0XZTHcpmZY_2\telephant\nf0XpDJO5Tw0_0\tperson\nf0XpDJO5Tw0_1\tperson\nf0Z8cmobjWs_0\ttruck\nf0Z8cmobjWs_4\ttruck\nf0Z8cmobjWs_7\ttruck\nf0Z8cmobjWs_8\ttruck\nf0mYYISWwxo_1\tperson\nf0mYYISWwxo_0\tperson\nf0o0SmB2JAE_1\tcow\nf0o0SmB2JAE_0\tcow\nf03_N__tWuI_2\telephant\nf1ASjw4-yL8_0\tperson\nf1Da4qa1SIw_1\tperson\nf1EKnOQEf5g_0\tboat\nf1GkfW2mOlE_0\tperson\nf1G2DlbJqyI_0\tperson\nf1HKyLr8nL0_0\tperson\nf1JCS5F-LuU_0\tperson\nf1KEvGLqqwI_1\tumbrella\nf1O6FYMq5zk_0\tperson\nf1XB0uA4Dvo_0\tbus\nf1Z1HedJzos_0\tskateboard\nf1fEuZwBkDQ_0\tperson\nf1nxCdtYwdQ_0\thorse\nf1sTzp9ahWM_1\tperson\nf1sTzp9ahWM_0\tperson\nf1uaPSveXCI_0\tperson\nf2ADBeQ0Vys_0\tperson\nf2ADBeQ0Vys_1\tperson\nf2EbBSZ8osI_0\tzebra\nf2EbBSZ8osI_1\tzebra\nf2HKs4L6fwE_0\tperson\nf2HKs4L6fwE_2\tperson\nf2HKs4L6fwE_1\tperson\nf2MDAAk-Euo_1\tperson\nf2ULSb7lIAo_0\tcow\nf2ULSb7lIAo_1\tcow\nf2ULSb7lIAo_3\tcow\nf2hfKAL0ZoA_0\tumbrella\nf2hfKAL0ZoA_4\tumbrella\nf2hhMTSObNY_0\tskateboard\nf2p2YcmHn8c_1\tbicycle\nf2s4nNZ_qew_0\tboat\nf2ypHkP1WUg_0\tperson\nf3EOdxK13SU_0\tgiraffe\nf3HU85Jx7m0_0\tcow\nf3JkzQkcdVM_0\thorse\nf3Kxw7yBcW0_2\tperson\nf3Kxw7yBcW0_1\tperson\nf3Np8rGlxOE_1\tperson\nf3VJKfFdBW0_1\ttruck\nf3aufQBTMME_0\tboat\nf3bk60UZpqE_0\ttruck\nf3bk60UZpqE_5\ttruck\nf3bk60UZpqE_9\ttruck\nf3kQ_6EG8cM_0\tperson\nf3spBT1AGyw_0\tperson\nf31ePv3WlNc_0\tperson\nf33OpHIFMWA_1\telephant\nf33OpHIFMWA_3\telephant\nf33OpHIFMWA_0\telephant\nf33OpHIFMWA_2\telephant\nf35syqOsqSo_0\tboat\nf38P7AlhP5g_0\tperson\nf39rc-7_QQc_0\tperson\nHNr7Ed0_pQY_1\tbus\nHNtUUtLCSDY_0\tgiraffe\nHNtojLNWnKQ_0\tperson\nHN6XGq0aRx4_0\tperson\nHN84N_vu_hw_1\tperson\nHOAbQ4r1tzM_1\tknife\nHOA47mRJ9B8_0\tperson\nHOOwNsMTi9g_0\tperson\nHOSMm-4fUVM_0\tdog\nHOZcbA0OPF0_0\tperson\nHOkS1ljUX4s_0\tbear\nHOmzECHFah4_0\tdog\nHOxzSXuj0O0_0\telephant\nHOxzSXuj0O0_3\telephant\nHO6yeFgs7Hs_1\tbicycle\nHO7Uf5Enr1U_1\tperson\nHPAa3KI1Z30_1\tdog\nHPDws9wJu40_0\ttrain\nHPIdRNu7STU_0\tdog\nHPIxVE3OLG4_0\tperson\nHPPTr0Mpe0A_0\tbicycle\nHPRp9F-4ts4_0\tdog\nHPSJZXcOiEc_0\tperson\nHPjcp8hS6vs_0\tperson\nHPjcp8hS6vs_1\tperson\nHP0RUfuvfx4_0\tperson\nHP4O8FbEpEg_0\tbus\nHP6ROW7ahtU_0\tperson\nHP6YRIGqiI4_0\thorse\nHP62suxiDNw_0\tbicycle\nHP62suxiDNw_2\tbicycle\nHP62suxiDNw_3\tbicycle\nHP62suxiDNw_1\tbicycle\nHP9u4FmRvbw_1\tbear\nHQBhagraDwo_0\tcat\nHQIxUlu7xSY_0\tperson\nHQKVBNWD_ls_0\tperson\nHQM9aDN7Tf0_0\tperson\nHQZVUknJ0lw_1\tperson\nHQZVUknJ0lw_0\tperson\nHQePQ1mfzKw_0\tperson\nHQePQ1mfzKw_1\tperson\nHQhnj0h9OyA_0\tperson\nHQhnj0h9OyA_1\tperson\nHQjXFK_0sFo_0\tperson\nHQxihmm6sSs_0\tperson\nHQz_At1F0
Yk_2\tbicycle\nHQ4ZWia0f1E_2\tcow\nHQ9gmrJ6Bm4_3\tairplane\nHQ9gmrJ6Bm4_4\tairplane\nHQ9gmrJ6Bm4_5\tairplane\nHQ9gmrJ6Bm4_1\tairplane\nHRCOvhALHv0_0\ttrain\nHRUX75Ve2aQ_0\tperson\nHRVMd5SmF8Y_0\tumbrella\nHRl1VhUfhok_0\tperson\nHR1wffFOaEw_0\telephant\nHR4ExP8Ompc_0\thorse\nHSKpu2UmvBo_0\tperson\nHSKpu2UmvBo_1\tperson\nHSN6tO3rh-c_0\tperson\nHSVWpwFagLg_1\tperson\nHSdyrMzM64w_0\tcow\nHS3WVWEFHm8_1\tperson\nHS3WVWEFHm8_0\tperson\nHTAnAeW5Bhs_0\tbird\nHTS20hgMcFQ_0\tbicycle\nHTTz78R4i0c_0\tperson\nHTehrgCQAPo_0\tperson\nHTgldgqci04_0\tperson\nHUFGafskCjw_0\tperson\nHULASsoz03U_0\tperson\nHULASsoz03U_1\tperson\nHUPxNiCgjn0_0\tknife\nHUfwe7j7IBE_0\tperson\nHUgX2V1AkVw_0\tperson\nHUiMyxUEC_A_0\tperson\nHUv2tT_n5Bo_0\tperson\nHUy4cHFX-04_0\tperson\nHUz7znJTRNg_1\tumbrella\nHU_HuNQ4TDw_0\tcow\nHU_HuNQ4TDw_1\tcow\nHU_HuNQ4TDw_2\tcow\nHVEmUm86PBo_0\tmotorcycle\nHVI1w93kCfo_0\tperson\nHVOWKezX_bo_0\thorse\nHVOWKezX_bo_2\thorse\nHVYf36PFglw_0\tdog\nHVY9hWgMujc_1\ttruck\nHVeqzrLyVtk_0\tperson\nHVkFV2q27S0_1\tperson\nHVkQkPaQbrw_0\tperson\nHWAW-J3ZpIs_0\tcow\nHWA45moBwMo_0\thorse\nHWEI24n2tHY_0\tperson\nHWItJuo6DSM_0\tbus\nHWXgDvYdlHE_1\tperson\nHWZSmtWVH54_0\tperson\nHWZenKFJqkY_0\tperson\nHWZenKFJqkY_1\tperson\nHWZenKFJqkY_2\tperson\nHWfpkRSnZp8_0\ttrain\nHWfpkRSnZp8_2\ttrain\nHWjaeLf99dU_0\tbear\nHWr9Kqi0B2A_0\tperson\nHWsTMfZok5E_0\tperson\nHWtKIjJacjk_0\tperson\nHWtyII4CMWg_0\tcar\nHWtyII4CMWg_3\tcar\nHW7FTNqTKhs_0\ttrain\nHW7yQK_j65g_0\thorse\nHXARJhNURSs_0\tperson\nHXH_F5SX6FU_0\ttruck\nHXH_F5SX6FU_3\ttruck\nHXH_F5SX6FU_1\ttruck\nHXKnqbEGfVw_0\tbird\nHXKnqbEGfVw_6\tbird\nHXKnqbEGfVw_1\tbird\nHXKnqbEGfVw_2\tbird\nHXKnqbEGfVw_3\tbird\nHXLA3nbxgh4_0\tperson\nHXWoqdza4oA_0\tdog\nHXaAJtjX1mE_0\tbicycle\nHXaAJtjX1mE_2\tbicycle\nHXaAJtjX1mE_1\tbicycle\nHXa-0NlFTP4_0\tperson\nHXcSrTLsF9c_0\ttrain\nHXhYYfE4uN8_0\tperson\nHXvgiezvrYI_0\ttruck\nHXx4tRTfGRM_1\tdog\nHX0kjr3XYHI_1\tbear\nHX7P1ipPByA_0\tdog\nHX-gTvdUaOE_2\tmotorcycle\nHYLAdzbqvC0_0\tperson\nHYWEWmMMrsU_0\tcat\nHYW3dAv02gE_0\tcow\nHYW6VucwAEg_0\tperson\nHYXFGMzivds_10\ttruck\nHYbuNzqXmyY_0\tperson\nHYiN6skKjfY_0\tknife\nHYoonHvZXCc_0\tmotorcycle\nHY1aAYxxlQo_0\tperson\nHZC5bba_V4Y_0\tknife\nHZJ-JQkt590_1\tbicycle\nHZKExvpKLQ8_1\tperson\nHZLdGfto2mI_0\tcar\nHZSPPN3TMx8_0\tbird\nHZZadt4SIl0_0\tdog\nHZceU_BV2GM_0\tperson\nHZceU_BV2GM_1\tperson\nHZceU_BV2GM_2\tperson\nHZd4rCCsNMs_0\tskateboard\nHZd4rCCsNMs_1\tskateboard\nHZd4rCCsNMs_2\tskateboard\nHZkmrVeoUV4_0\tperson\nHZscUISrdww_0\tperson\nHZ-tGW__JOI_0\tcat\nHaE1N8Q1b7s_1\ttrain\nHaMpIMApSi8_0\tperson\nHaO3z-4gcBs_2\ttrain\nHaRliuOtm7s_1\tperson\nHaiLotzzEXk_1\telephant\nHaiLotzzEXk_2\telephant\nHaiLotzzEXk_0\telephant\nHarW34izH-M_1\tperson\nHauA239AM7I_0\tdog\nHavxbX8tng0_0\tperson\nHayoEz1x5Ks_0\tperson\nHayoEz1x5Ks_1\tperson\nHay4Nx9S5-k_4\tbicycle\nHay4Nx9S5-k_1\tbicycle\nHa8XGRvxQxs_0\tperson\nHa_OuYxLXIs_0\tperson\nHa_w-xJsHAY_0\tzebra\nHbBCtCXKIEE_0\tperson\nHbH7DpR0WUw_0\tperson\nHbJufGCjdSE_1\tperson\nHbKh31cncOI_0\tbird\nHbLoxqqdYsQ_0\tcow\nHbQu1mfGg4c_2\telephant\nHbQu1mfGg4c_3\telephant\nHbQu1mfGg4c_0\telephant\nHbQu1mfGg4c_1\telephant\nHbcyjRGbMBY_0\tdog\nHbcyjRGbMBY_1\tdog\nHbhmBauZqxE_0\thorse\nHbq35QImz2w_0\tperson\nHbq35QImz2w_1\tperson\nHbuCy2fsJk8_2\tknife\nHbyKQdGpxhA_0\tboat\nHb3INTcuOVk_0\tperson\nHb5zCzD4J_E_1\ttrain\nHb5zCzD4J_E_2\ttrain\nHb5zCzD4J_E_3\ttrain\nHb5zCzD4J_E_6\ttrain\nHb5zCzD4J_E_7\ttrain\nHb5zCzD4J_E_8\ttrain\nHb5zCzD4J_E_11\ttrain\nHcBQXS22BDs_0\tperson\nHcJTaK6Q9P8_0\tperson\nHcXN4Pwnaeg_0\thorse\nHcfxwdbwk8c_0\tperson\nHchet3FQwII_0\tperson\nHcxL3_INS_0
_0\tperson\nHc5ZM6UWTbY_0\tperson\nf4OI46BYh08_0\tskateboard\nf4Oj9uMeFdI_0\telephant\nf4PgAt4YpfE_0\tcat\nf4P-R7h_gTU_0\tperson\nf4QyVWC6yrw_0\tperson\nf4XkIcezAd8_0\tperson\nf4XkIcezAd8_1\tperson\nf4Y2tjwOV2k_0\tcow\nf4Y2tjwOV2k_1\tcow\nf4bys9o_Z2M_2\tbird\nf4s0cImpNBM_1\tcow\nf4xLPprxm30_4\tknife\nf49BXPlU-iI_0\tknife\nf4_Mfc9Ccg8_0\ttruck\nf5BIXG_nLok_0\tbus\nf5HsrI3Codk_0\tbird\nf5J7yrE24eY_0\tperson\nf5LuupUslCU_0\tperson\nf5Q2iD7VUx8_0\tskateboard\nf5W37dv91tU_0\tperson\nf5apNjAecEc_0\tperson\nf5bVoAXze0Q_0\tmotorcycle\nf5d1IXK1Tz0_0\tbird\nf5rzpIRd4wA_0\ttrain\nf5wHsLucnf8_0\tperson\nf5zEWaDr1jg_0\tcat\nf50eMXA_-bM_1\tperson\nf50eMXA_-bM_0\tperson\nf53Jmsa7Jkc_0\tperson\nf6AcbdJ77A4_1\ttrain\nf6E2ODGGF28_1\tperson\nf6Px5vjTeRI_1\telephant\nf6Px5vjTeRI_0\telephant\nf6Px5vjTeRI_3\telephant\nf6Px5vjTeRI_5\telephant\nf6UBVcEIt3I_1\tperson\nf6cXiuO-MvQ_1\ttruck\nf6dVANLzPTY_0\tperson\nf6dVANLzPTY_1\tperson\nf6o6ukW_Qog_2\tbear\nf65c6sEDtkE_0\tperson\nf7A6AOC8fOg_0\tperson\nf7A6AOC8fOg_1\tperson\nf7ExsvPto-E_1\tmotorcycle\nf7Fs7-jGglk_1\tbear\nf7GJgMh9xt4_0\tperson\nf7WvltLziTI_0\tboat\nf7cI-B4pJso_0\tcow\nf7kLnCuNTQo_0\tcow\nf7lmZQGcfBA_3\telephant\nf7lmZQGcfBA_4\telephant\nf7lmZQGcfBA_0\telephant\nf7lmZQGcfBA_1\telephant\nf7oBEoL94vw_0\tperson\nf7pnt1rB9kI_0\tperson\nf7x074oihas_0\tperson\nf73BEqi2_DM_0\tperson\nf7-S_iQAyKU_0\tcar\nf7-htlH5qd4_0\tbird\nf7-htlH5qd4_2\tbird\nf8A1o9Nbs64_0\tskateboard\nf8BXIJnggCI_1\tboat\nf8BXIJnggCI_3\tboat\nf8BXIJnggCI_4\tboat\nf8Dp8Yvyr_0_0\tperson\nf8PVrlhAIV4_0\tperson\nf8T4DHNu6MY_1\ttruck\nf8ZxXHSqC_8_0\tboat\nf8cW6kw6240_0\tperson\nf8mzzGhPBaw_1\tcar\nf8q3fKwf5PY_0\tknife\nf8yFyIwDCQ4_4\tgiraffe\nf8zLCa1oGOE_0\thorse\nf8z83D9vGPo_2\tknife\nf80hjE6vabs_0\tperson\nf80hjE6vabs_1\tperson\nf84ypk41ULc_0\telephant\nf9H0LrBLc9Y_0\tperson\nf9H0LrBLc9Y_1\tperson\nf9H0LrBLc9Y_3\tperson\nf9H1bUagACA_0\thorse\nf9H6UaPUITk_0\tcat\nf9LOlCLfsJs_0\tperson\nf9N4Jxt-kUs_1\tknife\nf9N4Jxt-kUs_2\tknife\nf9TCFTluRIc_1\tbus\nf9e12AC1jXM_0\tbear\nf9oWC3kSP1M_0\tmotorcycle\nf9ovukmKaq4_1\tperson\nf9sPt8HIN0w_1\tskateboard\nf9sj-0ZFV6E_0\tperson\nf9sj-0ZFV6E_1\tperson\nf9v2ONFCiwQ_0\tperson\nf91XzUXz11U_0\tperson\nf96d9EwxAB4_0\tperson\nf9-IyW9tVLY_0\tperson\nf-FxqFk0TdM_0\tperson\nf-JXaNm7TBw_0\tperson\nf-J7SQBHRN4_0\ttruck\nf-Yei4idfG8_0\tairplane\nf-dhfS-geuI_1\telephant\nf-dhfS-geuI_2\telephant\nf-h9L-PN1ZM_1\tbird\nf-iLJUDdrD8_0\tperson\nf-niuVrgiIc_1\tperson\nf-rp_CghH-E_0\tskateboard\nf-s-4lM4qPA_0\ttruck\nf-w51BH60RQ_0\tperson\nf-1WVe76te0_0\tcow\nf-4EyKUawVo_0\tbear\nf-7ZEGsCz9U_0\tperson\nf_GKi-DGmzM_0\tperson\nf_Gf2hpt7y4_0\tgiraffe\nf_GudF8uST0_0\tperson\nf_NsA6enCZE_0\tperson\nf_OOyDOAAOU_7\telephant\nf_QhMhkyUSY_3\ttruck\nf_QhMhkyUSY_1\ttruck\nf_QhMhkyUSY_4\ttruck\nf_Us8TvJMUQ_0\tperson\nf_VwDCt9HTc_0\tdog\nf_WQIaZ5PjY_0\tboat\nf_bXOtZjzfo_0\tperson\nf_b0IaRqtbs_0\tperson\nf_jLGz53IpQ_0\tperson\nf_jLGz53IpQ_1\tperson\nf_mo54sXCc8_1\tperson\nf_mo54sXCc8_0\tperson\nf_rC1JIAMBU_0\tperson\nf_wk-NOqceY_0\thorse\nf_yMF9tkk70_1\tcar\nf_yvJuTzFHc_0\tmotorcycle\nf_yvJuTzFHc_2\tmotorcycle\nf_2I0S-EYu8_0\tdog\nf_3x9qJXCjA_0\tperson\nf_49EFLQ02I_0\tperson\nf_8S2hHC2rc_0\tbicycle\nf__fXHkVh5E_1\tcow\ngAKFUl9e_kg_0\tperson\ngAQ92hISW6g_0\tperson\ngARNWQDyaYM_0\tboat\ngAYbqApcfGs_0\tperson\ngAdIZN7_0SM_1\tairplane\ngAeHmfC6t5s_0\tcat\ngAetQXcftXM_2\tdog\ngAnOylz1kDY_0\tperson\ngAnmF0EFcB4_2\telephant\ngAorjWC_59o_0\tcat\ngAo9Rsd6xwg_0\tcow\ngA2FDYNulg8_1\tperson\ngA22uEcTAuY_1\tdog\ngA84cp5Keqk_0\thorse\ngA_a2Ajm7B8_1\thorse\ngBFsvbfVaLg_0\tperson\ngBJgWZcXu9o_0\tperson\ng
BK7NwUcSoY_1\tperson\ngBOpan7nm6M_0\thorse\ngBOpan7nm6M_1\thorse\ngBPipHCII3M_0\tbus\ngBRc8zqsL78_0\tdog\ngBUOzZPs_o4_2\tperson\ngBUOzZPs_o4_0\tperson\ngBYqrtFnN_Y_0\tperson\ngBYqrtFnN_Y_2\tperson\ngBeaBC0u9cQ_0\tperson\ngBhKhiEJUCM_0\thorse\ngBiq_BH15FM_0\tdog\ngBoebgAjbVw_0\tperson\ngBoebgAjbVw_1\tperson\ngBs3hPLJTGs_1\thorse\ngBwCej92lKg_1\tperson\ngB0wConR2VI_1\tskateboard\ngB2QHXkiiHs_2\telephant\ngCDBnQV_G3c_0\tcat\ngCDBnQV_G3c_1\tcat\ngCGtBmntCiI_1\tmotorcycle\ngCHegjuq0os_0\tperson\ngCHegjuq0os_1\tperson\ngCI1E3Hezdo_2\tcow\ngCI1E3Hezdo_1\tcow\ngCTp3CdMHCo_0\tperson\ngCT0VAdPm98_0\tcat\ngCuOoA6aZ5U_0\tcat\ngC7K3OeQFHo_3\tbird\ngC7XtkA9y_Y_0\tdog\ngC-xUbdM-tU_0\tperson\ngC-xUbdM-tU_1\tperson\ngDAPPFBC9Gw_0\ttrain\ngDEpD9ek-O8_0\tskateboard\ngDGLrPPl_PU_0\tcat\ngDMsKJ61KPo_1\tskateboard\ngDOGAHsBM_o_0\tperson\ngDTs0BOj8Fw_0\tcat\ngDU0hHsqtbU_3\tknife\ngDU0hHsqtbU_5\tknife\ngDU0hHsqtbU_0\tknife\ngDVGs8wTXCQ_0\tcat\ngDkDXOm8z5Q_1\tcow\ngDkDXOm8z5Q_0\tcow\ngDk-zDBsv7g_0\tdog\ngDnSIxaiPzk_0\tperson\ngDn3-DCSgNg_0\ttrain\ngDsBFuJE6D8_2\tdog\ngDvOoWXI3yg_0\tperson\ngD2GATPADlA_0\tperson\ngD5_x_Bz1z4_0\tperson\ngD5_x_Bz1z4_1\tperson\ngED4_ImWufA_0\ttruck\ngEE_GCrAqF0_0\tperson\ngEJi9Jawk2A_0\tperson\ngEOxDCDD97k_1\thorse\ngESEn7ZZELM_0\tperson\ngESEn7ZZELM_1\tperson\ngEai3uMvvFg_0\tairplane\ngEai3uMvvFg_3\tairplane\ngEai3uMvvFg_4\tairplane\ngEhLmQnM720_0\tcar\ngEu4mV0DWRQ_0\tperson\ngE0ZQD1rCy8_0\tperson\ngE0mBxOEwRI_1\tskateboard\ngE0mBxOEwRI_3\tskateboard\ngE8ErAnVuzY_0\tbird\ngE8ErAnVuzY_2\tbird\ngE-GVN9ErhI_0\tperson\ngFEnoylVci0_0\tperson\ngFac0jUOjCE_0\thorse\ngFcIMdm4qtI_0\ttrain\ngFdHQTLSmnc_0\tairplane\ngFfVZSPVYmY_0\tperson\ngFiSl9m-w0k_0\tperson\nHdBc9ySq76E_1\tbird\nHdCyMGZFJhM_0\tperson\nHdFYXjdN5_8_0\tperson\nHdO2lmXvENQ_0\thorse\nHdR6VoZEwAU_0\tcat\nHdSXU0fhHbM_0\tperson\nHdT_9pXdxuc_1\tperson\nHdbZzqJGLo8_1\tcow\nHdcXcqUlgI4_0\tskateboard\nHdhKF0UWx4g_0\tperson\nHdh3nOzwVW8_0\tperson\nHdjbDB8UvCY_0\tperson\nHdo3_NQiVKw_0\tknife\nHd85XlwoOMc_0\tperson\nHd-wT5OTZDE_0\tperson\nHd-wT5OTZDE_1\tperson\nHeIrGQnIMOE_0\tdog\nHeLNz5XJe08_0\tperson\nHeTGT7JfvB0_0\tperson\nHeUD1Hrzswg_0\tbird\nHeYNsU-PKJs_0\tcow\nHeYNsU-PKJs_1\tcow\nHedUVNznPK0_0\tcar\nHedUVNznPK0_1\tcar\nHeoyKd78htI_0\tperson\nHeoyKd78htI_1\tperson\nHewdFRJAXH4_0\tperson\nHe08dewEgbY_3\tmotorcycle\nHe08dewEgbY_0\tmotorcycle\nHe1OQxCPk_w_0\tperson\nHe5cucK-e48_0\tperson\nHe6bAMDkCss_0\telephant\nHfDHvE46LYU_1\tbird\nHfDzCPRQ2nw_1\telephant\nHfEXlJ0dOhU_0\tperson\nHfEZYvYqq_Y_0\tcow\nHfHNi93ZHoo_3\tcow\nHfHNi93ZHoo_1\tcow\nHfOcLeLWchM_0\tperson\nHfZ871F0xSo_0\tcat\nHfnnbr4CeTg_3\tbus\nHfqI5BIpp0s_0\tperson\nHfq3_YJ7BpY_0\tmotorcycle\nHfq9JFmquE4_0\tperson\nHfvJc2dxUR4_0\tboat\nHf1Iyyz2DMY_0\tperson\nHf1Iyyz2DMY_1\tperson\nHf8JWsbSYYk_0\tperson\nHf8-8h45g-g_1\telephant\nHf8-8h45g-g_0\telephant\nHf8-8h45g-g_2\telephant\nHgDimNCaxF0_1\tbear\nHgFCKM4ndEc_0\tcar\nHgMYuCtsOwc_0\tperson\nHgMYuCtsOwc_1\tperson\nHgO57Npp9Yg_0\ttrain\nHgexaoNeZJk_0\tperson\nHgiYmNrxUzg_1\tperson\nHgkeptGXNt4_0\tmotorcycle\nHglF9x-ORXU_0\tperson\nHgr5__oevds_0\tperson\nHg2vqnLAc8I_0\tdog\nHg4DJ-x85Dw_1\telephant\nHg-R_RMIEN8_0\tairplane\nHhASNiFpJlw_0\ttruck\nHhF6cAtp7Xs_0\tknife\nHhGGJNmwWHk_0\tperson\nHhVSLU0A-wk_0\tcar\nHhcMy4KZ9mY_0\tskateboard\nHhfSUB2LOTU_0\tperson\nHhiUVwHWmwM_1\tperson\nHhiUVwHWmwM_2\tperson\nHhiUVwHWmwM_0\tperson\nHhjGAeK-XWg_0\tperson\nHhoRf1Ovlf8_0\tperson\nHhvq-cwBJgo_0\tperson\nHhwzl9x_m34_3\tcow\nHhxV27YhiqI_0\tskateboard\nHh1xD0M0N8Q_0\tperson\nHh6x850teNQ_5\tairplane\nHh6x850teNQ_7\tairplane\nHh6x850teNQ_8\tairplane\
nHh6x850teNQ_9\tairplane\nHh6x850teNQ_10\tairplane\nHiBUWbOyqcQ_0\tperson\nHiGZ2EdJh2o_0\tperson\nHiMItbtVHcY_0\tcat\nHiMItbtVHcY_1\tcat\nHiNt0G1AIO4_0\tmotorcycle\nHiTE5nqzjBw_0\tzebra\nHiUz61ffgHA_0\tperson\nHiZDjdREbmc_0\tumbrella\nHim7gJ7sArU_0\tperson\nHim7gJ7sArU_1\tperson\nHinGUsliCKc_0\ttruck\nHirBTVnhNls_0\tcow\nHi4ITByGP0Q_0\tperson\nHi4mzrYdRBQ_0\thorse\nHi4mzrYdRBQ_2\thorse\nHi4mzrYdRBQ_3\thorse\nHi8Ey0o5mCQ_1\tperson\nHi-7ZtG_JWI_1\tperson\nHi_YHp3Jz48_0\tcow\nHjAtN_MbguE_0\tperson\nHjLLTWwaCB8_0\thorse\nHjNfykX021M_0\tperson\nHjNfykX021M_1\tperson\nHjgdNiVfO9M_0\tskateboard\nHjlX9nu9Vf4_0\tperson\nHjo13y8dFy4_0\tmotorcycle\nHjt_y0CW-dY_0\tperson\nHjt_y0CW-dY_1\tperson\nHjxd2cno65M_0\tskateboard\nHj0J8FVxBjg_2\tperson\nHj0J8FVxBjg_0\tperson\nHj0J8FVxBjg_1\tperson\nHkApyQz8MTY_1\thorse\nHkQ4tzUFCUU_0\ttruck\nHkW_wLkAKpg_0\tperson\nHke6h3Sv5bA_1\tbicycle\nHkzYNIDq0q4_0\ttrain\nHk45sdCRh9g_1\tbear\nHlEkgK08UfY_1\tperson\nHlTQbPXnzu8_0\tdog\nHlWsih27OmA_0\tbird\nHlaPVZM-53c_0\tperson\nHlfpirtC6oQ_0\tperson\nHlmuHGoCGAI_0\tcow\nHltyUzvtugM_1\tbicycle\nHlurUBv4bh0_1\tgiraffe\nHlurUBv4bh0_3\tgiraffe\nHlurUBv4bh0_4\tgiraffe\nHlwSaYwFLRE_0\thorse\nHl3qik9GRX4_0\tperson\nHl5MXwWiXWM_0\tperson\nHmDDLtJcD5g_0\tperson\nHmORePbYJkk_0\tskateboard\nHmPvsdwo_fY_0\tdog\nHmRm2phIiGo_1\tbird\nHmY8zwmIiac_0\tcow\nHmaGylwEFxw_0\tperson\nHmbTCfB3Vkg_0\tperson\nHmk4dZnPtRY_0\tbus\nHmn3xf-zqWI_0\tperson\nHmqV_7hAxdw_0\tperson\nHmr0jbygomI_0\tgiraffe\nHmwxDK0zo6U_0\tperson\nHmyj1zKgToA_0\tperson\nHm0kxS31F_U_0\tperson\nHm0kxS31F_U_1\tperson\nHnNJeASG0-M_3\tperson\nHnNJeASG0-M_4\tperson\nHnNJeASG0-M_2\tperson\nHnNzkYDhWks_1\tperson\nHnNzkYDhWks_2\tperson\nHnNzkYDhWks_0\tperson\nHnP7iXcgg8g_0\ttruck\nHnSHJ_iCdi4_3\ttruck\nHnSHJ_iCdi4_1\ttruck\nHnUrGKpAsOk_0\tcat\nHnbNOJpzYPE_0\tperson\nHnjhdtM8qSI_0\tskateboard\nHnptRKjBUF0_2\tboat\nHnwYRWj3fk4_2\tknife\nHnxaJbaAiUI_0\tperson\nHoH5exlgIxk_1\tskateboard\nHoLifxKZUpI_0\tperson\nHoLifxKZUpI_2\tperson\nHoLifxKZUpI_1\tperson\nHoNs_4V1pNs_1\tbear\nHoNs_4V1pNs_4\tbear\nHoP_nMgAxAk_0\tboat\nHoeeRkyNozc_0\tcow\nHon64st5_6g_0\ttrain\nHo2ixBE8dzE_0\tgiraffe\nHo5TcUOlb3Q_0\tmotorcycle\nHo5o7aBqNAc_0\tperson\nHo6N0OgD-1M_0\tperson\nHpBBda_pbf8_0\tmotorcycle\nHpGr16tW9dk_1\tperson\nHpQ90KkREGo_0\tperson\nHpUPD5_WMYI_0\ttrain\nHpZ3IzUfsGg_3\tbus\nHpbQsLdUHN4_0\tboat\nHpjyvLHus3Y_1\tskateboard\nHpkTeQdQ03Q_0\tskateboard\nHprw9lNWGGs_0\tperson\nHptcjVcfzgY_0\tcow\nHpwk73qvroU_1\telephant\nHpzTTAS6Qt8_0\tperson\nHp0SQy5w9Q4_0\tperson\nHp-eaTbVfLY_1\tbear\nHp-2Gb7Fwns_0\tcow\nHqxhhM71S2g_0\thorse\nHq1KLztJBrE_0\tperson\nHq6tGHLzg4Q_0\tperson\nHq814Tfrblw_1\tairplane\nHrHPBJOnFgg_1\ttrain\nHrHPBJOnFgg_0\ttrain\nHrHPBJOnFgg_4\ttrain\nHrHPBJOnFgg_6\ttrain\nHrdVu5J3rZQ_0\tperson\nHr-keYNRBhA_0\ttrain\nHsLZwGFHYUg_1\thorse\nHsNcZZ6iwHQ_0\tperson\nHsOiHc1moVk_0\tperson\nHsOkCwZLv_w_0\tbus\nHsOkCwZLv_w_3\tbus\nHsOkCwZLv_w_1\tbus\nHsOkCwZLv_w_2\tbus\nHsR2xk4I1as_0\tperson\nHsVKw_8AQtM_0\tperson\nHsZgeesgCZQ_0\tperson\nHsjVUPs3XB4_0\tboat\nHslbDMoiABY_0\tcar\nHslld67XdsY_0\tperson\nHswufOfUGyk_0\ttruck\nHsyscFWIPZs_0\tbus\nHs0HRqYcYqA_0\tcar\nHs6bVSOu98U_0\tdog\nHs_vQr20HdQ_0\tskateboard\nHs_vQr20HdQ_3\tskateboard\nHtErHV_tZqs_0\telephant\nHtIbfC8DDos_0\ttruck\nHtNaGNO6nnc_0\tperson\nHtRiNzzfakk_0\tperson\nHtUPhgHKN9c_1\tboat\nHth8t7jhKPs_4\thorse\nHth8t7jhKPs_7\thorse\nHth-I5KYVsI_0\tcat\nHt054jKgWfE_0\tperson\nHt9C8ABsxrg_0\tperson\nHt_bczKGV-0_0\tperson\nHuNIgJEUelo_0\tperson\nHuNIgJEUelo_1\tperson\nHuOzcY9ybpo_2\tdog\nHuVl7peYYF8_0\tperson\nHuVoecmBgpM_2\tbird\nHuVoecmBgpM_1\tbi
rd\nHuZPTuSe7Zw_0\tperson\nHue6Q5JKEKw_0\tcow\nHun4T6fv3cs_0\tperson\nHuqC6CX9uRA_1\tperson\nHuyd-7WlWWU_0\tperson\nHu3xpcZqwRg_0\tperson\nHu9DGxLcg2c_0\tperson\nHu-VYy60p64_0\tperson\nHvHJi-EkL8c_0\tskateboard\nHvIubGltpPY_0\tdog\nHvLq5xDKM6E_2\tbicycle\nHvP4rcOll6k_0\tperson\nHvQGnFuiwtg_0\tcat\nHvTvaPx2hXw_1\ttrain\nHvhkLhJ4YFQ_0\tperson\nHvuLPfhVT3s_0\tperson\nHvyIg5RMLbU_1\tperson\nHvyIg5RMLbU_0\tperson\nHvyzpBvy40o_0\tperson\nHv5sH0eTE_M_0\tdog\nHwSP55CmiCk_0\tperson\nHwS3weg4aQc_0\tdog\nHwS3weg4aQc_2\tdog\nHwY6kiQlICc_0\tperson\nHwdEYJ2bZkg_0\tairplane\nHwdyzravQpY_0\tcat\nHwfLycybCD0_0\tmotorcycle\nHwgmR0Qlm_I_0\tperson\nHwipRH29Hr0_0\tbus\nHwnqezsko-Q_0\tperson\nHwnqezsko-Q_1\tperson\nHwxnH--ot8o_0\tcar\nHw0JhQaRYcA_0\tcow\nHw2Bhz2SkUI_0\tperson\nHw2Bhz2SkUI_1\tperson\nHxMniz8r1x4_0\tperson\nHxP056QWsGY_0\tperson\nHxP056QWsGY_1\tperson\nHxaFZyog34E_0\tperson\nHxaFZyog34E_1\tperson\nHxgU1Dh8wMs_1\tperson\nHxiBpvG82Ys_0\tmotorcycle\nHxq1wNRv5Yg_0\tperson\nHxv6y6I4mvE_0\thorse\nHx19D3w4xGI_0\tgiraffe\nHx_Z9TOIV8U_0\tmotorcycle\nHyJgfYNotwk_0\ttruck\nHyUY7bqdm9Q_7\tdog\nHyUY7bqdm9Q_0\tdog\nHyVLne6RE-A_0\tperson\nHyXjUWAQ970_0\tskateboard\nHygs9OBUgg4_1\tperson\nHyuQCu-z558_0\tmotorcycle\nHywSTw3dtgs_0\tperson\nHywSTw3dtgs_1\tperson\nHy4E2NZEc34_1\ttrain\nHzAOQnmw_bo_1\telephant\nHzAOQnmw_bo_2\telephant\nHzCClfShiwM_0\tperson\nHzCClfShiwM_1\tperson\nHzDzb9xxc6o_0\tperson\nHzESeh3ZV4g_0\tperson\nHzHWWeZEU6E_1\tskateboard\nHzJgpBBIk1o_0\tcat\nHzLm3QfIx9w_0\tperson\nHzLm3QfIx9w_1\tperson\nHzXBY-SJECY_0\thorse\nHzYY4-iAvrk_0\tcow\nHzdSxrJ2oBw_0\tskateboard\nHzkmlCJwvqo_0\thorse\nHzlcc_lAGVo_2\tskateboard\nHzqIVSJNXAU_1\tperson\nHztbwJhPXyk_0\tperson\nHz6I6jLi4NA_0\tdog\nHz8qayZDGpU_0\tperson\nH0Adt_c6kJo_2\telephant\nH0EEB1bPOjE_0\tperson\nH0VjOJvg49Q_0\tbicycle\nH0Ym6NE2ny8_0\tcat\nH0gWl9KRbHo_0\tperson\nH0k2WZec6aA_1\ttrain\nH0k2WZec6aA_3\ttrain\nH0k2WZec6aA_4\ttrain\nH0k2WZec6aA_0\ttrain\nH0u061QsnHw_0\tcat\nH0yhw97jkkY_0\tperson\nH0z8VqDW-vg_1\tairplane\nH01F2fhFpr0_0\telephant\nH097WsXpask_0\tperson\nH097WsXpask_1\tperson\nH1C2ZZeeVs0_0\tcow\nH1Hd5Japfbc_3\ttrain\nH1Hd5Japfbc_0\ttrain\nH1Hd5Japfbc_1\ttrain\nH1Hd5Japfbc_2\ttrain\nH1JIvu1dbbk_0\tperson\nH1JIvu1dbbk_1\tperson\nH1MTfTrQrE0_1\tperson\nH1d68B_jDjI_0\tperson\nH1hg-0_AS9A_0\tcow\nH1xBJoYM7rE_4\ttruck\nH1xBJoYM7rE_5\ttruck\nH117IshzypA_0\tknife\nH144B0rpQh0_0\tperson\nH144B0rpQh0_1\tperson\nH1-_3CvKDzc_0\tbird\nH2Q-46IlKEc_5\ttruck\nH2Q-46IlKEc_6\ttruck\nH2RoEMwxEAk_1\tperson\nH2TqEPsubdM_0\tbear\nH2iTxNLOK1Q_2\tmotorcycle\nH2iTxNLOK1Q_0\tmotorcycle\nH2iTxNLOK1Q_3\tmotorcycle\nH2vkpfO2yqU_0\tperson\nH22P5Z4GfkE_0\tperson\nH29Xe5gG_-s_0\tperson\nH3A2DSw_xNU_1\telephant\nH3GcVWKTVd4_2\ttruck\nH3NrFrjQlfc_0\tperson\nH3exbzmmPQY_0\tperson\nH3jC0oToDjU_2\tperson\nH3jC0oToDjU_3\tperson\nH3jC0oToDjU_0\tperson\nH3o1VsopVFM_1\tbicycle\nH3o1VsopVFM_2\tbicycle\nH3pifBCagTI_0\tperson\nH30IPtBzf_s_5\tskateboard\nH30ifg3HO_I_3\tdog\nH33IRr1Z3-w_1\ttrain\nH36UOsilz4M_0\tperson\nH4Hp-UJYZ_g_0\tbicycle\nH4JiUp8EH3s_0\tzebra\nH4VZD26aqe8_0\tskateboard\nH4VZD26aqe8_1\tskateboard\nH4bN1hcXw9Q_1\tperson\nH4dTHFeYa30_0\tmotorcycle\nH4eE_LAeWXQ_0\tperson\nH4eE_LAeWXQ_1\tperson\nH4gxLA7vTo4_0\tperson\nH4lBmXOi3Uc_0\tdog\nH40G2dsVha4_1\ttrain\nH41XJMKpfFM_0\tbus\nH42hQSjU97o_0\tknife\nH5NqMNaMEiM_0\tbird\nH5YO56LD_dY_0\telephant\nH5YO56LD_dY_1\telephant\nH5iHzuWmtDw_1\tdog\nH5sijKl_Xi4_0\tcow\nH50EXfjT2O0_2\tairplane\nH50EXfjT2O0_0\tairplane\nH50EXfjT2O0_1\tairplane\nH50-_mqAU14_1\tcow\nH55Ru4hgats_2\telephant\nH55Ru4hgats_3\telephant\nH6OhYxXS1So_0\tc
at\nH6UwkC3sYic_0\tcat\nH6ZHYEOcjCI_0\tbicycle\nH6ZHYEOcjCI_1\tbicycle\nH6Z8sZ34ZGw_0\tmotorcycle\nH6dXJIZnH-k_2\ttrain\nH63oHdGMBAs_0\tbird\ngFunUi36tVM_0\thorse\ngFunUi36tVM_1\thorse\ngFvhLM1k-IY_2\ttruck\ngFwCuQBtZiU_1\tumbrella\ngF7IM-CiOdU_7\tbicycle\ngF7IM-CiOdU_0\tbicycle\ngGBEKYXUhbE_0\ttruck\ngGMxVO2zmP4_9\tbird\ngGMxVO2zmP4_1\tbird\ngGMxVO2zmP4_2\tbird\ngGMxVO2zmP4_5\tbird\ngGMxVO2zmP4_8\tbird\ngGSCGkm00jM_1\tbicycle\ngGYN2hnw1SQ_1\telephant\ngGdKtY4p1E0_0\tairplane\ngGt9CVOzJOI_3\tknife\ngGzaN_8PxZw_0\tskateboard\ngG8tfb-eSuo_0\ttrain\ngHC3HqRbW6g_0\telephant\ngHF9PM2MVuw_1\ttrain\ngHvzU7dfBU8_0\tgiraffe\ngHyK46CyQtA_0\tcow\ngH0LLPcn-H8_0\telephant\ngIBZr7Mh05k_0\tbird\ngIMq_fnjtSM_0\tcat\ngISy0wedyW4_0\tboat\ngInHAdlbB60_1\tskateboard\ngIsXFCo7Nt4_1\tdog\ngIxuS1GwPPo_0\ttrain\ngJV63DGM7Ew_1\tcar\ngJa0yNDBFio_3\tperson\ngJa0yNDBFio_0\tperson\ngJa0yNDBFio_2\tcow\ngJfD9eHnos4_1\telephant\ngJn5fXk7dCs_0\tairplane\ngJuZGVWuQQ8_2\tbicycle\ngJ-k_oHkqYc_0\tcat\ngKHR68FmKE8_3\tairplane\ngKHR68FmKE8_0\tairplane\ngKHR68FmKE8_4\tairplane\ngKmF78OWCUc_0\tmotorcycle\ngKqUwiPYSh8_0\tmotorcycle\ngK7dud30V7k_0\tgiraffe\ngK_K33gm3SA_1\tmotorcycle\ngLQWgnWqQ1Y_0\tbicycle\ngLRU7lXCgNw_1\tdog\ngLRexWYaW_Q_0\tskateboard\ngLbADp0AlZU_0\tbird\ngLtnBhTBpkA_1\tboat\ngL3uBv5NWJU_1\tbus\ngL7JySv9H4I_0\tbicycle\ngMAW4Am5_pc_0\tcow\ngMBTewi9VZg_0\tcow\ngMCCgBzug_U_0\tknife\ngMFgEtqbTXs_0\tboat\ngMJuszEOURk_0\tcat\ngMMJH4UYboM_3\tbus\ngMXt8X-xC_g_0\tdog\ngMlNev_l4Yg_0\tbus\ngMlhd1gczF4_0\tairplane\ngMsGe7w79Hg_1\tcar\ngM9tFNvc1xw_0\tcow\ngNDSQ2l9FYg_1\telephant\ngNMkDmfkZ1E_0\tmotorcycle\ngNcGXjn7g9o_0\tskateboard\ngNwKVPIi010_1\tskateboard\ngN2aKPpTpzQ_1\tdog\ngN7-cLfUlt8_4\tgiraffe\ngN7-cLfUlt8_6\tgiraffe\ngOOB0RZmnUA_0\tcow\ngORdlzUa3nQ_1\tbird\ngO48FZrUm88_0\tskateboard\ngO-8RNI2Puc_1\tdog\ngPhcXlQLLRU_0\thorse\ngPrWvEE7yjw_0\tcat\ngPteWZyyJeo_0\tcow\ngP3SQErTTOg_1\tmotorcycle\ngQBW4py4GhY_0\tskateboard\ngQEGmIhhEQ4_0\ttrain\ngQEGmIhhEQ4_1\ttrain\ngQEGmIhhEQ4_2\ttrain\ngQFqppfDRRk_0\tumbrella\ngQLZ5H-n0Uk_4\tknife\ngQVlREJXkik_0\tknife\ngQWTTEHj5Hs_0\tcat\ngQeqE3dgZoM_3\tairplane\ngQe5gykuyi4_1\ttrain\ngQpWY94Fx5E_0\tmotorcycle\ngQpuEhphXHk_0\tcar\ngQpxfwrF7Sc_0\tbus\ngQ6AUvEXuaQ_0\tbicycle\ngQ9HhxeKI4A_0\tmotorcycle\ngQ_SF2MtsUc_0\telephant\ngRFcteFGpLM_0\tskateboard\ngRJGd_HzC-8_0\tknife\ngRJpf6JwJeU_1\tgiraffe\ngRNKgw2D_mE_0\tknife\ngRVrvJioWZ8_1\ttrain\ngRoGrhv1ebI_0\telephant\ngRsOR1tKh8U_0\ttruck\ngR3ihf3rch0_0\tcar\ngSXDTJjj1jk_0\ttrain\ngSi2fNTUsy8_0\thorse\ngSlT3ALqvTM_0\tskateboard\ngS0DTbVQ2x8_1\tknife\ngS25yLrNO98_0\tbear\ngS2-SAccVh0_0\tskateboard\ngS7U-6Z8M2g_1\tknife\ngS_9D3OWXAk_0\tairplane\ngTqgARR0BBQ_1\tboat\ngT27MQBhatA_0\tskateboard\ngUDoTzwZlso_0\tdog\ngUL0-NbHvuA_0\tmotorcycle\ngUMLascwbtU_0\ttrain\ngUNCDmbzxq8_0\ttrain\ngUbc_OUTnOs_0\tairplane\nH7ONEeAkBFo_3\tmotorcycle\nH7ONEeAkBFo_2\tmotorcycle\nH7YUH_GBWdQ_0\ttrain\nH8B-3STVp6E_0\tcat\nH8LitQV6pNM_0\tcat\nH8SccYIiPs8_0\tzebra\nH8coORJpR80_1\tskateboard\nH8k1E1i7AvQ_0\tknife\nH9AQUC0N1zI_0\thorse\nH9JfwPhdCjg_0\tboat\nH9KjlXZYxJU_0\ttrain\nH9KjlXZYxJU_8\ttrain\nH9TUml4LflE_0\tcow\nH9UTvMwaoRg_0\tcow\nH9bbSssKl2o_14\tumbrella\nH9eutGBn3zw_0\tmotorcycle\nH-C6EBylvh4_1\tcat\nH-IoiGsEU5Y_0\ttrain\nH-QKbNwtoH8_1\tcar\nH-gh485Om10_0\tbus\nH-gh485Om10_1\tbus\nH-kkRVEs3Bg_0\tmotorcycle\nH-uiufHSb3s_0\tknife\nH-uvqjsUCLc_0\tdog\nH-uvqjsUCLc_1\tdog\nH-5Ynjv0dQI_1\ttrain\nH-62b99sK_s_0\ttrain\nH-62b99sK_s_1\ttrain\nH_Ei1gRODpw_0\tdog\nH_KMZLSAxMw_0\ttrain\nH_iI201Iqws_1\ttruck\nH_iYHl4pFuQ_0\thorse\nH_mRfG30Gzo_0\tskateboard\nH_1O-OBZ3BA_0\
thorse\nH_6vxd3ckIY_0\tcat\nIADSsAb2KSo_1\tumbrella\nIADSsAb2KSo_2\tumbrella\nIAFApeJ5FvM_1\tmotorcycle\nIAOiNYVeqzE_0\tbird\nIAaINtcnO7A_0\tbicycle\nIAcbsZcN_pM_1\tmotorcycle\nIAkSntQ2Aso_0\thorse\nIAlz_evs7fU_3\tcar\nIApV0rfD9oQ_0\tdog\nIAsXYmK1baI_0\tmotorcycle\nIAwKojHnvtU_0\ttrain\nIBD9tJNb9_o_0\ttrain\nIBFp5y96q78_0\tmotorcycle\nIBFp5y96q78_2\tmotorcycle\nIBKLgBXZFzw_0\tmotorcycle\nIBYJQU6-nGg_2\tcow\nIBYg-hMbb04_0\tknife\nIBm1C4qJtTg_5\tumbrella\nIBm1C4qJtTg_8\tumbrella\nICQbVnaJL_0_0\tbus\nICZ4tinBQZg_1\tknife\nICZ4tinBQZg_2\tknife\nICZ4tinBQZg_3\tknife\nICg3W1-Prhk_0\telephant\nICnAWjPDzRw_0\tcow\nICtLhp-qveM_0\tboat\nIDCBO7W7xpo_0\tcow\nIDNvFEra8mc_5\thorse\nIDNvFEra8mc_1\thorse\nIDNvFEra8mc_2\thorse\nIDNvFEra8mc_3\thorse\nIDNvFEra8mc_4\thorse\nIDO6jw3u3_w_1\tairplane\nIDcxChwEqDs_2\thorse\nIDeGA2EV3WY_0\tairplane\nIDeimFOIbVc_0\ttrain\nIDmwsXLZKUs_0\tcow\nID1faW2L3rM_0\tcat\nIEOg-ZulFR0_1\tbird\nIEPYJyHfP2E_1\telephant\nIEYC-aYAQ40_0\tboat\nIE5qZDd7tWw_0\telephant\nIFGohfPURX4_0\tperson\nIFfS7hatV0s_0\ttruck\nIFkUMGE7bbc_1\telephant\nIFkUMGE7bbc_0\telephant\nIFrHlldbUdQ_0\tcow\nIFvO1O-6vqk_0\ttruck\nIHQvg9gYLjw_0\tdog\nIHSCfRs-J38_2\tskateboard\nIHY0eeHfBcY_4\ttruck\nIHjI35oW0T4_0\tcar\nIHxX0fKU9iM_1\tskateboard\nIH3E7RS6Hn8_0\tcat\nIH9BmEg26Cw_0\tperson\nIIBN7FGNNEs_1\ttrain\nIIBN7FGNNEs_2\ttrain\nIIBN7FGNNEs_3\ttrain\nIIBN7FGNNEs_4\ttrain\nIINTapIzzes_2\tskateboard\nIIw0KKAeBeQ_0\tskateboard\nII0JbbQq-Sg_1\tbird\nII61z65eDCY_2\tcow\nII61z65eDCY_0\tcow\nII94vSsb4Uc_0\tcar\nII_okDlDaO0_0\tcat\ngUt0vA8_1Ow_0\tairplane\ngUvZ3RC9tEU_0\tknife\ngU3SNUS1_ng_0\tbicycle\ngU4mBoB-b7k_1\ttrain\ngVAp7rt84ic_2\tbicycle\ngVCrRXledlU_1\tboat\ngVCrRXledlU_0\tboat\ngVV-5JdLuXk_3\tcar\ngVXzT_h1SFI_3\thorse\ngVXzT_h1SFI_4\thorse\ngVXzT_h1SFI_2\thorse\ngVaB7hwBhTA_0\tcat\ngVjL5txcFMI_0\tknife\ngVrTFXdPWJ8_0\telephant\ngVxqk8tLXL8_0\ttruck\ngV27xS9pqNQ_0\ttrain\ngV3Xmwy3RKo_6\ttrain\ngV3Xmwy3RKo_13\ttrain\ngV9A5NfFexQ_0\tcar\ngWcacGgcxYU_4\tbear\ngWlmYVY4kW4_1\tbicycle\ngWnhQi-zfEE_0\tskateboard\ngWpNWuo7vio_2\telephant\ngWpNWuo7vio_3\telephant\ngWsOR7UiwDs_0\tairplane\ngWz5ZMzC58s_0\tcar\ngXBIzdmmHbA_1\tbird\ngXEHUZgPCGg_4\tbear\ngXFmghAzaVg_1\tmotorcycle\ngXGvO4k4xQY_0\ttruck\ngXHsyuynhso_2\tknife\ngXW33K91X7c_0\tbicycle\ngXn0Y5X5MJE_1\tzebra\ngXn0Y5X5MJE_0\tzebra\ngXt0u16Y6ZY_0\tboat\ngY_Ey8Ps_ZE_0\tcow\ngZhsGXSn5bU_0\tmotorcycle\ngZqGyIMgMbs_0\tbicycle\ngZxcxQBlx0s_0\tcat\ngZzmloffFW4_0\tbus\ngZ8kZt451Ww_3\thorse\ngZ92ZDty9wI_0\tskateboard\ngaCEAVQd1-M_1\tbird\ngaS7x3F3gpk_0\tbicycle\ngaS7x3F3gpk_1\tbicycle\ngaS7x3F3gpk_2\tbicycle\ngaS7x3F3gpk_3\tbicycle\ngalykATgRC0_0\tcow\ngaqS-4IaQ5c_2\tbus\ngbA3ItatxL8_0\tskateboard\ngbE0vzWpHj0_1\tknife\ngbE0vzWpHj0_4\tknife\ngbGl_-TnPjk_0\tbird\ngbI95ZXEUz0_0\tknife\ngbTTJah5oMw_0\telephant\ngbTTJah5oMw_2\telephant\ngbgbqiiEKVs_0\tgiraffe\ngcBaPcA_1_0_0\ttrain\ngcExbr9FO94_0\tgiraffe\ngcJ7XqXHPwM_0\telephant\ngcT_dy3neEk_8\tbicycle\ngcXhYL06Acs_5\tbicycle\ngcYBNx0fUg8_0\ttruck\ngchz9HDvVDk_0\ttrain\ngc80cGOHyKM_0\tknife\ngdCpPYwBVlY_0\tknife\ngdEBkAYaDPw_1\telephant\ngdELg0NrkdA_0\tdog\ngdvUXfsBMIk_0\ttrain\ngdzzJI7xjBg_0\ttrain\ngdzzJI7xjBg_1\ttrain\ngd2O-Z5dOIk_0\tairplane\ngd4r5aA8jeg_0\tbird\ngd4r5aA8jeg_1\tbird\ngeBwGOC-lX4_0\ttrain\ngeBwGOC-lX4_1\ttrain\ngeBwGOC-lX4_2\ttrain\ngeBwGOC-lX4_3\ttrain\ngeQCe6Cq5MU_1\telephant\ngeQCe6Cq5MU_2\telephant\ngeWChvEotKU_0\ttrain\ngefGPLN-abw_0\tperson\ngfGsOzQ7gto_0\tbear\ngfS7FJH6Vkk_0\tbear\ngfUC20NWtjU_0\tmotorcycle\ngfVlQhN0BBU_0\tbicycle\ngfuVNdXffSs_0\tairplane\ngf1mvdt9kbI_0\thorse\nggIyqAThI1g_0\tbird\ng
gPHtWoCcKs_3\tumbrella\nggTFLaNIJck_0\ttrain\nggVLptkmsys_0\ttruck\nggpz03j1REI_0\tbus\ngg3sG7O2P-g_0\tbus\nghEfyxUaVGs_1\tcat\nghIGC_DOfuk_0\thorse\nghqqgJWnVEU_0\tknife\nghyp-SKVuC8_0\tmotorcycle\ngiVGzMF1Yo4_0\tskateboard\ngiVGzMF1Yo4_1\tskateboard\ngipHWMPB-W4_3\tbear\ngipHWMPB-W4_1\tbear\ngitOEvGnoYk_0\tairplane\ngi9bnW7uLkE_0\tcat\ngjGlUXCT9A4_1\tknife\ngjK5A6cIEnw_0\tdog\ngjRhqzTAkWw_0\tcow\nIJFaomtLVDE_0\tcat\nIJNUwvacbKY_0\tcow\nIJVUMGoBSQs_4\tcow\nIJXVtb2GeJ4_0\ttrain\nIJdYiBYP31A_0\tmotorcycle\nIJlBmhH72m4_1\tcow\nIJ6g4ZRBksE_0\tcat\nIKLj0LJIMKs_4\tairplane\nIKLj0LJIMKs_5\tairplane\nIKLj0LJIMKs_2\tairplane\nIKftyV_zwkE_0\tskateboard\nIKqmWAu3GF0_0\tdog\nIK7Mnvty4VY_0\tperson\nIK8IJWsxg3M_5\tairplane\nIK8IJWsxg3M_6\tairplane\nILAGhYr9yts_1\tmotorcycle\nILLYlwlFTzA_0\telephant\nILmTjHZqkCo_1\ttruck\nILqxie6aqXg_0\tbicycle\nILqxie6aqXg_1\tbicycle\nILqxie6aqXg_2\tbicycle\nIL1HokSKOyY_0\tcat\nIL9r35lU8So_0\tskateboard\nIMD3U_DzO3E_0\tmotorcycle\nIMD3U_DzO3E_1\tmotorcycle\nIMde-053G78_0\thorse\nIMulJdQXZvM_0\ttrain\nIM7vwh5qua4_0\tcow\nIM8dlwNTjXU_0\tcow\nIM8v82x7ovA_2\ttrain\nIM8v82x7ovA_1\ttrain\nINFs2lfikXE_1\tknife\nINULdzdrdys_0\thorse\nINXkuJ9WvIU_0\ttrain\nINZhGblywrk_0\tbus\nINkhg9y4asY_0\tbear\nINtj4nfjRA0_1\tbear\nIN2TGHJrQEg_2\tskateboard\nIN2TGHJrQEg_0\tskateboard\nIN2TGHJrQEg_1\tskateboard\nIOPYEZzmeqg_0\tcar\nIOPYEZzmeqg_1\tcar\nIOQuWawPM3k_0\tbird\nIOfUvlEkN7g_0\tbus\nIOiqrNof90k_1\tknife\nIO3Z-ebx_f8_5\tbus\nIPI2_GXx1tI_0\tbird\nIPWixEFBDOY_0\thorse\nIPfYf-nFKic_0\tairplane\nIPfYf-nFKic_1\tairplane\nIP1CH8MMir0_0\tknife\nIQOfCy4FW8w_0\tskateboard\nIQXAYnslAnc_0\tcar\nIQoVuUTZILY_0\tairplane\nIQsV_hTCyMA_1\tbicycle\nIQwk7Ge6Apk_0\ttruck\nIRK6-ixyaVI_0\telephant\nIRSbjN-mnJI_0\tskateboard\nIRZBnQJoKiU_0\tskateboard\nIRztQZ4bigY_0\tcar\nIR9A3u83crI_4\telephant\nIR-PGdIPgcE_0\tskateboard\nISAnMprDgCk_0\tskateboard\nISJW4GuahWg_2\tdog\nISSTEs8xDWk_0\tumbrella\nISYwpUKxHJU_1\telephant\nISYwpUKxHJU_2\telephant\nISYwpUKxHJU_0\telephant\nISud5E9hZxU_0\ttrain\nISud5E9hZxU_1\ttrain\nIS9s3kJzTcA_0\tairplane\nITCcMWC_RW8_0\tumbrella\nITbwhPVxFv0_0\tumbrella\nITrisbHlaJw_1\ttruck\nITzBy7T7_fI_1\tumbrella\nIT6TArZww6A_0\tcat\nIT8VqGbdH_A_0\thorse\nIT_zQ44PPOo_0\tdog\nIUH4PYmObvU_0\tdog\nIUO1sDZgGHs_0\tbird\nIUdyfRMOyX8_0\telephant\nIUdyfRMOyX8_8\telephant\nIUdyfRMOyX8_1\telephant\nIUdyfRMOyX8_2\telephant\nIUdyfRMOyX8_3\telephant\nIUdyfRMOyX8_4\telephant\nIUdyfRMOyX8_5\telephant\nIUdyfRMOyX8_6\telephant\nIUdyfRMOyX8_7\telephant\nIUf7a2WuoBw_0\ttrain\nIUgkMOA3siY_1\tbus\nIUlDlS2KD-k_0\tbicycle\nIUlDlS2KD-k_1\tbicycle\nIUzpvnXep7M_0\tbear\nIU7x7I53cng_0\telephant\nIVFq204Rr9c_0\tairplane\nIVHx3I13xdQ_0\tboat\nIVSJSu0PlsI_0\ttrain\nIVVFeaTw6IE_0\tbicycle\nIVjCZS2Fo7k_0\tbird\nIVpmCnL5cE8_1\tgiraffe\nIVrBPzhFMi8_1\tmotorcycle\nIVrBPzhFMi8_2\tmotorcycle\nIVzxeeJEtiY_1\tbear\nIV6EMw4XYco_0\tskateboard\nIV6EMw4XYco_1\tskateboard\nIWCZ1PDW99k_0\tmotorcycle\nIWVIIKxipc8_0\tmotorcycle\nIWVIIKxipc8_1\tmotorcycle\nIWn16DCfLbc_1\tknife\nIWumeAEXWVo_1\tboat\nIWu47p4l06Y_5\tumbrella\nIWu47p4l06Y_6\tumbrella\nIWu47p4l06Y_3\tumbrella\nIWu47p4l06Y_4\tumbrella\nIW1cFMDjPUk_0\tbear\nIW2mFJ8iw6Y_0\tbird\nIW4ZnmQeNtA_1\telephant\nIW4g0kfA3GE_0\ttruck\nIW5Vgh3SE-I_4\telephant\nIW7TwQ-hY7I_0\tmotorcycle\nIW7TwQ-hY7I_1\tmotorcycle\nIXTgztKfRQU_0\tskateboard\nIXVCCLG3_cw_0\tbird\nIXyV2vpIEA8_0\tdog\nIXyV2vpIEA8_2\tdog\ngja4H3sGrqQ_0\tcar\ngjdlZhmnGbk_0\tairplane\ngjfdI7hO92E_0\tbird\ngjquLAxFRWw_2\tumbrella\ngjx4xu1TyWU_1\tcow\ngj7W2zjQApw_3\tknife\ngkEoTLpAw7g_0\tairplane\ngkLRnt1OCH4_7\thorse\ngkRqNmGQbPI_0\t
skateboard\ngkXKCuc0Moc_0\tskateboard\ngkXKCuc0Moc_1\tskateboard\ngkb4Ya5QW9M_0\tbird\ngkb4Ya5QW9M_1\tbird\ngkf0Bcsuhlc_1\tcar\ngkf0Bcsuhlc_3\tcar\ngkf0Bcsuhlc_4\tcar\ngkiUpdrObXo_1\telephant\ngkz49y5qcvc_0\thorse\ngkz-LCZcGtc_5\tbird\ngk1x_qYyDl4_0\tcat\nglNWqIolkq8_0\tskateboard\nglOskJOtnTU_0\tknife\nglOskJOtnTU_2\tknife\nglSdaND81E8_0\tperson\ngltHxIp_ma8_0\tbird\ngmCT9tUPTB4_1\tgiraffe\ngmdxOMQMgnw_0\tairplane\ngmnvPoB2cNY_0\tmotorcycle\ngm53_sbr85Q_1\tbird\ngm53_sbr85Q_2\tbird\ngm9M-m4mCZ4_0\tcar\ngm9M-m4mCZ4_2\tcar\ngnA9QVNkmTU_1\tknife\ngnD6mU9A2oo_0\telephant\ngnEttGTQqQ4_1\ttrain\ngnEttGTQqQ4_0\ttrain\ngnF9YJM1jaE_1\tcow\ngnGvXHS4UDs_0\tairplane\ngnM9SRiFh7M_0\ttruck\ngnM9SRiFh7M_1\ttruck\ngnPrHGB85WY_0\tbus\ngnTj3krZROI_4\tboat\ngnVo44q-XDI_0\tknife\ngnb1N_MLdcY_2\telephant\ngnwCzU63_YY_0\tperson\ngn2XuCFK-hE_0\ttruck\ngn2bME2rmGw_0\ttruck\ngoIfg0C9kmM_0\tdog\ngoOIZE0j6DM_0\tbicycle\ngoSyNORcJ00_0\tairplane\ngok9kHQ77dY_0\tskateboard\ngollBTymf8I_1\tbus\ngomnpeJd5zw_0\tboat\ngonzAOezSOQ_1\ttrain\ngonzAOezSOQ_2\ttrain\ngosq350N9dI_2\tskateboard\ngoyIWrU1Lbo_0\tcat\ngpBoXY6MM5E_0\tdog\ngpEiPRMcPwo_4\tbear\ngpY-o8xPA3w_0\tbicycle\ngpa4WfWCLa0_1\telephant\ngpa4WfWCLa0_0\telephant\ngpa9p4XNeKc_3\tbear\ngpbdiDEPd-s_0\tskateboard\ngpjqG97-SyQ_0\thorse\ngpmdLMUX53k_0\tbear\ngp2SDJHMADo_3\thorse\ngp2SDJHMADo_0\thorse\ngp2SDJHMADo_2\thorse\ngp9q0jvTKo0_0\tbird\ngqNgT7LxZSQ_1\tbus\ngqOfm9XTr6M_3\tairplane\ngqOfm9XTr6M_0\tairplane\ngqOfm9XTr6M_1\tairplane\ngqOfm9XTr6M_2\tairplane\ngqbDkeOx0mA_0\tmotorcycle\ngqgQpw4DWZA_0\tgiraffe\ngqhweewmNn8_0\tskateboard\ngqkLzCkKKtE_0\tskateboard\ngqucExXpPys_0\tcar\ngqxvRzuWcrI_0\tbird\ngrBVFo1wSjs_1\tbird\ngrFPTYaKb7Q_0\tbus\ngrI0uf6IwBw_0\tbear\ngrNkPqf-ySE_0\tdog\ngrWw42izM6M_1\ttrain\ngrWw42izM6M_2\ttrain\ngrWw42izM6M_0\ttrain\ngrbP7mKMX_A_5\tairplane\ngrbP7mKMX_A_1\tairplane\ngrbP7mKMX_A_4\tairplane\ngrdEE264TwM_0\tmotorcycle\ngrdIYaNewv0_0\tmotorcycle\ngrhIgcHgpOw_0\tbus\ngsCvhqZCWX0_0\tdog\ngsUrGSN-k00_0\thorse\ngsbJ13WiSvE_1\thorse\ngsfIYIQ1siA_0\tskateboard\ngsvn88OsH_8_3\tknife\ngsv7RJk7dtY_0\tdog\ngs_C12A8Wq4_1\tbicycle\ngtFIMtVrAGk_0\tbicycle\ngtNJSexRjxE_0\tcar\ngtNdVTTd0tg_0\tbicycle\ngtNdVTTd0tg_2\tbicycle\ngtOa6rSatLA_0\tcow\ngtQ_uFTKEck_1\thorse\ngtii5vwjSTY_1\tdog\ngtuj1cOmYSs_1\ttrain\ngtuj1cOmYSs_3\ttrain\ngtz5ClHTSVo_0\tcat\ngtz5ClHTSVo_1\tcat\ngt_WHCkauOA_1\tknife\nguVl_gp0sJE_0\tbus\ngugP5f2JRJ0_1\tbear\ngugP5f2JRJ0_0\tbear\nguh1OUkdIGE_0\thorse\nguktzkv1els_0\tboat\nguv5reh2NH4_0\tboat\nguxRXiegac0_4\tbird\ngvNxDnFriAI_0\tskateboard\ngvcioONBIcE_0\ttrain\ngviQTbs7dIk_1\tbird\ngvjcggbLXRo_0\telephant\ngvjcggbLXRo_1\telephant\ngvjcggbLXRo_2\telephant\ngvk0hzlYu9E_0\tumbrella\ngvraCN0RYko_0\tdog\ngvtY3fwbgdc_0\tcow\ngvuBfR3HXac_0\telephant\ngv4sQFTuJ-k_0\telephant\ngv7qY66lOhs_0\tgiraffe\ngv8pF9t1zYM_0\telephant\ngwKq56_M6Kc_0\thorse\ngwN_p_IRuoo_0\thorse\ngwP-6gOPn2c_0\tmotorcycle\ngwTc-69C_P4_0\tknife\ngwTyjJwBgRk_0\thorse\ngwy7eePYryM_1\tboat\ngw9MjutMhLs_1\tairplane\ngw9MjutMhLs_3\tairplane\ngw9MjutMhLs_0\tairplane\ngw9MjutMhLs_2\tairplane\ngxKnyBP8_cs_0\telephant\ngxejG9D0guY_1\tperson\ngxgZg6BU3ds_0\tdog\ngxgZg6BU3ds_1\tdog\nIX4HjI_9vLY_2\tdog\nIX4IwgbTdCk_0\tdog\nIYBF45M9nTc_0\tskateboard\nIYBzvotFEYo_0\tmotorcycle\nIYZZ-K_Ygpo_0\tbicycle\nIYdXz1cOCWc_0\tgiraffe\nIYukRQKxhFI_0\tperson\nIYukRQKxhFI_1\tmotorcycle\nIZESZPVT0zk_3\tbear\nIZGady38Nh8_0\tbird\nIZIPpBl_h0Q_5\ttruck\nIZIPpBl_h0Q_0\ttruck\nIZIPpBl_h0Q_6\ttruck\nIZJ1PO3Fkuw_0\tumbrella\nIZLMXYU4A-0_0\tairplane\nIZLMXYU4A-0_2\tairplane\nIZTfd31H0AI_0\tbicycle\nIZUO1x0QT1I
_1\telephant\nIZ2nFUgP-Pw_1\telephant\nIZ2nFUgP-Pw_5\telephant\nIZ2nFUgP-Pw_6\telephant\nIZ2nFUgP-Pw_3\telephant\nIZ2nFUgP-Pw_4\telephant\nIaAPZOFgclo_1\telephant\nIaG7siKVlak_0\tgiraffe\nIaxZJVx5ptw_0\ttruck\nIaxZJVx5ptw_1\ttruck\nIaxZJVx5ptw_2\ttruck\nIaxZJVx5ptw_3\ttruck\nIa0DjYXcBWc_8\telephant\nIa0DjYXcBWc_4\telephant\nIa0DjYXcBWc_5\telephant\nIa0DjYXcBWc_6\telephant\nIa0DjYXcBWc_9\telephant\nIa0DjYXcBWc_10\telephant\nIa0DjYXcBWc_11\telephant\nIa0DjYXcBWc_12\telephant\nIa0DjYXcBWc_13\telephant\nIa0DjYXcBWc_14\telephant\nIa0DjYXcBWc_16\telephant\nIa0DjYXcBWc_18\telephant\nIa0DjYXcBWc_19\telephant\nIbEpwiOUFEI_0\tdog\nIb15GlTvqTQ_2\tskateboard\nIb2u6u-j2vk_0\tskateboard\nIcEs4vbIcDM_0\tumbrella\nIcSumCpVOy0_0\tskateboard\nIcZ2D-MawSg_0\ttruck\nIciJuq7ZY6o_0\telephant\nIckUkdfRndY_1\tknife\nIc1cufihs-0_0\telephant\nIc1cufihs-0_1\telephant\nIdSlvHXTrmE_1\tskateboard\nIdXPNOQD97w_0\tmotorcycle\nIdabN3kTjSk_0\tskateboard\nIdrTVVio1U4_0\tdog\nIdvQme2elLk_1\ttruck\nIdvQme2elLk_2\ttruck\nIdvQme2elLk_3\ttruck\nId6HsaEvZ0k_0\tperson\nIeB4Nf3h7T4_0\tbus\nIeENvG3Qtk0_5\telephant\nIeFUkGY1b4Y_4\telephant\nIeXb8CHr4ms_0\ttrain\nIefPtlA5ebA_0\tmotorcycle\nIehTemq8EYc_27\tbicycle\nIehTemq8EYc_28\tbicycle\nIehTemq8EYc_0\tbicycle\nIehTemq8EYc_6\tbicycle\nIehTemq8EYc_11\tbicycle\nIehTemq8EYc_15\tbicycle\nIehTemq8EYc_17\tbicycle\nIehTemq8EYc_19\tbicycle\nIejh8w6egIA_0\tumbrella\nIek9nAfsymA_0\tbus\nIewJcdqOzCY_0\ttrain\nIewJcdqOzCY_1\ttrain\nIe4Ct_HRDNw_1\tdog\nIe5lfGQndBs_0\tairplane\nIe8dc7EO7VI_0\tbicycle\nIe8dc7EO7VI_1\tbicycle\nIe8dc7EO7VI_2\tbicycle\nIfBft2ltqqE_0\tskateboard\nIfBft2ltqqE_1\tskateboard\nIfFnkz6EUno_1\thorse\nIfGZXa16ZnQ_0\tknife\nIfTrYE-Ox50_0\tcat\nIfZDLHBP_qk_0\tbus\nIfpbe7xlKp4_0\ttruck\nIf4WPZY4LIY_0\telephant\nIf8EotoXQVQ_1\ttruck\nIgO9_kN8D5I_0\tcat\nIgRs6nmhv2w_0\tcat\nIge9Idj8fDw_3\tcow\nIge9Idj8fDw_2\tcow\nIg0Luv6UlkE_1\tbicycle\nIg1JdzucmLI_0\tboat\nIg9jZPM0n2A_0\tcar\nIhR6ePM1wRw_0\tbird\nIhXAXy3VAqA_0\tcat\nIhdGvFfk3Ks_0\tbird\nIhlRPxknT9E_1\tmotorcycle\nIhp3YZGcRjM_0\thorse\nIhp3YZGcRjM_1\thorse\nIhsr3gT-u00_0\tcow\nIiBzrow5m9w_0\tcat\nIiH0f7VOXTY_4\tairplane\nIiH0f7VOXTY_2\tairplane\nIiH0f7VOXTY_3\tairplane\nIie6uM_sdLE_2\ttruck\nIie6uM_sdLE_5\ttruck\nIiscR53FEz0_1\tairplane\nIiy_W2tIOWI_0\tboat\nIi9URMIXJjc_0\tdog\nIjHqTBt-tzY_0\thorse\nIjMLYR0bH6g_0\tcow\nIjf2ZMTxDUs_0\tcow\nIj57BoIbMws_0\ttrain\nIkOG3ZnCvY4_0\tcow\nIkVifrtYlcI_0\tskateboard\nIklc7ijgOtA_0\thorse\nIkl-nlqwUJA_2\ttrain\nIkqFTjEXf4g_0\tmotorcycle\nIkrKcORoFLI_0\tcat\nIk4UxtlIrw0_5\tairplane\nIk4UxtlIrw0_13\tairplane\nIlJT6oek8KQ_0\tdog\nIlNV-gFlp3Q_0\tumbrella\nIlshSY2CGU0_0\tcow\nIl1TKTSRPO4_0\ttrain\nIl7GtfxmBlQ_0\tskateboard\nImCV4d0kYxY_0\tskateboard\nImEKl15Aipo_2\tbear\nImOLHl6gwLE_0\tgiraffe\nImO7oG_YuSU_0\ttrain\ngyBWGyhFuWg_0\telephant\ngyBWGyhFuWg_1\telephant\ngyBWGyhFuWg_2\telephant\ngyBWGyhFuWg_4\telephant\ngyBWGyhFuWg_7\telephant\ngybSfaDRdVA_2\tairplane\ngy3zF39Y7B8_0\tairplane\ngzQrsNwx8MQ_1\ttruck\ngzTHA0tMocM_0\tbird\ngzUj7KfvRPY_0\ttrain\ngzVdw-5l3sY_5\tbear\ngzWwT4ufwFY_0\tbicycle\ngzwzd6nOPoI_3\tbear\ngz13nfjIblU_0\tskateboard\ng0Jq0uIY3i0_2\tknife\ng0LufqNJtss_1\telephant\ng0LufqNJtss_2\telephant\ng0SdZmm5Mm0_0\thorse\ng0W6U-p-T2c_0\thorse\ng0om0nrfC4w_5\tairplane\ng0om0nrfC4w_1\tairplane\ng0tXovGqqSE_0\tcow\ng0zcJWO1MbU_1\tairplane\ng02OQmAgfo4_0\ttrain\ng02OQmAgfo4_1\ttrain\ng04xUjb4z0w_0\tbicycle\ng05TJKB5TL0_0\telephant\ng05TJKB5TL0_1\telephant\ng05TJKB5TL0_2\telephant\ng1HtoWJ3NjA_0\tairplane\ng1UUBEfyzJ4_0\thorse\ng1UUBEfyzJ4_1\thorse\ng1ZtaoEqtjI_0\tbus\ng1j_9A4-PL4_0\tcow\ng
1n74kWqKFM_0\ttruck\ng1vq3JO3eH0_0\tskateboard\ng12DCVqfKjM_0\tairplane\ng13JzTyNCPY_0\ttruck\ng17hrSF1YN8_1\tbear\ng2Hh_97o7jY_0\tperson\ng2KeNy_WECo_0\tbus\ng2MUK80Ht8k_0\thorse\ng2MUK80Ht8k_1\thorse\ng2MUK80Ht8k_2\thorse\ng2MUK80Ht8k_3\thorse\ng2MUK80Ht8k_4\thorse\ng2cCr0rRIeo_0\tmotorcycle\ng2vRpfpQuNE_1\tmotorcycle\ng2-SNBvYdNc_3\tcar\ng2-SNBvYdNc_2\tcar\ng3DAFznLlXw_0\telephant\ng3e6vDSvpN4_0\tskateboard\ng3g7M2Xv3JY_0\tzebra\ng3ytRwjgoMI_2\thorse\ng3ytRwjgoMI_3\thorse\ng30xOR9j3_A_0\tskateboard\ng30xOR9j3_A_1\tskateboard\ng38MDXW9ndc_0\telephant\ng4BX8_C-NeQ_1\tdog\ng4KzjuhixSo_0\tmotorcycle\ng4KzjuhixSo_1\tmotorcycle\ng4R5jZXlnl4_0\ttruck\ng5OKbEXlegI_0\tcow\ng5SIvfoi7tE_2\tbird\ng5S-76eh6vs_0\tcar\ng5ztjA03q5k_0\thorse\ng55_MKVNAE8_0\tmotorcycle\ng57hZ17etp8_0\tskateboard\ng5-55T7AzUE_1\tskateboard\ng7Qk-cV3IFs_1\tcar\ng7YvJasRFj0_0\tskateboard\ng7fZhFRdYJs_3\tzebra\ng7oMLF6ZfT8_0\thorse\ng7oMLF6ZfT8_1\thorse\ng8aScpqmhVU_0\tumbrella\ng8iDSRkz_go_1\tboat\ng80ZYUNhRME_1\tdog\ng9Am-b3OqbI_0\ttruck\ng9RM9VSJPIY_0\tknife\ng9WrMIn5AkI_0\tskateboard\ng9sD-4RBa3Y_0\tmotorcycle\ng9uOEJm7wdw_0\telephant\ng9yESRreg5k_0\tboat\ng9zLmd4IZ78_0\tbus\ng91mK1sMiSI_1\telephant\ng9-6tclIBcc_0\tmotorcycle\ng-Dfzs3HQ8w_0\tboat\ng-EVS_QxLxA_0\thorse\ng-F4Eig_Rxc_0\tmotorcycle\ng-F4Eig_Rxc_2\tmotorcycle\ng-F4Eig_Rxc_1\tmotorcycle\ng-JTM0dCFFA_0\tcow\ng-SJXmYYHqI_0\ttruck\ng-SlOveVnAs_0\tcow\ng-Z7CA3qr1A_0\tskateboard\ng_B2r70EsjY_2\thorse\ng_XW0YLzND0_0\tmotorcycle\ng_XW0YLzND0_1\tmotorcycle\ng_XW0YLzND0_2\tmotorcycle\ng_dN59QhubM_0\tperson\ng_jq8Uy4P2s_0\ttruck\nhAHsYyTOJoI_0\tcow\nhAIBcR5MAVE_0\tboat\nhAUD4Cy2GiM_0\tcow\nhAUD4Cy2GiM_6\tcow\nhAUD4Cy2GiM_1\tcow\nhAUD4Cy2GiM_3\tcow\nhAUD4Cy2GiM_4\tcow\nhAUD4Cy2GiM_5\tcow\nhAVbFSsRfOY_0\tairplane\nhAcx9u12Rd0_1\tcat\nImZcOQCdJng_0\tskateboard\nImiKNikVSsM_0\thorse\nImqKWexMOEA_0\tbird\nImuhe4E1pxo_0\tcar\nImy4SpqoC4k_0\tcat\nIm3ooIguQHk_4\ttrain\nIm3ooIguQHk_5\ttrain\nIm3ooIguQHk_6\ttrain\nIm3ooIguQHk_0\ttrain\nInEZSi4Zz08_4\ttrain\nInEZSi4Zz08_1\ttrain\nInEZSi4Zz08_2\ttrain\nInTq6s23Ygc_0\tcat\nInn1lo0hbX0_0\ttrain\nInvv-JPzV-0_0\telephant\nIn_dBFPRoso_1\tairplane\nIobdPoAtEB0_0\tbear\nIoqRCAQzibw_1\tskateboard\nIoqRrAswOwY_5\tbear\nIoqRrAswOwY_4\tbear\nIo5wOOkpkdE_0\tskateboard\nIpOFHasyloc_0\tcat\nIpQQ9QabgiU_0\tbear\nIpVCKTRou10_1\ttruck\nIpVCKTRou10_3\ttruck\nIpVCKTRou10_4\ttruck\nIpYZmcVrqdQ_0\tcow\nIpnTUQCHioc_0\tgiraffe\nIp0c_3xCHRA_2\thorse\nIp-N_PYIqhA_0\tmotorcycle\nIqv9963BN8w_0\tcat\nIrAxUS0aBTQ_0\tbird\nIrDY9nE1V2I_1\tmotorcycle\nIr4CkmTmSXQ_0\tcow\nIsSCjgdAQiE_0\tdog\nIslqPZDUBHI_0\tmotorcycle\nIssSzh7Z-vo_0\tumbrella\nIsxRqs7KcbQ_0\tcat\nIs2E8gFNBWo_4\tbear\nIs2E8gFNBWo_1\tbear\nItL-C-szpU8_0\ttruck\nItiAXqRQm3A_1\tknife\nItkxwET4PNc_0\tdog\nItzhXkBVmEY_0\tcar\nItzlBA8cl3c_2\tairplane\nIt_dJluX63g_0\tcat\nIuN_risviek_0\tgiraffe\nIuZ6JD-k2nM_0\tdog\nIuk4W5KJbQ8_0\tbus\nIuwJz5d-8J4_0\tumbrella\nIuw0f-Y8t6I_0\tairplane\nIuw0f-Y8t6I_1\tairplane\nIu26NyEUoGY_2\tboat\nIu5GqI9oVnk_0\tmotorcycle\nIvJLhgaveaw_0\tskateboard\nIvMiQ2e-5hQ_0\tbus\nIvMiQ2e-5hQ_1\tbus\nIvX1MeQN-e0_0\tcat\nIvZSk33MtAc_0\tmotorcycle\nIvZqPTK9DEQ_0\tmotorcycle\nIvZqPTK9DEQ_4\tmotorcycle\nIvZqPTK9DEQ_3\tmotorcycle\nIvfWyYn_ifg_0\telephant\nIvjNeTpV6hs_0\thorse\nIvyftS2bPuo_10\tairplane\nIvyftS2bPuo_0\tairplane\nIvyftS2bPuo_2\tairplane\nIvyftS2bPuo_6\tairplane\nIvyftS2bPuo_7\tairplane\nIvyftS2bPuo_9\tairplane\nIwcC1J_ImAs_0\tcar\nIwd7i4kvS5c_0\tcar\nIwfpNUPSvpw_0\tmotorcycle\nIwgX5DfmIQo_0\tbicycle\nIwhf27USDD4_0\tmotorcycle\nIwmwVP_e5Ag_0\tbus\nIwxmbUX4fcg_0\tcow\nIw6-0LYvEmQ
_0\tbird\nIw6-0LYvEmQ_1\tbird\nIw7zBsW9W5Y_0\ttrain\nIxLbLqfxrhg_1\tboat\nIxNA0hdkWGg_0\tcow\nIxObyCZ6OfY_4\tgiraffe\nIx8eS24W75g_4\tairplane\nIx8eS24W75g_5\tairplane\nIx8eS24W75g_0\tairplane\nIx8eS24W75g_1\tairplane\nIx8eS24W75g_2\tairplane\nIx8eS24W75g_3\tairplane\nIyNmzxdv8-Q_0\tbird\nIyQlh0wdd9I_8\tboat\nIyQlh0wdd9I_7\tboat\nIyU3NizvZuM_0\thorse\nIyU3NizvZuM_5\thorse\nIyj9D6cwI5o_0\tbicycle\nIyk9-k1RP-M_0\tcar\nIyk9-k1RP-M_1\tcar\nIyk9-k1RP-M_2\tcar\nIys_rL0bPcc_4\tboat\nIy4SrujSLuQ_1\telephant\nIy4SrujSLuQ_5\telephant\nIy4SrujSLuQ_6\telephant\nIy4SrujSLuQ_3\telephant\nIzC8vjFriRE_0\thorse\nIzPS29ghTxo_0\tknife\nIzQjjBqimYw_5\telephant\nIzQjjBqimYw_10\telephant\nIzQjjBqimYw_11\telephant\nIzQjjBqimYw_0\telephant\nIzQjjBqimYw_1\telephant\nIzQjjBqimYw_2\telephant\nIzQjjBqimYw_3\telephant\nIzQjjBqimYw_4\telephant\nIzQjjBqimYw_6\telephant\nIzQjjBqimYw_7\telephant\nIzQjjBqimYw_9\telephant\nIz4_9EtiVXc_0\tmotorcycle\nIz8cco4VLow_0\tcow\nIz8gKIZcfqo_0\tbear\nIz8uzZuBiXs_0\tbird\nI0eY-kKi2FM_0\tumbrella\nI0iEaW1Qg_o_1\tbear\nI0oVkr613Rw_0\tskateboard\nI0voLEPKkG8_0\thorse\nI0voLEPKkG8_1\thorse\nI0voLEPKkG8_2\thorse\nI0voLEPKkG8_3\thorse\nI0voLEPKkG8_4\thorse\nhAplCSSZqAs_0\tairplane\nhAplCSSZqAs_1\tairplane\nhAteY2rkmVg_8\tbus\nhAteY2rkmVg_1\tbus\nhAuFEp75jVo_0\ttrain\nhAzefhyFMN4_0\ttruck\nhAzsdnh5Iq8_0\telephant\nhA_YzyjSVZM_0\tbicycle\nhBDc0K6CvHg_0\tbus\nhBDvdp2RCCw_1\tairplane\nhBDvdp2RCCw_2\tairplane\nhBDvdp2RCCw_3\tairplane\nhBDvdp2RCCw_5\tairplane\nhBDvdp2RCCw_7\tairplane\nhBKuHV_S8lM_0\tskateboard\nhBOhA_sljfE_0\tumbrella\nhBcYx5Uc-vw_0\tcar\nhBcZZeXsCaw_0\tbicycle\nhBgxILtRUIc_0\tcat\nhB23PCerELA_0\tskateboard\nhB-M9w3C_Tw_0\tboat\nhCB731pKdcg_1\ttrain\nhChqLLLAmF4_1\tbird\nhChqLLLAmF4_0\tbird\nhCkn4pJxSkk_0\tbear\nhCrrYhe3x9Q_0\ttrain\nhCsCAXkiQ4Y_2\ttrain\nhCynNRrrTKI_0\tcow\nhC5Wac-AzgM_1\telephant\nhC5Wac-AzgM_2\telephant\nhC5Wac-AzgM_3\telephant\nhC5augWtBcQ_1\tbicycle\nhDAv3aPvZjc_1\ttruck\nhDLAKS4hCfc_0\tcar\nhDM4sCvlRoA_1\tairplane\nhDVx_yYysaA_0\tcow\nhDXpSU7bq44_0\tbicycle\nhDYV-Vz3xwA_1\tdog\nhEAQZIsaIew_1\ttrain\nhERCXzHI2nA_1\telephant\nhERyFpl4aDk_0\tdog\nhEWJZ4dCcIY_2\tcow\nhEZt4InN7Eo_0\telephant\nhEZt4InN7Eo_1\telephant\nhEZt4InN7Eo_2\telephant\nhEdpC8HEa-A_0\tmotorcycle\nhE04tUrJzXo_0\ttruck\nhE7N0N5vik0_0\tbird\nhE7N0N5vik0_1\tbird\nhE-VIrAVcBA_2\tbus\nhFFrC0_rJYA_0\tairplane\nhFTTcrUxPeg_0\tcow\nhFTTcrUxPeg_3\tcow\nhFdi9yxVkys_0\tmotorcycle\nhFdi9yxVkys_1\tmotorcycle\nhFixbos35O4_0\ttruck\nhFnKIVp-Dcc_0\tcow\nhFnKIVp-Dcc_1\tcow\nhFzR4bgxihU_0\tbicycle\nhGH72iljdzU_0\tbear\nhGRdOlSIQRU_1\ttrain\nhGRdOlSIQRU_2\ttrain\nhGRdOlSIQRU_0\ttrain\nhGiCVP3Z8l0_0\tumbrella\nhG6vW_xUZgA_0\ttrain\nhG6vW_xUZgA_1\ttrain\nhG959XPTh_8_0\tbear\nhG-quo0MZM8_0\telephant\nhG-quo0MZM8_1\telephant\nhHEIEEdrXYE_0\tcow\nhHIyy4Vda6M_0\tcat\nhHjzciM78AA_0\tcow\nhHtOM5_wiWM_0\ttruck\nhHtqPiAg32Q_0\tumbrella\nhH_akvS98jo_0\tskateboard\nhIH6LuoXbpE_0\tcat\nhIXTbG6ho4E_0\tperson\nhIXTbG6ho4E_1\tperson\nhIXTbG6ho4E_2\tperson\nhIz3ONvP-Bo_0\tzebra\nhI3P4BxIr-o_0\tbear\nhI3eGFKYRuc_1\thorse\nhJP8qg-kSZA_0\tcow\nhJTl4NJ0qIs_0\tperson\nhJhBQsD0_hw_0\tbus\nhJkgoq_T4Pk_0\ttrain\nhJmxsYAKHdc_0\tumbrella\nhJtloiw4D-M_0\tcar\nhJ_uvoDrzkI_0\tgiraffe\nhKJQH8VbGk4_0\tairplane\nhKJQH8VbGk4_1\tairplane\nhKYJZqP-44M_0\tairplane\nhKYJZqP-44M_1\tairplane\nhKgtNPTirdc_2\telephant\nhKgtNPTirdc_3\telephant\nhKlKPyuUYps_0\tbus\nhKtHZYDaoXA_1\ttrain\nhKtHZYDaoXA_2\ttrain\nhKtHZYDaoXA_3\ttrain\nhKtHZYDaoXA_0\ttrain\nhK6w0B1cu-I_0\tcow\nhK7VoN3cI74_0\tcat\nhLGnjjoilbo_0\tskateboard\nhLHaPstpghQ_0\tmotorcycle\nhLKzDOp8XLc_
1\tzebra\nhLNcuJAwfDo_0\tcow\nhLVZsqfElxI_0\tdog\nhLX1LeVKgi8_0\tcat\nhLjDO37EQ60_2\tdog\nhLjDO37EQ60_1\tdog\nhLscdjfkeho_0\tcow\nhLte0Y4VWR0_0\tknife\nhL_QAgWBkJ4_0\tcow\nhL_noZA6D8E_0\ttruck\nhMGVdq71lME_1\thorse\nhMLkMrqUtA0_0\thorse\nhMRIDt-1dY4_0\ttrain\nhMgp2oyTB80_0\tcow\nhMjke9g_Ysw_0\thorse\nhMuO0MHPIOQ_0\telephant\nhMuO0MHPIOQ_1\telephant\nhMusKbJqZDY_0\tskateboard\nhNHGh8N1XGg_0\tknife\nhN_-56Oxma0_0\tdog\nhOJJ65CVNuM_0\tbird\nhOOwQSSrFVc_1\tcow\nhOid-qo2Ozw_0\tcow\nhOky3qIMxRY_0\tskateboard\nhOpJoO7UciM_1\tbicycle\nhOrAXl-jATo_0\tairplane\nhOxMkI1d3oc_1\tairplane\nhOxMkI1d3oc_3\tairplane\nhOxMkI1d3oc_4\tairplane\nhOxMkI1d3oc_6\tairplane\nhOxMkI1d3oc_7\tairplane\nhOxMkI1d3oc_9\tairplane\nhPEsz5u87CI_0\tbus\nhPIDFIwLI8c_0\tcar\nhPWhKQfDoXg_0\tairplane\nhPWhKQfDoXg_1\tairplane\nhPW2NpCU668_2\telephant\nhPW2NpCU668_0\telephant\nhPW2NpCU668_3\telephant\nhPW2NpCU668_5\telephant\nhPW2NpCU668_6\telephant\nhPW2NpCU668_7\telephant\nhPW2NpCU668_8\telephant\nhPa5hUze91s_0\telephant\nhPa5hUze91s_1\telephant\nhPb_Rq2yKRA_0\tcow\nhPo5Wd-otbY_0\tdog\nI0yz1LGLl08_0\telephant\nI1Ejpa2UWSk_1\tbird\nI1Pdo-p11tI_0\tmotorcycle\nI1Quuhyu2UI_1\tmotorcycle\nI1YfOiyQW_8_0\ttruck\nI1wfW86V8So_0\tdog\nI1wfW86V8So_2\tdog\nI14JWDgkllE_0\ttruck\nI14JWDgkllE_1\ttruck\nI19kQsgjFRA_0\tbicycle\nI2IfiPw2aKE_0\telephant\nI2OQlELjXvU_1\ttruck\nI2OQlELjXvU_2\ttruck\nI2OQlELjXvU_3\ttruck\nI2hmFe1pYes_0\thorse\nI2o_4OyrJlI_0\thorse\nI3DSZk-7nG8_0\ttrain\nI3JCCqGY3c8_0\ttruck\nI3KJj6GQ5QE_0\tcat\nI3OWw4AK0MI_0\tdog\nI330kG5lk5A_0\tknife\nI3-xBh-IrIo_0\tairplane\nI3_lU2I_AaU_0\tbicycle\nI4BMptNse7c_1\ttrain\nI4BMptNse7c_0\ttrain\nI4CMNv-VRDo_0\tbird\nI4Gi7kq5XAs_0\thorse\nI4HuQ8DDxoM_0\tskateboard\nI4WNAfBvm5E_1\tskateboard\nI4z-3IGHMW4_0\tdog\nI5KNdt1NT8g_0\tskateboard\nI5QNP3-QHLw_0\tcow\nI5SA8N1JKwM_0\tcat\nI5WNgPfoaZQ_2\tmotorcycle\nI5pU9zWz4Fg_0\tmotorcycle\nI6fJWB7DpAM_0\tbus\nI6oT6dLeq7A_0\tmotorcycle\nI6wEvIOC-Pk_0\ttrain\nI7GbkWE2A0M_0\tbus\nI7aUrrDieE4_0\tcow\nI7bKlZxD6Fs_0\tbicycle\nI7xOURJQUps_0\ttrain\nI7xOURJQUps_1\ttrain\nI7x_od8h4iw_0\tcow\nI7-iLB-NVGg_0\tdog\nI8FoWQrnHGY_0\tbird\nI8Ms0rXjfXU_0\tskateboard\nI8Ms0rXjfXU_1\tskateboard\nI8Ms0rXjfXU_2\tskateboard\nI8Qx-qd0eLg_0\tboat\nI8Qx-qd0eLg_1\tboat\nI8UlumMtAG8_0\thorse\nI8Vr0DzHV9U_0\tcow\nI8rww3UUjYI_0\tperson\nI9AGRokco_M_0\ttrain\nI9FPkgdc-5E_1\tcow\nI9XcFcBW-HM_0\tmotorcycle\nI9oAq_x5pqg_0\tbus\nI9yrFs_JpWc_1\tskateboard\nI94qZUJmKP8_1\tbicycle\nI94qZUJmKP8_2\tbicycle\nI-SRTsDkhLM_0\tcow\nI-TshjRdh74_1\tknife\nI-blRAakQjM_0\tboat\nI-h3cTJlsRc_0\tdog\nI-nb60BTO_g_0\ttrain\nI-raj-aLy8s_8\thorse\nI-ywD5MDZZ4_3\tcow\nI-ywD5MDZZ4_4\tcow\nI_LhSNsRHMs_0\telephant\nI_kI39ZHymk_0\thorse\nJAEzOCIew2Q_0\tairplane\nJAEzOCIew2Q_1\tairplane\nJAb3p7VYLzI_0\tbear\nJAb3p7VYLzI_1\tbear\nJAcHxxzG1vA_0\tmotorcycle\nJAf3nC1hYS4_0\tdog\nJAp2_UJfFao_0\tperson\nJAqAH7n-3lA_0\tbus\nJAzD-VzDxfc_2\tbicycle\nJAzD-VzDxfc_4\tbicycle\nJAzD-VzDxfc_5\tbicycle\nJAzD-VzDxfc_8\tbicycle\nJAzD-VzDxfc_11\tbicycle\nJAzD-VzDxfc_13\tbicycle\nJAzD-VzDxfc_17\tbicycle\nJAzD-VzDxfc_18\tbicycle\nJAzD-VzDxfc_19\tbicycle\nJA2PLZmRABc_1\tumbrella\nJBGewEMeWIs_1\tdog\nJBGewEMeWIs_5\tdog\nJBKG_tl08RU_0\tcow\nJBMhOrDLcho_0\tcat\nJBYr3VbJLoM_0\tperson\nJBkymGnh5mA_1\tbicycle\nJBkymGnh5mA_2\tbicycle\nJBkymGnh5mA_3\tbicycle\nJBkymGnh5mA_4\tbicycle\nJBlCFCV4sdw_0\thorse\nJBlCFCV4sdw_1\thorse\nJBxFgwl0To8_0\tcow\nJB0SELYSRXA_1\tbear\nJB-hzl-gILo_2\ttruck\nJCIJbwBevro_2\tbird\nJCSRBZQpYCw_1\tbear\nJCSRBZQpYCw_5\tbear\nJCTYAwT6ppk_0\tmotorcycle\nJCTYAwT6ppk_1\tmotorcycle\nJCTYAwT6ppk_2\tmotorcycle\nJCciDn0O
6X0_0\tairplane\nJChsfz-p2KI_0\tcat\nJCuE5X37xIE_3\tboat\nJCuE5X37xIE_4\tboat\nJDJWapHD_kM_0\tboat\nhP8Jfo1RaSk_0\telephant\nhP8Jfo1RaSk_1\telephant\nhP8Jfo1RaSk_2\telephant\nhQWcyTkfPeU_1\tdog\nhQZDg__nxQA_4\tbear\nhQZ5lNlAXBI_0\ttruck\nhQe3_1EvqIY_0\tcow\nhQfYabI9_ec_0\tbird\nhQkbXGwGwyg_0\tskateboard\nhQve0ugvy6s_0\tmotorcycle\nhRAbtgVJiWI_0\tbear\nhRJ0Qk_qdAY_0\tairplane\nhRS45wmOq9c_0\telephant\nhSR-ZVA-vMU_0\tdog\nhSWyYOzvh0g_0\tdog\nhSf3uEm8r9M_0\tbus\nhShwtMLieCc_0\tboat\nhSiozs1nz7o_1\tmotorcycle\nhSzgOCvRfq4_0\tbear\nhS-h8AUEibc_0\tcow\nhS-h8AUEibc_1\tcow\nhS-h8AUEibc_2\tcow\nhTHBMsKC5ZI_0\tcat\nhTZr7OF0VuY_5\tdog\nhUJMSp4rMrc_0\ttrain\nhU0EbblT2vQ_2\tairplane\nhU388mZGPGg_0\tcat\nhU9B31AVZNg_0\tbus\nhVJjOdU5-yQ_0\tcar\nhVNKN_qFEUA_0\tbicycle\nhVOImOLBY1g_0\tskateboard\nhVdb-Q3aJ9E_0\tdog\nhVhNOzZA40E_0\tcat\nhVnD8rlLRgM_0\tbird\nhVq6NOrBwlM_1\tmotorcycle\nhVsAAQqAHyI_1\tskateboard\nhWHUct-PLfY_1\tmotorcycle\nhWHUct-PLfY_0\tmotorcycle\nhWNyVxx4a94_0\tcat\nhWn0ddeHF0I_2\tzebra\nhXAQH1xVKB8_0\tcow\nhXWQ710-JZQ_0\tmotorcycle\nhXagj4A6N-s_1\telephant\nhXbMo03RQWk_0\ttrain\nhXflTk4WVAA_1\tbear\nhXf7dimd2bo_2\tcat\nhXhtGcCMf5Q_0\tairplane\nhXsCNMb3eTc_0\tbicycle\nhYD7HKMKa3k_0\telephant\nhYFW5XhMxyg_1\tknife\nhYIPy3eyC9k_0\tcat\nhYQBaiC8d6Y_0\thorse\nhYTIV5X87S4_0\thorse\nhYgzs0gDiiU_0\telephant\nhYkPL7spYMo_1\telephant\nhYlmhAuVVh8_0\tbird\nhYtFyx0799o_0\tboat\nhY0vkwEtjLM_1\tbear\nhZAOhuPJTho_0\thorse\nhZAXlQqCmCI_2\ttrain\nhZCGOP3PHOM_2\tknife\nhZHjTTvcQ88_2\tbicycle\nhZOhuOcxTP8_0\tskateboard\nhZPYHGzIYh0_0\tcow\nhZeekc0i_b8_0\tmotorcycle\nhZiXqP-WaQk_3\tbird\nhZiXqP-WaQk_0\tbird\nhZiXqP-WaQk_1\tbird\nhZiXqP-WaQk_2\tbird\nhZygBhv-nDg_0\tmotorcycle\nhaC0TZbvBEU_0\tcat\nhaMtzn-TnOQ_0\tboat\nhaTl-PeSssc_0\tdog\nhakWXvIYvzo_1\tdog\nhanKUxPHFbA_1\tcar\nhanKUxPHFbA_0\tcar\nhaxabA27SnU_0\thorse\nha3C2hPzaiw_0\tdog\nha8hX-68TqI_0\tbear\nha8hX-68TqI_2\tbear\nhbKjt5OBryI_0\ttruck\nhbKjt5OBryI_2\ttruck\nhbKjt5OBryI_1\ttruck\nhbfiyMHycSs_3\tknife\nhbvJ3t9lpUo_0\ttruck\nhcXtsyICD30_1\tskateboard\nhcpMT5qGQ0U_0\tbus\nhdZkNo0t6wg_0\tboat\nhdbKePdCemQ_0\tcow\nhdqiOcfXejc_1\tzebra\nhdwZF4C-vYs_0\tcow\nhd_yXL53Z9E_0\telephant\nheQRV9di86s_0\ttrain\nheTgOW6o1ho_0\tzebra\nhedgcDGNngs_0\tbicycle\nheucaATRtbI_0\tcat\nhe_j-GZdCNs_0\tperson\nhfCbKe627p0_0\tairplane\nhfEl_mnX9X4_0\tskateboard\nhfGEkaEADUw_0\tmotorcycle\nhfGEkaEADUw_1\tmotorcycle\nhfGEkaEADUw_2\tmotorcycle\nhfcKFLBuJ_g_1\tdog\nJDcAM9ieTp8_0\tbicycle\nJDe9ulv2Nmo_0\telephant\nJD_njBej6V0_0\ttruck\nJD_njBej6V0_2\ttruck\nJEU2rZzAxRU_0\tskateboard\nJEbIHUJTFsM_0\tairplane\nJEdl8GROiQM_0\ttruck\nJExlAUEYZwc_0\tcat\nJE8SV6FOlC0_0\ttruck\nJFH3n9kI6aA_0\tboat\nJFO_Qz1y8-s_4\telephant\nJFQ_GztsLs0_0\tcow\nJFQ_GztsLs0_3\tcow\nJFZG_ebR2mk_0\telephant\nJFZpmduYfv4_0\tmotorcycle\nJFfYNQ2FmHU_0\tcow\nJFk4Qyn58CY_0\ttrain\nJFvQ7wc6c0o_0\tairplane\nJGDf9kSc-v4_13\tdog\nJGDf9kSc-v4_15\tdog\nJGDf9kSc-v4_17\tdog\nJGDf9kSc-v4_19\tdog\nJGDf9kSc-v4_1\tdog\nJGDf9kSc-v4_2\tdog\nJGDf9kSc-v4_6\tdog\nJGGj1z6Kujc_0\tdog\nJGGj1z6Kujc_1\tdog\nJGMfEFj5PVM_1\ttruck\nJGWBjvjqVhw_4\tskateboard\nJGanm9yGTJk_0\ttruck\nJGanm9yGTJk_1\ttruck\nJGanm9yGTJk_2\ttruck\nJGmHpQtJzic_0\thorse\nJGn6Ifa5bWI_0\tbird\nJG0B4rV4KEI_0\tdog\nJG6H3R9rErg_8\tairplane\nJG6H3R9rErg_0\tairplane\nJG6H3R9rErg_1\tairplane\nJG6H3R9rErg_2\tairplane\nJG6H3R9rErg_3\tairplane\nJG6H3R9rErg_4\tairplane\nJG6H3R9rErg_5\tairplane\nJG6H3R9rErg_7\tairplane\nJG6sceNvlnI_3\tboat\nJG6sceNvlnI_2\tboat\nJG872iaucFc_0\tumbrella\nJHBhDpq4HNs_0\tcat\nJHBtawKoltc_0\tcar\nJHTt9PSzrhU_0\telephant\nJHb8IVsjgMs_1\tbus\nJHdc9jvf
4qA_0\tmotorcycle\nJHmG34eTWow_0\ttrain\nJHr57YE7IRs_1\tairplane\nJHy85i0So5U_1\tdog\nJH0Jzb0wOXw_3\telephant\nJH0Jzb0wOXw_4\telephant\nJH0Jzb0wOXw_2\telephant\nJISA50Bfj4U_0\tboat\nJIamGji7w9U_3\tbird\nJIiA0pG-MKk_0\tskateboard\nJI6MyG7aTvM_0\tbird\nJJSp2fu3lk8_4\tdog\nJJSp2fu3lk8_3\tdog\nJJq7YAYUado_0\tumbrella\nJJx7GdAuDQY_0\tskateboard\nJJyJR7TlQ7o_0\tmotorcycle\nJJ0Ja1ju2ec_1\thorse\nJJ0NBly53IU_0\tcat\nJJ8Vv2hiCCA_0\tcow\nJJ8Vv2hiCCA_1\tcow\nJKBJuICyV50_1\ttrain\nJKBJuICyV50_0\ttrain\nJKCFS8k_Qis_3\tbus\nJKGV5hbm5g8_0\tskateboard\nJKJQPHspLBs_0\tbird\nJKJQPHspLBs_1\tbird\nJKNRKGSvtsQ_0\telephant\nJKYPluJPL7c_0\tdog\nJKa7rPKrAwY_0\ttrain\nJKgPYc0K_hI_4\tcar\nJKgPYc0K_hI_1\tcar\nJKuhG9WLM2k_0\tairplane\nJK42K36SYLs_0\tbird\nJLE_jNuNoA0_0\tcow\nJLHP-3UxtMU_0\tboat\nJLb2dnuNhqs_0\tbus\nJLoS7DZH_ik_2\tairplane\nJLoS7DZH_ik_1\tairplane\nJLsEcZUU7FM_2\ttruck\nJL64rU6Jvmw_1\tgiraffe\nJL71b_9Cy9I_1\tumbrella\nJMDFSes_w0E_0\tcow\nJMMmrEdfRbk_0\tboat\nJMPKtdq9b0Y_0\ttrain\nJMR4IvE2sDo_0\tbus\nJMaahZTxRLk_6\tboat\nJMgbgNPBIJI_0\tbird\nJMnp6FLLbtw_0\thorse\nJMnp6FLLbtw_4\thorse\nJM1jSU4FEPw_2\tairplane\nJM4yr2pj-zg_0\tairplane\nJNDZBgXZBU8_0\tknife\nJNDZBgXZBU8_3\tknife\nJNDdt_ZPl1s_3\telephant\nJNNbk6jVfB4_0\tcat\nJNZDx8Ro_mM_0\ttruck\nJNe7ZednqQc_2\thorse\nJNkz_3Qtdfc_0\thorse\nJNnnm9ixKrM_3\tcar\nJNnnm9ixKrM_4\tcar\nJNnnm9ixKrM_5\tcar\nJNpuJeqVFxk_0\tmotorcycle\nJONF8-3gEoY_0\tgiraffe\nJONF8-3gEoY_1\tgiraffe\nJONF8-3gEoY_2\tgiraffe\nJObYghNlZas_6\ttrain\nJObYghNlZas_7\ttrain\nJOmeD6G33Dc_1\thorse\nJOoNVY1C6qI_0\ttrain\nJOoNVY1C6qI_2\ttrain\nJOqHfu-WVu8_2\thorse\nJOuB1UkVvKI_0\tairplane\nJOue8LphKc4_0\ttruck\nJOztmtwKz-k_0\tcow\nJPAjGBsi-rE_0\tbird\nJPMFXg-BXDE_2\tcar\nJPTFJk9f2nM_0\tdog\nJPevMGnX92M_0\tairplane\nJPiSmPAIpOI_0\tknife\nJPlZOEew4wg_0\telephant\nJPuDmwlAXzI_0\tskateboard\nJPwUpTvlZDA_0\tperson\nJPwUpTvlZDA_3\thorse\nJPw4R6t-0j4_1\tbird\nJQDX7gVR0qM_2\tknife\nJQDX7gVR0qM_0\tknife\nJQKDDMvCtt8_1\thorse\nJQNsIqNLn40_0\ttruck\nJQRxu6RVGMg_0\tcar\nhfwhbInEJAk_3\ttrain\nhfwhbInEJAk_2\ttrain\nhgFfz_RTcx4_0\ttruck\nhgFfz_RTcx4_1\ttruck\nhgxvhMjH_68_0\tmotorcycle\nhg6Z6JIwRMU_0\telephant\nhg6Z6JIwRMU_1\telephant\nhhM2TSF2GhA_1\thorse\nhhNlkY3SS6w_1\tbus\nhhYOJb0v5Yw_0\tcat\nhhlt4dfZmFE_0\thorse\nhhyzKC353Jo_1\tcar\nhiJ-OdPj_8c_0\tbird\nhiPDdAi1Qs8_0\tmotorcycle\nhiUH1zOfsfo_0\tcat\nhiZLv2E5zI8_0\telephant\nhjBLAHakI9c_0\tboat\nhjRlztwK-vg_2\tbicycle\nhjhbMbrRUWI_0\ttruck\nhj2P25O-nIk_0\tskateboard\nhkR10EU8YPI_0\ttrain\nhk0cDE4A_b0_0\tboat\nhk7M3PGcOhw_0\ttrain\nhk-IVoljyKE_0\telephant\nhk-IVoljyKE_1\telephant\nhlFPCpe8Akk_0\tairplane\nhlLrYrrOcY4_0\tdog\nhlNOQO4BIHg_0\ttrain\nhlnNVsSGjxA_3\tcar\nhlnNVsSGjxA_1\tcar\nhl4yLAJiWjQ_0\telephant\nhl7z1gnPPW0_0\tknife\nhl_YHwW5mrM_1\tbird\nhl_YHwW5mrM_0\tbird\nhmThCl2HK8E_0\tskateboard\nhmThCl2HK8E_1\tskateboard\nhmdH0Olcbx4_0\tbicycle\nhm98pilx9dE_5\thorse\nhm98pilx9dE_1\thorse\nhm98pilx9dE_2\thorse\nhm98pilx9dE_3\thorse\nhm98pilx9dE_4\thorse\nhnJ2wDmXD6w_1\tbicycle\nhnbZY12P-7g_1\telephant\nhne72NMSPuc_0\tbird\nhnffUBbBFoQ_1\thorse\nhnrSBT9miTE_1\tbird\nhnvbE27mWwI_2\ttrain\nhnvbE27mWwI_0\ttrain\nhn19XaR_wIs_0\tknife\nhn7ollCkAy4_5\tbicycle\nhn-1W1O8kZs_0\tboat\nhoLnPrkJ6sE_0\thorse\nhoLnPrkJ6sE_3\thorse\nhoNPAcq_5Ac_1\tbird\nhoNPAcq_5Ac_0\tbird\nhoYDTU50MTk_0\tcow\nhoe88GhFhq0_0\ttruck\nhomQXuwbe04_0\tcow\nhomx5sSuNr4_2\tbear\nhoozxxjd57c_1\tbus\nhotrXXenVAk_0\tcat\nho5YZstr1XE_1\tcow\nho7yo7nJk3o_1\telephant\nhpG2eG_hduA_0\tmotorcycle\nhpRxBuFhZ4M_0\ttrain\nhpRxBuFhZ4M_1\ttrain\nhpRxBuFhZ4M_2\ttrain\nhpRxBuFhZ4M_4\ttrain\nhpkXlhfYZfw_2\tmotorc
ycle\nhpkXlhfYZfw_1\tmotorcycle\nhpmC3OjLnZM_2\tboat\nhpmC3OjLnZM_0\tboat\nhpo-lwBTbFw_1\tdog\nhp3aTxzS9ms_0\tskateboard\nhqGhmP1u07Y_0\telephant\nhqoQm68UbGo_3\tairplane\nhqoQm68UbGo_2\tairplane\nhqsoIR9v8IY_0\tmotorcycle\nhq7f1_o4eFg_0\tairplane\nhrLkVz3_xGw_2\tbus\nhrW-pkK9osE_2\tbicycle\nhrW-pkK9osE_3\tbicycle\nhrgh69NXZqw_0\tcow\nhrj6I8n8nAc_0\tbicycle\nhrj6I8n8nAc_1\tbicycle\nhrrpTPwLZHA_0\tbird\nhrtiCeqnqLg_0\tcow\nhrziTee4b2c_0\tairplane\nhr5Q08OMeAU_0\ttrain\nhr7wUBMikww_0\tzebra\nhr7wUBMikww_1\tzebra\nhsMptx7tOLo_0\telephant\nhsMptx7tOLo_1\telephant\nhsMptx7tOLo_2\telephant\nhsM1eKbrqLs_0\tcat\nhsPK4wlNtI8_0\tcow\nhsYL355Fzio_0\ttruck\nhsfS5oT1y5M_2\tboat\nhskEM8GUmDE_2\ttrain\nhsmxUKxzapo_2\tskateboard\nhsmxUKxzapo_0\tskateboard\nhsyCfsJx7DI_2\tskateboard\nhsyCfsJx7DI_1\tskateboard\nhs2foQ_Xo8A_0\tskateboard\nhs-OEgnsLZs_0\ttrain\nhtDilkoPA-M_0\tairplane\nhtSBZwTBX98_0\thorse\nhteze9Fz1dc_0\tknife\nhtkybhLm0uk_0\tumbrella\nhtwBHgatd9c_2\thorse\nhtwBHgatd9c_3\thorse\nhtwBHgatd9c_0\thorse\nhuCxpuVT4GI_0\tdog\nhuDCqh-KRy4_8\tbicycle\nhuDCqh-KRy4_2\tbicycle\nhuDCqh-KRy4_3\tbicycle\nhuDCqh-KRy4_4\tbicycle\nJQnf7j7HpKY_0\tcow\nJQpJv-SOMS0_0\tdog\nJQ9LtiJVsd8_0\tcat\nJQ_dyIlBnGM_0\tcow\nJQ_6xcOuEfU_4\tcow\nJQ_6xcOuEfU_1\tcow\nJRA3LCwRGu0_0\tknife\nJRBLFsevgg0_0\ttrain\nJRJjI6mFa6s_1\tskateboard\nJRJnSf2qOXA_0\tairplane\nJRT0FH2KEsc_0\tcow\nJRcTFvzRC10_0\tbird\nJRcTFvzRC10_1\tbird\nJRsNcoTJJjE_0\tcat\nJRsn1likB7c_0\tboat\nJRyc_lxMJzs_0\tskateboard\nJR6JAx7xdGg_0\tcat\nJSA0JWvQbJg_2\ttrain\nJSdEdTcUHHI_0\tknife\nJSfXE4ExZ1U_0\tbird\nJSfXE4ExZ1U_2\tbird\nJSs6Sa8zR6c_0\thorse\nJS2cbpFwahY_0\tskateboard\nJTE0ABGzb30_1\tskateboard\nJTE0ABGzb30_2\tskateboard\nJTJgZcBM93k_1\tknife\nJTa9HkbXfSw_0\tcow\nJThBohLxRSc_0\tcow\nJTi4Oy6v9mM_1\thorse\nJTi4Oy6v9mM_2\thorse\nJTtjfwrK4Ls_0\tdog\nJT5zUQio3B0_0\tbus\nJUHMTmjUswE_0\tknife\nJUVHXeFTe3Q_0\thorse\nJUVHXeFTe3Q_3\thorse\nJUbPqBVbGQQ_1\ttruck\nJUpxTW6_BAI_0\tcow\nJUtd4FLjXio_0\thorse\nJU1N1nqXjII_0\ttrain\nJVKkxo7adX8_1\tknife\nJVQ6Gx2hGxs_0\tairplane\nJVTIzApj2UA_0\tgiraffe\nJVVtcOIACz0_0\tgiraffe\nJVg62b0T408_2\ttrain\nJVg62b0T408_0\ttrain\nJV2A3zWMRj8_0\tumbrella\nJV3Tbp30yp4_2\tmotorcycle\nJV3Tbp30yp4_1\tmotorcycle\nJV3Tbp30yp4_3\tmotorcycle\nJV-OfjEsQDs_0\tumbrella\nJWKZlCk_cts_0\ttrain\nJWXSXvHgoo4_0\tcar\nJXEyPb4Nzro_0\tskateboard\nJXP_CNg8grg_0\tcat\nJXi5KrVPz0M_1\tbird\nJXj_lj5QUp8_0\tperson\nJXmBBTT0YXQ_0\tcat\nJXobiO1_7Ts_0\ttrain\nJXwfPpl53Fs_0\tdog\nJYYAwimr2XQ_0\ttruck\nJYi7bWDL5os_0\tperson\nJYsWtLH_mjM_0\tbus\nJYsWtLH_mjM_1\tbus\nJYsWtLH_mjM_2\tbus\nJYvBo5FwjSg_0\telephant\nJYvBo5FwjSg_2\telephant\nJY2d1dohCDs_0\telephant\nJY3rSX-blgA_0\tcow\nJZBJ35lKlXw_0\ttruck\nJZOZuTiifHM_2\tboat\nJZXr-dGLkpU_0\tboat\nJZcy1T--d4M_0\tskateboard\nJZ_ri3awsso_0\tcat\nJaI9UR2n7ZE_0\thorse\nJaLswoS3xO8_0\tknife\nJaumrq8clZY_0\ttruck\nJa9rAQpB2_M_0\tcat\nJa_ofQ1ynAc_1\tairplane\nJa_ofQ1ynAc_2\tairplane\nJa_ofQ1ynAc_4\tairplane\nJbA11YWHpW0_1\tskateboard\nJbBxvvoOvpg_0\tbear\nJbK17NE3dvk_1\ttrain\nJbK17NE3dvk_0\ttrain\nJbK17NE3dvk_2\ttrain\nJbK17NE3dvk_3\ttrain\nJbPP4AwiNEc_0\tcat\nJbSkoHG6Vq4_0\tairplane\nJbfzd9wIyi4_0\tcat\nJbw0KUJqWpE_0\ttrain\nJb03yqEB5WI_1\tbus\nJb03yqEB5WI_4\tbus\nJb5lFDvKqXA_0\tbus\nJb6FIuynIuw_0\tbicycle\nJb-q7z_Mygg_0\ttruck\nJcJKjdDKuc4_0\ttrain\nJcRvhoBwgNg_0\tcow\nJcU-cdQmKV8_3\tbus\nJcU-cdQmKV8_1\tbus\nJcixSQRUnY4_1\telephant\nJcmTLrQZ7sE_1\tcow\nJcmTLrQZ7sE_0\tcow\nJcwl0kCsUTw_0\tumbrella\nJc5PS0Ejejw_1\telephant\nJc8eE1ayaX8_0\tcow\nJc9PdqC1rpg_0\ttrain\nJdUehtxAfys_1\tbicycle\nJdUehtxAfys_7\tbicycle\nJdwSAFvKg74_0\tcar\
nJeAykU3MiKg_2\tairplane\nJeET8zb_gPQ_4\tknife\nJeNu9WVQOHY_4\tbicycle\nJeNu9WVQOHY_1\tbicycle\nJeNu9WVQOHY_7\tbicycle\nJeYCd0VP5EY_0\thorse\nJeb4SSyyZD8_0\tdog\nJe_fuH6-34I_0\tskateboard\nhujF3CEgAXI_0\tskateboard\nhulFEZUNu10_0\ttrain\nhutTW7ORN8g_0\tbicycle\nhutTW7ORN8g_1\tbicycle\nhuy9NXPynro_0\tcat\nhu6nRmzUcAw_0\ttrain\nhvWHb1kiV5g_0\tdog\nhvWs1FhyQlw_0\tumbrella\nhvhWoRQZMUU_0\tcat\nhvjNVTle8bQ_6\tairplane\nhvjNVTle8bQ_0\tairplane\nhvjNVTle8bQ_1\tairplane\nhvjNVTle8bQ_2\tairplane\nhvjNVTle8bQ_3\tairplane\nhvjNVTle8bQ_4\tairplane\nhvjNVTle8bQ_5\tairplane\nhvkIo-dZUUY_1\tbird\nhvlXyPikLUY_0\tbus\nhv49V2RzgHw_0\thorse\nhv7b1I-cRvI_0\ttruck\nhwOL2G-Lo54_0\tumbrella\nhwPkgOB1mEU_0\tcow\nhwTVAkfjjCY_0\tcat\nhwikEC2Jc0c_1\thorse\nhxC7dFDqfXo_0\tcar\nhxUn2A7Ko2g_0\tcow\nhyMlfx_ZEeI_0\ttrain\nhyMlfx_ZEeI_1\ttrain\nhyX6rKHZcLs_0\tperson\nhyb_qBoKG9Y_0\ttrain\nhyjjdUcyanE_1\tdog\nhyj8BJ_PMgQ_2\telephant\nhyrBL1wMHts_1\ttruck\nhy9Ml-3zAtM_2\tknife\nhy9jrpamopE_0\tumbrella\nhzBqPVIC7IQ_0\ttrain\nhzUTA7mGyKE_0\tbicycle\nhzeHyMcUmO4_0\tmotorcycle\nhzeHyMcUmO4_1\tmotorcycle\nhzz9JBRYjFs_0\tbicycle\nhzz9JBRYjFs_1\tbicycle\nhz5anqtArdI_0\ttrain\nhz5anqtArdI_1\ttrain\nhz7PXI6R6DI_0\ttrain\nh0IiMbTwz1Q_0\ttruck\nh0IiMbTwz1Q_1\ttruck\nh0hIpf9O0Vg_0\tbus\nh1MxYGy1SBc_0\tdog\nh1XtVmXF7CQ_1\telephant\nh19z0Ap_5Pc_0\tbus\nh2R46pcCEVg_0\tcow\nh2SNrfK0yQQ_2\tbus\nh2X0to3hDA4_0\tbicycle\nh2b9t_pnnNA_0\tcow\nh22FyeO_lyE_0\tumbrella\nh23R8X1WKjU_1\thorse\nh24uuiI34yI_0\tskateboard\nh27DK_oMwYY_0\tdog\nh3FnAKBB9Xc_1\telephant\nh3Lz61ficjc_2\tmotorcycle\nh3aEao1bRIY_0\tcat\nh3aZGHTjBwc_0\telephant\nh3o5ZykGOxI_4\telephant\nh3o5ZykGOxI_2\telephant\nh3o5ZykGOxI_3\telephant\nh3qOwaRYAi8_1\tbear\nh3uPELFKoCc_3\tknife\nh3uR99WtOh4_4\tbear\nh3_cWsxi4Qw_1\tskateboard\nh4CySJb83XI_2\telephant\nh4KXG16xA_Y_0\tdog\nh4LE2YVwHL0_0\tmotorcycle\nh4jU8ZrDZd8_0\tskateboard\nh4kmvN6NmyA_3\ttrain\nh4kmvN6NmyA_2\ttrain\nh4wsDcj7kcE_0\tcow\nh45-zE2gKFA_2\tperson\nh45-zE2gKFA_3\telephant\nh47dExP6oXQ_0\telephant\nh5C2RKknWfg_3\tbicycle\nh5C2RKknWfg_5\tbicycle\nh5C2RKknWfg_6\tbicycle\nh5KSLdybLIE_5\tbicycle\nh5KSLdybLIE_1\tbicycle\nh5KSLdybLIE_3\tbicycle\nh5dsU3N4joc_0\tcow\nh5hkvWWp7Qg_0\tknife\nh55Exp2rpSM_0\tknife\nh6FtP-5VnYM_2\tcow\nh6FtP-5VnYM_1\tcow\nh6McnZDPX3I_12\telephant\nh6McnZDPX3I_1\telephant\nh6McnZDPX3I_2\telephant\nh6McnZDPX3I_6\telephant\nh6McnZDPX3I_7\telephant\nh6McnZDPX3I_9\telephant\nh6McnZDPX3I_10\telephant\nh6Mvzt5e_eE_0\thorse\nh6jGPQLkE48_0\tperson\nh6ztcoDHYaY_0\tcat\nh62bO9Mfl9Y_0\tcat\nh64dmoPNWw0_0\tcar\nh7OZUnDKWbA_0\ttruck\nh7cXxMNxlcY_0\thorse\nh7uwd7opKjI_0\tmotorcycle\nh7uwd7opKjI_1\tmotorcycle\nh8BDqFH8e_w_0\ttrain\nh8BDqFH8e_w_1\ttrain\nh8BDqFH8e_w_2\ttrain\nh8EHrA_OM7c_0\tperson\nh8LiHNo4904_4\tairplane\nh8LiHNo4904_5\tairplane\nh8LiHNo4904_6\tairplane\nJfb3XGdt6VE_0\tcat\nJfdoYsRxF5k_2\tknife\nJfnHVMyUT0E_4\tbicycle\nJfqHeWyD5DQ_0\tskateboard\nJgLXpgcnjAA_0\tcow\nJgQbvDmM2Nk_0\tbird\nJggJWWHhlc4_0\tumbrella\nJg8FXSKMvTQ_1\telephant\nJhDNC6XRVG8_0\tcow\nJhDNC6XRVG8_1\tcow\nJhFvJHfP_NY_0\tcar\nJhPLC0PS9I0_0\tknife\nJh87zKRgN68_2\tboat\nJiMyZFGmGgM_0\tdog\nJifa2spqYV8_0\tairplane\nJijtEhm-Dk8_0\tbus\nJikSLpJ2xKw_0\tcow\nJinIHVE4_MI_1\tbear\nJioS9DumyIM_1\tcar\nJixd9HKGzWA_0\ttrain\nJi6bpPIPScI_0\tumbrella\nJjIvWQ-198c_0\tknife\nJja500M50Yw_0\tcow\nJja500M50Yw_1\tcow\nJj4KvC3TXro_0\tcar\nJj4KvC3TXro_1\tcar\nJkC1Udoysk8_1\tcat\nJkC4nV8LcTE_1\tbicycle\nJkH8ZtuvzDQ_0\tdog\nJkpQkpiRpVI_0\tbird\nJkzNUiOu1GI_0\tbus\nJk28bpr063o_4\tairplane\nJk28bpr063o_0\tairplane\nJlJQlaoy3ec_0\tcat\nJlrPaJIAP9k_1\thorse\nJ
luvPpeI2DY_0\ttrain\nJluvPpeI2DY_1\ttrain\nJlzsUphxgIY_0\ttruck\nJl1bEdoRG9I_0\tcow\nJl6gTtZcQH0_3\thorse\nJl6gTtZcQH0_0\thorse\nJl6gTtZcQH0_2\thorse\nJmblo1iMURo_0\tmotorcycle\nJmdMhGsyZvk_0\tboat\nJmvNubLPYGo_0\tbird\nJmxixgKAKzc_0\ttruck\nJm0S-kE2yVc_0\ttruck\nJm3dtu8GTos_0\tdog\nJnAaSoaN3FI_4\tboat\nJnHUNCeHEDc_0\tbird\nJnMkFSGB6Vw_0\ttruck\nJnXmNI53DWE_0\tperson\nJnrrNu9udj0_0\tbear\nJnvIx5y-ijs_1\tumbrella\nJnysuevt_4A_0\ttrain\nJn1gvGhxU5U_0\tbear\nJocAgPv-ZJo_0\tskateboard\nJohmecnKktI_0\tboat\nJopGEGMo-DQ_0\tdog\nJo50LBwjHIk_0\tbicycle\nJo50LBwjHIk_2\tbicycle\nJpDOBaNBwkc_0\ttruck\nJpFiApmpoHA_0\tcow\nJpL4Mv-uFi4_1\tdog\nJpRMc6MtCH8_0\ttruck\nJpWh1yQThRo_0\ttrain\nJpZwF6hOCDg_1\ttruck\nJpjAxQ_vsZw_7\tbicycle\nJpjAxQ_vsZw_1\tbicycle\nJpsOsewgXAg_1\tbird\nJpuCWzsE35k_1\tbird\nJp0GKZ9vA0c_0\tairplane\nJp1tvS1y4eI_0\tboat\nJqCaTxH5Ovk_0\tmotorcycle\nJqC81ViWFeE_0\tbear\nJqPkaGRIz6c_2\telephant\nJqT_Bx4fd1Q_0\tcow\nJqauh1bsJy4_0\tbear\nJq2ml2xQkHg_0\tcat\nJq8D628IlV8_1\tskateboard\nJq8D628IlV8_2\tskateboard\nJq8OMvgG6wc_0\tcow\nJrAvVMnkKEo_3\tbear\nJrKxxhHGR7E_0\tgiraffe\nJrZTstVj2wg_0\thorse\nJrbrXXDuxnc_0\thorse\nJrmyPAW-ItI_0\tdog\nJsNQXxg1PvE_0\tperson\nJsPtP21j3f8_3\tbear\nJsPtP21j3f8_1\tbear\nJscnB4QfAhY_0\ttrain\nJsiSPt3nv1Y_0\tcow\nJsiSPt3nv1Y_2\tcow\nJs2ZDfWZWtc_0\tcat\nJs69iFgcic0_2\tbus\nJtMMD0aJnPI_0\ttrain\nJtMMD0aJnPI_1\ttrain\nJtQzeWNt8IA_0\tumbrella\nJtQzeWNt8IA_2\tumbrella\nJtfp49L4LHg_0\ttrain\nJt1zVsUQGhI_2\telephant\nJt1zVsUQGhI_3\telephant\nJt8ikZGW768_0\tbicycle\nJuGusvu6Z7o_0\tskateboard\nJuKJKHykMKM_0\thorse\nJuKgukJ63eM_4\tskateboard\nJuME8_jaVdE_2\tcar\nJuME8_jaVdE_3\tcar\nJuMNRsOc0nU_1\tcat\nJuMNRsOc0nU_0\tcat\nJuNubQtCvrU_0\tbird\nJuNubQtCvrU_1\tbird\nJuO7qvp2GBs_0\tknife\nJuXqLoCgK4o_0\tbear\nh8OcTR0Z4yo_1\tairplane\nh8OcTR0Z4yo_2\tairplane\nh8OiIYhIPTs_2\ttrain\nh8PJps4Sj1E_0\tairplane\nh8PmDAKiKVc_0\tdog\nh8oTFl4XWTc_0\tbus\nh8ysn_L9udY_0\ttrain\nh8ysn_L9udY_1\ttrain\nh9FtsOFR3p8_0\tcat\nh9veoEpzRH8_0\tcow\nh9w20ChZ_7Y_0\tbicycle\nh9w20ChZ_7Y_1\tbicycle\nh96rR-VkJZA_1\tbear\nh96rR-VkJZA_2\tbear\nh966cxQyjvc_1\tairplane\nh-PS5v6ZTBY_0\ttruck\nh-VSmS49g5M_0\tskateboard\nh-npKkPbHSA_0\tboat\nh-qRpUteJV4_0\tbird\nh-vGllteZnI_0\ttrain\nh-1NdCqoxdU_1\tbird\nh-2DBPzbKUM_0\tcow\nh-27oWBBirE_0\tdog\nh-9WCj8sB6o_7\tairplane\nh-9WCj8sB6o_8\tairplane\nh-9WCj8sB6o_10\tairplane\nh-9WCj8sB6o_11\tairplane\nh-9WCj8sB6o_12\tairplane\nh-9WCj8sB6o_0\tairplane\nh-9WCj8sB6o_1\tairplane\nh-9WCj8sB6o_3\tairplane\nh-9WCj8sB6o_5\tairplane\nh_DH9wUjJZA_0\tcow\nh_Ey7gQJCSc_0\tcow\nh_KKvY3cK4o_0\tcow\nh_KKvY3cK4o_1\tcow\nh_XHdrNdD98_0\tbus\nh_tQ-ZVYe1M_0\tbird\nh_6GMOpsIOk_0\tcat\niACKPRGNEOU_0\tbus\niADpOEGdwQI_3\tbird\niALubFRPBXQ_1\tknife\niAL5KD5BwGQ_0\thorse\niAuV09oxF_c_0\tbus\niAzvkn-2C9s_4\thorse\niA_tYzSGuVg_0\tdog\niBDVD9if3VA_1\tbear\niBDVD9if3VA_3\tbear\niBDVD9if3VA_4\tbear\niBF1Cfv7RpE_2\ttrain\niBF1Cfv7RpE_3\ttrain\niBO6oNBr4hM_2\ttrain\niBmHl4vB2p8_0\tboat\niBmHl4vB2p8_1\tboat\niB2e_0wI6Cs_1\tbird\niCA5LKIvUak_0\thorse\niCUmfkHj2MM_0\telephant\niCWBysiT4fE_0\tairplane\niCoklLBZGi0_0\ttruck\niC-r2odD6Ss_0\tdog\niDBWSSj3Yag_0\tbus\niDMMfw0zrvQ_0\tcow\niDy5BzJGt50_0\tskateboard\niD0ptJ7ucww_0\thorse\niD0ptJ7ucww_2\thorse\niECVUNZOPOM_0\tcow\niEIRSDANY7g_0\tbird\niEcsL-BdEp8_0\tskateboard\niEeZD9_-mw4_1\ttrain\niEe9Qed4A6w_0\telephant\niEfRHR6In04_1\tdog\niEnwhpHkWPA_0\tdog\niErN5WNQuZ8_1\tbear\niFLG6c3XcMw_1\tknife\niFgR4_OYpgU_0\tboat\niFk_jNFfItI_0\tcar\niFsAXsW8t-8_1\tbus\niFsAXsW8t-8_2\tbus\niGB1OkMGELk_1\telephant\niGE04YY7P68_0\tmotorcycle\niGE8oPBzavo_0\tairplane\ni
GKh6_bzEe8_9\tairplane\niGKh6_bzEe8_5\tairplane\niGWCy-zysHU_7\thorse\niGWCy-zysHU_0\thorse\niGWCy-zysHU_2\thorse\niGWCy-zysHU_5\thorse\niGf0rCvWhZE_1\tbird\niGivgJkDWVo_0\telephant\niGivgJkDWVo_4\telephant\niGivgJkDWVo_5\telephant\niGivgJkDWVo_1\telephant\niGivgJkDWVo_2\telephant\niGmHR-MYdts_2\tskateboard\niGtwAlGgpuQ_0\tmotorcycle\niG3IZAIpSos_0\tcat\niG4w2A16Qy0_3\tboat\niG4w2A16Qy0_0\tboat\niG7OG-yAmkg_1\tboat\niHNSjj9GO9k_0\thorse\niHZNqzCjd7k_0\ttrain\niHbirUiASog_0\tskateboard\niH0SvXt_QEE_0\tcow\niH9qrmQO5wg_3\thorse\niH9qrmQO5wg_1\thorse\niH_5naROy0I_0\tmotorcycle\niIYyUq4tPbc_0\tcow\niIZw5oU3kz4_0\tdog\niIa2i3Fyyp8_0\tcat\niIgi9EuB83A_0\ttrain\niIlu4DSMMwM_0\tskateboard\niIoEhVh0sac_0\tbird\niIoEhVh0sac_3\tbird\niIoEhVh0sac_1\tbird\niIwKnWnoXd0_0\tskateboard\niI66ySv1M1E_0\tbear\niJcYkYS6CgE_4\tairplane\niJcYkYS6CgE_0\tairplane\niJcYkYS6CgE_3\tairplane\niJqRpAI5q0M_0\tcow\niJ0Pe8-N6i4_0\tbus\niJ5fEZLxnPw_0\tknife\niJ5fEZLxnPw_2\tknife\niKLuvvisn6Y_0\tairplane\nJvHU5ncnmtc_1\tcow\nJvkp32eVZyc_0\tcat\nJvm2k8MgJ5k_0\tcat\nJv1ayezpka4_0\tbird\nJv6b9zItltw_3\tbird\nJv6b9zItltw_0\tbird\nJwNWcW7nUBE_0\telephant\nJwNWcW7nUBE_2\telephant\nJwaPyA7kWhc_0\tcow\nJwnMWPlx6KU_0\tcow\nJw_nc2U4pKs_0\tskateboard\nJxKJB-QdFUA_1\tumbrella\nJxRKwF7KNOA_0\tbird\nJxSYbvgXcT8_0\tcar\nJxVoSlh710g_2\tbird\nJxc3ArJpyuY_0\tmotorcycle\nJxc3ArJpyuY_3\tmotorcycle\nJxdIZhohCtg_0\tcow\nJxlB8wLncYc_0\telephant\nJxzCLy2VyJA_0\tskateboard\nJx03EEph0bw_1\ttruck\nJx2PgBxlrLY_3\tairplane\nJx6xyX5sPMk_0\tcat\nJyKJFochwIQ_0\ttruck\nJyLFLF4shyY_0\tairplane\nJyLqTlaGOww_0\tknife\nJyM0FDmoMyQ_0\tairplane\nJyePA4nzTx8_0\ttruck\nJyhAOfW608o_0\tcow\nJyliijVyyUc_0\telephant\nJyliijVyyUc_1\telephant\nJy1hmMPCNks_0\tdog\nJy1hmMPCNks_1\tdog\nJy37u1dt8Qc_0\tdog\nJy_3PqINBss_1\tbird\nJzGkRevP9mU_1\ttruck\nJzNvJYTN1Pw_1\tbus\nJzNvJYTN1Pw_0\tbus\nJzNvJYTN1Pw_2\tbus\nJzNvJYTN1Pw_4\tbus\nJzNvJYTN1Pw_7\tbus\nJzm0H_o-LyA_1\tbicycle\nJzwF2_O5qho_0\tcow\nJzwF2_O5qho_1\tcow\nJzwF2_O5qho_2\tcow\nJ0Gb34OfhGs_0\tairplane\nJ0m2haAO_Pg_0\ttruck\nJ0uOEHqVD0g_1\telephant\nJ01a05fNHz8_0\tairplane\nJ05eYTq5pFE_0\tcow\nJ1BVFlR3Pzc_2\tbicycle\nJ1VVax1uIGc_0\telephant\nJ1YSacTJR64_0\tbear\nJ1YqrkAsUIs_1\ttruck\nJ1YqrkAsUIs_2\ttruck\nJ1YqrkAsUIs_3\ttruck\nJ1rYOpOlNqs_0\tcat\nJ1reV7ZinzE_2\ttruck\nJ1sQZHaGRVY_0\tcow\nJ1uF4oCMmtU_0\tcar\nJ10PTSVhLnQ_0\tcar\nJ10PTSVhLnQ_1\tcar\nJ10PTSVhLnQ_2\tcar\nJ142X1ly-gY_0\tcow\nJ17uKo2HgxY_0\tbird\nJ2R5C_XNnek_0\ttrain\nJ2Sh2XKvWOA_2\thorse\nJ3EToJg72Es_0\thorse\nJ3d48McH1L0_0\telephant\nJ3gk0p9Hm0o_0\tknife\nJ3hgEqlUzpg_0\tbus\nJ3hva1l0CWM_1\thorse\nJ3jOAuADP44_0\tboat\nJ3sMC-99CWs_1\tcow\nJ3zIT2YwDdY_0\tbicycle\nJ315ju7gD8Q_2\ttrain\nJ4eK5nQv9E0_0\tmotorcycle\nJ4hu4X1Hr7k_0\tbear\nJ4ithFdbyKY_0\ttrain\nJ4mDzsuGR1M_2\tbear\nJ43AWiRkRAI_0\tskateboard\nJ46c4FEAjQ8_0\thorse\nJ46c4FEAjQ8_2\thorse\nJ5CA6t8d7uA_0\ttruck\nJ5JNgpMvPks_0\thorse\nJ5Ss-cEKg9o_0\tskateboard\nJ5TS-1YKlWE_0\telephant\nJ5TS-1YKlWE_1\telephant\nJ51qDcGqOV8_0\tairplane\nJ5-O6tDEZO0_0\thorse\nJ5_8xLaPuIU_0\tcat\nJ6AHeX1RqWk_0\tbus\nJ6nRLSf9kms_1\tdog\nJ61MSyhI5Xg_0\tbird\nJ68NptJ9oRE_0\tskateboard\nJ7h1DaonvHY_1\thorse\nJ7jTtirQ85g_0\tmotorcycle\nJ7vNGyyYQ30_0\tdog\nJ73WpGWHEuE_0\tgiraffe\nJ73WpGWHEuE_15\tgiraffe\nJ73WpGWHEuE_1\tgiraffe\nJ73WpGWHEuE_2\tgiraffe\nJ73WpGWHEuE_14\tgiraffe\nJ79qVoBV6TM_0\tcar\nJ8Akt0d4r_k_0\ttrain\nJ8Akt0d4r_k_1\ttrain\nJ8dIP05jqRw_2\ttruck\nJ8dIP05jqRw_5\ttruck\nJ9SzI8MQm6Y_0\tairplane\nJ9ZGJucbLiw_0\tairplane\nJ9mX4rrWQto_0\tknife\nJ9n9_-FSk4Y_0\tdog\nJ916-YD5Qms_0\telephant\nJ-sHEYA-20k_1\tgiraffe\niKjaiW6gHPQ_1\teleph
ant\niKjaiW6gHPQ_0\telephant\niKlCbkZsFzE_1\tcow\niLeUN6d8Aew_0\tgiraffe\niLeUN6d8Aew_1\tgiraffe\niLk3v-m1Z0U_0\thorse\niLvLOw8Jigg_0\tmotorcycle\niL0GMZ7iO3c_0\tdog\niL5OOut4Jek_3\tbus\niL9TAERxS4A_1\tbicycle\niL9hLZ_cXaI_0\tperson\niMfVd5_HBcE_0\tbus\niMqYyOcO4Fw_0\tumbrella\niMtt9-ROv_o_0\tdog\niMukpec9Vmo_0\tairplane\niMukpec9Vmo_2\tairplane\niMxzNRMiKMA_0\ttruck\niM3tOs60qxk_1\tairplane\niM8Lua_zTug_2\ttrain\niNQNSmu2BD8_0\tskateboard\niNWrFmCCfXw_1\tbear\niNa2jg_1Vyc_0\tcat\niNghTa86iWY_0\tcat\niN-bJwlR2i8_1\tbicycle\niOEuAB0dIs8_0\tdog\niOH00pYaMhY_0\tcow\niOJiYp298qc_3\tairplane\niOJiYp298qc_1\tairplane\niOd4NCiEBLw_4\tairplane\niOd4NCiEBLw_2\tairplane\niOgScMDTX_I_0\tskateboard\niOvWAp7U61k_0\tcow\niOzYv5IpFng_0\thorse\niO7wHeFO6Js_1\tcow\niO7wHeFO6Js_2\tcow\niPWL6FSzmS8_0\tumbrella\niPbg6G7tUVo_1\thorse\niP98M3c1PJw_0\telephant\niQB9bgZJCwA_0\tmotorcycle\niQPn_3iB6aU_0\tumbrella\niQYiakvHwnk_0\tbicycle\niQZ1QN-A3JQ_0\telephant\niQfs0MyXA-s_0\tairplane\niQxGihgbiM8_0\tcow\niQ_2xA5J-Zg_4\tbird\niQ_2xA5J-Zg_5\tbird\niQ_2xA5J-Zg_1\tbird\niQ_2xA5J-Zg_2\tbird\niRI3AkfYykI_0\tknife\niRLMFxqd6Vk_0\tbear\niRTTlG8M9FE_0\tcar\niRTTlG8M9FE_2\tcar\niRTTlG8M9FE_1\tcar\niRWWnw104cE_0\tbicycle\niRklgBUz8ME_0\tbus\niRk0aHyYWdM_0\tbird\niRlBKC_jfE0_1\thorse\niRlBKC_jfE0_2\thorse\niRlBKC_jfE0_4\thorse\niRmfa0b6jJk_0\tcar\niRpibBNFoiY_0\tknife\niRv5dyfU3ZQ_1\tcar\niRv5dyfU3ZQ_2\tcar\niRw-TCiikqw_0\thorse\niRw-TCiikqw_1\thorse\niR3sRTxVGtg_0\tairplane\niR4rImxKjK0_0\tcar\niR4rImxKjK0_1\tcar\niR5Zew8NcYU_0\ttruck\niR5Zew8NcYU_1\ttruck\niR5Zew8NcYU_2\ttruck\niR5Zew8NcYU_3\ttruck\niR5Zew8NcYU_4\ttruck\niR5Zew8NcYU_5\ttruck\niR5Zew8NcYU_6\ttruck\niR5Zew8NcYU_7\ttruck\niR5Zew8NcYU_8\ttruck\niR5Zew8NcYU_9\ttruck\niSCFoiWm7Xk_0\tbear\niSLNkNnHOXQ_0\tbicycle\niSYNvKIuAXc_0\tmotorcycle\niSbXpgu-7qA_0\tbicycle\niSeR1wQ4sl0_0\ttrain\niTF1bWOtrew_1\tbus\niTF1bWOtrew_2\tbus\niTWyYCJO0FI_2\ttruck\niTbEmIOM3Bg_2\tcar\niTbEmIOM3Bg_0\tcar\niTbEmIOM3Bg_1\tcar\niT3LIkn9wh4_0\tcar\niT5clmXCTEc_0\telephant\niUDGzAPkGLI_1\tairplane\niUEEnhAvRoY_0\tcow\niUSZKTFqatw_0\tairplane\niUX8ST-BSFg_1\tbus\niUZnCaGp148_0\tdog\niVH9ehKyau0_0\tgiraffe\niVRs9h04NcM_0\tcat\niVzRc0RW_Y4_0\tbird\niV4UGeMqQeY_0\tdog\niV8NpvUXve4_0\telephant\niV8NpvUXve4_1\telephant\niV9CFIQTmfs_2\tbicycle\niWP_wo9OSe4_0\tbird\niWo66ztRt0o_3\tboat\niWtj7if5cK8_1\tboat\niWv1rxdhH1E_0\tbear\niW1aIV39PQo_0\tmotorcycle\niW2g2j2VhbM_1\tskateboard\niW2g2j2VhbM_2\tskateboard\niXKQX0UfOqA_0\tcow\niXKQX0UfOqA_1\tcow\niXKQX0UfOqA_3\tcow\niXh4-KWp9S4_0\thorse\niXl114K8Y1E_0\tcar\niXxi1CQpbBk_2\tcow\niXzEoHyipJM_0\ttruck\niX7b9tWhoKg_0\tgiraffe\niYGSi3t8Do0_2\tcow\niYO5SD120r4_0\telephant\niYYdiX4oGjM_0\tskateboard\niYjiqdn7fVk_0\tbird\niYsgKLWI96c_2\tknife\niYtDe_tT_wo_1\ttrain\nJ-6KxfbaI6M_2\tcow\nJ_HdQVHBeco_0\tmotorcycle\nJ_l7W4IMhJo_0\tdog\nJ_n_3-KXet0_0\tdog\nKAGadYR0_LM_4\tbird\nKAGadYR0_LM_6\tbird\nKAGadYR0_LM_8\tbird\nKAKn8JmKESU_0\ttrain\nKAjM8ENV-F4_4\tskateboard\nKAxsc-ratJ4_0\thorse\nKA1A0hH1nVQ_0\ttrain\nKBIGw8UrUG8_0\tcow\nKBKaaEaIPRc_0\tcow\nKBNqKcj0xoc_0\ttrain\nKBP3moB3vz4_0\tbird\nKBRkCaaDjxU_3\tbus\nKBRkCaaDjxU_0\tbus\nKBe3_8RL_MI_0\tperson\nKBoY6Pa8f_M_0\tcow\nKCbzyGKBwC8_0\ttrain\nKCdR8nTa3p4_0\tskateboard\nKCipBL5_e5M_0\thorse\nKCy-RKy_KN0_0\tbicycle\nKC1md4Q_DlQ_0\tskateboard\nKDSxlGW6eRc_0\tumbrella\nKDZsS4MjllY_0\tmotorcycle\nKDaVTe3RbUY_0\thorse\nKDyYkCLIImM_0\tknife\nKD0Qm4z53a0_0\ttruck\nKD0Qm4z53a0_5\ttruck\nKD5LwDdfw0o_0\thorse\nKD9qqVSiPu0_0\ttrain\nKEGLFAbfrxs_0\tmotorcycle\nKERo3bKldwM_0\telephant\nKEW0fAHE_74_0\tbus\nKEW0fAHE_74_2\tbus
\nKEagowlFwzI_0\tcow\nKEll3gbyIsk_0\ttruck\nKEll3gbyIsk_1\ttruck\nKEll3gbyIsk_2\ttruck\nKExfLNe3IbY_0\tairplane\nKE2StZtSBfk_0\tairplane\nKE3O7h2RC-s_1\ttrain\nKE_UJpQulNU_0\thorse\nKFEorB8NRcE_0\tboat\nKFFTHBaYcbw_0\tbear\nKFJtVwXfusI_0\tboat\nKFRZOFB41Jk_0\ttrain\nKFk_7p6X-zI_6\tcar\nKFk_7p6X-zI_1\tcar\nKFk_7p6X-zI_2\tcar\nKFk_7p6X-zI_4\tcar\nKFk_7p6X-zI_5\tcar\nKFnvvsS8eIE_1\tknife\nKGYrelsyNbk_0\tairplane\nKGbYHbiOfd8_0\tgiraffe\nKGwEL4VozSA_0\tboat\nKG8zBA9Gudg_0\tknife\nKHBsJZVKzks_0\ttruck\nKHG1hZsfjwQ_0\ttrain\nKHHyhgm1jZ0_3\tskateboard\nKHSjivlhX30_1\tbear\nKHcEC33udEg_0\tcow\nKHgLQP4XH9Q_0\tskateboard\nKHsYYKcSCSI_1\tcow\nKH0F1sJXKss_3\telephant\nKH0k5jfUZGg_0\tbicycle\nKH8QlsYIT1M_1\tbear\nKIPptA8AzYg_0\thorse\nKIjf6QGqdsw_0\ttruck\nKIjf6QGqdsw_1\ttruck\nKIqePeskBSk_0\ttruck\nKIy2LK1jsQ8_0\tperson\nKI8Arf5-ekw_1\ttruck\nKI8Arf5-ekw_4\ttruck\nKJIBdy7_10k_1\tbus\nKJIBdy7_10k_2\tbus\nKJJBVXnnqIw_0\tzebra\nKJcXjJ5S9yA_1\tdog\nKJrPyuVxWRg_0\tairplane\nKJrPyuVxWRg_1\tairplane\nKJvAK-5ExwY_2\ttruck\nKJ30mU3h4f4_0\tbear\nKJ7PQiJAKRM_0\telephant\nKKKiTv_k23A_0\tgiraffe\nKKO1QGoVQYU_0\telephant\nKKpwJEMQYv8_0\tdog\nKKsKKMjHYGM_0\thorse\nKK06xbUhklk_1\tbus\nKLC8OgkQnNQ_0\tboat\nKLEKnTRMmo0_1\tcow\nKLGAT1GQYGA_2\tbird\nKLMz6_P5QmA_0\thorse\nKLNmQqyAs54_0\tcow\nKLUTy4pqLZ0_0\tbicycle\nKLVZqPfRuTg_2\tbear\nKLVZqPfRuTg_7\tbear\nKLlN4H-eGYI_1\tskateboard\nKL6-Iu09-C8_0\tcat\nKMNaWZZK2Os_0\tskateboard\nKMOOcO5yE9E_1\thorse\nKMXuGjMAt7k_5\tbicycle\nKMXuGjMAt7k_6\tbicycle\nKMXuGjMAt7k_3\tbicycle\nKMajGvVnol0_1\tairplane\nKMajGvVnol0_4\tairplane\nKMajGvVnol0_5\tairplane\nKMajGvVnol0_6\tairplane\nKMajGvVnol0_7\tairplane\nKMiZgk_f50g_0\tdog\nKMlZbzTdutw_1\tcar\nKMlZbzTdutw_2\tcar\nKMsL64iYfOA_0\tcar\nKMtu1xThH2k_2\telephant\nKMyoO6YYfZk_0\telephant\nKNaoNUMT7m0_1\tcar\nKNg4K_bbY5Q_0\ttrain\nKN5hxi96gW0_0\tcat\nKN-_uhPPfoE_0\tcow\nKOKdrC_foXo_0\tairplane\nKOOd5IO8seo_0\tboat\nKOSUWuFIQjQ_1\tairplane\nKOVZk2ixqc0_0\ttruck\nKOgmgqcT21Y_1\tbird\nKOl1EDiK2e8_0\tmotorcycle\nKO6T6QdloiM_0\tbus\nKO7Ncyx1-9c_0\ttrain\nKPJDHcE-qeQ_0\tbicycle\nKPYtlDJa43o_0\tskateboard\nKPfbBNvFcmA_0\tskateboard\nKPj_wrsubOE_2\tbear\nKPkzyHL7IPg_0\tcow\nKPmvpNEHsPk_0\tskateboard\nKPzWIuvRlr0_1\tskateboard\nKP4ApNQiIEI_0\tcat\nKQB-ZyriFmI_0\tboat\nKQg6eO2jr_Y_0\tumbrella\nKQ5mchVgTXo_0\ttruck\nKRCLiP-JUsc_0\ttruck\nKRCLiP-JUsc_2\ttruck\nKRCLiP-JUsc_1\ttruck\nKRW0HyqDLg8_0\tdog\nKRjN1nx8mcE_0\tairplane\nKSDxU99SF6g_0\tmotorcycle\nKSHVle4SAM4_0\telephant\nKSZ7nkMWOsU_0\tskateboard\nKSZ7nkMWOsU_1\tskateboard\nKSj7hZ7oO18_0\tcow\nKS1ge4vlv64_0\tbicycle\nKS4vsIYGaCM_4\ttruck\nKS4vsIYGaCM_0\ttruck\nKS8UAlyHoCg_0\tdog\nKS_fak2guWU_1\tdog\nKTAMaZKxpF8_2\ttrain\nKTDhNtr8XF4_0\tairplane\nKTDzrCvIVQs_0\tdog\nKTQQtbUbWbA_0\tairplane\nKTZ2Jsj6_ig_0\ttruck\nKTdzxOjJNgI_0\tcar\nKTsTGNqrFuE_0\tumbrella\nKT7YiBWXqNk_0\tairplane\nKUZxnRyU2e8_0\tcat\nKUbSnz1yWxc_0\tknife\nKUc8Kw30V1Q_2\ttruck\nKUc8Kw30V1Q_3\ttruck\nKUc8Kw30V1Q_4\ttruck\nKUgY_2bsBC0_1\tskateboard\nKUhzqYZoYCI_0\tcow\nKUkcrqulhqg_0\tcow\nKUlpA-cpCpM_0\thorse\nKUumLype4AE_0\telephant\nKVFlTVdKQVw_0\thorse\nKVJCkQzQbMs_0\tperson\nKVmS-yiYu2c_0\tbicycle\nKVzW5MPT25A_0\tairplane\nKV0o55FO4XA_0\tskateboard\nKV3jtdzXA9U_0\tdog\nKV__RQ75-vw_1\tcow\nKWJiCsomGTA_0\tcow\nKWLl4vVumIs_0\ttruck\nKWSDQebY3dA_0\tcat\nKWwbFKgHqW0_0\tcar\nKWxd8IQ9_a0_0\tcat\nKW10UlO19uo_0\tbus\nKW4ovUCg7uU_0\tbicycle\nKW4ovUCg7uU_1\tbicycle\nKW5S4gsTVaQ_0\tknife\nKW7gAr7kgow_0\tdog\nKW_6RyjLGPI_3\thorse\nKXCQuD9phb4_1\tbird\nKXENib5sk78_0\tcat\nKXLWiz5ZUh0_1\ttrain\nKXLWiz5ZUh0_2\ttrain\nKXdF5__0yVQ_0\tcow\nKXf6k7PrX
7E_1\telephant\nKXf6k7PrX7E_2\telephant\nKXrQkw1WPnk_0\tbird\nKXzu3MDaZn8_0\tcar\nKYK_Wg8JlTg_0\tskateboard\nKYK_Wg8JlTg_1\tskateboard\nKYTRCD2p-8Y_0\tmotorcycle\nKYZzKKYD7Yc_1\thorse\nKYaB_EEk344_0\tcat\nKYc__uUZkwc_3\tbicycle\nKYd6wCR0jVc_1\thorse\nKYd6wCR0jVc_0\thorse\nKYs4hm9X1Rg_1\tbicycle\nKYvXJXEbUMg_0\tbird\nKY0x7p41Q_A_0\tcat\nKY04L4VTsXc_1\tairplane\nKY04L4VTsXc_2\tairplane\nKY7D2Y5MQSo_0\thorse\nKZAf2uPS-us_1\thorse\nKZAf2uPS-us_0\thorse\nKZFniGi-fes_0\tdog\nKZJcgoY3r3U_0\tairplane\nKZSLQpdbGps_0\tmotorcycle\nKZYe6pqrLaQ_1\tdog\nKZhX7tDfYIA_0\tbus\nKZl_XArvSXk_0\thorse\nKZ4OuA1t3ZY_0\telephant\nKaUGkf-3N-4_0\thorse\nKaiX3d83DWA_0\tzebra\nKaj5B4nrWJU_0\tskateboard\nKapwOqVyzUk_0\tcat\nKaqToIfNxMY_1\tbicycle\nKauPg8P2kC4_1\tairplane\nKazepPKQz1M_1\tcow\nKazepPKQz1M_3\tcow\nKazepPKQz1M_4\tcow\nKa978At0k0Y_0\tairplane\nKa-4ZfE0GMQ_0\tmotorcycle\nKbA6UDJg1LE_0\ttrain\nKbA6UDJg1LE_1\ttrain\nKbGl5jqOQ7o_0\tcat\nKbRIbBeLBsM_3\tmotorcycle\nKbosOWR7ZSg_1\tboat\nKb3lxArGO8Y_0\tbicycle\nKb3lxArGO8Y_1\tbicycle\nKcDpzG8kKho_0\tcat\nKcL-zz1sb6I_0\tdog\nKceqMsKO-zc_0\tcat\nKcpGWNCD-uk_0\tcat\nKct9k6Q2YM8_0\tcar\nKcuEc9WwYSQ_0\tcow\nKcuEc9WwYSQ_1\tcow\nKcyLR4RxylE_0\tcow\nKcyMYgt62Go_0\thorse\niY5Sh73Lem0_0\tbird\niY6eEC8uY4E_2\ttrain\niY6eEC8uY4E_1\ttrain\niY9QlFmEBFY_0\tmotorcycle\niZsSK_iIOoA_0\thorse\niaGO2mTgoPo_1\tbicycle\niaGO2mTgoPo_3\tbicycle\niaWSU1ISWXQ_2\tairplane\niaWSU1ISWXQ_0\tairplane\niaflfMXT7QQ_0\tboat\niamGAsKNRhY_0\ttrain\niana0Lz1gs0_1\tmotorcycle\niasZRb9p3lg_0\tmotorcycle\nia1XmqAwn7M_0\tbus\nia6R3fqdlnE_0\tbear\nibcBDIGpMfo_1\tbus\nibd-Wxcr_x4_0\thorse\nibpj369yzbw_0\tumbrella\nibxmk7cGhTQ_3\thorse\nib5fWzJWV5A_0\tcow\nicDyRH3P-nM_0\tairplane\nicGjENlINL4_3\tskateboard\nich9rXZWjGY_0\tcar\nicic9NkCnf0_0\tcow\nicnuBKQZNBg_2\tbus\nicnuBKQZNBg_0\tbus\nicnuBKQZNBg_1\tbus\nicxOfJQ-l9I_0\tcar\nicxOfJQ-l9I_1\tcar\nicy3pC1Q0eA_0\tcat\nic7k8fkUDXs_0\tcow\nidnOwkwaCm4_0\thorse\nidnSzg_rV_k_3\tbicycle\nidoGYHCXGJs_0\telephant\nidq0Jqw8Oa0_2\telephant\nid1yzZ3HkTs_1\tknife\nieCL4lz7IJw_1\tboat\nieOpqoYhMOQ_0\ttruck\nieOpqoYhMOQ_1\ttruck\nieOpqoYhMOQ_2\ttruck\nieULzTIs9ls_0\tcow\niedgnWefCA0_0\tairplane\niedgnWefCA0_2\tairplane\niedgnWefCA0_3\tairplane\niewlg5CteEs_1\tairplane\nie8gkh6nQcA_0\ttrain\nifKKR-gCLSk_0\tcat\nifRQKBKIRSI_0\tdog\niff3KW8leKw_0\tairplane\niff3KW8leKw_1\tairplane\nifghH4Jo8D8_0\ttruck\nif31ci9xz_8_4\tbicycle\nif31ci9xz_8_1\tbicycle\nif31ci9xz_8_2\tbicycle\nigGtS-jZCQM_2\tcar\nigGtS-jZCQM_0\tcar\nigLVqNKw-yE_0\tbird\nigMWvnK1jEE_0\tgiraffe\nigMWvnK1jEE_3\tgiraffe\nigMWvnK1jEE_1\tgiraffe\nigQUACDrluw_0\thorse\nigU61tmxeE4_2\tskateboard\nigWsPt0nelg_1\tbus\nigcpSvypduQ_0\ttruck\nigcpSvypduQ_1\ttruck\nigdqmLfZ_cw_0\tairplane\nigjBIRwjlko_1\tdog\nigm6X4CZLmk_1\tbus\nignREcFRyaQ_7\tairplane\nignREcFRyaQ_8\tairplane\nigwghbZYjgg_0\tairplane\nihMDaxeTpZs_1\thorse\nihTjIMWOjuQ_1\tmotorcycle\nihUpF22zo4M_0\ttrain\nihUpF22zo4M_1\ttrain\nihWWle00xEE_0\tmotorcycle\nihh0J0AaWBs_0\ttrain\nihh0J0AaWBs_2\ttrain\niiA0hIRwwJA_0\ttrain\niiSWvRk3YfU_0\tbird\niiextKoe48U_0\tcat\niigPPpoo0W8_0\tknife\niiiOUcmwJPw_0\tcow\nii0PDMs-a0o_2\tcar\nii2ghwDAI3w_1\tairplane\nii_sG2SkeXM_0\tcat\nijB2Yh71VIg_2\tbear\nijJAWtORd2w_0\ttruck\nijJAWtORd2w_1\ttruck\nijVpcnt8HN8_0\tbus\nijXmwWOLvpM_2\thorse\nijXmwWOLvpM_1\thorse\nijdipMmraWc_0\ttruck\nijwhkKzyWE8_0\tairplane\nij0zLKtr0sA_0\tbird\nikGzd6ivk64_0\tmotorcycle\nikKFRS8Hivk_0\tbear\nikVu6XfZ3_A_1\tbicycle\nikafEc8p6rI_0\tbicycle\nikafEc8p6rI_5\tbicycle\nikafEc8p6rI_1\tbicycle\nikafEc8p6rI_3\tbicycle\nikafEc8p6rI_4\tbicycle\nikfmjumoUlM_2\ttr
ain\nik868nOtrZo_4\tbus\nik-jgdZW4Ek_0\thorse\nik__zZ1HZNg_1\tgiraffe\nilKErQ8ojz0_0\tumbrella\nilKErQ8ojz0_2\tumbrella\nilKErQ8ojz0_3\tumbrella\nilKW98Qvobg_0\tskateboard\nilvsheh1Cqs_0\tdog\nilxXSgvtFgw_0\tcow\nimEWC_Q-BSg_1\tcar\nimcRxs0K7H8_0\tbus\nimmhpBi8eWw_6\tskateboard\nim_FneG303c_0\tdog\ninEZ7ZLAS7s_5\tskateboard\ninJLKInP5kw_0\tdog\ninZmM8c-9NI_3\thorse\ninedUh-74-A_4\ttruck\ninodVLfFogA_0\ttrain\ninynAJrGhVU_0\tmotorcycle\nin061qZJjWI_0\tdog\nKc8WMzLKvvk_0\tcow\nKc-f3X7O-pw_0\tcat\nKc-x73DCumI_0\ttruck\nKdGgVhM0Ihg_0\tbird\nKdKlI0ZN6qo_0\tairplane\nKdQQqsAuU7o_1\tbicycle\nKdUSJz6UWLQ_0\tgiraffe\nKdXRnPKKeTU_0\tbird\nKddQJwFfv9s_2\tskateboard\nKdjMgSuON5w_5\tbear\nKdpUjVhfjG0_0\tperson\nKdyadP7Y1nU_0\tcar\nKd9Em2ABfN8_0\tcat\nKd-jTE5-2uE_1\tmotorcycle\nKeMITKdjHtk_0\tcat\nKenV2bIQf1o_0\tbicycle\nKevYmLAAigc_1\ttrain\nKe3R9FrGLcY_0\tdog\nKfJU66erPWo_2\tknife\nKfMO45jz-68_0\tboat\nKfS_UKkbQAA_0\tbird\nKfTV1TFY2b8_0\tbird\nKfaTw0euPQA_0\tmotorcycle\nKfjmKiZzSlY_0\tcow\nKfjmKiZzSlY_5\tcow\nKfkKe7q45KA_1\tmotorcycle\nKfkKe7q45KA_2\tmotorcycle\nKfkKe7q45KA_3\tmotorcycle\nKfpCncLoqOw_0\tcow\nKfwbVpPI0nU_1\tmotorcycle\nKgAFD_JvgrQ_0\tcow\nKgD3H0APDy0_0\tbear\nKgNS5HwFF_c_1\telephant\nKgVEQYicksA_0\tcow\nKgY5OrVnzv4_0\tcow\nKgo7SWtDdz4_1\tdog\nKg3xuyjNU7w_0\tumbrella\nKg7Qk4Gx9n0_0\tmotorcycle\nKhKZwdKiqms_0\tcow\nKhKcHaH_ALo_0\thorse\nKhPKq8O30VM_0\tbicycle\nKhPKq8O30VM_2\tbicycle\nKhPKq8O30VM_4\tbicycle\nKhuC9snWfpI_0\tcow\nKh7rAO7jCGc_0\tairplane\nKh_KwBHfGQ8_0\tcow\nKiHy8IMQ6zA_0\tairplane\nKiaUDlPLxzk_1\tbear\nKixl-Wmj3kg_0\tmotorcycle\nKjaag6B-MIQ_1\tskateboard\nKjca1u6P3NE_0\tcow\nKjiI2E3l3Mk_1\ttruck\nKjiI2E3l3Mk_2\ttruck\nKjqaJ25GUBI_0\tbus\nKj3dRtd4xQI_1\tcow\nKj3dRtd4xQI_0\tcow\nKkD23XYUG9c_0\tumbrella\nKkMNGzvNkg4_9\tbird\nKkNYBz9ZaVA_0\tbird\nKkNYBz9ZaVA_1\tbird\nKkPf9AB1HZo_1\telephant\nKkRq1ogJq-4_0\tskateboard\nKkXTT9C4xfc_0\tcow\nKkdSKHS7P50_1\tskateboard\nKks6eJqnZLQ_0\tdog\nKks6eJqnZLQ_2\tdog\nKks6eJqnZLQ_3\tdog\nKks6eJqnZLQ_4\tdog\nKks6eJqnZLQ_5\tdog\nKk6BgYl9OjA_7\tbicycle\nKlEK-vv3DVo_0\tbear\nKlENnLskuCU_0\tcat\nKlG0czACle4_1\tcow\nKlG0czACle4_0\tcow\nKlG0czACle4_2\tcow\nKlG0czACle4_3\tcow\nKlqbHICh4G4_0\ttrain\nKmJhshcviXA_0\tknife\nKmbMzgXFdKs_1\tairplane\nKmbMzgXFdKs_2\tairplane\nKmbMzgXFdKs_0\tairplane\nKmfmqwmQneM_0\tbird\nKmr5uVYVSDo_0\tcar\nKmuV8XfAjvw_0\thorse\nKm3GmgNJlL8_0\ttrain\nKm3GmgNJlL8_1\ttrain\nKm3GmgNJlL8_4\ttrain\nKm7w520V5vs_0\tairplane\nKnIxVxIho9w_1\tbird\nKnN2yDre-aM_0\tboat\nKnTu6keaGs0_2\telephant\nKnTu6keaGs0_0\telephant\nKnXPxa1RzmU_0\tcow\nKncYvkV6rwc_0\tboat\nKnql8E5Khc8_0\telephant\nKnuD87lrS8w_0\tskateboard\nKnvGRqLQ5iM_1\ttrain\nKoA6bPmALeA_0\tcat\nKoXgGmdVCBM_1\tbicycle\nKoXgGmdVCBM_10\tbicycle\nKoXgGmdVCBM_2\tbicycle\nKoXgGmdVCBM_3\tbicycle\nKoXgGmdVCBM_4\tbicycle\nKoXgGmdVCBM_5\tbicycle\nKoXgGmdVCBM_6\tbicycle\nKoXgGmdVCBM_7\tbicycle\nKoXgGmdVCBM_8\tbicycle\nKosi26dm76A_0\thorse\nKo5wlBGl200_0\thorse\nKo_Nx24OGxM_2\tairplane\nKpDzoM2xtwc_2\ttruck\nKpDzoM2xtwc_3\ttruck\nKpDzoM2xtwc_5\ttruck\nKpHFaYsgWrg_2\telephant\nKpHFaYsgWrg_1\telephant\nKpVflkpC7d4_3\tbus\nKpVflkpC7d4_5\tbus\nKpVflkpC7d4_0\tbus\nKpVflkpC7d4_2\tbus\nKpXxo2n6AYw_1\tmotorcycle\nKphl0WRacss_0\tknife\nKqAvXx4bN5k_0\tcat\nKqQgFUEAS-M_0\ttrain\nKqavxpR698k_6\tdog\nKqavxpR698k_0\tdog\nKqavxpR698k_1\tdog\nKqfo6_qcthc_0\tcar\nKqjhaIJMY5U_0\tcat\nKqnqyAczaqs_4\tbus\nKqqyldSpJh4_0\thorse\nKqqyldSpJh4_1\thorse\nKqzkADa-Lqw_1\ttrain\nKq1x16QvM1g_0\tdog\nKrGJjt0yq-s_1\tbus\nKriNb3dhqVQ_1\tskateboard\nin9LFcixPXo_0\tskateboard\nioEMtB2bP6o_0\tbird\nioESr4H79KY_0\tboat\nioGc_R
8NJow_0\tcow\nioKahF3aFWw_0\thorse\nioKahF3aFWw_1\thorse\nioOHxrHumIk_1\tairplane\niobYquCNk5k_0\tcow\niojaZ646ie8_0\tskateboard\nipLnwxta1Jc_0\tboat\nipOJVFLMLIk_2\tbird\nipOJVFLMLIk_0\tbird\nipgB9KXnzK8_0\thorse\nipg_y1T2OsM_0\tcow\nipg_y1T2OsM_1\tcow\nipqQlNsINy8_2\tairplane\nipt6gWgCgis_0\ttruck\nip5xVRJOpP8_0\tumbrella\nip8BFE94TKo_0\tairplane\nip8BFE94TKo_2\tairplane\niqDJJqLVBBk_1\telephant\niqExYW2fPfc_0\tbear\niqicuLBaF_g_0\ttruck\niqlKzflOl00_1\tbus\niq1FaWFylpI_0\tmotorcycle\niq6izTYp-DU_0\tmotorcycle\nirBsER6ITHw_2\tskateboard\nirDs_vWExnM_1\tbicycle\nirDs_vWExnM_2\tbicycle\nirU_BJXoU9I_1\tcow\nirWY8s-JuBs_3\tairplane\nirWY8s-JuBs_0\tairplane\nirWY8s-JuBs_1\tairplane\nirWY8s-JuBs_2\tairplane\niramP9ihj_w_1\tbird\nirgacv6LobE_0\tmotorcycle\niri1MtEgOjQ_0\tbear\nirs2O6YOB5I_3\telephant\nirs2O6YOB5I_5\telephant\nirs2O6YOB5I_1\telephant\niruY-BU0rpg_4\telephant\nirzcPf--6uQ_0\ttrain\nirzcPf--6uQ_4\ttrain\nirzcPf--6uQ_5\ttrain\nir4EYn7Fz5A_0\tdog\nir5E9O2Tonk_0\tboat\nir7Dq5dPxOQ_0\thorse\nisPplb7aotI_0\tboat\nisPplb7aotI_3\tboat\nisU4229ndXM_0\tcat\nisfwmnXNmeM_2\tcow\nislz_HxqOnI_0\tbird\nisvvRHvNuIw_4\tumbrella\nisynk11V9s8_3\tairplane\nisynk11V9s8_1\tairplane\nisypXPZMgns_2\tboat\nisypXPZMgns_3\tboat\nitKyPMv5z0Y_0\tumbrella\nitKyPMv5z0Y_2\tumbrella\nitc-A2zwSGM_0\tdog\nitrvgHryhIY_0\ttrain\nits4C4ty2oA_0\tskateboard\nittQcsrECUE_1\tbear\nittQcsrECUE_2\tbear\nit1EatlrBkg_0\tcat\nit3KS-r39EQ_1\tknife\nit3hCzfmyfs_0\tcow\nit6DtEGdhas_0\tcat\nit8Fid-mqRQ_0\ttruck\niuEbY8B4Qo4_0\tcow\niuEbY8B4Qo4_1\tcow\niuFmdispR2U_0\tbicycle\niuRmu4BN6bw_0\ttrain\niumTd9IGDho_0\ttrain\niusgUMlrYFA_0\tairplane\niutdZMWA8f0_0\tperson\niuumrgHW8zM_0\tumbrella\niu3sd1qnr8g_0\tcar\niu9Av4HCmiw_0\tknife\nivDeIaJYIlE_0\ttruck\nivT103z2bwc_0\tgiraffe\nivdfO5VqKo4_0\tcat\nivgTXhIqccY_0\tcat\nivi1frbFnGw_1\tgiraffe\niwFO7lcVjKc_2\tcow\niwFO7lcVjKc_0\tcow\niwFO7lcVjKc_1\tcow\niwX4cgfQn5s_0\tbird\niwczN64AC9Y_0\tbus\niwp5aVOXWaM_0\tairplane\nix8S6CRuUFg_3\tbear\niyAvqfMVOeA_0\tcat\niyLZZlL-B80_0\tcow\niyMbIICjtcg_0\tcow\niybJfH6iVdU_0\tbus\niygW3-Ovcic_0\tcow\niyn1OZFmvXE_2\tbird\niyz9Lq13Mcg_0\tcow\nizbTUTqkG7c_0\tcow\nizx70OqPYBc_0\tdog\niz9-Vl4e9po_0\ttrain\niz9-Vl4e9po_2\ttrain\niz9-Vl4e9po_3\ttrain\niz-BT0NAs6k_1\tknife\ni0Eg02B3JoM_0\telephant\ni0Ez1KT7sTo_0\thorse\ni0ZE0kXl5oU_1\tskateboard\ni0eMgZ0riHI_2\tbird\ni0gg-mJNKlU_0\tcow\ni05OPAsrmJI_3\telephant\ni05OPAsrmJI_1\telephant\ni05OPAsrmJI_2\telephant\ni09cuoC14q4_0\tbear\ni1DfyWe0Jh4_0\tcow\ni1DfyWe0Jh4_1\tcow\ni1NfFxZmBSA_0\tbus\nKrvsSuIgrJQ_4\thorse\nKrvsSuIgrJQ_1\thorse\nKsT2_VxPkb4_0\tknife\nKsXzFCpHMPU_0\tgiraffe\nKsyud0_i1zI_0\tbus\nKtINrfbQSXk_0\tknife\nKtV59qZg7BU_0\ttruck\nKtX4x9k3J2A_0\ttrain\nKthi3i2WM3s_1\tskateboard\nKtkN77asAj4_0\thorse\nKtplZx6_ecU_1\tknife\nKtqvSap6uig_0\tskateboard\nKtxb4OmaAjA_0\tcar\nKt3uQcxNltk_0\tzebra\nKt9neWWjkHM_2\tbear\nKuBa9tep8xk_0\tbear\nKuQgP71vfZ0_0\ttrain\nKuYBJ90zNYw_0\tumbrella\nKuYjBUvU-ws_0\tumbrella\nKuYrzelSfIw_0\tcar\nKulks153IS8_0\ttruck\nKulks153IS8_1\ttruck\nKu0XhH2YeG4_0\tbear\nKvH6JyHG3H8_0\tmotorcycle\nKvH6JyHG3H8_1\tmotorcycle\nKvLXxaGooPk_0\tcow\nKvPLPO4A5R8_0\tknife\nKvRsu4xefwo_0\tperson\nKvcxzJxNkkU_1\tbird\nKveRZ7dBNGU_0\tboat\nKvgupPBw5rc_0\tcat\nKvjDDIthDDM_0\tcow\nKvkOTtqxJlo_1\tcat\nKvsaKWirK7Y_0\tskateboard\nKv0ui3mEWGE_0\thorse\nKv0ui3mEWGE_4\thorse\nKv0ui3mEWGE_1\thorse\nKv0ui3mEWGE_2\thorse\nKwkcPYl8Lv4_0\tcow\nKw7t6l8h2Ns_0\tbear\nKw7t6l8h2Ns_1\tbear\nKw8037OwDjc_0\ttruck\nKxWI3M2FGOw_0\thorse\nKxZXot9AIY4_0\ttruck\nKxflrYttp20_0\tbird\nKxlTxdqDDzo_0\tcat\nKxuqb_htGwY_0\tgir
affe\nKxuqb_htGwY_2\tgiraffe\nKx40to29YnE_0\tskateboard\nKyDXCruNNj4_0\thorse\nKyUM64yfNCA_0\thorse\nKyWUn_bj5rM_0\tmotorcycle\nKyZWWIsQUbg_0\tskateboard\nKyZWWIsQUbg_1\tskateboard\nKyaKfhOfKhE_1\tbird\nKyt325n06oI_0\tcat\nKywHhzvsm3Y_0\tbird\nKyyS9PYJ9Zo_0\ttruck\nKzK3iwncxbY_0\tbicycle\nKzK3iwncxbY_1\tbicycle\nKzc17TzutkM_0\tskateboard\nKzc17TzutkM_1\tskateboard\nKzyD-e7N2D4_0\ttrain\nKzyD-e7N2D4_1\ttrain\nKz3zulHzEE4_1\ttrain\nK0CwoXVMp0M_0\tbicycle\nK0L3_2UquEY_0\tboat\nK0Zt-EcXkj8_1\tairplane\nK0cgwgX_8fo_2\tboat\nK0xs4bH65_Q_1\tmotorcycle\nK02fUURwCiY_2\tcar\nK02fUURwCiY_0\tcar\nK02fUURwCiY_1\tcar\nK1Qbgm__2iE_0\tcat\nK1ccfBgR_kg_0\ttruck\nK1-s4sk63R4_0\thorse\nK1_J3d_yH64_0\tmotorcycle\nK2F6TCgVfR0_0\tboat\nK2hV4KVruLc_0\tairplane\nK2my8qWjyn4_0\tcat\nK2yjgwFV15k_1\tmotorcycle\nK2yjgwFV15k_0\tmotorcycle\nK26jSjClwaQ_0\tskateboard\nK3Cgw_EFdbw_1\tmotorcycle\nK3DniaFnn9E_0\tcat\nK3KhxEuf8mY_0\thorse\nK3KhxEuf8mY_5\thorse\nK3Ov5rPJ2LE_1\thorse\nK3XsEMr7Qt4_0\tperson\nK3qgW4Y3yrk_0\tmotorcycle\nK30LSGFu6hs_0\tmotorcycle\nK4RE7AZWGv0_0\tcar\nK4U_AmqQFDY_0\tbear\nK4VnWy2-8xQ_3\tcar\nK4ec2MqDkPw_0\ttrain\nK4fCUNjbdf8_0\tmotorcycle\nK4wp52Zn5d4_0\thorse\nK5NooGgwD1E_0\thorse\nK5NooGgwD1E_1\thorse\nK5pBkPv_1sg_5\tcar\ni12y-zJl-nA_0\tcat\ni17EaDmRPCg_0\tumbrella\ni2Yjl6kF8iY_2\tairplane\ni2Yjl6kF8iY_0\tairplane\ni2cujNbMSKc_1\tskateboard\ni2diIHrCsbk_1\tbird\ni3AK_cujBxY_1\tmotorcycle\ni3BpSeFJdgo_0\tcat\ni3HeGqUyibM_4\tbicycle\ni3HeGqUyibM_9\tbicycle\ni3HeGqUyibM_12\tbicycle\ni3LFAemLFW0_0\thorse\ni3Z5pFF2dH0_0\tbird\ni3a4U770GtE_0\tperson\ni31nG3E36WE_0\tknife\ni32p4KoRD2o_0\ttrain\ni33S_D8TBc4_0\tdog\ni35wpbpl8qY_2\tboat\ni38dpYWvJN8_0\tumbrella\ni38dpYWvJN8_1\tumbrella\ni4CFI7MtlRs_0\tcat\ni4ExemfAEO8_0\tbicycle\ni4IpgDIqTrs_0\tboat\ni4RZtd1cCw8_0\tumbrella\ni4bRNqQ32MI_0\tcat\ni4clJpNvw4M_2\tbus\ni4hqN47R0oU_1\ttrain\ni45JoRzDdI0_0\tcow\ni46jok5cjyY_0\thorse\ni5GJ6mIp8zc_0\tboat\ni5G6RkcL4m0_0\tcat\ni5OdBE4QG6c_0\ttrain\ni5g87UeVkBU_0\thorse\ni5g87UeVkBU_1\thorse\ni5sT2ifoPyM_0\tknife\ni6MF-PGtJiE_1\ttrain\ni6WTNPwIjW8_0\tcat\ni6aJqhBh5wg_0\tskateboard\ni6j6P7ITxYg_0\tcow\ni6vwTWezXmU_1\tboat\ni66Gsq6zzqI_0\tmotorcycle\ni6-YQ6rSnDI_0\tcat\ni6_oBTD2-YA_5\tbird\ni7P2tq4TS_4_2\tbus\ni7UQGL5uxvw_1\tskateboard\ni7WeV3CfJV8_0\tknife\ni7a8sQcVRgE_0\ttruck\ni7umCLnxVXw_0\tcat\ni791If0qoBU_5\tknife\ni8KQCu2cMAc_2\tbicycle\ni8KQCu2cMAc_4\tbicycle\ni8bVI1667K4_1\ttruck\ni8hjK42sseE_0\tmotorcycle\ni8lG7Ux3wlc_0\tdog\ni8nbuADJjmE_0\tcar\ni8nbuADJjmE_1\tcar\ni8nbuADJjmE_2\tcar\ni9PUn4sF30g_0\tmotorcycle\ni9T-NwSBqPE_1\tknife\ni9VWkuQHBls_0\thorse\ni9nmvkDiFGc_0\tcow\ni9sP7mWuQ_8_2\tmotorcycle\ni9sP7mWuQ_8_1\tmotorcycle\ni9u4vsQUBTQ_0\thorse\ni90TDb7evCY_0\ttruck\ni9_FG4-2VIM_0\tskateboard\ni-CQVFq1JI8_1\tbicycle\ni-CQVFq1JI8_3\tbicycle\ni-T9Q2g8xbk_0\tairplane\ni-kodOT_ufM_0\tcow\ni-nP7aFTZb8_0\tbird\ni-xdWDN7Eys_2\tknife\ni-3aAuwOmxc_0\ttruck\ni-8W-K4y3nY_0\ttrain\ni_HHc85mP4Q_0\ttrain\ni_h0vOCrd_U_0\tairplane\ni_h0vOCrd_U_1\tairplane\ni_iXTMX4Vls_0\tcat\ni_nZ8ImBf18_1\tbicycle\ni_nwFUP7QJM_0\tknife\ni_4c71HPXOI_0\tgiraffe\ni_-PIEIGkQE_0\thorse\ni_-PIEIGkQE_1\thorse\njAH-80rHWKY_3\tbear\njAW8iLGAgdQ_1\tbear\njAW8iLGAgdQ_0\tbear\njAh4oBD0Bsw_0\ttrain\njAnV_6fFGnI_0\tcow\njAy3VhkJauE_2\tknife\njAy3VhkJauE_5\tknife\njA6aZl1f4Wg_0\tbicycle\njBMmFLPc7nA_6\tbus\njBMmFLPc7nA_0\tbus\njBMmFLPc7nA_3\tbus\njBMmFLPc7nA_5\tbus\njBTJgbVspOA_0\tairplane\njBl50J7bOEw_1\tairplane\njB1IT1aBj-Y_0\tdog\njCDFU72N7Mc_1\tskateboard\njCJGjjNBSk8_1\tairplane\njCJGjjNBSk8_0\tairplane\njCMWNtCzuqU_0\tknife\njCUnLxCoYMA_0\tmotor
cycle\njCY67ybfyqU_1\tcow\njCZx5dn_4KA_0\tbear\njCcW1MW6PTE_0\ttruck\njCcW1MW6PTE_1\ttruck\njCiwgfC1uN0_0\tdog\njCtFgJ1qhJE_0\tbird\njC5Px208OVY_4\thorse\njC5Px208OVY_5\thorse\njC5YGckTiIU_2\ttrain\njDFqxB4rC7M_0\tcat\njDJNC5fzvfA_1\tmotorcycle\njDYks7hSKbg_0\ttruck\njDbHjQZ5R70_0\tairplane\njDbHjQZ5R70_1\tairplane\njDdFavN2eWY_0\tdog\njDgpggXdBIc_1\tmotorcycle\njDgpggXdBIc_2\tmotorcycle\njD2RjyxG6ow_0\tmotorcycle\njD4621IQz3w_0\tdog\njD4621IQz3w_1\tdog\njEASZOuNSS0_3\tskateboard\njEASZOuNSS0_0\tskateboard\njEASZOuNSS0_2\tskateboard\njEEOkCjU9y0_0\tbear\njEJZ76_xhog_2\tbear\njEQDhb_Zewo_0\tcat\njEYG-qIv34o_1\tcat\njEYG-qIv34o_0\tcat\njEfwj-JzFXo_0\tperson\njE1Rq_Ot02M_0\tdog\njFAm4tikj6E_0\thorse\njFSIX_KuRK8_0\thorse\nK5p31PQkx3I_1\thorse\nK5q4FoXnLwI_0\ttrain\nK5sQWplX-D8_1\tskateboard\nK5sQWplX-D8_2\tskateboard\nK6JHTga6VU8_0\tairplane\nK6SFafS3Zv8_0\tcar\nK6SFafS3Zv8_2\tcar\nK6jf51to7dU_0\thorse\nK6jf51to7dU_1\thorse\nK6sKjN_MOsE_1\tbear\nK6srgkSvZdw_1\tskateboard\nK6srgkSvZdw_2\tskateboard\nK6vEY0vOlSg_1\ttrain\nK66dqG9OJuo_1\tdog\nK66dqG9OJuo_0\tdog\nK6_WEh-eizw_1\tairplane\nK6_WEh-eizw_2\tairplane\nK6_WEh-eizw_4\tairplane\nK7uSHqISah0_0\ttrain\nK702Tx5vkp4_0\thorse\nK78iEUHTTZc_1\tcat\nK8aa-7brUTs_0\tbear\nK8vGdEhh_jU_0\tbicycle\nK81vEhukX4U_0\tmotorcycle\nK9LhqtvfZ10_0\tdog\nK9LhqtvfZ10_3\tdog\nK9LhqtvfZ10_4\tdog\nK9LhqtvfZ10_5\tdog\nK9TPOifKCmU_0\tmotorcycle\nK9hTkmr_71A_2\tcar\nK9jCx7G3_Mw_0\tknife\nK9kNamc2c5Y_1\tdog\nK9kNamc2c5Y_0\tdog\nK9wE7VzJD00_0\ttrain\nK-Dz6gr96Lo_0\tdog\nK-s8RPMLRw4_0\tbird\nK-s8RPMLRw4_2\tbird\nK-x3x3kGGqg_0\tdog\nK_PGa9Eo6mo_1\tdog\nK_VS3tyB-Cc_0\tperson\nK_Z28TO4stg_0\tbird\nK_h1L3P_j1M_0\tbird\nK_pO-MBS7lI_0\tdog\nK_qFWKniImU_0\tskateboard\nLAKF499FHX0_0\ttrain\nLAKF499FHX0_4\ttrain\nLAKF499FHX0_1\ttrain\nLAKF499FHX0_2\ttrain\nLAKF499FHX0_3\ttrain\nLARRHwtW8fE_1\tdog\nLAZoyKF7lbQ_0\ttruck\nLAZoyKF7lbQ_2\ttruck\nLAZoyKF7lbQ_3\ttruck\nLBJEbJfzvW4_1\tskateboard\nLBOXDMZvtBY_1\ttrain\nLBnsLkuQ8kE_0\tperson\nLBwm49n5rKo_0\tmotorcycle\nLB6fi4oTKvQ_2\tdog\nLB8Wc8hU4Hc_0\tairplane\nLCGZmNGyPhM_0\tboat\nLCghaNtVeM0_1\tknife\nLCjQb5zLTCs_0\ttrain\nLCoIwiCBlW4_0\tdog\nLCxiwbrpEFI_2\tbus\nLC5Qly11BZs_0\ttrain\nLC5q2G2pxT0_0\tbus\nLDEju5sQWOU_1\tbear\nLDH_eiO0aFE_0\tboat\nLDJ9xB-n5Sg_0\tdog\nLDJ9xB-n5Sg_1\tdog\nLDQiOOCMhs4_0\ttruck\nLDQqhsLKyjs_0\ttrain\nLDYFndJjRGA_0\tskateboard\nLDgpZlJ_QYM_0\tboat\nLDh-8GoBSLw_0\tbear\nLDlR_gDbVFk_0\tairplane\nLDvN2rB8p44_0\ttrain\nLD-8yzPoOIQ_0\tcar\nLD-8yzPoOIQ_1\tcar\nLD-8yzPoOIQ_2\tcar\nLEH61oMv2So_1\ttrain\nLEIkLV_S5yA_0\tcat\nLEP6ZOl5iw0_0\thorse\nLEUCQjNIm9E_0\tknife\nLEYBNQUwruU_0\tdog\nLEiolk6i9RI_0\thorse\nLEmU61Tdqxs_1\tmotorcycle\nLEverFsHygc_1\tairplane\nLE2ks85I17U_0\tbird\nLFDqskJozig_1\tskateboard\nLFMUePhHPAk_1\tcar\nLFZYYpjP3FA_0\tknife\nLF4xVBfV5SI_1\tbird\nLGRkVRP-RTs_0\tcar\nLGgzD_ng3aA_1\tbear\nLGrMlBi0l6Y_1\tboat\nLGuSLUeKcTo_0\tbird\nLG0w1oTdXgY_0\tbird\nLHEuYW96FG0_0\tbear\nLHEuYW96FG0_4\tbear\nLHbVe_bjGp0_2\tdog\nLHbVe_bjGp0_0\tdog\nLHbVe_bjGp0_1\tdog\nLHmvAqv6kYE_0\tzebra\njFneoJr36o8_0\tcar\njGCw13fkf0Q_2\tmotorcycle\njGPtq4pO8Ug_0\tcar\njGTNsTUkNUw_0\tcat\njGTr1LSaGGw_1\tbicycle\njGTr1LSaGGw_2\tbicycle\njGTr1LSaGGw_0\tbicycle\njGlNsqDOz8Y_0\thorse\njGqRX9IwGI0_8\tbear\njHK3JYa_Ypg_0\tumbrella\njHM867g1K8k_1\thorse\njHM867g1K8k_0\tperson\njHy5deaCjQE_0\tdog\njH_YxkU_JwE_0\tmotorcycle\njINuUqU6sJI_0\tdog\njIP9FdmB0_E_0\ttrain\njIbmC5sed8I_1\tairplane\njIjEX8I5SHo_1\tbird\njIjEX8I5SHo_2\tbird\njInMbuzvtiQ_0\tumbrella\njInMbuzvtiQ_1\tumbrella\njI0xgoZ8QDA_0\tboat\njI1Swlwj_wc_0\thorse\njJMefDe4r9w_1\tsk
ateboard\njJR-emvmi9s_0\tbear\njJR-emvmi9s_1\tbear\njJf_N_p-Gjo_1\tskateboard\njJnz3tS1uME_0\tmotorcycle\njKBU4c1AdSQ_0\tcat\njKv6Q1RRxVM_1\tboat\njLBSOa5iDgE_0\thorse\njLR7LmbNekc_0\tmotorcycle\njLXuZdAveV0_2\tboat\njLXuZdAveV0_0\tboat\njMNaKigE1eI_0\ttruck\njMNaKigE1eI_1\ttruck\njMVeJ3RbcH4_0\tcar\njMaYIgpjxlk_0\tdog\njMmjaxXWaUk_1\tbus\njMo01X2mBq0_0\tbus\njM79QETqts8_1\thorse\njNCq29f3J8Y_0\tairplane\njNE_FcqbQN8_0\tmotorcycle\njNJJgAg79KA_1\tairplane\njNJJgAg79KA_0\tairplane\njNKO9msLe34_1\tairplane\njNKO9msLe34_0\tairplane\njNSTcIQwl_g_3\ttrain\njNSTcIQwl_g_1\ttrain\njNSTcIQwl_g_2\ttrain\njNllRQ66Re4_3\tdog\njNn7v2MFg_U_0\ttruck\njNsEePln1_U_0\tbird\njNsEePln1_U_1\tbird\njNt8Vn-WKRI_1\thorse\njN-BXoM15Qs_0\tcat\njOQ0W0Z_-Uo_0\tdog\njOl4m5QdOZQ_0\tbus\njPaVdR2IRu8_0\tairplane\njPiVFMGvHbM_0\ttrain\njPiVFMGvHbM_1\ttrain\njPrY_Xz0CDM_0\tknife\njP5RhcwO4E4_1\tdog\njP7mwBStU3w_0\tdog\njQBc1CqjGOk_0\tskateboard\njQCrA8Bjbp8_0\tbird\njQXYSlXk7_c_3\tbear\njQXYSlXk7_c_1\tbear\njRIy_wUojcs_0\tcar\njRR6sU59uTo_0\tairplane\njRTkny0bdY0_2\tmotorcycle\njRTkny0bdY0_1\tmotorcycle\njRh5WphQGDI_0\thorse\njRqdnQ8HlwQ_0\tairplane\njR7eq8CAmbs_0\tairplane\njR-Cbp3qBJI_2\thorse\njR-Cbp3qBJI_0\thorse\njSS6b2iz2hk_0\tknife\njSk-3X-hjyg_1\tknife\njStwl7WfsVE_0\tskateboard\njTAz5HO8mQw_0\tcat\njTHDoLyfTLc_0\tdog\njTQ5A95TKw8_0\tcat\njTYsK4JKns8_0\tgiraffe\njT1mDaHStHU_0\ttrain\njUDnkkvVKNo_0\tairplane\nLIw68irBLtE_3\tairplane\nLIzgqx7Ykxw_0\tairplane\nLI286rLHd0I_0\tbird\nLJGQA810BtE_0\tbus\nLJJuw5mLJ4Q_0\tskateboard\nLJhCGLht3Rw_0\ttrain\nLJhCGLht3Rw_1\ttrain\nLKe9a7L3vkk_0\tbird\nLKhjmARDv7k_4\tbear\nLKhjmARDv7k_6\tbear\nLKoaXogFTbc_0\tdog\nLKyQ2fBNVmw_3\tskateboard\nLK2-EMocZQs_6\tdog\nLK2-EMocZQs_1\tdog\nLK2-EMocZQs_3\tdog\nLK9zoUrrEHc_0\tskateboard\nLLJiqe0d06I_0\ttrain\nLLOwSRx9hxo_0\tbird\nLLVr7tG42kw_0\tmotorcycle\nLLW1jx3S-Hw_0\ttrain\nLLjDNseEw0c_0\tskateboard\nLL_DiAJ71rc_0\tbird\nLMGo4BXG4Yw_8\tknife\nLMRH29tlDrM_0\tcat\nLMrDuKEYJ3k_0\ttruck\nLM1djNtENzA_0\tcat\nLNQHybwdHRk_0\tairplane\nLNX244qUx7M_0\tdog\nLNntRLW2bHA_3\tskateboard\nLNntRLW2bHA_0\tskateboard\nLNntRLW2bHA_2\tskateboard\nLN6DT1DOaTg_5\tskateboard\nLOBD9yc5YPM_1\tskateboard\nLOMTlGqGyHc_0\tmotorcycle\nLOjc-npcSjs_0\tairplane\nLOjc-npcSjs_2\tairplane\nLOjc-npcSjs_4\tairplane\nLOjc-npcSjs_9\tairplane\nLOlUKQgr7Qg_0\tboat\nLOosqz3z8Xw_0\ttrain\nLOzh9vxSHPg_0\tdog\nLPQv6LdOZHo_2\tmotorcycle\nLPQv6LdOZHo_1\tmotorcycle\nLPZjxIqs8Uw_2\tairplane\nLPd_Y8gk5uI_1\ttrain\nLPgmaebC-L0_2\tboat\nLPtcpZXDhHw_0\tknife\nLPvsAAlZI_8_1\tbus\nLP3a2L1ZCyg_2\tdog\nLP8dyCxmCrI_2\ttrain\nLQAF34GzpMY_0\tairplane\nLQO68Aj4ons_0\tcar\nLQRuelaTZd4_0\tbear\nLQRuelaTZd4_1\tbear\nLQT4GnnPhA8_1\tdog\nLQbQVeZrwEk_0\tmotorcycle\nLQdP4gNX9Aw_0\tbird\nLQjzonTrY2o_0\tbear\nLQr5vK-X1fQ_0\tcat\nLQ2EDJSNIN0_1\tdog\nLQ2EDJSNIN0_3\tdog\nLQ4z96EA6co_2\tbird\nLRSii99-QIo_1\tzebra\nLRgsl5_TJVg_2\tskateboard\nLRgsl5_TJVg_0\tskateboard\nLRgsl5_TJVg_1\tskateboard\nLRtLr32oPAw_0\tskateboard\nLR7IHIbXtrE_0\tbird\nLSE0KHhFxps_0\ttrain\nLSMKaXjXnhE_1\tboat\nLSi1i5lSUjA_0\tdog\nLSqIpguEI04_0\tmotorcycle\nLSqIpguEI04_1\tmotorcycle\nLSvVMD-SF48_1\tbus\nLS8qQoB3Uw8_0\tdog\nLS8qQoB3Uw8_1\tdog\nLTEyQSswTVI_0\tbus\nLTQPc_WVFOw_0\tairplane\nLTQPc_WVFOw_1\tairplane\nLTQPc_WVFOw_2\tairplane\nLTQPc_WVFOw_3\tairplane\nLTaExiLK2S0_2\tbear\nLTaExiLK2S0_3\tbear\nLTaExiLK2S0_4\tbear\nLTaExiLK2S0_6\tbear\nLTaExiLK2S0_7\tbear\nLTjSA_-Q5DU_1\tknife\nLTkuM5IoNV4_0\tmotorcycle\nLUCDeZOOhlg_0\tcat\nLUUYKUhaYZs_0\tbus\nLUjqWGI9KSo_2\ttruck\nLUphe242a5g_0\ttrain\nLU4-QjhixQU_0\tmotorcycle\nLU4-QjhixQU_1\tmotor
cycle\nLU__7PPUMTo_0\tskateboard\nLVCMA3LXlkc_0\tairplane\nLVfXvn7elFI_0\tperson\nLVfrWLnu7T8_0\ttrain\nLWHshdXjBCY_0\ttruck\nLWQhidgjZno_0\tmotorcycle\nLWRXboX1o5Y_0\tmotorcycle\nLWTYrbFCPl0_0\tdog\nLWY9Y2YVtHA_1\ttruck\njUQUg-qsfgI_0\tmotorcycle\njUWm1Mc1Tno_0\tairplane\njVEM2JpS4sE_0\ttruck\njVZhyibQ31g_0\tcat\njV9-Lr_rsf0_0\tbicycle\njWCpff7m0LE_1\tairplane\njWCpff7m0LE_8\tairplane\njWCpff7m0LE_0\tairplane\njWCpff7m0LE_2\tairplane\njWCpff7m0LE_10\tairplane\njWGulD3X0qw_0\tcar\njWIFscsXRmo_0\tskateboard\njWLv1BQ4PsA_0\tbear\njWawsbm6dCc_0\tbear\njWfItNlOURk_0\tmotorcycle\njWfItNlOURk_1\tmotorcycle\njWruD-mHxrQ_0\tcat\njW4VRs_uVZw_2\tairplane\njW4VRs_uVZw_5\tairplane\njW4VRs_uVZw_0\tairplane\njW4VRs_uVZw_4\tairplane\njXBBnV6cop0_0\tcar\njXDxesHRKAc_0\tumbrella\njXLUgu4rET0_1\tcat\njXkzrsfYgbs_0\tdog\njX84bwkb-r0_3\tbus\njYBgSw-woGw_2\tbear\njYIWAGlIq9c_0\tskateboard\njYZmjlzKhL8_1\tskateboard\njYhAd9FFxqI_0\tumbrella\njY37CiJCKJk_0\tcat\njY9ihstGQwU_0\tcat\njZWITYFghgA_0\tcat\njZZBR49_vR0_0\tmotorcycle\njZiuOZwq7gQ_0\tmotorcycle\njaS19NIXdrk_0\tmotorcycle\njaVgyhuxK_4_3\tskateboard\njaVgyhuxK_4_0\tskateboard\njalIqFA40pI_1\tmotorcycle\njalIqFA40pI_2\tmotorcycle\njaoXgM9c7u4_1\tcar\njaovVHNORuA_0\tcat\njauLT1ElBPc_1\ttrain\njauLT1ElBPc_2\ttrain\njbN4y-wz5-s_13\tgiraffe\njbN4y-wz5-s_1\tgiraffe\njbN4y-wz5-s_4\tgiraffe\njbN4y-wz5-s_5\tgiraffe\njbN4y-wz5-s_11\tgiraffe\njbhxM5eNgO0_0\ttrain\njboQE0Z0280_0\ttruck\njbrhKjPDzhE_1\ttrain\njbwSKNFH66s_0\tdog\njb23jXcxaHE_1\ttrain\njb23jXcxaHE_2\ttrain\njb23jXcxaHE_8\ttrain\njb23jXcxaHE_9\ttrain\njb3uct7NumU_0\ttrain\njb4crk58m88_0\tskateboard\njb4672rSRIs_0\tdog\njcLbvoEUbj0_0\tairplane\njc2fijpD8vI_0\tbicycle\njc-IKl7He7U_0\tknife\njduOxfYHRGQ_0\tperson\njeBcjSSkUhw_0\tcat\njeFFdyPLUts_1\tboat\njeWf_4ARan0_1\tbicycle\nje8cw_bajbc_1\tcat\njfENtrpYNKE_2\tbear\njfENtrpYNKE_1\tbear\njfixAXjax5I_1\tmotorcycle\njfixAXjax5I_2\tmotorcycle\njfixAXjax5I_0\tperson\njgAt3qPg7A8_2\ttruck\njgD77Vh-X28_0\tmotorcycle\njgGLyRuFOdk_0\tbus\njglg4qcOpWw_0\tskateboard\njg7I2TXyQ2Y_2\tbus\njhQ4iIJ42Yw_0\tcat\njhSH0EjNy0k_0\tcar\njhjKdc7FtE0_5\tairplane\njiAVTB1keAQ_0\tbicycle\njiCp6fAMISg_0\tcat\njiJWjndM8hI_0\tknife\njjDZnXMMhEA_0\ttrain\njjKsYbTw1qk_0\ttruck\njjNxX05CDNc_0\tbird\nLWv0LbGIDi8_0\tcar\nLWxkJ4fux_I_0\tknife\nLWy-Lhb3YEk_0\tbear\nLWy-Lhb3YEk_1\tbear\nLW3bZPt1qrw_5\tboat\nLW7XQWZjBIw_0\tdog\nLXLI-Bzcsf4_2\tknife\nLXLmpEVYE5E_0\ttrain\nLXgItdZ5DXo_0\tairplane\nLYLuXQRCIJ4_0\tcar\nLYXMPTRr40M_0\tdog\nLYXMPTRr40M_2\tdog\nLYmsSNBP634_0\tknife\nLY-hwswMG4g_0\tcat\nLZJjKCpcAWA_1\tknife\nLZ_qufxYP3I_0\tcat\nLaA51BrvHGw_1\ttruck\nLaA51BrvHGw_2\ttruck\nLam8oTdJids_0\tcar\nLanX2twvMmw_1\tairplane\nLanX2twvMmw_0\tairplane\nLan3os3aUl8_0\tboat\nLbC7nqh0Uyg_2\ttrain\nLbEPmGgzUIE_0\ttruck\nLbvEMq_DQTU_1\ttrain\nLbv8FZelQCM_0\ttruck\nLcD_I0Lkw3k_0\ttrain\nLcD_I0Lkw3k_2\ttrain\nLceJwFxs3q8_0\tdog\nLdEeXsYfzE0_0\tcar\nLdLtHx09mII_0\tskateboard\nLdL-cFGaJqU_0\tbird\nLdRX8-r4Cpc_0\tcar\nLdggIc_gAew_0\tmotorcycle\nLeAl87F6eS0_2\tumbrella\nLeOCD9rZsSI_0\tbird\nLeX-zqgzN3k_1\tbird\nLeljDmw2CGU_0\tskateboard\nLfAbAKrmMq0_6\tgiraffe\nLfAbAKrmMq0_7\tgiraffe\nLfAbAKrmMq0_1\tgiraffe\nLfatUu2cH3Y_0\tcar\nLfbQRAjsucU_0\tcat\nLf5ebV_NH78_0\ttrain\nLgVi03EiPlQ_2\ttrain\nLgVi03EiPlQ_0\ttrain\nLgZrI3dxws4_0\tmotorcycle\nLgrPr2OxWcw_0\tgiraffe\nLgyj-vOk72M_0\tumbrella\nLhdXtQ8SbGE_1\tbird\nLhgyObbNmLI_0\tbus\nLhhzzaKmVO4_2\tmotorcycle\nLhm6JF_1lQg_1\ttrain\nLhnNboAgtNg_0\tcat\nLhtrfEijGHU_0\tairplane\nLiMriWExmQM_0\tboat\nLiZxvVZfUdU_2\tumbrella\nLiwliE18fA4_0\tmotorcycle\nLiwliE18fA4_1\tmo
torcycle\nLiwliE18fA4_2\tmotorcycle\nLizh5Kae5Nk_2\tknife\nLizh5Kae5Nk_4\tknife\nLiznFL6_r2A_0\tmotorcycle\nLjLWamF9HyA_0\tgiraffe\nLjjGe9bnQ3Q_0\ttrain\nLj0zBxRWoIU_0\tskateboard\nLkFbAjpWRAw_1\tgiraffe\nLkFlT3d8MuQ_0\tairplane\nLkmioXgRyo4_0\tcat\nLk7Z-AUDCuQ_0\tcat\nLlA5ioDqRns_2\tbus\nLlA5ioDqRns_1\tbus\nLlNCPsiSjOU_0\tairplane\nLlS3_VvB4Nw_0\ttruck\nLlfRY71K2AU_0\ttruck\nLliRBHO1A_E_0\ttrain\nLlplZ9JJtQw_0\tdog\nLlplZ9JJtQw_2\tdog\nLmFx-lJ6-_M_1\ttruck\nLmR0Ur4owgw_0\tbicycle\nLmT8BFH5c7k_0\tumbrella\nLmYKmKucl28_0\ttruck\nLm4mghtFu-I_0\ttrain\nLm5GStt7KBw_0\ttruck\nLm5GStt7KBw_1\ttruck\nLnGeYd1AsoA_1\tbicycle\nLnKLql5jAXo_0\ttrain\nLnLlD-mNTtE_0\tbear\nLnPyjqgA37I_0\tgiraffe\nLndUw9o_3ME_0\tskateboard\nLnhmeU6oRBE_0\tbus\nLntuuj_mi9c_3\tknife\nLnyfbZ7-fP4_1\tumbrella\nLnyfbZ7-fP4_0\tumbrella\nLnyfbZ7-fP4_2\tumbrella\nLnyfbZ7-fP4_3\tumbrella\nLn_tNsQVuwc_0\tdog\nLomkA_DJyEM_1\tbird\nLo2GqBe8-Qc_0\tbus\nLo8Q0MdVi9A_1\tbear\nLo8ZEKusM1o_0\tdog\nLpXfY3oQDIc_0\tskateboard\nLpXfY3oQDIc_1\tskateboard\nLpnkxmohHZ8_1\tairplane\nLpt6bE36Uuw_0\ttrain\nLpt8i9V2MK0_1\ttrain\nLp88aaB29zE_0\tzebra\nLqOv_DqIWEk_0\tboat\nLqf8Q1pPNFg_1\tknife\nLrIVNsObdso_0\tbird\nLrKKU5rjq38_2\tzebra\nLr-9DI7T7JE_0\tbird\nLr-9DI7T7JE_6\tbird\nLsdHOclMPh4_0\tdog\nLshP_zqoBc0_0\tknife\nLsuQhEjteSE_0\tdog\nLtGXT385l_I_1\tdog\nLtabCE1oaCw_0\tbird\nLtt24ke9SIA_0\tbicycle\nLtyHCo5uPrQ_0\tumbrella\nLuA9aRIic7s_1\tbird\nLuM1ie5yy70_1\tumbrella\nLuM1ie5yy70_3\tumbrella\nLuQiLJ7-B-8_0\tcat\nLuQxQm7FqD0_0\tcat\nLua1id9drCA_1\tgiraffe\nLuv05fYUS1Y_0\tskateboard\nLu6WLASNWIM_0\ttruck\nLu6rn2EQSEM_0\tmotorcycle\nLu6rn2EQSEM_2\tmotorcycle\nLvPDEznT9Yo_1\tbird\nLvgprOdn070_2\ttruck\nLvhxnDPWfXw_0\tknife\nLvv3Ei45X_4_1\tknife\nLvz3fP96sew_0\tdog\nLv7JaIYWXV4_1\tdog\nLv8u2aPVHmc_2\tbird\nLwChAirlUno_0\tskateboard\nLwMepJ25LgQ_0\tbear\nLwPB4qPCelk_2\tcar\nLwPB4qPCelk_0\tcar\nLwgyjrFlc5M_0\tbicycle\nLwiTfwL3bCs_0\tcar\nLxAhZAbzn7k_2\tbird\nLxjlAGLccRw_0\tmotorcycle\nLxlu3NusDCM_0\tbicycle\nLx0IybSITTc_0\tboat\nLx25sZ_GeqA_0\tmotorcycle\nLyOo_B0KLAs_0\tcar\nLyReFCR-oq8_1\tbicycle\nLyReFCR-oq8_0\tbicycle\nLyiT3ute8W0_0\tbird\nLyiT3ute8W0_1\tbird\nLyiT3ute8W0_3\tbird\nLyiT3ute8W0_4\tbird\nLyiT3ute8W0_5\tbird\nLy-uIzZCdn0_1\tbus\nLzMxggGTH1I_0\tmotorcycle\nLzP0t153jKw_0\tskateboard\nLzY_TxIbKpw_0\ttrain\nLzk6uj8FMsE_0\tcat\nLzp-Yej0-7E_1\tbird\nLztNNlg_fXs_0\tknife\nLz0Gxxs0FUE_2\tbus\nL0IXFlnu6Qg_0\tmotorcycle\nL0US3Aiu1q0_0\ttruck\nL0kRKO8zzsI_0\tbird\nL0kRKO8zzsI_3\tbird\nL0kRKO8zzsI_1\tbird\nL1EZ_RVwD8E_0\tcat\nL1LQOPj7NBs_0\ttruck\nL1U2YrjRao0_0\tbear\nL1VgJBGpBz8_0\tbird\nL1iiOGDSByA_0\tmotorcycle\nL19ZzBwAHrU_0\tknife\nL1_86Xd176w_3\tknife\nL2Efv5kJpc0_0\tskateboard\nL2FE5Lr0wnY_3\tbicycle\nL2FE5Lr0wnY_4\tbicycle\njjZl3tMuO6w_0\tdog\njjcoVigCzgg_0\tskateboard\njjk9P9gQq3E_0\tbus\njj-p0K2XoQY_0\tboat\njj_pv9SFrnU_1\tumbrella\njj_pv9SFrnU_0\tumbrella\njkGvuOC8azU_0\tmotorcycle\njkGvuOC8azU_1\tmotorcycle\njkKU7T0wpj4_0\tbus\njkdEq1MRNws_0\tcat\njkkk9vsCYVA_0\tcar\njkqKyvow-ww_1\tskateboard\njkqKyvow-ww_0\tskateboard\njk2gGx6dIWA_0\ttrain\njlA3_oF9j-Q_0\tmotorcycle\njluiJgeyCa4_0\ttruck\njluiJgeyCa4_1\ttruck\njlu4Ry8dDus_0\tcat\njmXmA9egY4s_0\tbird\njmXmA9egY4s_1\tbird\njmeVwD4p83w_0\tumbrella\njm8AZ0aSF0U_0\tmotorcycle\njnD_9KMnzpk_2\tskateboard\njnD_9KMnzpk_1\tskateboard\njnQYikiCbAM_0\tbicycle\njnQgVTaiaXk_0\ttrain\njnSm3vCtu1k_0\tdog\njnu28BEM2j0_0\tbird\njnwQHd-sNW0_0\tcat\njous_VGiSK0_0\tbicycle\njoxEhiwL-qg_1\tskateboard\njpBcdceCHgY_0\tskateboard\njpCdMdRzmuY_0\tcat\njpuFdyVJJwQ_0\tmotorcycle\njpuFdyVJJwQ_1\tmotorcycle\njpyidnScqN
Q_0\tumbrella\njpzKefnhMA4_0\ttrain\njqHtlrHk5Cw_0\tdog\njqO4FvS_v54_0\tboat\njqRXcc7rPaY_0\tcat\njqWXHWqSVX8_0\ttrain\njqu6Gjc1hCE_0\tperson\njq9ZPuTO7Rc_0\tumbrella\njrAyEPgy1LM_1\ttruck\njrLRiCFtlvY_0\tskateboard\njrNGiQLJ0ug_1\ttrain\njrg8oKSN6bk_1\tbird\njrg8oKSN6bk_0\tbird\njsJprPZCPvA_0\tboat\njskm6kDOao0_0\tcat\njslKL8yQ7v4_0\tbird\njslKL8yQ7v4_1\tbird\njsp_sWu7g7Q_1\tbear\njsx0cE948y8_2\ttrain\njtQGgQPHofk_0\tboat\njtWerSK0atA_0\tumbrella\njtqUFmuGnVs_0\tperson\njtx5yVxuLzA_0\tbicycle\njtx5yVxuLzA_2\tbicycle\njuC5lVOX-R8_0\tbear\njuC5lVOX-R8_1\tbear\njuMoEfLbbI4_11\tbicycle\njuUIMSiDGm0_0\tumbrella\njuownJlkGfA_0\ttrain\nju08Y0j4rAI_1\tcar\njvKKm9UbcbE_0\tcat\njvKqk7Yfq5Q_0\ttruck\njvdYM-W5Kmo_2\tbear\njvxjOOQa_JQ_3\ttruck\njwxSjxJVyOc_0\tdog\njxIyftPYPsc_0\tcat\njxIyftPYPsc_1\tcat\njxlDJ0D2Tec_0\tbicycle\njxn5iX8buaE_0\ttruck\nL2XOsdnKegA_0\tdog\nL2bV5Mh6tLM_0\tdog\nL2e6nVyZ33k_0\tcar\nL2gSKheIL48_0\tdog\nL2zsyBTtcqE_0\tbird\nL21bM4j4bEc_0\tmotorcycle\nL21bM4j4bEc_4\tmotorcycle\nL21sWlIIkHA_1\tskateboard\nL28I6_ASmq0_0\tmotorcycle\nL3F2ir5MPj4_3\tskateboard\nL3Q42kZ8Ap8_0\tbus\nL3oyk4iYySM_0\tboat\nL3urWJiuom8_0\tbear\nL32hlxmCYZU_3\tbicycle\nL32hlxmCYZU_6\tbicycle\nL32hlxmCYZU_7\tbicycle\nL32hlxmCYZU_14\tbicycle\nL4NZ3vAx87A_0\tboat\nL4kK9gTKA3Q_2\tbear\nL4w-P2UsvBE_0\tbird\nL5VC4bXm6Kc_0\tdog\nL508o9A8028_0\tbicycle\nL52ZiKJ5NLM_0\ttruck\nL5499EWzDaQ_0\tmotorcycle\nL6QaXTuDftA_0\tbird\nL6vLixMpRZg_1\tdog\nL6vLixMpRZg_0\tperson\nL63p00d7BPY_0\tcar\nL7TR8yCVhN0_0\tcat\nL7ZTQMPeHYo_1\tknife\nL7iHAg6bHw4_0\tbicycle\nL7rQQ4IVPrU_1\tskateboard\nL70Zv9DFAhc_0\tskateboard\nL71JgB-L1mA_0\tmotorcycle\nL779-Nw9GV4_0\tcat\nL780lAoEC2M_0\tgiraffe\nL780lAoEC2M_1\tgiraffe\nL8H_7qqaEOM_1\tmotorcycle\nL8SF7xF6Ucs_8\tbird\nL8h9dw2kYRA_2\tknife\nL9EAUBlNvLU_1\ttruck\nL9LWOPIuvcE_0\ttrain\nL9L-OlYNdL0_6\tknife\nL9Tx4-RNDqo_2\tmotorcycle\nL9Tx4-RNDqo_3\tmotorcycle\nL9Tx4-RNDqo_1\tmotorcycle\nL9Vt1klujtA_0\tdog\nL90g72YGdVA_0\tcat\nL97eqv7bBCE_0\tdog\nL985IUAQ8u8_1\tskateboard\nL-S4CNhlvlM_0\tcat\nL-w35NTF7vA_0\tcar\nL-0JgkugTvw_0\tgiraffe\nL_AcMGC96O8_0\tmotorcycle\nL_ZdaWupJcU_1\tboat\nL_xPWB4viT8_1\tdog\nL_xPWB4viT8_0\tdog\nMAJonEdmXNA_0\ttruck\nMAVqUxAjlbg_0\tskateboard\nMBAPF4RVq7E_0\tcar\nMBLHIupmPNk_2\ttruck\nMBLHIupmPNk_5\ttruck\nMBl4bkFRZUY_2\ttruck\nMBl4bkFRZUY_0\ttruck\nMBuwlS32gjE_0\tdog\nMC8Lal5Lp5Y_0\tcat\nMC-KkFD07Ts_0\tdog\nMDxAuy6D1ks_0\tskateboard\nMD5P0EFFnUQ_1\tskateboard\nMD8RTKTEaM0_1\tmotorcycle\nMEi_ikuUJoQ_0\tskateboard\nME0CETCuaK0_0\tboat\njyY5W5HiWUQ_1\tcat\njyeqCulSuVM_0\ttruck\njy_Dr_R-svo_1\tumbrella\njy_Dr_R-svo_3\tumbrella\njzRWRRcWffo_0\tskateboard\nj0BXwDs11NY_0\ttrain\nj0OALCZbAJQ_0\tbus\nj0ii12pbeag_0\tknife\nj0yk2O6HAHA_0\tbird\nj0_9iwi_dm8_0\tdog\nj1CQLHBLwew_0\tcar\nj1NePJe1agU_0\tbird\nj1XwtnPy1Ik_1\tbear\nj1rU13Z_fxc_0\tbicycle\nj1utZs4pDTc_0\tbicycle\nj10ev-4-0Fg_0\tmotorcycle\nj11_jPnp4Pc_0\tcat\nj2-VEpDwbyo_0\tdog\nj3X6elDpZ-Q_0\tbicycle\nj4K9kM9p16o_1\tbear\nj4Qv6RH4lPk_1\tbird\nj4U8EcQ8K34_0\tumbrella\nj4daTphUuBw_0\tcat\nj4mpJ3QE8VU_1\tcat\nj4ofs57G2Uk_0\tskateboard\nj4rMKhohDps_0\tbicycle\nj4zZbJTAcC4_0\ttrain\nj4zZbJTAcC4_1\ttrain\nj5EP2UNErRE_0\tdog\nj5Evt1HJ2ck_0\tskateboard\nj5ayq3AbImg_2\tbird\nj5uxE5IUOhk_0\tdog\nj6GdrMPrcNU_0\ttrain\nj6P1j6Ed1Hg_0\tboat\nj6Ybo1yk-lE_0\tmotorcycle\nj7v1htyJtdo_1\tboat\nj7v1htyJtdo_2\tboat\nj7xvqf1mrUo_2\tbird\nj707fRdtbEE_0\ttrain\nj8jip_gthjs_0\ttrain\nj8s5sMFYoiM_3\ttrain\nj8s5sMFYoiM_1\ttrain\nj82ZCaABxl8_0\ttruck\nj8-maioFCxo_2\tboat\nj924hdZilyY_0\tcat\nj-MwElKg8Tw_0\tcat\nj-VN0PFvkDg_0\ttrain\nj-a26pZGsKA_5
\tbicycle\nj-r3lQdwYeI_0\tboat\nj-r3lQdwYeI_3\tboat\nj-x8lbwsObQ_0\tmotorcycle\nj-0kVn7sEvQ_0\tmotorcycle\nj-0-IDS-OD4_1\ttruck\nj_DE_vsqSZg_0\tmotorcycle\nj_D7oxUpZqs_0\tbicycle\nj_D7oxUpZqs_1\tbicycle\nj_FCzH1rLDw_0\ttrain\nkABwo7h7ILg_18\tbicycle\nkABwo7h7ILg_13\tbicycle\nkANh1n3sh5M_0\tgiraffe\nkANh1n3sh5M_3\tgiraffe\nkAekmn2pgpc_0\tskateboard\nkAekmn2pgpc_1\tskateboard\nkAhVhIYl-GE_0\tmotorcycle\nkAhVhIYl-GE_1\tmotorcycle\nMFw-_3fTBzA_0\tbicycle\nMF06s9T8iJA_0\tskateboard\nMF06s9T8iJA_1\tskateboard\nMGFx6Irt70E_0\tknife\nMGMJ6ocyKXQ_2\tboat\nMGQw41RhBfc_0\tmotorcycle\nMG9MouhNLjY_1\tknife\nMG96iokcNoY_0\tcar\nMG96iokcNoY_1\tcar\nMHIEOK-O3Q4_1\tbird\nMHT9BbNzNJo_0\tknife\nMHqZCkvaub8_1\tcar\nMHsxwUMk-_s_8\tumbrella\nMIHg2KAYh5c_0\ttrain\nMIHg2KAYh5c_3\ttrain\nMIHg2KAYh5c_1\ttrain\nMIKCpSFDh4M_0\tbear\nMIKCpSFDh4M_1\tbear\nMIKCpSFDh4M_2\tbear\nMIKCpSFDh4M_3\tbear\nMInom2mFpwg_0\tskateboard\nMI2d7Rd8_Zs_9\tbicycle\nMI2d7Rd8_Zs_10\tbicycle\nMI2d7Rd8_Zs_2\tbicycle\nMI2d7Rd8_Zs_4\tbicycle\nMI2d7Rd8_Zs_5\tbicycle\nMJOztUhgARo_1\tbear\nMJvPtT5tzRI_0\tmotorcycle\nMJ3I-JfOG48_0\ttrain\nMJ6b6iOY7CI_0\tcar\nMK2aqzY-UTQ_0\tcat\nMLXY5iff2rU_0\ttruck\nMLZ5bpXr5fk_0\tbicycle\nMLrWgAcIumk_3\tknife\nMLrWgAcIumk_1\tknife\nMLtRUMzqhDk_1\tdog\nMLwCW5HBfWQ_0\tbicycle\nMLwCW5HBfWQ_1\tbicycle\nMLyrsP65yc8_0\tcat\nMMGw177uo60_8\tbicycle\nMMGw177uo60_11\tbicycle\nMMGw177uo60_0\tbicycle\nMMGw177uo60_1\tbicycle\nMMGw177uo60_2\tbicycle\nMMGw177uo60_4\tbicycle\nMMGw177uo60_6\tbicycle\nMMX4my6X-xg_0\tcar\nMMfLN7_khoc_0\tskateboard\nMMwk9bxedYo_1\tbird\nMMxfwNbWaxc_0\tbus\nMMxfwNbWaxc_1\tbus\nMMzNcR3qtX0_0\tknife\nMM9D2A52FM4_0\tcat\nMNBfv2S-yco_0\tdog\nMNDWyaUDfAM_0\ttruck\nMNKwR4IK04k_0\tbus\nMNnYExmY67E_0\tbus\nMNnYExmY67E_3\tbus\nMNuhuq3FP5Q_0\tmotorcycle\nMNuhuq3FP5Q_1\tmotorcycle\nMNuhuq3FP5Q_2\tmotorcycle\nMORtJq8MelU_2\tdog\nMORtJq8MelU_3\tdog\nMORtJq8MelU_0\tdog\nMORtJq8MelU_1\tdog\nMOR6ErlJIp8_0\tgiraffe\nMOcTGHSkER0_0\tcar\nMOgN13g3SzU_1\tmotorcycle\nMOxIwc0MqZ0_1\tcar\nMO5aNU1mc1s_2\tboat\nMPQqmw9gvF0_0\tdog\nMP8ETGMyhnU_0\tdog\nMQAJWDp31ag_0\tcat\nMQimJolkMRI_0\tcat\nMQ5mTW70Ebs_1\ttrain\nMRzphcX41T8_0\tumbrella\nMSWR-YqRwqk_0\tcat\nMSjYJFNM2HU_0\tboat\nMSjYJFNM2HU_3\tboat\nMSonF1662RI_3\tskateboard\nMSp3-aHmNP4_1\ttruck\nMSp3-aHmNP4_2\ttruck\nMSvmSEk-UJ0_0\tbicycle\nMSxdHgV7e6o_0\tcar\nMS7Emoy0Foc_1\tboat\nMTDl42dubw8_0\tbear\nMTr54KYSQBw_0\tperson\nMTvLNcYmHhQ_0\tcar\nMT-VkX2ZUYs_1\tbear\nMT-VkX2ZUYs_2\tbear\nMT_GWiXfC2k_0\tknife\nMUAuC-rgc9Q_0\tdog\nMUPAcFVQjlE_0\tzebra\nkAkZoxVhM3I_4\ttrain\nkAkZoxVhM3I_1\ttrain\nkAkZoxVhM3I_2\ttrain\nkAkZoxVhM3I_3\ttrain\nkAmtMpdj5F8_0\tdog\nkAsA28fm6YM_0\tdog\nkBZZqBNk68M_0\tcat\nkBg_1xTx4Dw_0\tcar\nkBsc-5sxeTw_1\tknife\nkBsc-5sxeTw_3\tknife\nkCWupS0PNHk_0\tcar\nkC0y-y4Y9zQ_0\tknife\nkC4_7iM24Uw_0\ttruck\nkC7fdR62Lto_0\tperson\nkDU_m-Zhi-I_2\tbicycle\nkDsGVRUxg9s_3\tbicycle\nkDsGVRUxg9s_4\tbicycle\nkDvYbh9_fvY_0\tdog\nkDwVR3eWyA4_0\ttrain\nkD0shq5M7Xw_1\tskateboard\nkD_zeOiIsTM_0\ttrain\nkEw-F2KrxLQ_0\ttrain\nkE3cb1gtxpM_0\tperson\nkFihVzuPlGI_0\ttruck\nkF9uWuyPP8g_0\tskateboard\nkGB7yQn8jpQ_0\tbicycle\nkGkvBOa6Ao0_0\tmotorcycle\nkHCbADkGOsE_0\tskateboard\nkHEfe-TDtS0_0\tmotorcycle\nkHkZCi873e4_1\tmotorcycle\nkH2Vmad_zzc_0\ttrain\nkH9YVTvwmpM_0\tbicycle\nkIGuIdHDwIw_0\ttruck\nkIasEX-cJb8_0\tcat\nkIqavvGxvh0_0\tbird\nkIyZZm3zk5M_0\ttrain\nkIyZZm3zk5M_1\ttrain\nkIyZZm3zk5M_2\ttrain\nkI14RuB6ab4_1\tboat\nkI9E5m5l4Uo_2\tbird\nkJFQOFR0l0w_0\tmotorcycle\nkJJuX1cGFYg_0\ttruck\nkJJuX1cGFYg_3\ttruck\nkJR59i4f5HA_0\ttrain\nkJR59i4f5HA_2\ttrain\nkJR59i4f5HA_4\ttrain\nkJR59i4f
5HA_1\ttrain\nkJUDpKKsNQ8_3\tboat\nkJYZ-XE8ZEQ_0\tcat\nkJuBcbws_zM_2\tcar\nkJuuymSuBLA_3\tboat\nkJ2eEJ07dR8_0\tcat\nkJ4rlYx4HDQ_0\tmotorcycle\nkKJAqMzsMHo_0\ttrain\nkKOKJLrWCro_0\tmotorcycle\nkKSyjiL5foc_0\tskateboard\nkKTvKA8cd-c_0\tbird\nkKTvKA8cd-c_2\tbird\nkKeaUBfwuG4_0\tdog\nkKfiOXnjX0E_1\tbird\nkKtawdL8xDU_0\tumbrella\nkLL_YMFYoQw_1\tcar\nkLL_YMFYoQw_3\tcar\nkLgtAl-xGI0_0\tbus\nkL3r_JUstGU_0\tbus\nkL7sfsNuNVw_0\tgiraffe\nkL7sfsNuNVw_1\tgiraffe\nkL777xHctO4_0\ttruck\nkMMe5H6THlA_1\tboat\nkMuQLvHlZM8_1\tskateboard\nkMuQLvHlZM8_2\tskateboard\nkM3Ml3gsG1g_0\tboat\nkM3yM5qONQc_0\tperson\nkNNLDq_wPc4_0\tdog\nkNQYLVUS5ag_1\ttrain\nkNQYLVUS5ag_0\ttrain\nkNTqRDpy6Jg_0\tbicycle\nkNVh6uD0bMs_0\tcar\nkNlVF3ROFLs_0\tdog\nkOOlwQ0DrQU_1\tcat\nkOjjXFA4JLo_0\tbicycle\nkOksVTxs6S0_0\ttruck\nkPEf41FB6w4_2\tbear\nkPH88UubFMg_0\tbird\nkPLn0enV644_0\tmotorcycle\nkPPya6oadAk_0\ttruck\nkPSuwjI94G8_1\tbus\nkP4KkSrY81s_0\tmotorcycle\nkP4KkSrY81s_1\tmotorcycle\nkP7xV2Efw9c_0\tcar\nkQBqt_vvAUc_0\ttruck\nkQHn-cRLiDk_1\tcat\nMVG65Om9g1k_0\tcat\nMVG65Om9g1k_1\tcat\nMVPQRjLFz6E_0\tboat\nMVRf770zXL0_0\tbus\nMVZinfPagDI_0\tbicycle\nMVhsNNsDFWo_0\tknife\nMVxJBHYueGI_0\tboat\nMVxJBHYueGI_1\tboat\nMV5174rsbEY_0\tbus\nMV-CnX4Gf7A_0\ttruck\nMWGRoXhqRgQ_0\tboat\nMW78cTfzq0c_0\tcat\nMXGO41E37k0_1\ttrain\nMXVOVBJlezc_1\ttrain\nMXW5J8Fq8aw_0\tbicycle\nMYW0loI0g8M_0\tdog\nMZJtj9J3P2w_0\tknife\nMZU8lpmJhxg_0\tbus\nMZaYMDyaATI_5\tskateboard\nMZaYMDyaATI_0\tskateboard\nMZfxKiKSuFU_0\ttrain\nMZfxKiKSuFU_1\ttrain\nMZfxKiKSuFU_2\ttrain\nMZr4cAj7j28_0\tmotorcycle\nMZtheeh470g_0\tcar\nMZxz9C8nBdA_0\tbus\nMZ4A6ItKCn0_2\tknife\nMaApAnpbJwE_0\tmotorcycle\nMaNGPVuxXqo_0\tbicycle\nMaUrOzoC1qE_0\tmotorcycle\nMaV9LY8Yf7c_1\tskateboard\nMaeWb_sv_KU_9\tbus\nMaeWb_sv_KU_10\tbus\nMaeWb_sv_KU_1\tbus\nMaeWb_sv_KU_7\tbus\nMaeWb_sv_KU_8\tbus\nMalEpweFuSM_0\tmotorcycle\nMarA93dcZrA_0\ttrain\nMbCJqlLjY_o_2\tknife\nMbK94OERQUw_1\tbicycle\nMbK-28LCQ1g_0\tboat\nMcV3_FGrKNw_1\tboat\nMccB4r2uPG8_2\tbus\nMctKaOAWQ2g_0\tskateboard\nMc_qufFsRZQ_0\ttrain\nMdP8tqMgy-c_0\tboat\nMdcfoMlgxyI_0\tboat\nMdcfoMlgxyI_7\tboat\nMdcfoMlgxyI_6\tboat\nMeGIovLiBUs_0\tcat\nMeNT1BqRoSk_0\tskateboard\nMeR6T05EfeY_4\ttrain\nMeR6T05EfeY_5\ttrain\nMedPaDPXclw_0\ttrain\nMe6y3gzfhGA_1\tcat\nMe7wQZBbtkw_1\ttruck\nMe9X6zA_WSI_2\tcar\nMe9X6zA_WSI_3\tcar\nMe9X6zA_WSI_0\tcar\nMe9X6zA_WSI_1\tcar\nMfEA9RwWf8s_1\tcar\nMfKpwmhyptQ_6\tknife\nMfQe_WreL6U_0\tcat\nMfVLnZLXmvw_0\tboat\nMfYYHsKxgn0_0\tcat\nMfYYHsKxgn0_1\tcat\nMfaYiIkR0D8_10\tdog\nMfe3mmOd7co_0\tskateboard\nMflUSzEyPQA_0\tdog\nMf1njOx66R4_0\tknife\nMf1njOx66R4_1\tknife\nMgR0ON5CM-E_1\tdog\nMgR0ON5CM-E_0\tdog\nMg7Ve43Durw_0\tzebra\nMg9oRrgGKv0_0\tskateboard\nMhFgGvNvIPU_1\tmotorcycle\nMhOdsv74XK4_0\tbicycle\nMhPIl5JGvTQ_2\tdog\nMhdkxaMWwb4_0\tdog\nMhfYe7VajGQ_1\ttrain\nMijD0ZqMorA_3\tbear\nMijD0ZqMorA_4\tbear\nMixmJ2mkl18_5\tmotorcycle\nkQhvp8FqRRI_0\tmotorcycle\nkQ0WAbN3uvE_2\tbicycle\nkQ0qYUhkgXE_0\tzebra\nkQ0qYUhkgXE_2\tzebra\nkQ27FYyayCg_0\tumbrella\nkQ9C8T343Bg_0\tumbrella\nkQ97WPM3Qw4_0\tskateboard\nkROqNf1kadg_0\tbicycle\nkRWaghM9Bng_4\tknife\nkRYejzNzz-k_0\tbird\nkRYejzNzz-k_2\tbird\nkRYejzNzz-k_5\tbird\nkRtAJBnrb0o_0\tcat\nkSnUCbQ4k4c_1\tgiraffe\nkSxPGqWydhQ_0\tcar\nkSxPGqWydhQ_1\tcar\nkTBAPJCn4AI_1\tcar\nkTNOY900Hbk_0\tcat\nkTVuc-2UjPI_0\tumbrella\nkTbS3XR-Xhc_7\tbear\nkTdT3aGZVmo_0\ttrain\nkTm1R3GaJzg_1\tumbrella\nkTyJyGREDR8_0\tboat\nkUX28ytNCwc_0\tcar\nkUcErGH2rjs_0\tdog\nkU8IsLpAlXg_0\tmotorcycle\nkU8IsLpAlXg_1\tmotorcycle\nkVCic6S6ITo_0\tknife\nkVmUxntjOEk_1\tskateboard\nkVxw5-K9zZk_0\tmotorcycle\nkVyJVrTWLwo_0\tcat\nk
VzNGKIHA44_5\tgiraffe\nkVzNGKIHA44_2\tgiraffe\nkVzNGKIHA44_3\tgiraffe\nkVzNGKIHA44_4\tgiraffe\nkWHw0OdDAes_0\tboat\nkWHw0OdDAes_1\tboat\nkWo2PlJB2Nc_0\tmotorcycle\nkWxJX4oVzMo_3\ttrain\nkXKTNNclCns_0\tdog\nkXOYPLKJDdI_0\tknife\nkXOYPLKJDdI_2\tknife\nkXVHu_jzgek_0\tknife\nkXj4YpwnHVs_0\tcar\nkXliGVQWoAE_0\tmotorcycle\nkXwzICrP2CA_1\tdog\nkX-rqtb_n5w_0\tboat\nkYAGyQOUOAw_5\ttrain\nkYAGyQOUOAw_6\ttrain\nkYAGyQOUOAw_9\ttrain\nkYRvBDpWk_0_0\tskateboard\nkYd1dxkZ7Q8_0\tdog\nkYh89aM71_c_0\tbicycle\nkYie2clM8Jg_0\tmotorcycle\nkYjiRbFWFuE_0\tumbrella\nkYwzLhWdjYc_0\tbird\nkY1mYWiL24M_2\ttrain\nkY1mYWiL24M_11\ttrain\nkY1mYWiL24M_0\ttrain\nkY1mYWiL24M_1\ttrain\nkY1mYWiL24M_3\ttrain\nkY1mYWiL24M_4\ttrain\nkY1mYWiL24M_5\ttrain\nkY9lrTOcuxY_1\tknife\nkZNZbhh6P3g_0\tcat\nkZrG7mMww7I_0\ttruck\nkZrgKUm3pUs_0\tboat\nkZ1L8FBg_P4_0\tcat\nkZ3A6bY6RHo_0\tmotorcycle\nkaKhLfdT3z4_0\ttruck\nkaNpALWiNSQ_0\tcar\nkadq7fGv_zg_1\tmotorcycle\nkao854-T3zw_0\tbear\nkaxFMN_9CfM_0\tbear\nkaxFMN_9CfM_1\tbear\nkazbC0JbsUY_1\tboat\nkazbC0JbsUY_0\tboat\nka1HMN9Mxho_1\tcar\nka8YGdEujsQ_0\tmotorcycle\nkbEenS2dRTc_0\tcat\nkbF3h-YQ7m8_0\tskateboard\nkbuWFd9Vthc_1\tumbrella\nkb2LQHXd2zk_0\tcar\nkb-A8wbnvQg_0\tbicycle\nkcBIvi6fhUo_1\tbus\nkcTwHA-N1cg_0\tbird\nkcip1032v3E_1\tskateboard\nkco1LYK4z_w_0\tperson\nkdIBzH30zKA_0\tdog\nkdIBzH30zKA_1\tdog\nkdP5V_afg7E_0\tskateboard\nkdRLqCUbWts_0\tbird\nkdUrK5I-cNo_0\tcar\nkdU-XJEwZsQ_1\tbird\nkd3DLyL1JMw_0\tbicycle\nkeGrBBWcGE4_1\tbus\nkeGrBBWcGE4_0\tbus\nkePvCa53REA_0\tgiraffe\nkePvCa53REA_1\tgiraffe\nkea2UOTXlhs_0\tcat\nkea4eM8Blz8_0\tdog\nketFGT3U5D0_0\tbicycle\nkexKkPOprms_0\tcat\nke3yWKL94kE_0\tskateboard\nMi4HJYsPBPk_0\tskateboard\nMjGAi_5coGY_0\tbicycle\nMjGAi_5coGY_7\tbicycle\nMjGAi_5coGY_5\tbicycle\nMjGAi_5coGY_6\tbicycle\nMjxkMQcgRss_1\tcar\nMkF-jfvzRJU_0\tbus\nMkGLvilh-P4_2\tdog\nMkIK8kdqU2I_0\tmotorcycle\nMkQzgwai9zk_0\tzebra\nMkYtT0L4_3A_0\ttruck\nMktDGOflp1w_0\ttruck\nMktDGOflp1w_1\ttruck\nMk82qF_xfzI_1\tmotorcycle\nMk9tGnGNkkE_0\tbird\nMlLHwysBUiY_0\tknife\nMlVr20XSJMY_1\tdog\nMmQIeOEPu9g_2\tskateboard\nMmQIeOEPu9g_0\tskateboard\nMmQIeOEPu9g_1\tskateboard\nMnE1EjTWbTA_2\tskateboard\nMnGGl7pusvI_0\tmotorcycle\nMnGGl7pusvI_1\tmotorcycle\nMnd7aZxjoEg_0\tbird\nMnvqegl_fME_1\tcar\nMnvqegl_fME_3\tcar\nMnvqegl_fME_8\tcar\nMnyV8-43fRY_0\tbicycle\nMn2Nul_w66I_1\tmotorcycle\nMn2Nul_w66I_3\tmotorcycle\nMn2_fRbVluE_0\tknife\nMoHDZuwBO4E_0\tcat\nMog-qUf6B1c_1\tcat\nMo6Q7lGmAw0_0\tskateboard\nMp42DoVxbWY_0\tmotorcycle\nMp91b_edytM_1\tdog\nMp91b_edytM_0\tdog\nMqAlMygAZto_0\tcat\nMqPKFAIxZpE_0\tdog\nMqlxERdGjdg_0\tmotorcycle\nMqvfJOEW4oE_0\tcat\nMrsXy6DL4DA_0\ttruck\nMrssB6CtGrM_1\tgiraffe\nMrvbaDZm6gY_7\tknife\nMrvbaDZm6gY_8\tknife\nMrwi7WoPJSs_0\tcat\nMrxYHk0ghfM_0\tboat\nMr1A4et0ESg_0\tbird\nMsFvL8N-3ds_0\tumbrella\nMsQJkEOyREY_0\tbicycle\nMsY_zz2OeKU_0\tmotorcycle\nMs8x8pjN7Fw_1\tbicycle\nMs8x8pjN7Fw_0\tbicycle\nMtIjkcXspsU_2\tmotorcycle\nMtfpgvzOlW8_0\tperson\nMtiQjguNpH0_2\tboat\nMtiQjguNpH0_0\tboat\nMt_4bFjyYuU_0\tcat\nMuLk_dOouJY_0\tknife\nMuOG8PoK21o_0\tbus\nMuVtFYK_nH0_0\tbird\nMuYixry0epc_2\tmotorcycle\nMuYixry0epc_0\tmotorcycle\nMuYixry0epc_1\tmotorcycle\nMu51W-lkSEc_0\tcar\nMvIYOnRinSo_0\tbicycle\nMvxRpbl0BBk_0\tbus\nMv6v4w7VDFk_1\tcar\nMv_9l8fWiP4_0\ttruck\nMwAM4o2GCuM_0\tcar\nMwHQb6ZryRA_0\tskateboard\nMwIKOqSMRwk_0\tcat\nMwLnGflxcqc_2\tzebra\nMwNsM6f6fNY_3\tbicycle\nMwNsM6f6fNY_5\tbicycle\nMwN7iYEim6k_0\tbird\nMwW14_GuwLg_1\tbus\nMwdX3PbgC34_0\tgiraffe\nMwjq136uMe0_0\tcar\nkfInF5cUU98_0\tmotorcycle\nkfInF5cUU98_1\tmotorcycle\nkfLnoXlGBvU_0\tdog\nkfhspLhCU5Y_0\tcat\nkgDOVDDZ9eQ_
0\tcat\nkgONObiF8Hg_0\tcat\nkgT-NsRkv1c_0\tcar\nkgco3sZv7BY_0\tcat\nkgi1KajW_ZU_0\ttruck\nkglv-2P5ow4_4\tbus\nkgrFzgXO9Q8_0\tskateboard\nkgsyAMgjuL4_0\tbus\nkgxQ03-tSek_0\tbear\nkg6RFppR4MM_0\tknife\nkhUURgtFYBY_1\tbicycle\nkhUURgtFYBY_0\tbicycle\nkhVST8w3Zzw_0\tskateboard\nkhlqzkfBCfc_0\tcat\nkhpJlBWPPr4_1\tcat\nkimZApwsJEY_5\tbicycle\nkimZApwsJEY_6\tbicycle\nkimZApwsJEY_0\tbicycle\nkimZApwsJEY_2\tbicycle\nkimZApwsJEY_3\tbicycle\nkimZApwsJEY_4\tbicycle\nkizrM5CZzPk_0\ttruck\nkjBdTAkRijw_2\tbus\nkjM0hJl-L44_0\tskateboard\nkjtOW8OAIeY_0\tmotorcycle\nkkC5lqQb0t0_0\tumbrella\nkkR7pnou7hc_0\tknife\nkkeBMT1ixs4_0\tboat\nkkkc9xwKGp8_1\tskateboard\nkkvU3dvMkSI_0\ttruck\nkk4KuU5X6Lk_0\tcar\nklGHWdeD-qw_2\tbear\nkldR5yJFeOo_1\tbicycle\nkldR5yJFeOo_3\tbicycle\nklgANznh5x0_1\tbicycle\nkl2buVrYbX8_0\tskateboard\nkl3_w8_h6ts_0\tskateboard\nkl4RYG6OCIY_2\tknife\nkmZFQEGncaI_2\tbicycle\nkmZFQEGncaI_0\tbicycle\nkmllekf2nKc_0\tcat\nkmoaGUqL6bI_0\tskateboard\nkmvCtYXRUhM_0\ttruck\nkm7aR2fTJlA_2\tknife\nkm-3wnNLVYY_0\tboat\nknDRZU9u-Lw_1\tboat\nknVcB-GeINU_0\tcar\nknqi3OAHNO8_0\tboat\nkoOxoaMnXZc_0\tskateboard\nkoOxoaMnXZc_1\tskateboard\nkphV7yVMBOQ_0\tbicycle\nkphV7yVMBOQ_2\tbicycle\nkqDbbFz-XQQ_0\tbird\nkqDxyoQKFfE_0\tcat\nkqVaHPJzEro_0\tdog\nkq4tOnX3m2Y_3\tbus\nkq4tOnX3m2Y_0\ttruck\nkrSKV36ocSs_0\tbear\nkrvyahlS1z4_0\tbus\nkryv5em-VHk_2\tbear\nksB15ebtJeM_0\tumbrella\nksCempldLAA_0\tskateboard\nksCempldLAA_1\tskateboard\nksCjOk8r4rU_0\tperson\nksSVtTRXRyI_1\tbicycle\nksk5uCVKU7Y_0\tskateboard\nksxTUcFqlZw_0\tknife\nksx219-g47A_0\tcat\nktHzii2XMh4_0\tboat\nktPLKpH7-mk_5\tdog\nktcodoKjIvE_3\tbicycle\nktcodoKjIvE_4\tbicycle\nktcodoKjIvE_5\tbicycle\nMwtWyQiagOk_0\tbicycle\nMwvYg837DFU_0\tmotorcycle\nMxEjkI5fRh0_0\tdog\nMxHBWltYQX0_0\tboat\nMxKuZbSiZ4s_0\tskateboard\nMxK1dXmYQU8_0\tknife\nMxr-1toRi3s_0\tskateboard\nMyS7UVUc55M_0\tcar\nMybir4gfQaU_3\tbird\nMzB160hQlFE_9\tgiraffe\nMzB160hQlFE_2\tgiraffe\nMzB160hQlFE_4\tgiraffe\nMzB160hQlFE_5\tgiraffe\nMzB160hQlFE_6\tgiraffe\nMzB160hQlFE_7\tgiraffe\nMz9ZTHPYJxk_0\tdog\nM0Ga521uzoA_0\tdog\nM0qQQArQdTU_0\tbird\nM088XJeXBS0_0\tcat\nM1UsEMPrCc4_0\tknife\nM1cuEQppjNk_0\tbus\nM1p1DBTuqmk_3\tbird\nM1p1DBTuqmk_1\tbird\nM1xxFVktlzw_1\tbird\nM1zDeqozcU4_1\tbus\nM2R_9l38IUQ_0\tbus\nM2uSqd8ohUk_0\tbus\nM3CUpLmpRBo_0\tcat\nM3OhLKUgQho_0\tcat\nM3P38sLk0pc_0\tdog\nM3tK5YBjyKI_0\ttruck\nM3tK5YBjyKI_1\ttruck\nM3tK5YBjyKI_2\ttruck\nM4CENhQ5vWo_0\tcat\nM4Hqq89bZiE_1\tdog\nM40QOQPocV4_1\tcar\nM45MyaeogPU_0\tcar\nM5BEqJFfJYw_0\tskateboard\nM5NRM7UQv5c_0\tcat\nM5bLnqKDa1U_0\tbear\nM5kj9SEKNAo_0\tbus\nM6POMFHs-ec_0\tbus\nM6bin6X9FSI_0\tknife\nM6eRY9q89aQ_2\ttruck\nM6tXmkLy-2Y_1\tbird\nM7465rUWBzY_1\tbicycle\nM8Lhm-CgqH4_0\tcat\nM8cFdveIy4g_0\tcat\nM8drJLCDOL8_0\tcat\nM8ea7gWeDQ0_0\tbird\nM8f0VhN1ZnY_0\tumbrella\nM8i-DGTEw9M_3\tskateboard\nM8i-DGTEw9M_1\tskateboard\nM8sMZ15CLIU_0\tskateboard\nM9McwXGtZnI_0\tcat\nM9QtHKxypyI_1\tknife\nM9UrZSSK1MA_2\tmotorcycle\nM9eiVambl5s_1\tdog\nkuRfhOqyXeY_0\tumbrella\nkuzyHmE3SI0_1\tknife\nku68PhgE8bk_0\tbird\nku7gA5ZLk1Q_0\tcat\nkvFSzJHIsVg_1\tknife\nkwDNLBoEQq8_0\tskateboard\nkwDX0_2B3A0_0\tumbrella\nkwGGXvXtsjI_0\ttruck\nkwY370WQYUg_0\tcar\nkwbt-wHLPkY_1\tcar\nkwlcEg9G1bE_0\tknife\nkwsp30ykR4U_0\tboat\nkxeSYfuQl-I_0\tbird\nkx1bCqhLcbY_0\tbus\nkx5tIvM-9dE_0\tknife\nkyAEyX8zMWQ_0\ttruck\nkyPXCwNh7Rg_0\tcat\nkyW_f8sv5iw_1\tgiraffe\nkye1Q_k-_Gc_0\tbicycle\nky1FAcaT3UE_0\tdog\nky6uivneqIg_0\tbird\nkzblQQcpTdk_0\tskateboard\nkzblQQcpTdk_1\tskateboard\nkzfxn1c7_xc_10\tbicycle\nkzg7y0rERTY_0\tbicycle\nkzi3zDJR9Bc_0\tdog\nkzpJkBQxgE0_1\tbicycle\nkzp3UEwOkJA
_0\tknife\nkzw5a8z9cXs_0\tbird\nkz6HYpF3pLo_0\tdog\nk0cUZwgJzB4_0\tumbrella\nk0uDHQea9sg_1\tdog\nk00mpKYHsuU_0\tskateboard\nk1F_TFA3Bbk_0\tbicycle\nk1LrJEfFKag_0\tmotorcycle\nk1NVg8uaPE4_1\tskateboard\nk1Q5wms4euk_0\tbird\nk1TOwPACsvY_2\tgiraffe\nk1TOwPACsvY_3\tgiraffe\nk1vz1ZSBSoo_0\tbicycle\nk2O0XiVn5kw_0\tskateboard\nk2QiX8c3t50_0\tbird\nk2SEBRgras8_3\tcar\nk2Z0W54JwB4_0\tskateboard\nk2bQG12smw0_0\tcat\nk2imYphEfo0_0\tcar\nk2ocqQxARpQ_0\tskateboard\nk2yx7C__3wY_1\tcat\nk3HKP8CV3CY_0\tbus\nk3LnBcn5zlU_0\tboat\nk3QuANDFgVQ_2\tboat\nk3QuANDFgVQ_3\tboat\nk3QuANDFgVQ_5\tboat\nk3fZgTTMj1g_0\tgiraffe\nk3fZgTTMj1g_1\tgiraffe\nk3im7HEvSCI_1\tbear\nk4D-Ql4Fg7c_1\tbird\nk4PWQfz5NGo_0\tmotorcycle\nk4U1AP6KV4E_1\tskateboard\nk4c6D3ZsdL4_0\ttruck\nk5Pp6BYXono_3\tbear\nk5R3cUyyyWo_0\tcar\nk5nvWBLlS2c_1\tboat\nk5nvWBLlS2c_2\tboat\nk5vlZTySXDk_0\tknife\nk5yJqWnvZzg_1\tbus\nk5yyV32-nOM_0\tmotorcycle\nk5yyV32-nOM_2\tmotorcycle\nk55nlQZwGz0_1\tboat\nk57rVPEq54k_1\tbear\nk57rVPEq54k_2\tbear\nk6Bwd6af64Y_2\tbear\nk6gc4du1FqU_0\ttruck\nk6l0hwjaeMA_0\tmotorcycle\nk6l0hwjaeMA_1\tmotorcycle\nk6l0hwjaeMA_2\tmotorcycle\nk60P5osD0rU_0\tbus\nk64DU45ej5M_6\tcar\nk64DU45ej5M_0\tcar\nk64DU45ej5M_1\tcar\nk64DU45ej5M_2\tcar\nk64DU45ej5M_3\tcar\nk64DU45ej5M_5\tcar\nk640Wtpq-mU_3\tumbrella\nk640Wtpq-mU_0\tumbrella\nk640Wtpq-mU_1\tumbrella\nk7TCyTff1aM_0\ttruck\nk7uTiiG-Ez0_0\tbus\nM-8Zbj9mU9U_0\tboat\nM_miIFgy1Ro_0\tbear\nNAGKrEjU7Sk_3\tbird\nNAGKrEjU7Sk_2\tbird\nNAkFaQBgOvo_0\ttruck\nNA9hxGtSLCM_0\tbird\nNA_DgxP18c4_2\tmotorcycle\nNBE97NAHACk_0\tgiraffe\nNBdhmPgSS2o_1\tmotorcycle\nNCNgKQCU8BM_1\tbird\nNCP6Cna8jtY_0\tskateboard\nNCQ5340WhY8_0\tcar\nNCSygygs2Dw_0\tskateboard\nNCWp95If4uM_0\tmotorcycle\nNCazYWutlOc_0\tboat\nNCoJmkRt2nE_0\tbicycle\nNDUhlmH9Rz4_0\tcat\nNDYT9jTE54Q_0\tbus\nNDYT9jTE54Q_1\tbus\nND_GyhH6zgI_0\tmotorcycle\nNEQIR06VuP4_1\tgiraffe\nNEQOLn6QBuE_8\tbird\nNESQ70PhJU0_1\tboat\nNElB9jKqhLc_0\tdog\nNFjb4XxSoHI_0\tskateboard\nNFye-cUktCg_0\tbicycle\nNFz_zzAU_Hc_2\tskateboard\nNFz_zzAU_Hc_0\tskateboard\nNFz_zzAU_Hc_1\tskateboard\nNF_o01qBrtI_0\tskateboard\nNF_o01qBrtI_1\tskateboard\nNGCjiEfG4C8_0\tskateboard\nNGM0enFRa7E_0\tcar\nNGO_7sJEeyk_0\tbus\nNGRBYn2OatE_0\tmotorcycle\nNGU-5KGKEJ0_0\tbear\nNGmJtkXyJpc_0\tcat\nNGmKyRRNL_E_0\tbird\nNGw5-auup1k_0\tcar\nNG7FgzWn8Gw_1\tgiraffe\nNG9SIDqXvic_0\tknife\nNHlayOfSZJc_0\tdog\nNHlsNDcNZqU_0\tcat\nNHmxckr22ws_0\tskateboard\nNIPnaoHgzdU_0\tbird\nNIPnaoHgzdU_1\tbird\nNIPnaoHgzdU_2\tbird\nNIvYcbJIYdA_0\tcat\nNI_YQKOQEvM_1\tbird\nNJeNAw2RnNc_0\tbus\nNJeNAw2RnNc_1\tbus\nNJeNAw2RnNc_3\tbus\nNJeNAw2RnNc_4\tbus\nNJ0O48Pkn2k_0\tbird\nNJ9DpLHaGl8_0\tskateboard\nNKLemqoJ_hA_0\tcat\nNK4942wyYgk_0\tbus\nNLKK4VUbuuI_5\tbear\nNLp8voZylqM_1\tknife\nNLsGPrwnRug_1\tbus\nNLsGPrwnRug_2\tbus\nNL3CG8KGwis_3\tgiraffe\nNL5j52SH-yQ_0\tbus\nNL9o4JgV25A_0\tdog\nNMJB2K_UOLc_0\tdog\nNMJLv-oYyNc_1\ttruck\nNMJLv-oYyNc_0\ttruck\nNMecCV-gtK8_1\tdog\nNM7OVTITkaA_0\tcat\nNNCjf9Qu2RI_0\tbear\nNNHOtBx0FOY_0\tmotorcycle\nNNkLZRrMEv4_6\tboat\nNNl4nD5_b_o_0\tskateboard\nk8NHRbiB2Dc_0\tdog\nk8OEoDpqSLk_0\ttruck\nk857sWPtmcs_0\tcat\nk9BuU6A21DQ_0\tskateboard\nk9HxprAZods_0\tumbrella\nk9KmR4MNI7o_0\tcat\nk9KtLV0IMgI_0\tdog\nk9PCp-8PFZ0_0\tdog\nk9PX9l8Fnlw_8\tbus\nk9PX9l8Fnlw_0\tbus\nk9PX9l8Fnlw_2\tbus\nk9PX9l8Fnlw_4\tbus\nk9PX9l8Fnlw_5\tbus\nk9VDPqCbqj0_0\tbear\nk9VVUD9wVxk_1\tboat\nk9zLR7VKKpE_0\tskateboard\nk9-PLHxxGHc_0\tcar\nk-DOe-pD_MY_0\tdog\nk-Nl-39bZnw_1\tskateboard\nk-SqR4BEw3s_4\tmotorcycle\nk-SqR4BEw3s_1\tmotorcycle\nk-izgq4Wj4E_0\tdog\nk-izgq4Wj4E_1\tdog\nk_X3oj841SQ_1\tmotorcycle\nk_e_YVhclfg_4\t
truck\nk_e_YVhclfg_3\ttruck\nk_iI2BJQpqo_0\tcat\nk_jXopyxdo0_1\tboat\nk_sLp7QKSu8_0\tboat\nk_tkXRmI_O0_1\tskateboard\nk_tkXRmI_O0_0\tskateboard\nk_vnzrtDfAw_1\tcat\nk_5e1d-vpBU_3\tumbrella\nk_5e1d-vpBU_4\tumbrella\nlAA5eXeYwpo_0\tcat\nlAFonTk_uSA_1\tbear\nlAI9mfwKMM8_1\tdog\nlAQxdRz4PlQ_0\tbear\nlA3btp7QIxg_0\tbus\nlBH0KOGRswc_0\tcar\nlBXWSN3ciPY_0\tmotorcycle\nlBsOiAR5dAk_2\tbird\nlBsOiAR5dAk_3\tbird\nlBsOiAR5dAk_4\tbird\nlBsOiAR5dAk_7\tbird\nlBsOiAR5dAk_8\tbird\nlByHH7yvxpA_0\tboat\nlB7j8Z4gGtQ_0\tcar\nlB_bnqdnexA_5\tbird\nlB_bnqdnexA_1\tbird\nlB_bnqdnexA_4\tbird\nlCYwepuY9qY_0\ttruck\nlCZry6FRpsk_0\tbicycle\nlCf6uL_GkYw_2\tbear\nlC0yidNH6B8_2\tbear\nlC4BoFWvHs4_3\tbear\nlDLYtKqlr5M_0\tbus\nlDf9b9Kr-24_1\ttruck\nlDgzFjqokik_0\tboat\nlDqk6pRbY3M_0\tbus\nlDybC3N70so_0\tcar\nlD63JOjqTDg_5\tbear\nlD63JOjqTDg_9\tbear\nlD63JOjqTDg_10\tbear\nlD63JOjqTDg_0\tbear\nlEG4DGADyEU_0\tbird\nlEIbERGmlJw_0\tumbrella\nlEWOScSt-Ks_0\tdog\nlEaMfPfi9wI_0\ttruck\nlEwJRP_FRW0_0\tdog\nlFYONMOuW_o_0\ttruck\nlFqrTC4j9AU_0\tcat\nlF3vWAJRnek_0\tmotorcycle\nlGPyv8wlqaw_1\tknife\nlGaQV9YhOac_0\tmotorcycle\nlGrVM91Cav8_0\tperson\nlG5xlt4odEs_0\ttruck\nlHKKhuJtJ9A_0\tknife\nlHXHAD73KC4_0\tmotorcycle\nlHX5VdjDPMg_0\tknife\nlHuiaqmISAM_0\tmotorcycle\nlHyHQQF-8K0_0\tcar\nlIE0SbW_gCY_0\tdog\nlIH_in2H5ds_0\tknife\nlIrvgqkirS4_0\tcar\nlIrvgqkirS4_1\tcar\nlI6hnnAL_54_1\tskateboard\nlI7VzYQQ8DY_1\tbus\nlJBeZTzXuSk_0\tumbrella\nlJJU-pzIbgs_0\tboat\nlJKxeHgRugQ_0\tbicycle\nlJa2bLMFljk_0\tknife\nlJa2bLMFljk_1\tknife\nlJa2bLMFljk_2\tknife\nlKC5LtWPL6s_0\tboat\nlKEgqjR4HeU_0\tbicycle\nlKEgqjR4HeU_1\tbicycle\nlKJZ4AYoO9g_0\tcar\nlKJZ4AYoO9g_1\tcar\nlKJZ4AYoO9g_3\tcar\nlKJZ4AYoO9g_4\tcar\nlKJZ4AYoO9g_5\tcar\nlKJZ4AYoO9g_6\tcar\nlKJZ4AYoO9g_7\tcar\nNOEix5l-1TE_1\tbear\nNOVqPOoUWiM_2\tbear\nNOmc38WuhVA_1\tzebra\nNOmc38WuhVA_2\tzebra\nNPX9qxaZXGQ_1\tboat\nNPc_EhpqV9I_0\tcat\nNPlhHkKnD-o_3\tbird\nNPlhHkKnD-o_1\tbird\nNPnIcXU4TO4_0\ttruck\nNPnJoNuZw64_0\tbicycle\nNP2YBNp1eMo_0\tbus\nNP8MrtR7UMQ_0\tskateboard\nNQRWmK2DAwo_1\tskateboard\nNQ7XVf2jPCk_1\tbear\nNRBtrgg-ACI_0\tumbrella\nNRGqiXyM4H0_0\tbus\nNRRxMVw0Fv0_0\tumbrella\nNRV62o4HAaI_0\tdog\nNRkeO8cWvlY_0\tskateboard\nNSEdAs2W7io_1\tbus\nNSrCO0JVjrQ_0\tbus\nNS6Z7neTE58_2\tbear\nNS7vapDr5vE_0\tdog\nNTJsuoSzIX0_8\tboat\nNTi-7LowE5E_4\tbicycle\nNTi-7LowE5E_0\tbicycle\nNTurL251ndw_0\tbird\nNTyAmrmpD-w_0\tcat\nNUOXJlGoyJk_0\tmotorcycle\nNURGtF3McGo_0\tknife\nNUU3df9bDmc_0\tmotorcycle\nNUhIeMVykto_0\ttruck\nNUkuVMR_rDA_0\ttruck\nNUkuVMR_rDA_1\ttruck\nNUo3_VxkQWs_0\ttruck\nNUo3_VxkQWs_1\ttruck\nNU5WfPjxGO4_1\tcat\nNU60EZnPyy8_0\tbird\nNVAF-TWNge8_0\tboat\nNVeRtjaMVVM_0\tcar\nNVz1RXwlQQM_0\tskateboard\nNWOVEKbfu_M_2\tcat\nNWwoSS6oanE_0\tbus\nNW6ZEfS5YY0_0\tdog\nNXHWi70uXME_0\tmotorcycle\nNXU1Yxq08KQ_0\tskateboard\nNXe33k8YYzQ_4\ttruck\nNXe6DkOAbbo_0\tcat\nNX2FQE2RlgI_0\tdog\nNX2FQE2RlgI_1\tdog\nNYBxFsoPtLU_7\tknife\nNYBxFsoPtLU_2\tknife\nNYVtLPBMGDA_1\tdog\nNYVtLPBMGDA_2\tdog\nNYpkdx_Wzos_0\tbicycle\nNYrd2o8DQhw_0\tbird\nNYsYKDH1T0Y_0\tbear\nNYs9voRwmTk_2\tmotorcycle\nNZGyAc3mNmM_1\tskateboard\nNZOBtVvtpfo_0\tbird\nNZoU9njpjBc_2\tbird\nNZoU9njpjBc_1\tbird\nNaOwM5jaBb0_0\tbear\nNaTP9E6Ee6k_0\tmotorcycle\nNahvbbnqXN0_0\tknife\nNaszpQMnSmM_0\tskateboard\nNbXn5vr55Ik_0\tmotorcycle\nNbnAyKWQOgU_2\ttruck\nNbnAyKWQOgU_3\ttruck\nNbz45at2suY_0\tbird\nNb1nL_IG2Tc_0\tumbrella\nNb4FhqzK_80_0\tbird\nNb9Ee0cdc90_4\tknife\nNb9Ee0cdc90_0\tknife\nNcD7EzR9VKc_0\tcat\nNcODwqAl8wA_0\tbird\nNcODwqAl8wA_1\tbird\nNcnPt-ksZkA_0\tmotorcycle\nNcnr9xhL4RE_1\tbird\nNcnr9xhL4RE_5\tbird\nNco2IqVnrXc_0\tcat\nlKiN4UeEuCQ_0\tc
ar\nlKrgSHU_lF4_0\tmotorcycle\nlKrgSHU_lF4_1\tmotorcycle\nlL9OwfLG-LQ_0\tskateboard\nlMPus-gGijc_0\ttrain\nlMw3GHYr5nI_3\tbear\nlM2lr9vONXE_1\tbird\nlNDNEdNtW4w_0\tumbrella\nlNLvw0Ga8IY_1\tskateboard\nlNLvw0Ga8IY_2\tskateboard\nlNLvw0Ga8IY_0\tskateboard\nlNShteFjBFI_0\tbird\nlNh4Dhf0JC8_0\ttruck\nlNj5zp4Gbsw_1\tbird\nlOGti3Hfk6A_2\tbird\nlOglyCevyZo_0\tmotorcycle\nlOzlZJwo_U8_0\tmotorcycle\nlO0DJaFrguw_0\tmotorcycle\nlO0Nas9ogL0_0\tbird\nlPG5xsRX0U0_0\tbird\nlP3Jv00bEG8_0\tbear\nlQf2-zTERI8_0\tmotorcycle\nlQ8AFjrjX64_0\tumbrella\nlRSTcmXYwzM_2\tknife\nlRyY7rtPGJ0_1\tdog\nlRyY7rtPGJ0_0\tdog\nlR-HPtCgbFY_0\tcar\nlSefRz_ad2I_0\tperson\nlS7IFw-rHNE_0\tcar\nlTNivynkdBQ_0\tbear\nlTNivynkdBQ_2\tbear\nlTW53YPXtYw_0\tumbrella\nlTgxSRoCADM_1\tboat\nlTgxSRoCADM_2\tboat\nlTgxSRoCADM_3\tboat\nlTgxSRoCADM_0\tboat\nlTyeSMENfFI_0\tdog\nlT1oYaEt3l0_0\tskateboard\nlT1oYaEt3l0_2\tskateboard\nlT1oYaEt3l0_1\tskateboard\nlUEz6tmtuxs_0\tdog\nlUQr1JtEFAM_0\tcat\nlUSPy6WOhvw_1\tboat\nlUk_G-9RjSE_0\tbird\nlUq042i-r3E_1\tdog\nlUq042i-r3E_2\tdog\nlVCS7_AhLDg_0\tcat\nlVKT0DahELk_0\tbus\nlVKT0DahELk_2\tbus\nlVOqUh5DjZE_0\tbicycle\nlVWFKjMWyF8_0\ttruck\nlVWFKjMWyF8_1\ttruck\nlVWFKjMWyF8_2\ttruck\nlVoO_SiGxpw_0\tcat\nlVohP88BOwU_1\tgiraffe\nlWDh4SPr76A_1\ttrain\nlWGBmSVTvwo_2\tskateboard\nlWLYqz3RhXs_0\ttruck\nlWkC8ABD6YI_0\tknife\nlWnVG1WyzTQ_0\tdog\nlW8axrSg7EY_0\tdog\nlXJGVOcVinA_1\ttruck\nlXkkzYM416M_12\tknife\nlXkkzYM416M_8\tknife\nlXkkzYM416M_11\tknife\nlXshoTSoReY_0\tmotorcycle\nlXshoTSoReY_1\tmotorcycle\nlXshoTSoReY_2\tmotorcycle\nlYC47pEoyKc_2\tskateboard\nlYEiGk0pa9w_1\tdog\nlYP4KB7dANc_0\ttruck\nlYcCLy33mJA_0\ttruck\nlYcCLy33mJA_1\ttruck\nlYrLCKi7wHw_0\tknife\nlYrvoVOM7i8_1\ttruck\nlYrvoVOM7i8_2\ttruck\nlYzirpo9X4Q_2\tknife\nlY38gkpHWQA_0\tdog\nlZWg3rt2bp4_0\ttruck\nlZWg3rt2bp4_1\ttruck\nlZWg3rt2bp4_2\ttruck\nlZgIg28WsqA_1\tdog\nNcs0SIaAZjk_0\tskateboard\nNdDPhB7JjOc_1\tcar\nNdFMcVN8fkc_0\tskateboard\nNdFMcVN8fkc_1\tskateboard\nNd2smOOuPs4_0\ttruck\nNd5Cyi1P2AQ_0\tperson\nNd5Cyi1P2AQ_1\tmotorcycle\nNesRw9JE-bc_4\tdog\nNesRw9JE-bc_0\tdog\nNesRw9JE-bc_1\tdog\nNe_T9PyoaOA_0\ttruck\nNe_T9PyoaOA_2\ttruck\nNfQ_F7iyFT4_0\tbus\nNfoq-vLwXMs_0\tcat\nNfuM3ceM9Lg_0\tbird\nNf4iPszryRI_1\ttruck\nNgA6Mi5Qj6Y_1\tcar\nNgHJhpedfLw_0\tcat\nNgfJ42fUH10_0\tskateboard\nNglZtOBkn1M_0\tboat\nNgp2Yvug4N4_0\tskateboard\nNg7YPssESZs_1\tumbrella\nNhDdHfwovA0_1\ttruck\nNhHYQ1QBPq4_0\tcat\nNhJWY87UJGA_0\tmotorcycle\nNhKgTGZXrk4_0\tmotorcycle\nNiN42Yupn8k_0\tmotorcycle\nNiQLFJ_8gI0_1\tbird\nNifFA8VfbMY_0\ttruck\nNjPnw9Ofph8_1\tbicycle\nNjr2CQDoQ0w_2\tboat\nNj1tu2uzjf8_0\tumbrella\nNj4IqLuQBd0_0\tcar\nNkHiSqSViG4_3\ttruck\nNkSVC1QmlzA_2\tboat\nNkXF30FQWUs_0\tbicycle\nNkajkrLx-Pg_1\tgiraffe\nNkdGD4jRmVk_2\tskateboard\nNkdGD4jRmVk_3\tskateboard\nNkdGD4jRmVk_4\tskateboard\nNkvfxcYCIfg_0\tperson\nNkxm_Grldgg_0\tboat\nNlKX0Q_a4qM_0\tbicycle\nNl2e8ERoEYk_1\tskateboard\nNl27zjpvGZk_0\tcat\nNmCxdejUxjE_2\tumbrella\nNmGnWjSHIGc_1\tdog\nNmGnWjSHIGc_3\tdog\nNmGnWjSHIGc_0\tdog\nNmHo6hH22gY_0\tcat\nNmRjRjuwWGU_0\tumbrella\nNmm4H7xWWeE_0\tgiraffe\nNmnOIU5yzmo_0\ttruck\nNm3Wkz8ClY8_4\tbicycle\nNm3Wkz8ClY8_0\tbicycle\nNm3Wkz8ClY8_3\tbicycle\nNnQubFQHcUU_0\tcat\nNnSwVsUnfj8_0\tdog\nNnV7SskfNiQ_1\tbicycle\nNnYCP4YouSI_0\tskateboard\nNnYCP4YouSI_1\tskateboard\nNncDYgsTFic_0\tmotorcycle\nNn1fsXlRDQg_7\tbird\nNoKz0p_h8xA_0\tcar\nNoRnxJ4D8OY_0\tmotorcycle\nNoglbvaRxAM_1\tcar\nNoglbvaRxAM_2\tcar\nNopFymjXZBE_0\tcar\nNosN0T3He9Y_2\tknife\nNowQILLv6pM_1\tmotorcycle\nNoxncYznLDw_0\tmotorcycle\nNoxncYznLDw_2\tmotorcycle\nNoxncYznLDw_3\tmotorcycle\nNoxncYznLDw_5\tmotorc
ycle\nNpbXizTCNgs_0\tmotorcycle\nNpciaYlS9Bs_2\tskateboard\nNpptiWtuy7U_1\tbird\nNp0p_ITfRiE_0\tboat\nNp0p_ITfRiE_2\tboat\nNqA0sKGQZbc_0\tbird\nNqD8w0_R9y8_1\tmotorcycle\nNqLEhuNiS-A_0\tknife\nNqzZbJJl3E4_0\ttruck\nNqzZbJJl3E4_2\ttruck\nNq-mC-BLk1c_0\tbird\nNrGByfXIMJc_0\tdog\nNrGHtOFFLxU_0\tmotorcycle\nNrJIz8M3oNM_0\tboat\nlaSVNAwUDQc_0\tgiraffe\nlaiFgjfWMS8_1\tbird\nlajujsJ1J4k_0\tbird\nlajujsJ1J4k_1\tbird\nlauIpA9lVMo_0\tskateboard\nla0ygpbR6t4_0\tdog\nla0ygpbR6t4_1\tdog\nlbCW72FyaQ8_0\tumbrella\nlbC8rsjkZ8Y_1\ttruck\nlbDdPmkMwnw_0\tmotorcycle\nlbSldeZXn6I_0\tskateboard\nlbZo-rTovyc_0\tskateboard\nlbod3X-5Z40_4\tbus\nlbod3X-5Z40_5\tbus\nlbzHPZpNNjg_0\tdog\nlcSqXrVIbwo_0\tmotorcycle\nlcWTw6rAYfI_0\tcat\nlcv8jXnPWQU_0\tcat\nlc6jM9I3ffc_0\tmotorcycle\nlc8hZxMLAr4_0\ttruck\nldjVc4u8LUc_1\tmotorcycle\nldqpSPYa-3U_1\tbicycle\nld5g39_bixY_1\tskateboard\nld5g39_bixY_2\tskateboard\nlew1kgMUujc_0\tcar\nlfPmXUBRa-k_1\tbird\nlfVb7VtGUAI_0\tperson\nlfYoLXfvmyo_0\tbus\nlf29DRtjGcY_1\ttruck\nlf29DRtjGcY_2\ttruck\nlf4Xwro4NOQ_5\tbus\nlgLHq8p_CnA_0\ttruck\nlgVXhalKM3w_0\tboat\nlgne-5wGRTg_4\tbird\nlgwnVArDAa0_2\tbear\nlg3udJdBBoI_0\tdog\nlg_4H9FLVog_0\tdog\nlhBsZjQzf8Q_0\tmotorcycle\nlhEN_T9FduQ_0\tknife\nlhoMpa49rvU_0\tumbrella\nlh1Brsyb0aE_0\tbicycle\nlh21_LSx_G8_1\tdog\nliDzsyAmMJQ_0\tmotorcycle\nliThgzeBkVY_0\tcat\nlite73A-c3o_0\tbicycle\nli8IvNy_DW4_1\tbird\nljrwXgV0j9o_0\tmotorcycle\nlj3DWkRI_HM_2\tbear\nlj3mqLiqSRw_0\tknife\nlj5bI1M_0ZA_0\tskateboard\nlj-BTMsCDdY_0\tdog\nlj-BTMsCDdY_1\tdog\nlkOFpGLmX9s_0\tcat\nlkYuyUsRfWE_1\tdog\nlkg_nXf_W88_0\tbicycle\nllBtQEKaglQ_2\tbird\nllFPEcbP7m8_0\tcar\nllWG8M6Fsrg_1\tskateboard\nllu7uI6yzns_0\tmotorcycle\nllu7uI6yzns_1\tmotorcycle\nllu7uI6yzns_2\tmotorcycle\nlmCsOrgM7zE_0\tcat\nlmVNyKFiuQw_3\tknife\nlmVNyKFiuQw_2\tknife\nlm-deiNDAW4_0\tmotorcycle\nlnFmVwj7oMg_1\tcat\nlnk0OtCMbBc_0\tcat\nln5IAoaoPHc_0\tdog\nNrX1AnOpS98_0\tbus\nNroEppStyZI_0\tbicycle\nNrvQhlD_Fuw_0\tdog\nNrvQhlD_Fuw_1\tdog\nNsCdsMqUNFc_0\tbicycle\nNsaAbiSbaCc_0\tcat\nNsdCvelNA0g_0\tmotorcycle\nNsgZVfgUWco_0\tskateboard\nNsgZVfgUWco_1\tskateboard\nNs78CA77Hmk_0\tbird\nNtHFEE2Ii0o_0\tknife\nNtQSi_L3_e4_0\tbear\nNttRY9GKNOE_1\tcar\nNt38ikEgqJg_1\tdog\nNt-UKy4Uq0o_0\tcar\nNuOq_HSf26I_0\tboat\nNucr0ksCppE_0\tdog\nNumUCmB1MLA_0\tbus\nNu6g6OfLbKU_0\tzebra\nNu6g6OfLbKU_1\tzebra\nNu-gGh3BQo0_0\tskateboard\nNvDafPMMZtg_1\tcat\nNvDafPMMZtg_0\tcat\nNvFUKJ9Y500_0\tbicycle\nNvTRLNn1Tk4_0\tcat\nNwC3jHQ65I0_0\tbear\nNwG3zY4-qHs_0\tskateboard\nNwHv08KS8WU_0\ttruck\nNwHv08KS8WU_2\ttruck\nNwgEA2yRlYk_0\tbird\nNwgEA2yRlYk_5\tbird\nNwlCLmmFUzM_0\ttruck\nNwoCpDkRUOc_0\tskateboard\nNwzkWW45Qx0_6\tbird\nNw1pLrkHm1E_1\tcat\nNw8ZySxnzIA_0\tcat\nNxPgLux4spk_0\tmotorcycle\nNxgst3FR84g_0\tcar\nNyOC1kV5fqc_2\tknife\nNyOVnxlZw44_0\ttruck\nNyQlYlDdA1Y_2\tskateboard\nNyg0BliJTCI_2\tumbrella\nNy14oMm9C9k_6\tskateboard\nNzIOn70DDCU_0\tbicycle\nNzfwqHNApI8_0\tbear\nNzqr9pq3W0g_0\tbus\nNzwcia0dVls_0\tbear\nNz5AnTEPNKY_3\tbird\nNz_Dn60wY8c_0\tdog\nN0p_wrAammI_1\tbird\nN0wFxDTDhrA_0\ttruck\nN0yYt90fBGo_0\tboat\nN049Vl1eC9E_0\ttruck\nN1C5Wk1HQEk_0\tcat\nN1jUvtD_RyY_0\tumbrella\nN1xm5YdzSfQ_0\tbird\nN13r5ZKqAZI_1\tboat\nN2GiHfyj2sY_0\tknife\nN2Y3LmbOWhM_1\tcat\nN2Y3LmbOWhM_2\tcat\nN2e24fXBD58_0\tboat\nN2u1zVHzrfc_0\tcat\nN3D5PnaCpHs_1\tknife\nN3D5PnaCpHs_2\tknife\nN3Iy7f2RrrQ_0\tmotorcycle\nN3OIM_qi7dY_0\tcat\nN3VKNNdiRhs_0\tumbrella\nN3ZGT5VDX7A_0\tdog\nN3vCQPsPb7k_0\tcat\nN3x4Fw8PZ04_1\tbird\nN4BazwxnEJU_1\tumbrella\nN4T6B8WAeyw_1\tbear\nN4bUNLwIt-I_0\tbicycle\nN4gBOlxfYUI_0\tgiraffe\nN5T8bgYdTg8_0\tbird\nN5cC5-506Yg_0\tmotorcycl
e\nN5uwMT9YWA8_2\tumbrella\nN6FCEWFj0vc_0\ttruck\nN6XH-20xsPk_0\tbus\nN6Xl8e3GRcY_0\tbird\nN6gcbwR93B4_1\tmotorcycle\nN6rvYTX52x4_0\tcar\nloFhsa4OXsA_0\tzebra\nloFhsa4OXsA_1\tzebra\nloFhsa4OXsA_2\tzebra\nloS5Iy7HDhY_2\tcar\nloyp0oi9idU_0\tcat\nlpPnun9oDq4_1\tboat\nlqEgRMyazN4_0\tdog\nlqi9uYhr1lU_3\tboat\nlqybkPUTuGk_4\tbird\nlqybkPUTuGk_0\tbird\nlqybkPUTuGk_1\tbird\nlqybkPUTuGk_3\tbird\nlrbJ-8myxJA_1\tskateboard\nlrd8TXYq2Co_0\tzebra\nlrgLAWtIFbQ_0\tbird\nlrk-LSpxnaQ_0\tbus\nlrsspehYW2Q_0\tcat\nlrusc_A2xpY_1\tskateboard\nlsQ4p_XwS3U_1\tskateboard\nlsW8rve_6F0_0\tbird\nlsslg2HK3as_1\tbird\nls7K9Ga_TDo_0\tcat\nls8cJ6QPPdI_0\ttruck\nltfbVFmlGNs_0\tmotorcycle\nltfbVFmlGNs_1\tmotorcycle\nlti3EMrk6hA_0\tbird\nltyDB0DzJ4o_0\tbear\nluZpSqhxjzc_0\tskateboard\nlujnNrfylcM_0\ttruck\nlu4gOMv2LmA_0\tdog\nlvW9JvQnv_U_0\tcat\nlvXow0J0_Z8_5\tboat\nlvpmaJx7Ydo_0\tmotorcycle\nlvxwGSPs5eo_0\ttruck\nlvxwGSPs5eo_1\ttruck\nlv79L0E9KbU_0\tcat\nlv8ApAxhQxg_9\tdog\nlwIzp1ny_cc_0\tbicycle\nlwqQ1SyQ6oc_0\tbird\nlwu1229kxGE_0\tumbrella\nlw-_X5H5dsA_1\tskateboard\nlxfLak4qc0w_3\ttruck\nlxxazO-lUhg_0\tskateboard\nlxz5eN6gYvE_0\tskateboard\nlx4WDd9A1jM_0\tcat\nlyBbm0su2N8_0\tdog\nlyDsv_jEl3M_0\tmotorcycle\nlylbDiRYA18_0\tskateboard\nlym5pBjKK44_1\tboat\nlyx_DnTpBx4_0\tbird\nlzAGCQoeAug_1\tboat\nlzISnRATBZY_0\tmotorcycle\nlzrv6Lmaqhc_0\tbicycle\nlz9wsaAdD3g_0\tdog\nl0HBjPE-vp4_0\tbicycle\nl0LztA4KLq8_1\tumbrella\nl0TccajPnLs_0\tcat\nl0YyZLT2r0Q_0\tdog\nl0dbu61iEXU_0\tcat\nl0kogcjKlvI_0\tbird\nl01YbT30Uzw_0\tcar\nl1PoAFZPnAI_0\tcat\nl1cfghmMFfA_0\tmotorcycle\nl1dkS9dCOZs_0\ttruck\nl1eSoNjG7g4_3\tcar\nl1smSqKCK4k_0\tperson\nl1wXtZDVtTw_0\tbear\nl120CJB_tWI_0\tcar\nl2Cytaq3_MU_0\tbird\nl2d3stMmMjs_0\tcat\nl2pGQEcySt4_0\tgiraffe\nl23teWgsK_Q_1\tskateboard\nl23teWgsK_Q_0\tskateboard\nN7HX62OM1Jo_1\tcar\nN7WtVRWgYEs_0\tbird\nN8RE_7TdVGo_0\tskateboard\nN8wDSOXX8q4_0\tcat\nN9TwNh9IZug_0\ttruck\nN9TwNh9IZug_2\ttruck\nN-bSoL4tlX0_0\tcat\nN-ehGzRtoj8_0\tbird\nN-4XvHMsGCk_0\tperson\nN-9RtI_ifsk_0\tmotorcycle\nN_MWs_Dxjio_0\tknife\nOAJTjsjrFlQ_0\tcat\nOATLx4-34zQ_0\tdog\nOAtOdcwMjgs_0\tskateboard\nOBDA-yKAC_k_0\tumbrella\nOBDA-yKAC_k_2\tumbrella\nOBLc4YWkCqU_0\tmotorcycle\nOBYJdeMHD3g_0\tmotorcycle\nOBlj7XKW4lc_1\tboat\nOBlj7XKW4lc_0\tboat\nOBti9g_xdjg_0\tbus\nOBuDg5pF8EM_0\tmotorcycle\nOBvMQQZSs6Q_0\ttruck\nOCEGSfdedcM_1\tdog\nOCYvV1-sQQQ_1\ttruck\nOCYvV1-sQQQ_0\ttruck\nOCijTz38zrU_0\ttruck\nOCpuPcuJN68_1\tcar\nOCp5hNHBPpU_6\tknife\nOC3VHGBHbMY_1\tdog\nOC3VHGBHbMY_2\tdog\nODXPmCSXZDc_1\ttruck\nODXPmCSXZDc_2\ttruck\nODbUQUd4jSU_0\tskateboard\nODdK6tzKWWs_2\tbicycle\nODdK6tzKWWs_3\tbicycle\nODlDtYOtoQs_0\ttruck\nODo-zlQ_GB0_0\ttruck\nODp6c6uSvaU_0\tgiraffe\nODuka2U9fkA_0\tbird\nOD4XXIos2Zo_0\tdog\nOEJox-XKatw_0\tknife\nOEJox-XKatw_1\tknife\nOEMh8A9j_pg_3\tbear\nOEQV-Uetx8M_0\ttruck\nOE0tYMQn8GU_1\tbird\nOFA22Poj7lQ_0\tbicycle\nOFA22Poj7lQ_1\tbicycle\nOFbK3M6Z_QU_2\tdog\nOFbK3M6Z_QU_1\tdog\nOFdr0zUfrlE_0\tbus\nOF2H-LBDSPk_2\tbird\nOF2H-LBDSPk_1\tbird\nOF6Up9vV9Qc_3\ttruck\nOGMTfwEYzHA_0\tknife\nOGNQnbR2jAw_1\tbear\nOGVemy4LnsA_0\ttruck\nOGbVuwjdEDU_0\tmotorcycle\nOGnQhL7HZyI_0\tbus\nOGsEC0i33BY_0\tknife\nOG7Gqq0yNXc_0\tskateboard\nOHWx9W6ECl8_0\tgiraffe\nOJ0c10BvtRY_0\tdog\nl3U_T7n5YD8_0\tbicycle\nl3YBS5nRxUY_0\ttruck\nl3lkSnsgzx4_0\tumbrella\nl3qhbFnoRvI_0\tcar\nl31h7cMiU1I_0\tbear\nl4LQx_ua4m0_0\tbus\nl4MLa-2lkQI_0\tbus\nl4dzsbhTXr4_2\tbird\nl4lv0qkvs10_6\tbear\nl43lNQ5Vq_s_0\tbird\nl4-nRuAZNyY_2\tcar\nl5FUU1e4Y60_2\tbicycle\nl5ecq1OhBsk_1\tskateboard\nl5ecq1OhBsk_0\tskateboard\nl508a0nbyQI_6\tbicycle\nl508a0nbyQI_13\tbicycle\nl508a0n
byQI_14\tbicycle\nl508a0nbyQI_18\tbicycle\nl6NgJ2NHnt4_1\tbear\nl6S8h_QnD7U_0\tcat\nl63MzTHehFQ_0\tcat\nl7p6AfqPX2Y_1\tmotorcycle\nl7p6AfqPX2Y_0\tmotorcycle\nl8-hpsjvPaw_1\ttruck\nl8-hpsjvPaw_2\ttruck\nl9PH4iTXdYs_0\tskateboard\nl9ZtaPU3mB8_0\tknife\nl9j2X0rGhIY_0\tbird\nl9qm2_xBYHQ_0\tcat\nl9urEyEnxnU_1\tknife\nl96fQdjYlLs_0\tknife\nl-MCmCPjH7k_0\tbicycle\nl-QCC522u8A_0\tcar\nl-eNrq-WUQo_0\tboat\nl-98mL8hxMY_0\tbicycle\nl-98mL8hxMY_8\tbicycle\nl_DmnPQxj7k_0\tzebra\nl_scPJDEOuI_0\tbird\nmAE8hqG3eSk_0\tbus\nmAPlm5rMa-w_0\tmotorcycle\nmA5ZTSfwetI_0\ttruck\nmBEMpccxmBw_1\tmotorcycle\nmBEMpccxmBw_0\tmotorcycle\nmBTsr9NKqos_0\tdog\nmBTsr9NKqos_1\tdog\nmBTsr9NKqos_2\tdog\nmBivNgtX2dc_1\tskateboard\nmB2K7Cqy5sA_0\tknife\nmB2K7Cqy5sA_1\tknife\nmB2K7Cqy5sA_2\tknife\nmCA3YMqp59Y_0\ttruck\nmCVUS1SHxdc_1\tbicycle\nmCaHiS25d_c_0\tbird\nmCipOiHzL24_0\tcar\nmCnfYEJ7_nM_1\tboat\nmCplUoipq_M_0\tumbrella\nmCshfLJNDZc_0\ttruck\nmC9gh-poTgc_1\tbus\nmC_yfZI-Kfw_0\tcar\nmC_8_BVmM48_0\tbus\nmDOnks0KH3c_0\tbus\nmDO2Jg5oyPM_1\tumbrella\nmDTcvH2cBAk_0\ttruck\nmDTxktaf2Z0_0\tcat\nmDio2Blh76Y_3\tknife\nmDio2Blh76Y_0\tknife\nmDio2Blh76Y_2\tknife\nmDoksuME2bk_0\tknife\nmECu0xa8vxM_0\tbird\nmEFIkGBIFT4_0\tumbrella\nOKHhm13mZYw_0\tbicycle\nOKJlHLunIJ4_5\ttruck\nOKL9IGXZDqg_0\tcat\nOKOBYUJfsW0_3\tbus\nOKVeF8WX7nM_1\tdog\nOKXlOHWMVYI_0\tbicycle\nOKXlOHWMVYI_2\tbicycle\nOKniUxVle4E_0\tdog\nOK1lt5Hbk8U_0\tbird\nOK1lt5Hbk8U_1\tbird\nOK72g05p_nY_0\tbird\nOLWhwdr2s3U_0\tmotorcycle\nOLqz23zKUZ0_0\tskateboard\nOMJA4N9BRjk_0\tbus\nOMJA4N9BRjk_1\tbus\nOMROj6nJzNU_0\tumbrella\nOMscf19CmfE_0\tcat\nOMszUYfxt-k_0\tdog\nOM7YDn8Aj8U_0\tcat\nONQt1uMKjzM_0\tcat\nONQ7_XR_YoE_0\tcar\nONvq-WMS04Q_0\tbus\nON25DCtbtZI_0\tbird\nON25DCtbtZI_1\tbird\nOOOsedHMhFE_0\tdog\nOPFx79LTPYQ_0\tknife\nOPFx79LTPYQ_1\tknife\nOPNrGuEJKfQ_0\tcat\nOPRxB1VUSzc_0\tbus\nOPZI6LUwe80_0\ttruck\nOPny4vHo5EQ_1\tmotorcycle\nOQWmlKTZbJA_1\tboat\nOQh45xm5OzM_0\tcar\nOQlHcCttP0Y_0\tmotorcycle\nOQ5Q0IvSVJw_0\tskateboard\nOROW-2FDArE_0\tknife\nORjDIPVlrpY_3\tboat\nORyOEpNkmQU_0\tbicycle\nORyOEpNkmQU_1\tbicycle\nOR1UJ2WJswk_0\tumbrella\nOR8th1OG-XE_0\tumbrella\nOSia7sePfOs_0\tdog\nOS2Ga4W91oU_0\tboat\nOTGZvd8HEBs_6\tumbrella\nOTGZvd8HEBs_1\tumbrella\nOTGZvd8HEBs_5\tumbrella\nOTK2nAcxHMw_0\ttruck\nOTSLZbr15Rk_0\ttruck\nOTXkN6YTPBY_2\tbear\nOTvtQllL8ho_0\tgiraffe\nOT1tUDnxHUY_1\tbird\nOT1tUDnxHUY_0\tbird\nOUDo6Wi3Mx0_0\tbicycle\nOUaP4Qe7K_k_0\tskateboard\nOU9OQRs4Ff4_3\ttruck\nOU9OQRs4Ff4_0\ttruck\nOVBUoFuLqko_3\tboat\nOVBUoFuLqko_4\tboat\nOVBUoFuLqko_5\tboat\nOVBUoFuLqko_0\tboat\nOVBUoFuLqko_1\tboat\nOV8AfAYiWos_3\ttruck\nOWe4Ah3rUkU_3\ttruck\nOWe4Ah3rUkU_4\ttruck\nOWwYp5TMtyo_0\tdog\nOW09PhbCZ2c_0\tcat\nOW9poTV3Pw0_0\tmotorcycle\nOXDBegRD_hY_4\tbear\nOXleFWP00RU_0\tskateboard\nOXn_z6r4tTM_0\tbicycle\nOX46gFmob50_0\tboat\nOYAOM3GxoFs_0\tumbrella\nmEhLlaG7ivE_0\tcar\nmEyJVUti9TA_0\tbird\nmFCrAjplP-s_1\ttruck\nmFQSD32phtQ_0\tmotorcycle\nmFoVk3mdfVs_0\tboat\nmFpufihJP34_0\ttruck\nmF3uYMbMsrA_1\ttruck\nmGAgv6gfUIA_1\tgiraffe\nmGP0JfjwxXU_0\tcar\nmGP0JfjwxXU_1\tcar\nmGwC1aGK8EQ_1\tmotorcycle\nmGwC1aGK8EQ_0\tmotorcycle\nmG6Uz7wciew_0\ttruck\nmHNOyEXbwsg_4\tbear\nmHORHQS-7WE_0\tmotorcycle\nmHPMxlukQ30_0\tmotorcycle\nmHfy3z8lzZY_0\tbus\nmHicqYMm5B8_0\tbird\nmHicqYMm5B8_1\tbird\nmHtWCmdt2ck_0\tcat\nmHwCC0jnHbI_0\tbear\nmHwgF2IQCd8_0\tmotorcycle\nmIn-Tkvx0xg_0\ttruck\nmIx7ZeZ2Vv8_1\ttruck\nmJ0xD-4leB8_0\tcat\nmKRUuWYJC2k_0\tmotorcycle\nmKRUuWYJC2k_1\tmotorcycle\nmKRUuWYJC2k_2\tmotorcycle\nmKRUuWYJC2k_3\tmotorcycle\nmKWmMLNNRAQ_0\tzebra\nmKgld1efJss_0\tbus\nmKu97ivRVSM_0\tknife\nmKu97ivRVS
M_1\tknife\nmLDjtK6d-W0_0\tknife\nmLGU-BL1agI_1\tcat\nmLG8EyllDhA_0\tcar\nmLIp-YLvQaA_0\tzebra\nmLgNPTUe_XI_0\tcat\nmLmtVR-AGCk_0\tbear\nmLpoizHo-v4_0\tdog\nmMG1DT2mUAo_0\tskateboard\nmMUflfP_ZMY_0\tcat\nmMXGos8VYQI_1\tdog\nmMt-gdadsY4_1\tdog\nmNFkEphgV18_1\tbicycle\nmNdM6zfb6FA_0\tcat\nmNeHO27e_i4_0\tbus\nmNeHO27e_i4_1\tbus\nmOMvL5XuAZs_0\ttruck\nmOVza6TV55E_0\tbicycle\nmOcxsTLCyfM_0\tumbrella\nmOjLK3sW2lA_0\tskateboard\nmO3CzDojFYs_0\tdog\nmO8cYs6iJlE_0\tcat\nmPCBb4ndGx0_3\tcar\nmPCBb4ndGx0_2\tcar\nmPPaPa0iD_c_0\tdog\nmPV3eyH3uiY_0\tbicycle\nmPW-nXWaC4U_0\tcat\nmP223OT32Rc_0\tknife\nmP223OT32Rc_1\tknife\nmP553XrHpVs_0\tmotorcycle\nmQD1eeRC1Q4_0\tknife\nmQf2FppJTEM_0\tbird\nmRFdLfB4a1s_3\tbear\nmRI6bXmeH0U_0\tknife\nmRMc_QxifPU_0\ttruck\nmRMc_QxifPU_1\ttruck\nmROsO1LIGpo_0\ttruck\nmRYB4i5ld-k_0\tdog\nmRkf0ciWPgI_9\tbird\nmRl54j1LWx8_0\tperson\nmRyO8jtjseY_1\tcar\nmRyO8jtjseY_2\tcar\nmR0m08J8B08_4\tboat\nmR0m08J8B08_0\tboat\nmR0m08J8B08_1\tboat\nmR0m08J8B08_2\tboat\nmSTIz-CdXqU_0\ttruck\nmSf7pQlzXuw_0\tgiraffe\nmSgbTXZAzDk_1\tumbrella\nmSvLPzkZzps_0\tknife\nmSxrYqw4oqg_1\tumbrella\nmSztwZ01Pck_0\tbus\nOYIPropF-hA_2\tknife\nOYTwB7sOFYE_0\tbird\nOYa8DOvcJkU_0\tcat\nOYf6rSUrwxc_0\tdog\nOYnjEcx19SM_0\tcat\nOZBLMb8bGX8_0\tzebra\nOZcS8vrufig_0\tdog\nOZeialzVvBQ_0\tbird\nOZqsh8FFeFo_0\ttruck\nOZqsh8FFeFo_3\ttruck\nOZqsh8FFeFo_4\ttruck\nOZstdGSfBBw_0\tbird\nOZ2Xf6zzI5Q_1\tskateboard\nOZ2Xf6zzI5Q_0\tskateboard\nOaR_KKoBRYA_0\tboat\nOai5vIFRADY_0\ttruck\nOaxb1TjNF5A_0\tboat\nObG3TG10dF0_0\tdog\nObLBCGg01UY_5\tskateboard\nObLBCGg01UY_1\tskateboard\nObLBCGg01UY_2\tskateboard\nObLBCGg01UY_3\tskateboard\nObLBCGg01UY_4\tskateboard\nObMci_3wRII_0\tboat\nObmxs3FqVc0_0\ttruck\nObol9FzC6qw_0\tboat\nOburzWcRnbc_0\tskateboard\nOb5o_Ufzxvo_0\tumbrella\nOb6-UrKFrTY_5\tboat\nOcFGISpeAn0_0\tskateboard\nOcQBa7E9-AI_1\tcar\nOcZG24cCgsU_2\tboat\nOchOHb4q-iE_0\tbicycle\nOcmRyP_n53E_0\ttruck\nOcuYOC6GylA_0\tcar\nOc1tfJzLD3o_1\tbus\nOc1tfJzLD3o_2\tbus\nOc1tfJzLD3o_0\tbus\nOdGHHAUYow4_0\tboat\nOdl4k8y8GfI_1\tskateboard\nOdo1ZvyEbqs_3\tbear\nOdo1ZvyEbqs_6\tbear\nOdo1ZvyEbqs_7\tbear\nOeJet0TZ0Ns_0\tcat\nOecO1BnSygU_0\tumbrella\nOecO1BnSygU_1\tumbrella\nOepCeq6zNOc_0\tumbrella\nOevlneuqSNg_0\tskateboard\nOe3qCUtDCoI_0\tbear\nOfD7c6vcSKc_0\tmotorcycle\nOfFZrl_Ltoo_0\tdog\nOfQ3Y3DEgNI_0\tskateboard\nOfZ9wyeuMaU_0\tskateboard\nOfcr6xsiMGY_1\tknife\nOfmW_n1WB-0_0\tbird\nOfpLj-uw2VM_0\tskateboard\nOfv2SMoyg_8_0\tboat\nOgG3xES-A9s_0\tbicycle\nOgG3xES-A9s_1\tbicycle\nOgtTZgAAtrk_0\tbear\nOg77fxfsfzI_0\tskateboard\nOg83XjWPr30_0\tbird\nOg_sRGRP2fw_0\tmotorcycle\nOhQqfPIVR_o_0\ttruck\nOhh5X9j8-P4_0\tskateboard\nOhvnlA9rzUA_0\tumbrella\nOh4vuNdjqGg_1\tboat\nOh4vuNdjqGg_3\tboat\nOh79QNRx0m0_0\tbus\nOiHa7vhbW0g_0\tumbrella\nOiT0hP6IU_0_1\tcar\nOidiasYmhhk_0\tdog\nOiuo__vi77s_0\tmotorcycle\nOi3BJVuj3f8_0\tbus\nOjst9j_7TPs_0\tmotorcycle\nOjxLYDs9O2w_1\tskateboard\nOkd1qAIUuZo_0\tskateboard\nOkd1qAIUuZo_2\tskateboard\nOlO9xdVfniA_0\tbus\nOlPObAsvFRE_0\tbear\nOlQykWy5_d0_0\tskateboard\nOlVZS0O7Xcc_0\tbus\nOlVZS0O7Xcc_1\tbus\nOlVofey46c8_2\tgiraffe\nOlVofey46c8_0\tgiraffe\nOldv3-_fn3E_0\tmotorcycle\nOlufwgkC9nA_0\tcat\nOl3C5MWakic_0\tbus\nOl63TPS0wjE_0\tskateboard\nOmTHe4jPR30_0\tumbrella\nmTnSFF649v4_0\tmotorcycle\nmTtOhVJYmco_0\tbicycle\nmTuXb1mo6ms_1\tmotorcycle\nmTwbZIC2mjs_0\tumbrella\nmUllN4tCjhg_0\tcar\nmVWf8BrbbQc_0\tskateboard\nmVZVZPz-0uk_0\tknife\nmVztYl0hyR0_1\tbird\nmWOuUa5VTIU_4\tbird\nmWSWZi7ef2Q_0\tbus\nmWULzZ-r0BE_10\tbear\nmWULzZ-r0BE_0\tbear\nmWULzZ-r0BE_1\tbear\nmWULzZ-r0BE_3\tbear\nmWULzZ-r0BE_6\tbear\nmWULzZ-r0BE_7\tbea
r\nmWULzZ-r0BE_9\tbear\nmW85x5O3sQM_1\tbus\nmXYQlH9le8Y_0\tdog\nmXt-xLcVJTM_0\tknife\nmXuPzw4I-wQ_0\tdog\nmXu238CeGfQ_0\tmotorcycle\nmXu238CeGfQ_1\tmotorcycle\nmX3SlrHHN8A_2\tknife\nmX3SlrHHN8A_3\tknife\nmYFsdZ6ZiHg_0\tskateboard\nmYYLIkI65fA_0\tcat\nmYgcUWeYKeE_0\tcat\nmYhujznmuic_0\tmotorcycle\nmYtEL2P4G64_2\ttruck\nmYtEL2P4G64_0\ttruck\nmY6M_QMVm6A_0\tdog\nmY6M_QMVm6A_1\tdog\nmY6M_QMVm6A_2\tdog\nmY6M_QMVm6A_3\tdog\nmZEPBKLKQLU_0\tskateboard\nmZStBRJGz0o_0\tbird\nmZWugKrC8fs_0\ttruck\nmZ0LxtaLk9s_0\tbicycle\nmZ0LxtaLk9s_1\tbicycle\nmZ1ae3QtMqY_1\tskateboard\nmZ6SXifL_5I_0\tcat\nmaANeKOpibc_0\tbus\nmaATqEbCdmA_0\tboat\nmaOsv3Gen0Q_0\tmotorcycle\nmagDXuphf6E_0\ttruck\nmavzqjj21eQ_0\tmotorcycle\nmbFrW58khSM_0\tmotorcycle\nmbtyAyprPhQ_0\tmotorcycle\nmbuozxoOynA_0\tbus\nmb9G4GF56RA_1\tumbrella\nmb9G4GF56RA_2\tumbrella\nmb-nes45JeE_1\tbird\nmb-nes45JeE_0\tbird\nmcCOvhuC86Q_0\tcat\nmcxoHsKM444_0\tcat\nmc0A1NsuIBI_0\tbird\nmdDoBuc7jag_0\tboat\nmdZbK8mOA5Y_0\tmotorcycle\nmdxbRZzm2Fo_4\ttruck\nmdzJDnEx5AI_2\tboat\nmd8Xi01GJ0Q_3\tbird\nmeRJPfPZTpw_3\tbird\nmebu5O8auic_1\tbird\nmehKWfZTJQE_0\tbird\nmfPFvq57cxM_0\tskateboard\nmf4LyMZ6wyY_0\tskateboard\nmgEZVZrBkrg_0\tbicycle\nmgEkK74q1Lo_0\tmotorcycle\nmgTCPe8eM00_1\tumbrella\nmgVB0o0U17w_1\tskateboard\nmgVB0o0U17w_0\tskateboard\nmhSSgOcQwd8_0\tcat\nmhqhGszzAR8_0\tcat\nmiC3NPxHofU_1\tbird\nmiQyhDocW3I_1\tdog\nOm1q-9YbJu0_0\tbus\nOm-NvWZY9XM_0\tbear\nOnKqSIvDmuM_0\tskateboard\nOnemsYazBrQ_0\ttruck\nOnemsYazBrQ_5\ttruck\nOnemsYazBrQ_1\ttruck\nOnemsYazBrQ_2\ttruck\nOnemsYazBrQ_3\ttruck\nOn3Yd3AHFp0_0\tdog\nOn3b0cn9QYE_0\tbear\nOn-GcAXLGZ0_0\tmotorcycle\nOn_5UKUJi7U_0\tcar\nOohVLB8HrmU_0\tbear\nOo2Ux9rWYGo_0\tskateboard\nOo8VLA_C0ho_5\tbicycle\nOpAPsb8a7ck_1\tbicycle\nOpD07kt9gdg_0\tmotorcycle\nOplcFe9OOMA_0\tboat\nOplcFe9OOMA_1\tboat\nOpqmXBQU87o_0\ttruck\nOp3764NveuQ_1\tbicycle\nOqKwAAWtANM_0\tcat\nOqPOCcEAHqk_0\tskateboard\nOqbhEJlCp48_0\tskateboard\nOqc407hvhn8_0\tskateboard\nOqjbl3c9LYU_0\tbear\nOqo2P7az_Jw_2\tmotorcycle\nOrhDfcZqq1E_0\tcat\nOrhipZ8lZHo_2\tbird\nOrhipZ8lZHo_3\tbird\nOrhipZ8lZHo_1\tbird\nOrmkaB0vrG8_0\tdog\nOr-E2m2p4X8_0\tmotorcycle\nOr--toMjK3I_3\tboat\nOsvYa6TnsFI_2\tcar\nOtIV8clF1-o_0\tbird\nOtIV8clF1-o_2\tbird\nOtKIh5W3Uro_0\tbird\nOtw43WNrlsM_1\tbicycle\nOtw43WNrlsM_2\tbicycle\nOtyYn5vEHbM_2\tskateboard\nOuBzZzA9Q7o_0\tbicycle\nOuJMq2UqA-s_0\tdog\nOuVznEsiyyA_1\tmotorcycle\nOui9ZgfJiJE_0\tskateboard\nOu1yCmmAuSY_0\tcat\nOvEJdKYqvF4_2\tdog\nOvEJdKYqvF4_5\tdog\nOvZguhO8UVQ_1\tbird\nOvlqAWflXBs_1\tbus\nOwQktS0dM3k_1\ttruck\nOwUWoVRKf7E_2\tzebra\nOxADHlAb7dM_0\tboat\nOxInmNOeLHY_0\tbicycle\nOxInmNOeLHY_1\tbicycle\nOxInmNOeLHY_2\tbicycle\nOxInmNOeLHY_3\tbicycle\nOxInmNOeLHY_5\tbicycle\nOxInmNOeLHY_6\tbicycle\nOxInmNOeLHY_7\tbicycle\nOxZdEZCJtcw_1\tmotorcycle\nOxkx4bWzOMo_0\tskateboard\nOxp9w62kg0Y_2\tknife\nOxp9w62kg0Y_3\tknife\nOx1idrJvs2E_0\tcat\nOx_8K3szIs0_0\tcat\nOyGSbm149i8_0\tboat\nOzBFCX0vpiU_0\tmotorcycle\nOzCvvptC7o8_1\tbicycle\nOzHYG5kpMbw_0\tcar\nOzItTAjpb9U_1\tknife\nOzwlwZq46z8_3\tbus\nOz89_rVdBV0_0\tknife\nOz89_rVdBV0_1\tknife\nO0JwQIk5pZY_5\tknife\nO0Xl3AF_T0s_0\tdog\nO0dforbCqKM_0\tcat\nO0lfImzhCM4_0\tdog\nO0p5eAP2AyA_0\tboat\nO0rSIIipDT0_0\ttruck\nO02oVGyCZDI_0\tmotorcycle\nO0_GC-1pCYk_0\tbear\nmj155rqWO3k_0\tmotorcycle\nmj5oMHI4Ch0_1\tskateboard\nmkPWdHTd5X8_0\tbear\nmkVYY1EvetE_1\tbicycle\nmkZA72VL1oI_0\tzebra\nmlAzMb61fYU_0\tcat\nmlJYoZVHztc_0\tbear\nmlT2XD9k5Ro_2\tbird\nmlophh4mK4A_2\tbicycle\nmluR2OjQTmU_0\tcar\nmlwpiHjyzIA_1\tmotorcycle\nml4BVi7cCV4_0\tbird\nmmnFugXdqlQ_1\ttruck\nmmnFugXdqlQ_0\ttruck\nmmojCWiaN
YI_0\tcat\nmm_Udf1FG0s_0\tcat\nmnB2hBuySsI_1\tbear\nmnB2hBuySsI_2\tbear\nmn_cuBRZu8M_0\tcat\nmoZR-AtZJnI_0\tcat\nmobg7uEQTmo_0\tumbrella\nmogyHm8Jiok_0\tbird\nmoh4TWSe9Fc_0\tumbrella\nmotFo9G-GLs_0\tskateboard\nmoyxRLHHeiI_0\tbus\nmo5ZpMFELUQ_0\tmotorcycle\nmpO9dBwTeW4_0\tbicycle\nmpYAM0x6L5M_0\tmotorcycle\nmphFmT6TzLM_1\tknife\nmphFmT6TzLM_2\tknife\nmphFmT6TzLM_3\tknife\nmp25XfIJhQY_0\tcat\nmp8USuQKinc_0\tbird\nmqUyhzbCpig_0\tmotorcycle\nmqjilBZByTI_0\tskateboard\nmq5DqmYGVM4_0\tperson\nmrJAakc7Fj8_0\tbus\nmrOsDCuEdRQ_1\tdog\nmrY8gIFiUhE_0\tcar\nmrhfyNpFMq4_1\ttruck\nmryDGEujJno_0\tmotorcycle\nmsNXnb1a02o_0\tknife\nmsbOXFTsSVU_0\tgiraffe\nmszokIKsdUk_0\tbus\nms0_k1aLULU_0\ttruck\nmtITgRv95Sw_0\tdog\nmtU7bHAsI8Y_0\tcat\nmtZHgLGJiu4_0\tskateboard\nmtmzPf2AZuI_0\tskateboard\nmtnURpE0wyE_0\tbicycle\nmtnURpE0wyE_1\tbicycle\nmtnURpE0wyE_2\tbicycle\nmtpTPJtG8F4_0\tmotorcycle\nmt_LZ5UsG_w_5\tknife\nmt_LZ5UsG_w_1\tknife\nmuKQy-1p4fg_0\ttruck\nmuWIt0X4pKQ_0\tdog\nmuZ7xPF8odU_2\tbicycle\nmueRS6nKTdA_0\tbicycle\nmujGcuAzOdo_1\tbear\nmujGcuAzOdo_4\tbear\nmulQIomc988_1\tbicycle\nmulQIomc988_3\tbicycle\nmuoqLEyrhhI_0\tdog\nmursOuNatdc_0\tboat\nmu65YolQZds_0\tknife\nmvEcWlHP6u4_0\tbicycle\nmvYBfdZkCe8_0\tdog\nmvb5jVJeuGE_0\tperson\nmvhEFfQeFCY_0\tbear\nmv2FHxOHSR0_1\ttruck\nmwAPVTEbZGM_0\tskateboard\nmwAPVTEbZGM_1\tskateboard\nmwBKrjOpxkY_0\tskateboard\nmwIroQ9RbXA_0\tbird\nmwrxbdZraRk_0\tcar\nmw5fQZ8EB5I_0\tknife\nO1KrpGSvXAY_0\tknife\nO19Mlhhzqgc_2\tbear\nO2ZR7HPYZCo_0\tcat\nO2u5126JYpY_2\tmotorcycle\nO3DA7qzf2s8_1\tbus\nO3DA7qzf2s8_0\tbus\nO3y2taxKvCA_2\tboat\nO4CfuT5BDcc_0\tskateboard\nO4VQQaJ07zY_0\tcat\nO5b3XcEGZ4M_0\tcar\nO54XRvo6VU0_2\tmotorcycle\nO54XRvo6VU0_0\tmotorcycle\nO59A3lMogSo_0\tgiraffe\nO59A3lMogSo_1\tgiraffe\nO6BXRuq_YcE_0\tdog\nO6BXRuq_YcE_2\tdog\nO6EtCByhFZI_0\ttruck\nO6Jf2yxCTuI_0\tcat\nO6Uln7GkqDA_0\tskateboard\nO6b3a--pX3E_0\tbird\nO6kqsEuKhis_0\tbird\nO69gCmR0LvA_2\tmotorcycle\nO69gCmR0LvA_3\tmotorcycle\nO7ReHsig5IQ_1\tknife\nO7Wrpfzb8_g_0\tbear\nO7lvzdzmX5k_2\tbicycle\nO8BNclEPo5w_2\tdog\nO8BNclEPo5w_1\tdog\nO8f0Dhn1as0_0\tumbrella\nO8sB46kfM28_7\tumbrella\nO8sB46kfM28_6\tumbrella\nO8sB46kfM28_13\tumbrella\nO9Duu2Un8AE_0\tskateboard\nO9Duu2Un8AE_1\tskateboard\nO9Duu2Un8AE_2\tskateboard\nO9EqKcj_CPs_0\tumbrella\nO9iWg3ZqLcU_2\tbear\nO-NZJ4-eoQ8_0\tbus\nO-ZUr1bQzp4_6\tumbrella\nO-kJ078YJq4_7\ttruck\nO-2S79hisI8_0\tzebra\nO-4CV4-x7Tk_0\tdog\nO_D7M00pmjQ_0\tmotorcycle\nO_PCiV3NICw_0\tdog\nO_bAX_ruSNQ_0\tskateboard\nO_fZm7Mblgg_0\tknife\nO_mRo8YLc50_0\tumbrella\nO_3VssPsSVQ_5\tbicycle\nPABLxf3U8qc_2\tbicycle\nPABLxf3U8qc_4\tbicycle\nPABLxf3U8qc_1\tbicycle\nPASMcbnOtUM_1\tbird\nPAZBEMKPQEw_4\tboat\nPAbB9I6MC_o_2\tboat\nPBD1IW-vA6Y_0\tdog\nPBD1IW-vA6Y_1\tdog\nPBQjiKBWtao_1\tbicycle\nPBQjiKBWtao_3\tbicycle\nPBqIT1T_Tl4_2\tumbrella\nPByJb40LNJ4_28\tbicycle\nPByJb40LNJ4_30\tbicycle\nPByJb40LNJ4_3\tbicycle\nPByJb40LNJ4_13\tbicycle\nPByJb40LNJ4_18\tbicycle\nPByJb40LNJ4_22\tbicycle\nPB8sWVNFkDw_1\tmotorcycle\nmxA8JbJ0Do8_0\tcar\nmxFga0703Mc_0\tbird\nmxMCBmJ5owQ_2\ttruck\nmxXH5aZCSJ8_0\ttruck\nmxYl5Y1KAiY_0\tdog\nmxZgNkjbyxk_1\tknife\nmxeuMHAWMxo_6\tknife\nmxeuMHAWMxo_7\tknife\nmxeuMHAWMxo_9\tknife\nmxsTfEQlVgM_0\tmotorcycle\nmxvG6gSVYuo_0\tbicycle\nmxwmtm7rKF8_0\tcar\nmxxiqhZzhEE_0\tmotorcycle\nmxyHDUSMhLs_0\tcat\nmx2i3CYeEEE_0\tbear\nmyRelcztkqo_1\tknife\nmyWzn06fmDI_0\tdog\nmyY1Ijlbknw_1\tbicycle\nmyY1Ijlbknw_4\tbicycle\nmyY1Ijlbknw_5\tbicycle\nmyY1Ijlbknw_2\tbicycle\nmymtiyldysk_0\ttruck\nmzGmbowEFfA_1\tknife\nmzMgXA_v8q4_0\tmotorcycle\nmzYPSSUS--w_2\tboat\nmzYPSSUS--w_0\tboat\nmzdD_0CKek
Q_0\tmotorcycle\nmzfrEqAhHeY_0\tbus\nmzm_D3J8zqQ_0\tumbrella\nmzyu28WsuFs_0\tmotorcycle\nm0MVwwL_0MM_0\tbicycle\nm0gukhoxW0Q_0\tskateboard\nm0gukhoxW0Q_1\tskateboard\nm0gukhoxW0Q_2\tskateboard\nm08CnM1FBR0_0\tcat\nm0_tPmnque0_0\tbicycle\nm0_tPmnque0_1\tbicycle\nm1Qhj9jYohk_0\tbus\nm1pFyDGuVzk_1\tskateboard\nm1pFyDGuVzk_2\tskateboard\nm2StZDAc1yw_0\tbird\nm2uQowbhYDc_1\tbear\nm3AM4AQLDo0_0\tzebra\nm3AM4AQLDo0_1\tzebra\nm3RCOnTUyMY_0\tboat\nm3RCOnTUyMY_1\tboat\nm3SOT8NCOEY_0\tbicycle\nm3cgfDs0_G8_2\tdog\nm3fctWcU4as_0\tmotorcycle\nm3sztS1QC3s_0\tcat\nm3uDjNrfbD8_1\tbear\nm35CwgXROHw_0\tcar\nm4qZSrgBZkc_0\tbird\nm4qZSrgBZkc_1\tbird\nm6NemUzZQFc_1\tmotorcycle\nm6NemUzZQFc_0\tmotorcycle\nm6S6MEQgo2E_2\tmotorcycle\nm6S6MEQgo2E_4\tmotorcycle\nm6hQABEUkQQ_4\tboat\nm6z3sbKYwcc_3\tbus\nm6z3sbKYwcc_4\tbus\nm669S-54lMc_0\tmotorcycle\nm669S-54lMc_1\tmotorcycle\nm7djLwb_a5k_0\tcar\nm7k5fJXTZPI_5\tbird\nm7xUarlXKEw_0\tumbrella\nm7xUarlXKEw_4\tumbrella\nm7xUarlXKEw_1\tumbrella\nm7xUarlXKEw_2\tumbrella\nm8B-pb1I7nc_0\tcat\nm8YA8dXocmg_2\tboat\nm8t6gPBCxr8_0\ttruck\nm9HGLakPqSo_1\tbear\nm-NEL2Jq0nQ_2\tcar\nm-dKTMwfPqo_0\ttruck\nm_JHW_eCKY0_0\tumbrella\nm_dOsn1chuA_1\tbus\nm_dOsn1chuA_2\tbus\nPCC9sJ4Gdxw_0\tcar\nPCeoeGBYrJU_0\tdog\nPCqa_yHJ32g_2\tbicycle\nPC2plr6JdQg_0\tumbrella\nPC_wbEzLNLQ_0\tbicycle\nPC_wbEzLNLQ_1\tbicycle\nPDU92To89cE_1\tbird\nPDlKUKo06lI_0\tknife\nPDvSiH5Pf_0_0\tbus\nPEC7E1t79A8_0\tcar\nPEJFRzyvIBc_0\tbird\nPEJvGdLGOjU_0\tzebra\nPEY59JrOz5I_1\tbird\nPEY59JrOz5I_0\tbird\nPEfpmwboH3w_0\tbus\nPEtsR4S5Zzg_0\tbicycle\nPE_zE5T1ayo_0\tcat\nPFJiRWGaPaw_0\tcar\nPFJiRWGaPaw_1\tcar\nPFa_RCiQVjA_0\tskateboard\nPFjuIzuDmJs_1\tknife\nPF8HAptOIC8_1\tcar\nPGEM0ys1sGE_0\tknife\nPGMimFwsl54_0\tcat\nPGP0PEOv3zw_2\tbear\nPGP0PEOv3zw_0\tbear\nPGipyYSRHso_0\tbicycle\nPGn623RKWNA_1\tcar\nPG8bMx6DuSo_0\tknife\nPHeQ1xoUBgg_1\tboat\nPHmnvFIAtHo_0\tbus\nPHxuey2u6UE_0\tskateboard\nPIDvuyKFIJ8_0\tcat\nPIT2XsuODRE_0\tbird\nPIa767e6xuQ_0\tcat\nPIkhnCxrF9g_0\tcat\nPInIdEVTPn0_7\ttruck\nPI5ROW9ewOg_0\tcat\nPJoSJpMWo0Y_3\tskateboard\nPKTJIVIuSFw_0\ttruck\nPKZXF6Hj0kw_0\tbird\nPKZXF6Hj0kw_2\tbird\nPKZXF6Hj0kw_1\tbird\nPKtfgOMwx4A_0\tdog\nPK-4bXZDtlA_1\tskateboard\nPLO2xY76oh4_0\tmotorcycle\nPLVEvFhXHAE_0\ttruck\nPLVEvFhXHAE_1\ttruck\nPLd8HlO4HYo_1\tcat\nPLd8HlO4HYo_0\tcat\nPLwQ0AHwZgg_1\tskateboard\nPLwQ0AHwZgg_2\tskateboard\nPL2FcMREy_0_0\tbicycle\nPMRnsvlMF4A_0\tskateboard\nPMUqAknVm2Q_0\tmotorcycle\nPMXmKup8jy4_0\tboat\nPMkiPjm9XdY_1\tmotorcycle\nPM028PEyjv0_4\tbear\nPNpDnymoq8w_0\ttruck\nPN6PB668zV4_0\ttruck\nPN86cQumWDU_0\tmotorcycle\nPN_b6R9HxwQ_2\tcat\nPOQalChDjmU_0\tskateboard\nPOW6F8MZMTQ_1\tbird\nPO-OnjGHjDk_0\tbus\nPPI6aG2QFaM_0\tbird\nPPdV273cZC8_0\tskateboard\nPPhYyYHNaQ4_2\tboat\nPPhYyYHNaQ4_3\tboat\nPP5_L_EZsmE_0\tbird\nPQI2zG7I8jI_1\tbus\nPQjM0fGHXds_0\tbird\nnARlDpJ1mzQ_1\tdog\nnAmX6FEKmTg_0\ttruck\nnAsHFcuT16U_0\tskateboard\nnAsHFcuT16U_1\tskateboard\nnBLWjCuzp2g_0\tdog\nnBPhMvA4QIs_0\tdog\nnBXKLM2hLN0_1\tcar\nnBtF1BDR8wE_0\tmotorcycle\nnCKBmlhUPYg_0\tcat\nnCPhfqQsjIQ_0\tmotorcycle\nnCPhfqQsjIQ_1\tmotorcycle\nnCe_XQHu77g_0\ttruck\nnCgjbB7wxoE_0\tbus\nnDsb271W8XU_1\tcar\nnEFtdboPB2w_1\tbear\nnEIawnnD8V8_0\ttruck\nnELgP3wAnm8_0\tdog\nnEM7mY_k1_4_0\tboat\nnEVFHD_9xCw_1\tbird\nnEtqWL5nz_U_0\tbus\nnEtqWL5nz_U_1\tbus\nnEyJKW3bMCc_0\tdog\nnE6lY5G16lE_1\tbicycle\nnE6lY5G16lE_2\tbicycle\nnE6lY5G16lE_0\tbicycle\nnFQvQPqMjpk_0\tcar\nnFZrdv6K4pg_0\tmotorcycle\nnFa5TGw-b5Y_0\tbicycle\nnF28ACSGHM8_0\tboat\nnF444n6UUJE_0\tbear\nnF444n6UUJE_4\tbear\nnGQ3Hq6P5tM_0\tcar\nnGnDoylbNm8_1\tbear\nnHAF0LI8CPk_0\ttruck\nnHAF0LI8
CPk_1\ttruck\nnHAF0LI8CPk_2\ttruck\nnHApjxTb0fI_0\tumbrella\nnHAt_MmKZtA_0\tdog\nnHRioXgb-Fo_0\tbird\nnHbHOfTnrtg_0\tdog\nnHbHOfTnrtg_2\tdog\nnHe8j-osZck_0\tdog\nnH9AXssn9vw_0\tumbrella\nnIIQLgiJpz4_0\tmotorcycle\nnIqnT8pJFz0_0\tknife\nnJF2wWsJCd8_0\tcat\nnJuhir_bIpw_0\tcat\nnJ6iwd_XQso_0\tumbrella\nnJ6uR6SE01w_0\tbicycle\nnKM_iCO6bKs_0\tbus\nnKS1tzA_Hrk_0\tskateboard\nnKUBzJ38GgY_1\tboat\nnK-2zxkNCuA_0\tcat\nnLED5Us6rMo_0\tmotorcycle\nnLL3PMe48dQ_0\tboat\nnLXX8_SfZs0_0\tcat\nnLn2LN33uxg_0\tcat\nnLx78Uv2dmc_3\tskateboard\nnMbLyO3605c_0\tknife\nnMo_-oHL7bU_0\tknife\nnMtxrG4hH5M_0\tskateboard\nnMyhi847s6A_0\tknife\nnNNF1j89RS0_0\tbear\nnNScwJL6ym0_0\tmotorcycle\nnNeaR2o9KMY_0\tboat\nnNwEBFJZT8U_0\tbird\nnOe7o_AaOUs_3\tskateboard\nnOfyHwhf35s_0\tbus\nnPBFLS60OYk_5\ttruck\nnPhpYRGfHlw_0\tbear\nnP5wigEk-3A_3\tknife\nPQsHE_w_Q5I_1\tknife\nPQuYVLwcT7k_0\tskateboard\nPQ4gPP2l3RY_0\tbus\nPQ9ZEkeKIzs_0\tskateboard\nPRIJbfolHpE_0\tumbrella\nPRIw6kIS_oM_0\tmotorcycle\nPRg6CE_exgE_2\tdog\nPRoAGpjxUIQ_1\tdog\nPSdh0lzfg3M_0\tbus\nPSrvUaBxbgU_0\tmotorcycle\nPS_CABKe3Yk_0\tmotorcycle\nPTKnZd28Sac_2\tdog\nPTORa3OCyoU_1\ttruck\nPTxm2ZRQbNg_0\tskateboard\nPTxm2ZRQbNg_5\tskateboard\nPTxm2ZRQbNg_1\tskateboard\nPTxm2ZRQbNg_2\tskateboard\nPT2XxI2FufM_0\tbus\nPT3felQmrwU_1\tbear\nPT6KXLLxhes_0\tbird\nPUFo51ngpe8_0\tbus\nPUeS5CCMoa4_1\tzebra\nPUgpXWoI6nw_2\tbird\nPUiSf8EuinE_2\tbear\nPU3x1IpbndQ_0\tknife\nPU5v_AtaKKw_9\tbird\nPU5v_AtaKKw_2\tbird\nPU5v_AtaKKw_3\tbird\nPU5v_AtaKKw_4\tbird\nPU5v_AtaKKw_5\tbird\nPU5v_AtaKKw_7\tbird\nPU-lRdkaqdg_0\tcat\nPVV-saboi8Q_0\ttruck\nPVXtjPyNMms_0\tdog\nPV6mXKbH058_0\tskateboard\nPWIWGwJZENs_0\tdog\nPWQGxn3c5iQ_2\tknife\nPWQGxn3c5iQ_0\tknife\nPWs7zuWiKZo_0\tbus\nPW7XGdRhgKI_0\tcat\nPW97rAj3_84_0\ttruck\nPXb9PHJghpA_0\tcat\nPYH5FxLfm3M_0\tbus\nPYOwGQUBJXY_1\tboat\nPYWfE8WhDKk_1\tknife\nPYohJALR7DA_1\tmotorcycle\nPYohJALR7DA_2\tmotorcycle\nPYsiftgJNrs_0\tmotorcycle\nPZEun35Hcoo_1\tdog\nPZNXXWorkrY_0\tmotorcycle\nPZSGccVPUm8_1\tbird\nPZjQiLyqHkw_0\ttruck\nPZoM9dv8P3A_1\tbear\nPZuGSUZ1N2w_0\tskateboard\nPZz86aIvTWU_0\tskateboard\nPZ3PfRXk2rQ_0\tcat\nPZ9YkHds_00_0\tdog\nPaVPMVUQwtM_7\tboat\nPaVPMVUQwtM_2\tboat\nPatPjxyHqvY_0\tboat\nPbPu-cnEMqo_0\tcat\nPbUb1IktyM0_0\tmotorcycle\nPbdnWP3AnKQ_1\tperson\nPbhIhdwp7nI_5\tknife\nPceERP83N7g_1\tdog\nPceERP83N7g_2\tdog\nPdER58jIvPg_0\tcat\nPdRRvS5p7TM_4\tbicycle\nPdRRvS5p7TM_0\tbicycle\nPdRRvS5p7TM_1\tbicycle\nPdgOy1B6ByE_0\tperson\nPdgOy1B6ByE_1\tmotorcycle\nPdkRSALRJOE_1\ttruck\nPdne4jISJMk_0\tbird\nPeXODrjPJpU_0\tmotorcycle\nPecvaJstdYE_0\tknife\nPejvg4LHBXw_1\tskateboard\nPeur7tMeMNc_11\tbicycle\nPeur7tMeMNc_12\tbicycle\nPeur7tMeMNc_20\tbicycle\nPeur7tMeMNc_21\tbicycle\nPeur7tMeMNc_5\tbicycle\nPew5sug67ao_0\tdog\nPfKS2L_bxBc_0\tcat\nPfOYq_uyVF8_1\tbird\nnQPFPYvmWtU_0\tskateboard\nnQd33JTaurM_0\tbird\nnQd33JTaurM_2\tbird\nnQmH_VIOI4o_0\tcat\nnRG70FCdevw_0\tbus\nnRP28gcIe5Y_0\tbird\nnRP8SwdbUGw_1\tbear\nnRr5gMvJ77k_0\tskateboard\nnSgcLfwMJu4_0\tdog\nnSvaQz0i9i8_1\tskateboard\nnSvaQz0i9i8_0\tskateboard\nnSz_BdDSYsk_1\tbear\nnS_SY6iDJ2U_3\tbear\nnTjbCPXR408_1\ttruck\nnTjbCPXR408_2\ttruck\nnTjbCPXR408_3\ttruck\nnTjbCPXR408_4\ttruck\nnTz3LA23B4U_0\tskateboard\nnUBgjOAcKBw_0\ttruck\nnUDvay-MfVs_0\ttruck\nnUVSuT7wfDs_0\tmotorcycle\nnUdbTm-FW0I_0\tbus\nnVAOU6r15Ww_3\tknife\nnVTMM3F16j0_1\tboat\nnVi9QbrUrjE_0\tmotorcycle\nnWvR8fiLxGw_0\ttruck\nnXD-zvpjC50_0\tcar\nnXG_fwbJQ-E_0\tcar\nnXjIIWFPSd4_0\tcat\nnXlSVy8CmMk_0\ttruck\nnXpq0p9VBXc_0\tboat\nnXqE-XROi78_0\tbear\nnXqQPuJmTZo_0\tcat\nnYYFquwhxeI_0\tcat\nnYqRuOF_Uao_2\tcar\nnYqRuOF_Uao_0\tcar\nnYqRu
OF_Uao_1\tcar\nnYut3zBSbuM_0\tbear\nnY0xtzTME34_1\tcat\nnY2XarSrm7Y_0\tboat\nnY2XarSrm7Y_1\tboat\nnY3BS_3Mq6o_0\tmotorcycle\nnY3fRfvoh9w_4\tbear\nnY3fRfvoh9w_0\tbear\nnY_icz32gn8_0\tcat\nnZHGbmVkhrE_0\tcat\nnZn4xAbcGSk_0\tcat\nnaE1svJuCTw_0\ttruck\nnaE1svJuCTw_1\ttruck\nnaR-9rNf5fE_0\tskateboard\nnalqTKM6890_0\tumbrella\nnalqTKM6890_1\tumbrella\nnalqTKM6890_3\tumbrella\nnbCix4zvF_E_0\tumbrella\nnbcH6NfapD0_0\tboat\nncuqh0iglYU_2\tskateboard\nncu8gbqMkMc_0\tcat\nnc9aHs1_xzs_2\tmotorcycle\nndBPYFAVIiM_0\tbird\nndJ2_mPZktw_2\tbear\nndJ2_mPZktw_1\tbear\nndMfXyYPfAM_0\tbird\nndNs3q8tY9U_0\tbus\nndO2b-r-Krs_0\tmotorcycle\nndO2b-r-Krs_1\tmotorcycle\nndj7VTH_PhE_0\tbird\nPfi9ZEQtgjY_0\tknife\nPfnFeL4ArA8_0\tskateboard\nPfpTZKfKeKY_2\ttruck\nPfpTZKfKeKY_0\ttruck\nPfpTZKfKeKY_1\ttruck\nPgBMaMqbYqA_0\tmotorcycle\nPgE6BAQmVQQ_0\tumbrella\nPhFFfxYo2_o_1\tdog\nPhJOcszed6A_1\tcar\nPhJ5rQ5VmeY_0\tskateboard\nPhjPRYTcJwQ_1\tcar\nPhyQoxFlTMU_0\ttruck\nPh8Vag9VxRU_0\tzebra\nPh8Vag9VxRU_4\tzebra\nPh8Vag9VxRU_1\tzebra\nPh8Vag9VxRU_2\tzebra\nPh8Vag9VxRU_3\tzebra\nPiO6F4X8k_M_1\ttruck\nPiO6F4X8k_M_2\ttruck\nPiRy-T8d0gQ_1\tskateboard\nPiRy-T8d0gQ_0\tskateboard\nPi_aEuQD5gA_5\tumbrella\nPi_aEuQD5gA_8\tumbrella\nPjAWqdid4rw_1\tumbrella\nPjBOLvrlicY_0\tcar\nPjBOLvrlicY_1\tcar\nPjBOLvrlicY_2\tcar\nPjTtsfl7KZ4_0\tcat\nPjjO6IaSiuo_0\tskateboard\nPjjV-pCjgqc_1\tbird\nPjk0d9eP2gI_0\tdog\nPjm-ptGWuWU_0\tdog\nPjpGwiZ8mK8_0\tbus\nPjuUsIXzSzQ_0\ttruck\nPjwfhUvbBNI_0\tskateboard\nPj9588RHCHM_1\tcar\nPkktNSL9IjE_0\tbird\nPlKRGU_XIzs_3\tboat\nPlfCXfMXcs0_0\tskateboard\nPlfCXfMXcs0_1\tskateboard\nPltDcKetGYw_0\tknife\nPl6ja9eNHzE_3\tskateboard\nPl6ja9eNHzE_4\tskateboard\nPl6ja9eNHzE_1\tskateboard\nPl6ja9eNHzE_2\tskateboard\nPml224S87BE_0\tbird\nPm_2At7P8Yo_0\tbus\nPnt2XmUpT8Q_1\tbear\nPnt2g-tHwK4_0\ttruck\nPn1VFdKk5vQ_0\ttruck\nPoL9E8Yc2vo_0\tcar\nPoL9E8Yc2vo_1\tcar\nPoUPC9WCdiE_5\tdog\nPoV7Wn66UTo_0\tbird\nPolaH6r1Qds_4\ttruck\nPolaH6r1Qds_2\ttruck\nPpI7DZdWcfc_0\tperson\nPpZHxI0N3Wo_1\tmotorcycle\nPptqwylntWQ_1\tboat\nPp6vch1kMqE_0\tcat\nPqJOWTjp0ww_0\tcat\nPqKlF5nnOFs_0\tmotorcycle\nPqNDvGH2-iM_0\ttruck\nPq7tfwAqhIM_0\tmotorcycle\nPrV4kyVAwWE_0\tbear\nPrynn7mNQdQ_0\tknife\nPsVhOsDIopI_0\tumbrella\nPsfddppUmSk_0\tskateboard\nPsgPXqr-N7A_0\tskateboard\nPsvVwYAeKEc_1\tboat\nPsytJKFxV8c_0\tboat\nPszGWhekz-Y_0\tumbrella\nPs9ReRjYLVk_0\tbird\nPs9f-iFqX4M_0\tskateboard\nPtL5k4ew4q0_0\tcar\nPtR7vRI9mn0_0\tmotorcycle\nPtVUPVUYld8_1\tskateboard\nPtnFOxat4hE_0\tbear\nPtq7-B4P9Bw_0\tskateboard\nPt04IRhfVFk_0\tboat\nPt1vVuKH3fk_1\tskateboard\nPuV7SV-FwOU_1\tskateboard\nneA0T50G8TU_0\tcar\nneA0T50G8TU_1\tcar\nneA0T50G8TU_3\tcar\nnewqX6GTbrA_0\tcar\nne8K6jHnOT8_0\tboat\nnfnKsQItZjE_0\tskateboard\nnfxMe31pjec_4\ttruck\nngAKsr62ACQ_0\tknife\nngOtFD7Fxd4_2\tboat\nngZtMG--t4I_2\tbear\nnga4aEZQhJw_0\tknife\nngslQPG3kEI_1\tbird\nnhI3C5y85gw_0\ttruck\nnhdMHfvazLY_0\tumbrella\nnhoO0Evj7OQ_0\tumbrella\nnhoO0Evj7OQ_2\tumbrella\nnh56dQ3T3Mc_2\tboat\nnh56dQ3T3Mc_3\tboat\nniBK6HGH16U_0\tcat\nni3trEPOXck_0\tbird\nnjBEUyoUzlQ_0\tbird\nnjK1OLFCvv4_0\tcat\nnjMC5HAlnMU_1\tumbrella\nnjnGmGuXNdE_1\tknife\nnjn4TkIDn0k_0\ttruck\nnkJxMYiG9Ho_0\tbus\nnkSvwnLvBmw_4\tmotorcycle\nnkSvwnLvBmw_0\tmotorcycle\nnkSvwnLvBmw_2\tmotorcycle\nnkVPvJ3Smrg_0\tcat\nnkZ6NDOt4r4_0\tcat\nnkv5eof4q_M_0\tknife\nnlAePf94uwk_0\tcat\nnlupdJzbyKs_1\tbird\nnl83jp96h9s_2\tknife\nnmmeE-Dfds8_0\tbus\nnmwFYDopqBc_0\tdog\nnmwFYDopqBc_1\tdog\nnnIGNFEnlw8_0\tcar\nnnNkJ09YO9M_0\tmotorcycle\nnnUkcXbXbFM_0\tumbrella\nnnhUxSjBHP8_0\tumbrella\nnoCrLkdGSXw_0\tbus\nnoGmFOxKIr0_0\tperson\nnoIHydna8tw_3\tt
ruck\nnonoyrFpKVA_1\tzebra\nnonoyrFpKVA_4\tzebra\nnonoyrFpKVA_5\tzebra\nnonoyrFpKVA_0\tzebra\nnosbeVXMgAk_0\tknife\nnqN2uJfit8o_1\tcar\nnqPkd_Quci0_0\ttruck\nnqWs5hqd8Ps_0\tbus\nnqbsnsBZULc_0\ttruck\nnqnjh-NO9go_0\tbus\nnq8oHNlU_BQ_0\ttruck\nnrJURcGigjE_0\tmotorcycle\nnrlcROgdPlI_1\tcat\nPumbYcoJ5zE_0\ttruck\nPu8rYMOC0Iw_0\tdog\nPu_KMtdCGZY_1\ttruck\nPvSzSsQ4YCY_0\tcat\nPvuGk2XhJW8_0\tbird\nPv77ig8kBgE_0\tcat\nPwBNm2_oKbQ_1\tzebra\nPwE-w-S8nQc_0\tcat\nPwRb6q11-rw_7\tbear\nPwRb6q11-rw_0\tbear\nPwRb6q11-rw_3\tbear\nPwRb6q11-rw_4\tbear\nPwRb6q11-rw_5\tbear\nPwgxDMnN1SA_0\ttruck\nPwmBtcc64nM_0\tzebra\nPwmBtcc64nM_1\tzebra\nPxN14d54as8_0\ttruck\nPxOYpOxjFFc_0\tcat\nPx02MS-Ywo0_0\tknife\nPyevrWYsc8k_0\tmotorcycle\nPyr-sHCH2wc_4\ttruck\nPyvyP3J13FI_0\tknife\nPyvyP3J13FI_2\tknife\nPy6rKt-beyk_0\tknife\nPy-bAIGcQ1Y_1\tboat\nPzZ-Jr7jMk8_0\tbus\nP0S7eBa6_S4_0\tdog\nP0e6zPkZO5s_1\tknife\nP1_bfvyTku0_0\ttruck\nP2NRNopueuo_0\tumbrella\nP2SgXG0mMWU_0\ttruck\nP2Wv0vXNCqQ_0\tzebra\nP2kLj1DZq3I_1\tbird\nP2kLj1DZq3I_0\tbird\nP2ldC-_7nrs_1\tboat\nP256TqMIJZk_0\tdog\nP3MLJSbWlpg_1\tmotorcycle\nP3jB1tXpVMw_0\tbird\nP3q6jIrZyo4_1\tdog\nP4jpdzY2as8_0\tdog\nP43doVXj3y0_0\tcat\nP5DcP_VLnP4_0\tbear\nP5Gd_8k2O5s_0\ttruck\nP5VAaJj-1Rc_0\tdog\nP5kFeiFmPxw_0\tperson\nP5xsJqm2v6c_1\tmotorcycle\nP5xsJqm2v6c_2\tmotorcycle\nP5xsJqm2v6c_0\tmotorcycle\nP5yrLRVD86M_0\tdog\nP6Qm9u9GIE4_0\tmotorcycle\nP72vKWjKtik_0\ttruck\nP741OzHLvig_0\tdog\nP8BX8WSWRm8_0\tbus\nP8K2yXmSMwY_0\tbird\nP8MCMBcqM00_0\tmotorcycle\nP8MCMBcqM00_1\tmotorcycle\nP8h9iD7kPRQ_0\tbear\nP80sglFzhRI_0\tbear\nnsS9iSqNMew_1\tbus\nntO6br-N89w_0\tcat\nntVDuucoRIk_0\tcat\nnuVxM9m1nb8_0\tmotorcycle\nnuVxM9m1nb8_2\tmotorcycle\nnvIi1SvX-sU_0\tdog\nnvXKI_MhTTE_4\tknife\nnvYTcYLFUvc_2\tdog\nnvdIoQ5mj64_0\tknife\nnvxwnGRXwZY_1\tdog\nnxJkhdCqhc0_0\tdog\nnxUe9yoeHvs_0\tbear\nnxYGMvfgi8g_0\tperson\nnxj_aavOM50_0\tboat\nnxmr9gg0ses_1\tbear\nnx9Uisdggps_3\tknife\nnx9Uisdggps_0\tknife\nnyOaHbw3DLo_0\tcat\nny2pC-BfLT0_2\tdog\nny2pC-BfLT0_0\tdog\nny2pC-BfLT0_1\tdog\nny3nZLL4cQ0_3\tmotorcycle\nnzGPh9yFDTI_5\ttruck\nnzQqdKnkQ9I_0\tzebra\nnzppX26-51c_0\tboat\nnzytVTFaYvs_0\tknife\nnzytVTFaYvs_1\tknife\nnzytVTFaYvs_3\tknife\nnz9DMQ9cPrw_0\tcat\nnz_YTLNErSY_1\ttruck\nn0P8wVonqY4_0\tmotorcycle\nn0T51DP8868_0\tbird\nn1VbuQk_3JY_0\tbird\nn1ZrqU8VSBA_2\tbus\nn2Xd8e_vz0w_0\tcat\nn2Xrvmq2r2I_0\tcat\nn2jvWkboChM_11\tbus\nn2jvWkboChM_10\tbus\nn2jvWkboChM_14\tbus\nn3EKpxnV5U8_0\tcar\nn3bFZVLqNvI_0\tumbrella\nn3iNRmzhO1U_0\tmotorcycle\nn3pRNFU0ovc_0\tbear\nn3pRNFU0ovc_1\tbear\nn38NmPI7Sss_0\tboat\nn4cdQF8d8UI_0\tknife\nn4mWuEmbbEM_0\tbird\nn5J7UxAi_70_5\tcar\nn5J7UxAi_70_1\tcar\nn5J7UxAi_70_3\tcar\nn5J7UxAi_70_4\tcar\nn5i5aZXPgok_1\tbus\nn5ojrsEczYM_1\ttruck\nn5wZ3Zin9uQ_0\tbus\nn5wZ3Zin9uQ_1\tbus\nn6cpTMT-Ci0_1\tcar\nn6sMWDd_j1c_0\tcat\nn6wMhru1Mx0_2\tcar\nn7HaOXaXWJw_2\ttruck\nn7NWTiq_W-c_0\tboat\nP9sfOBt9FI8_1\tbird\nP95Pyq4kglE_0\tknife\nP95Pyq4kglE_1\tknife\nP-EecPZ9zV4_0\tmotorcycle\nP-JbMZ89Hac_0\tcar\nP-SIr3rYBzg_0\tumbrella\nP-lf6syyjAs_0\tcat\nP-tXkGlSa_8_0\tmotorcycle\nP_A56tkbbmk_8\tumbrella\nP_A56tkbbmk_1\tumbrella\nP_A56tkbbmk_7\tumbrella\nP_un1_qBDWo_0\tumbrella\nQATQMMA9vo4_2\tmotorcycle\nQATjEG1LPL0_0\tbear\nQA4LOoc1Crg_0\ttruck\nQA__knfzZZM_0\tbird\nQBZUbx6SUyU_2\tbear\nQBbAz7q7E9c_0\tbus\nQCDUv9KNiWQ_2\tdog\nQCKzW_uA3vY_0\tmotorcycle\nQCl4OGNJdos_1\tbus\nQCqvd4xHZLs_0\tcat\nQCzgTA2cABU_0\tboat\nQDQgSF9ciHk_4\tknife\nQD4ioxu8LAk_0\tcat\nQEMoyw7o_f8_0\tdog\nQEQfoQOU_F8_1\tbird\nQFB5gDukoqg_0\tbus\nQGDhzG35q8c_0\tdog\nQGDhzG35q8c_1\tdog\nQGDhzG35q8c_2\tdog\nQGDhzG35q8c_3\tdog\nQ
GFSTul5MDQ_0\tknife\nQGcd6O1NAkY_1\tbus\nQGcd6O1NAkY_2\tbus\nQGv8jcDgmBY_0\tmotorcycle\nQG25-t2CqY0_0\tbus\nQG5tLrHw5Hk_0\tcat\nQHVkPy7f680_0\tcar\nQHVkPy7f680_2\tcar\nQHhXgNBSjV0_0\tumbrella\nQH2Vo_5h-x8_0\tcar\nQIe7ky6mJO8_0\tbear\nQIqf221MKYo_0\tbird\nQItwshU9sAQ_0\tcar\nQI65w7sMLtA_0\tcat\nQJIgRLU_fU8_0\tmotorcycle\nQJfS9bR2S4I_0\tcat\nQJsyPZ31U-0_0\tcat\nQKG7PXh0UoU_1\tbus\nQKG7PXh0UoU_5\tbus\nQKG7PXh0UoU_6\tbus\nQK9WWQe1WQU_0\tbus\nQLTztdEJ8Ts_0\tmotorcycle\nn7dIhGKEzWM_2\tboat\nn7hFNcaW9rw_0\tknife\nn77hlwjlW_Y_0\tdog\nn8IsRKE9S6k_0\tmotorcycle\nn8kFOAqnMao_0\tmotorcycle\nn9RozRHi7iI_1\tknife\nn9RozRHi7iI_3\tknife\nn9xiuvCd5Lw_1\tbear\nn-fT4fcLulk_0\tbear\nn-fT4fcLulk_4\tbear\nn-gEIxTHjBk_3\tbear\nn_EpRXVan0M_0\tcat\nn_J23TUQdl0_1\tbear\nn_PRUX4zrLw_0\tcar\nn_bIC-prc2E_0\tmotorcycle\noARh23g1-LA_0\tcat\noAhYK7brhk0_0\tdog\noAhYK7brhk0_2\tdog\noBDdj5mkGyc_1\tknife\noBraEPvaSi0_0\tbird\noBuzx2dwA_Q_2\tknife\noBzhDbxL57k_0\tbird\noCUkN7ySpf8_0\tmotorcycle\noCZ3WCK5BZU_1\tmotorcycle\noCf-LgXx6Dw_0\tbird\noDHO9J7vFwI_0\tboat\noDUJYHwNuS8_0\tbus\noDsRL8dvgLA_1\tbus\noDsRL8dvgLA_2\tbus\noF81nMQlA-4_2\tumbrella\noGMlnXjD9R0_0\tbird\noGuIyQiDsy0_2\tboat\noGuIyQiDsy0_0\tboat\noH-XJADp0FM_1\tbear\noH-XJADp0FM_2\tbear\noH-XJADp0FM_4\tbear\noH-XJADp0FM_5\tbear\noI5l1By4H7U_0\tcar\noI_peuU5xk8_5\tmotorcycle\noI_peuU5xk8_0\tmotorcycle\noI_peuU5xk8_3\tmotorcycle\noJD17uQnW_o_0\tdog\noJK_TUb7HoQ_3\tknife\noJLVcOe7CEU_0\tmotorcycle\noJervxxOCvY_0\tdog\noKTgwWf3FKA_0\tdog\nQLxMt8F3oYA_0\tcat\nQL4uK4sZxIU_0\tcat\nQL-hkYCV0BQ_0\tmotorcycle\nQMEIKO8LcEU_0\tmotorcycle\nQMGNMAZLRFY_1\tknife\nQMGNMAZLRFY_0\tknife\nQMHCb6-qyQE_4\tbird\nQMHCb6-qyQE_0\tbird\nQMHCb6-qyQE_3\tbird\nQMJHMIdkS0w_0\tboat\nQMVKAdAOrNY_0\tdog\nQNUGl2q9luk_6\tdog\nQNVeq1dY-gY_0\tbus\nQNV_xE7TePM_0\tumbrella\nQNV_xE7TePM_1\tumbrella\nQNaFT-Ch0Oc_1\tbird\nQNgnQe-MASw_0\tbus\nQNgnQe-MASw_2\tbus\nQNibPLG3_Q0_0\tdog\nQNibPLG3_Q0_1\tdog\nQNibPLG3_Q0_2\tdog\nQNrg73bCl7M_0\tbus\nQN5joVuigKw_0\tdog\nQOCUHjNieAs_0\tcat\nQOGKQmMhYE0_2\tknife\nQOQU7N2vIdQ_0\tdog\nQOcPhbRnGh4_0\tbird\nQOm8zog21wI_0\tbear\nQOp31EvHfRU_0\tcat\nQOs2s2r3hpY_2\tbird\nQOs2s2r3hpY_3\tbird\nQO1T0Gc_cJk_0\tbird\nQPwnbNFbZyY_0\tmotorcycle\nQQAQLPTkDwg_2\tbird\nQQAQLPTkDwg_0\tbird\nQQh4Cpr7tpM_0\tbear\nQQ7EaN8ArmM_0\tmotorcycle\nQQ-MUe-ni48_2\tmotorcycle\nQRXtuZBCXtA_0\tumbrella\nQRZ_xQK1gx8_0\tbus\nQRZ_xQK1gx8_1\tbus\nQR3BO_SYrpQ_0\tbird\nQR5EuXvYbms_0\tcar\nQSK1oOt_5R4_0\tknife\nQSld_dZQvpY_0\tbear\nQTPAOir-oYM_1\tknife\nQThuW0gGa20_0\tdog\nQTlzTtcPjwk_3\tcar\nQT0-oUhQtbk_0\tdog\nQT17xRXmBGA_0\tumbrella\nQVCd5pTgbds_0\tboat\nQVRM0OueKFY_0\tdog\nQVXv0Z1FCdg_0\tmotorcycle\nQVXzwEenImE_0\tbus\nQWBwnViynQA_0\tmotorcycle\nQWFR4XdQv2Y_0\tumbrella\nQWPkooq95So_1\tknife\nQWPkooq95So_2\tknife\nQWSsyFwwdO8_0\tdog\nQWl839SnUOs_0\tdog\nQW1BlOtH1bo_0\tcat\nQXAw2xD7Sgc_0\tmotorcycle\nQXB7sLTVqfM_0\tbear\nQXIGeVZ6Uqk_0\tbear\nQXVQ8S7aUB4_0\tknife\nQXjfaOwHSFo_1\tmotorcycle\nQXwh-lAa3Pk_0\tknife\nQXwh-lAa3Pk_4\tknife\nQXwh-lAa3Pk_5\tknife\nQY2pVib4cZE_0\tmotorcycle\nQZOPux7sysI_1\tdog\nQZOPux7sysI_0\tdog\nQZhaeUKdGYk_0\tmotorcycle\nQZpfX1aipco_1\tcar\nQZui5buTy7k_0\tbus\nQZ3FD2qszF8_0\tmotorcycle\nQZ3MWq6qwJI_0\tbus\nQaGjoVfIWLQ_0\tmotorcycle\nQaM6ny5gEFQ_0\tcat\noKY-KsLfJe4_0\tbird\noKY-KsLfJe4_1\tbird\noKbCNTwLJoI_0\tdog\noKe3Rcvn_TU_2\tcat\noK9TjDSQdSs_0\tcat\noK9erjaiRq4_0\tbus\noLRDfgRIJ-A_1\tbus\noLSjl-qN4M8_0\tdog\noLrou9S3K-0_1\tmotorcycle\noM_FQGUvPIk_1\tmotorcycle\noNFmLa8pU3A_0\tknife\noNLkf1j-v6Q_0\tcat\noNZOg6XoSrY_1\tdog\noNbWPkOIdxg_5\tcar\noNbWPkOIdxg_4\tcar\noNyfqJGJhrY_0\t
motorcycle\noPhE3ECqxf0_0\tbear\noPlhh62giKI_0\tcar\noPrG5_acHVU_2\tbird\noP0yHq-dlRY_0\tmotorcycle\noQV827pXDXA_0\tmotorcycle\noQXdls5ffZc_2\tbear\noQXdls5ffZc_0\tbear\noQXdls5ffZc_1\tbear\noQ7ARK51eHE_1\tdog\noQ7ARK51eHE_0\tdog\noR-7d677bYw_0\tmotorcycle\noSPVZs6_Bd4_0\tmotorcycle\noSVes8uNT5E_0\tmotorcycle\noSao8txZd7A_0\tmotorcycle\noSb17xrITtY_0\tmotorcycle\noSqq5UHBveo_0\tbear\noSxoAvNHNB0_0\tmotorcycle\noS60CV9BFs8_4\tbear\noTYr-qD5JOE_0\tbird\noTj1e8RI67A_0\tboat\noTlwKNdm3rE_0\tdog\noTuVBf1jiPM_3\tbear\noTuVBf1jiPM_0\tbear\noUHa0FV0wwM_1\tdog\noUVJrf3WBrs_1\tbus\noUVJrf3WBrs_3\tbus\noUuQYVAvtgs_0\tbird\noVUE-0XhhsQ_0\tcar\noVUE-0XhhsQ_2\tcar\noVUE-0XhhsQ_3\tcar\noV1vhE0ypUE_0\tcat\noV6wthYHnKA_3\tknife\noWFO_yss01s_0\tcat\noWI2O83zUJk_1\tcar\noWI2O83zUJk_0\tcar\noWYSJgX0THI_1\tdog\noXMW3YjDAqQ_2\tboat\noXaieymppqU_0\tcat\noX4YRc-No7Q_0\tdog\noYY_svQfTs0_1\tboat\nQahJqWjC1v0_0\tmotorcycle\nQakBz4K6hqw_0\tumbrella\nQbHAXTRKk8w_0\tknife\nQbHAXTRKk8w_1\tknife\nQbNU92uEUSc_0\tcat\nQbk_YIfY5q4_7\tknife\nQcLZ-b-0PxY_0\tboat\nQcU2S6m_GJk_0\tdog\nQcuHNJWb-AY_0\tcar\nQc0kbcpophI_0\tcar\nQc5ZW-ni9ZQ_0\tboat\nQeRfpcI_TTQ_0\tbear\nQebJi8pjWkk_0\tcar\nQeeG_4eNyg0_0\tdog\nQe1-M3oVaFs_1\tknife\nQfOdxYnCAKc_0\tbear\nQfOdxYnCAKc_2\tbear\nQfaVCQOGlMM_0\tmotorcycle\nQfgJh_s9H0I_1\tbird\nQfgJh_s9H0I_2\tbird\nQfr5Fc1k7Ic_0\tknife\nQfwCa3YapRg_0\tcat\nQgRbpAz8TuI_0\tbear\nQgRbpAz8TuI_5\tbear\nQgRbpAz8TuI_2\tbear\nQgXjMUMIe4Q_0\tcat\nQhbwOw5dHPg_0\tcat\nQhc3Bb_6Uq4_1\tmotorcycle\nQhc3Bb_6Uq4_0\tmotorcycle\nQhnEXqWFBuw_0\tbird\nQhxv39Tkzbs_1\tdog\nQiHJ2uYByjM_0\tmotorcycle\nQjV-g1D6Be0_0\tmotorcycle\nQjV-g1D6Be0_3\tmotorcycle\nQjV-g1D6Be0_1\tmotorcycle\nQjV-g1D6Be0_2\tmotorcycle\nQjdGUh1FtN4_1\tbus\nQjqhhoIx6nQ_0\tboat\nQj4Mfd45GOE_3\tbus\nQj4Mfd45GOE_0\tbus\nQkPH2LBso5c_0\tumbrella\nQkPLEWaH1bo_0\tcat\nQkkuZ_G7t48_0\tboat\nQkwI5-_QspU_0\tcat\nQk6G7eAHlCs_0\tdog\nQlcaO8pkzd4_0\tbear\nQliTvc637Yk_2\tboat\nQlieDL9xPyU_1\tmotorcycle\nQlxQKy1yzyI_3\tmotorcycle\nQmP4xj9S0mQ_0\tmotorcycle\nQmR3bvWDA1s_0\tboat\nQngGa73C1G8_0\tcat\nQnnV6lKKIgI_1\tknife\nQnuD7a8BM30_0\tdog\nQn9CU5O4FHU_0\tbus\nQn9Z0LVIxbo_0\tcar\nQoTopiP9k2o_2\tbus\noZLdU13R4uU_0\tmotorcycle\noZoTyJNjCJI_0\tbus\noZ6Py8Tx-sA_0\tdog\noZ9qkN9Q1X4_1\tbird\noaXGm1MdDoA_0\tcat\noajaYAOs_oI_1\tknife\noa_73oVbH38_0\tbird\noa_73oVbH38_1\tbird\nobbzKGrHOP0_0\tbird\nob70dcN35yg_0\tbird\nocNVbpQhB5g_0\tcat\nocPgZeXuFqs_0\tcar\nocj3mV2T-ls_1\tbird\noc4RRoFoUo0_0\tboat\nodsCgfz0yM8_0\tmotorcycle\noeIBPeBAEv8_0\tdog\noeVUkEvC3To_0\tboat\nofDmsqy24k0_0\tcar\nofJOKOICGco_0\tmotorcycle\nofvHImJKiAg_1\tbear\nofy3Sid451s_1\tbear\nogIewcLFxLo_0\tdog\nogLOXI-Kvcg_0\tknife\nogzWVQ5TC80_0\tcat\noh7uEf_YE40_1\tdog\noiItk_51540_5\tmotorcycle\noiKC4SxYNJE_0\tbus\noiRnmB7WQjQ_0\tbird\noiu_53B5AAc_0\tmotorcycle\nojFBoKltgfQ_0\tbus\nojFBoKltgfQ_1\tbus\nojFBoKltgfQ_2\tbus\nojQfL_XgMM0_2\tboat\nojz2xLrH-Ts_7\tcar\nokKrvzNb9IU_0\tcar\nokiIzmV8YLw_0\tcat\nokiIzmV8YLw_1\tcat\nokzrd8v1G-w_3\tboat\nomGx_muz0SY_1\tboat\nomngVtTFM1I_0\tumbrella\noms2XkgghV8_0\tboat\nQoqeX-W0RFw_0\tboat\nQoqeX-W0RFw_2\tboat\nQo0mxFOMVGc_0\tdog\nQpAWeYA1pc8_0\tcar\nQpDm5g1dELc_0\tbus\nQpD7CVh2Z_c_3\tknife\nQqdW9IMDHgs_0\tboat\nQqdW9IMDHgs_2\tboat\nQqdW9IMDHgs_3\tboat\nQqhZnuITXs8_2\tbird\nQqhZnuITXs8_3\tbird\nQqkblYN1YOg_0\tbus\nQrEjYyinITM_0\tcar\nQsQFhUd04jI_0\tmotorcycle\nQsQFhUd04jI_1\tmotorcycle\nQsV9BTogrKc_0\tknife\nQt78_24lkeM_0\tboat\nQu8xNQ6Vd04_0\tcat\nQvgmjwKuAeM_0\tumbrella\nQvqNodq3NxA_3\tbear\nQvsjDkJ_oho_0\tcat\nQwALBOsUby0_1\tknife\nQwYxgsacjx0_0\tknife\nQw9UvjSO9_Q_0\tbird\nQxx3WjrGmtE_2
\tbear\nQyc0xSSPT1E_0\tdog\nQzCvBtKWPjg_0\tperson\nQzPFEeJYDcE_0\tumbrella\nQz1R2sk37qg_3\tbear\nQz1R2sk37qg_5\tbear\nQz1R2sk37qg_6\tbear\nQz1R2sk37qg_7\tbear\nQ0HX6Jfnnb8_0\tbird\nQ0J1QbF_Vis_0\tbird\nQ0KhMTnvbxM_0\tbus\nQ01P6P7bm7E_0\tmotorcycle\nQ0-7SsSXMV0_0\tknife\nQ0-7SsSXMV0_2\tknife\nQ1RqyDERgxM_1\tbird\nQ1VXWNHzPqI_1\tcat\nQ1VXWNHzPqI_2\tcat\nQ197NAaQodY_0\tdog\nQ2Sop28spdM_0\tknife\nQ2bha73kLKM_0\tmotorcycle\nQ2vBCDtNAGI_0\tmotorcycle\nQ2zRXVl7bLI_0\tmotorcycle\nQ3ZxsgPKTGY_2\tbird\nQ3ZxsgPKTGY_3\tbird\nonoO4tamBlA_0\tknife\nonpRejbK_VE_0\tumbrella\nooJg7-nxmUw_0\tmotorcycle\nopOHceUyoXk_0\tcat\nopb_qoqO05s_0\tbird\noqUbqkDsSzI_1\tknife\noqvnxRx-0J4_1\tbird\noq4KPP5PYAo_1\tmotorcycle\norQkUDPfTg8_0\tboat\norTFjuPHzxU_3\tdog\norcE_uPKO_c_0\tbird\normZXNXni-U_0\tdog\nosYgSn6yOG0_0\tcat\nos3H6KzvGEg_1\tknife\notHFt4YAKeI_2\tdog\notvQKWvIXAE_0\tbus\nouFwG2YU59c_0\tmotorcycle\nouNsmVT6GRU_0\tcar\nouqFEe0ud_U_0\tmotorcycle\novHCJGK35r0_0\tknife\novHCJGK35r0_1\tknife\novQY7VA36gU_0\tbird\novRBelXjQ-A_0\tbird\novaFSf6jda4_1\tboat\novnkb_MuAlg_0\tbus\nov9yaGUtSEw_0\tbear\nov9yaGUtSEw_1\tbear\nowKiuZVov4U_2\tdog\nowaIraEDvqI_0\tumbrella\nowaIraEDvqI_1\tumbrella\nowb-43QL8Dc_0\tcat\noxKhcqfQV7k_0\tumbrella\noxZ42ECABUo_0\tmotorcycle\noxdCJK5GPS8_0\tdog\noxyS9oNIBaQ_2\tboat\noy52khlb79k_0\tcat\noy885M8rmDM_0\tbus\noy_Efqu_Zhk_0\tknife\no0CsAQaDp1k_0\tboat\no0VArHW9gpE_2\tdog\no0yyk1GchoE_2\tknife\no06poedEjtM_2\tknife\no1RqDbHx0IA_0\tumbrella\no12Lc5yZNco_1\tbear\no2E2ypLvzOo_1\tcar\nQ34_kBWh3QU_0\tmotorcycle\nQ4IH3ZOVKFQ_5\tbus\nQ4TELEHdcjA_0\tmotorcycle\nQ4YD_lW8JFE_1\tknife\nQ4afI-fku0A_0\tknife\nQ4d0z-q-UXQ_0\tbird\nQ4jZeoLzZXs_2\tbird\nQ5DrYh7pcTg_0\tcat\nQ5RabF9bK3o_0\tcar\nQ5cY3mt9NHI_1\tcar\nQ5cY3mt9NHI_3\tcar\nQ6Lg4c8W2XQ_0\tbus\nQ7SXsNoT9cc_1\tboat\nQ7TDTHQoPGc_0\tbird\nQ7TZ3TlDNzI_0\tbird\nQ7V8JjnLW_A_0\tperson\nQ7a4tWAU7-o_0\tdog\nQ8gHTSzR6h0_0\tcat\nQ807ZgwscUk_0\tcat\nQ9LvGsq1Mas_2\tbird\nQ9fbeFbARPY_0\tbird\nQ9qA-2ofuFc_0\tdog\nQ9qA-2ofuFc_1\tdog\nQ-JQokKqXZM_0\tmotorcycle\nQ-STF8c8RSE_0\tmotorcycle\nQ-S6ypfxn4w_1\tbus\nQ-VqbNMPAjE_0\tdog\nQ_a7bRv2dM0_1\tcat\nRAQAfTprH5s_0\tcat\nRAc8MyscjAA_4\tbear\nRAc8MyscjAA_0\tbear\nRAc8MyscjAA_3\tbear\nRAqMmf5FS_Y_0\tdog\nRBNNklw-NjE_0\tcar\nRBNNklw-NjE_1\tcar\nRBdpxD5mMy8_0\tcat\nRBssHo0ygdI_2\tcar\nRBssHo0ygdI_1\tcar\nRBvocl1t9qM_0\tcar\nRBvocl1t9qM_1\tcar\nRCzBVv_Vddo_0\tdog\nRC444E40nLY_0\tcat\nRC_ckl7o7sc_0\tdog\nRDq9wvYEiSI_0\tumbrella\nRD8OUO8u7oQ_0\tperson\nREBpFtJosSc_3\tbear\nREBpFtJosSc_4\tbear\nREBpFtJosSc_0\tbear\nREbm5i5vhcQ_0\tumbrella\nREbm5i5vhcQ_1\tumbrella\nREiwqNPkmew_4\tbear\nREiwqNPkmew_3\tbear\nREjT99mHV_g_0\tcat\nRFIE-agz3SA_0\tdog\nRFUZkHtGWvg_2\tbird\nRFUZkHtGWvg_1\tbird\nRFZG72_XG3U_0\tmotorcycle\nRFcz2p3w1oc_0\tbus\nRFhEq5WF9Io_0\tmotorcycle\nRFqSKdzXQFQ_0\tbus\no2z2zu4L1Ho_0\tcat\no3OdAgJnYlw_0\tumbrella\no3TpeQ7mhIQ_0\tbear\no4It_gqHKoM_0\tbus\no4It_gqHKoM_4\tbus\no4It_gqHKoM_5\tbus\no4bpCoFINtY_0\tbird\no4yKF7ZQge8_0\tcat\no4yxnKhoWrQ_0\tcat\no49yvv0vmJQ_0\tknife\no5TWf69h978_0\tmotorcycle\no5bJmNSZmGE_0\tcat\no6vw6_1pc_g_1\tperson\no6x94jhuMEw_0\tcat\no7UXYGmFww0_0\tknife\no8BqJTsAjnI_0\tboat\no8BqJTsAjnI_2\tboat\no8Gr9wZzcA0_0\tknife\no83uI_tdkrE_2\tcar\no9UpoUWgJWw_1\tmotorcycle\no9YqiVSTBVs_0\tmotorcycle\no9qB9kYt9Bc_0\tmotorcycle\no9vRwcqz30w_2\tbear\no98cAmKOAtk_2\ttruck\no_BpJHlv8bY_0\tcat\no_NYHfqWzBw_0\tcat\npAP3j2UmTAA_0\tcar\npAuz372kMrs_0\tboat\npAvBjM_cSCk_0\tumbrella\npA_f-DZ2FdI_1\tbus\npBj4KFDTwGg_0\tcat\npCPwOGObTcs_0\tumbrella\npCXmnj6vY7o_1\tknife\npCa3Tf27TcY_3\tbear\npCdwcy8npiE_2\tbear\n
pCfA0E-TIXo_0\tmotorcycle\npC9mu-CQ9fg_0\tcat\npDjjH1_G6Z0_1\tmotorcycle\npDjjH1_G6Z0_0\tperson\nRGT-FumEK7I_0\tcar\nRGXgv5gqM8k_0\tumbrella\nRGiE9-CME30_0\tmotorcycle\nRG6y27UUUMI_0\tknife\nRHHOcUqVF80_0\tknife\nRHSfZLRz95o_0\tboat\nRHrnX__15lI_0\tcar\nRIBigSX5_90_1\tbear\nRImslgwYbYk_2\tboat\nRIwUvnURoqs_0\tcat\nRI14PaJgb7E_0\tumbrella\nRJ95URcz63g_1\tmotorcycle\nRJ95URcz63g_0\tmotorcycle\nRKZ4YVnDywQ_0\tknife\nRKa1tJXFTAw_1\tcat\nRK8ZJaF2QHQ_5\tbear\nRK8ZJaF2QHQ_6\tbear\nRLP9M0bfpWo_0\tumbrella\nRMapunE2wEc_0\tboat\nRNPKsQSr2o8_0\tknife\nROfxuPZWET8_2\tbear\nROkJ79Y9T7s_0\tmotorcycle\nRPJ0SJeC5ck_1\tcar\nRPJ0SJeC5ck_2\tcar\nRPWms_VL6wY_0\tbus\nRPhdhEKBBAM_0\tmotorcycle\nRP81F6rIP4w_0\tmotorcycle\nRQ5liX_fOJw_0\tumbrella\nRREV1E0Mbhs_1\tknife\nRSXIvkOJQq0_1\tknife\nRSq71vJH9yc_0\tbus\nRStmsJCm7mo_1\tcar\nRSztnKS1IYI_0\tcar\nRTTysK1hBpg_0\tboat\nRTvVXaA35DI_0\tmotorcycle\nRT0tTVP14XE_1\tumbrella\nRT0tTVP14XE_4\tumbrella\nRT0tTVP14XE_6\tumbrella\npFCVfOX_UJ0_0\tumbrella\npGJMt9Jmk_Y_0\tcar\npGnZDXcCjSc_0\tbus\npHC850dBc-E_1\tcar\npHf0EP0QU9Y_0\tcat\npHueI1IUqzg_0\tcar\npIhqwiD8cks_0\tbus\npJXxn2DRWyI_0\tbus\npJYetmKuiE0_4\tbear\npJj28cMLcZc_0\tknife\npJl14EZ6-Mc_0\tumbrella\npKPRv5lL_DQ_1\tmotorcycle\npKz_g-J2O-A_1\tbus\npK1umZxS4nE_0\tknife\npLEV-uFmv6I_0\tcat\npLI_HgRsRow_4\tbus\npLQDtquQaSE_0\tbear\npLp7vmowqNs_0\tmotorcycle\npMHRlQ2NxeA_1\tboat\npMaT7qWMaV4_1\tbear\npMg2xwjkfVc_4\tumbrella\npNHKmiurxTg_0\tknife\npOCvwILBOCY_0\tboat\npOjuNMevoaM_0\tcar\npOq6RrgrXWY_0\tmotorcycle\npPyL4U8gYpM_0\tcat\npP22coNl6r4_0\tbus\npP5q-Bszfh0_0\tmotorcycle\npQMkOOTP0Lk_0\tcat\npSJypg6az1w_0\tbus\npSjKd_x9ycU_1\tboat\npSz961UYSrY_0\tmotorcycle\nRVvfyYc8jws_0\tumbrella\nRXAW31Vm7pU_0\tmotorcycle\nRXQ-E6_Y__c_1\tcar\nRZAlTTj0Z4o_0\tmotorcycle\nRZAlTTj0Z4o_1\tmotorcycle\nRZL2H_-y3vE_0\tumbrella\nRZrAehHE8aA_2\tknife\nRZrAehHE8aA_0\tknife\nRZ0yQkyeSd8_0\tboat\nRaZy_JiiJ3E_0\tmotorcycle\nRa48MJPLmUw_2\tmotorcycle\nRa48MJPLmUw_0\tmotorcycle\nRa48MJPLmUw_1\tmotorcycle\nRbQTcoldE8M_0\tbus\nRbRqkcC6l_A_0\tknife\nRb5tGSqtlFU_1\tmotorcycle\nRcSm0O0Ylc0_0\tcat\nRdNjlTlNbEA_0\tbus\nRdP6hW5p6ys_4\tcar\nRdUjywh70lM_1\tcat\nRdlWUo9fYmA_0\tmotorcycle\nRd4TvDZNwHs_0\tumbrella\nRfNyu5aooJs_0\tcar\nRfrtTbza00c_0\tboat\nRgBWTOo9hqo_0\tcat\nRgC0rdZCy2c_0\tmotorcycle\nRgFR8z8IzAQ_0\tcat\nRgUwlXzmX4Q_0\tboat\nRhYw3jSi0xY_0\tbus\nRhqz5maRjNs_0\tcat\nRh0zI8vpRWk_2\tknife\nRh7Y69j41EY_0\tbus\nRiCptCjnrqk_0\tcat\nRiOw5wO0xTg_3\tknife\nRid6twPtgIo_0\tcat\npTGbMPGsbCU_0\tcar\npTSbrP23T0s_0\tmotorcycle\npVCT-jEaSPE_1\tbear\npV8hPodV-zY_0\tmotorcycle\npXBltXzZZe0_0\tcar\npXcoix_wq4E_0\tcat\npZC4kceO-0g_0\tbus\npZJDlV5VS3Y_0\tmotorcycle\npZ7RohF8JgE_1\tknife\npaF1hQf-YFk_0\tboat\npalM4nIm6GU_0\tmotorcycle\npba0HVNnmbc_1\tmotorcycle\npcOsY0MSbh0_0\tbus\npcb_jPcg_U8_0\tbus\npcpHHo_gp-Q_0\tcat\npc2aHxzJDtQ_0\tcat\npdDVE4LsX54_4\tcar\npdDVE4LsX54_0\tcar\npdDVE4LsX54_1\tcar\npdDVE4LsX54_2\tcar\npdDVE4LsX54_3\tcar\npdDVE4LsX54_5\tcar\npd0IEWCwpUY_0\tbear\npd1BZjvbFNI_0\tknife\npgKdcFb2680_1\tmotorcycle\npg4m5Fi0Mhc_1\tcar\nRiq87Q_unPU_0\tcat\nRjDo0UDX9Ws_1\tknife\nRjItZnZQBKk_0\tcar\nRjqDxu3wf5o_0\tcat\nRkSzsg-k14I_0\tboat\nRktoQu-Wk0M_0\tcat\nRmFxIMl1tSU_0\tbear\nRmpv0oMhUCc_0\tbus\nRnEWcQNxWGY_0\tmotorcycle\nRnPY8wgKxj4_1\tcat\nRnQ-v8AJQbc_0\tmotorcycle\nRnjU70B_0cU_0\tbear\nRpTRF_oB1-I_2\tbear\nRpn1EcI_ESo_0\tknife\nRp8euBdhkR0_0\tmotorcycle\nRp8euBdhkR0_1\tmotorcycle\nRqs856i0jbs_0\tumbrella\nRrj0e5VSIgY_0\tcar\nRsw947loMaA_0\tcat\nRtSEfWF3PdI_1\tknife\nRtng6SCToEM_0\tcar\nRufUHX-TjyM_0\tbear\nRvHvTQC9Kr4_0\tbear\nRwC5kkt5VDU_1\tperso
n\nRwC5kkt5VDU_5\tmotorcycle\nRwVgY7zgnYM_0\tknife\nRwVgY7zgnYM_1\tknife\nRwYiNSlAYcE_0\tcar\nRwpY0u7t3vE_0\tumbrella\nRwp_dTfFI28_4\tboat\nRwz5T35lNgY_0\tcat\nRw5dzv79c-M_1\tmotorcycle\nRxLwy_iZqKg_1\tbear\nRxWhDOyHYNo_0\tcat\nphJS1iN6HFo_0\tumbrella\nphTyZcbKeQw_5\tbus\npihR4mhfwxM_0\tmotorcycle\npim0lzR8i1g_0\tcat\npix5Cxt_fUM_3\tknife\npjgi60dJalw_0\tcar\npjgi60dJalw_1\tcar\npjhNnA0142Y_0\tmotorcycle\npmszdloBDwA_0\tbear\npmszdloBDwA_2\tbear\npmszdloBDwA_5\tbear\npnMd28rPX7M_0\tmotorcycle\npncTBxEM4WM_0\tbus\npnjPhdpuKGc_0\tmotorcycle\npn0ZChK2ASs_0\tbear\nppAj6dnl62Y_0\tknife\nppAj6dnl62Y_1\tknife\nppJXGy7snUw_1\tknife\nppwjIgwParM_0\tboat\npq1swOh85gc_0\tboat\npq1swOh85gc_2\tboat\npq1swOh85gc_1\tboat\npriwWNrQnkI_1\tbear\nprwglbuvyZ8_1\tknife\nprw0IWDYBUM_0\tcat\npr3LOwTWNnk_1\tbus\npsOuOLCJNk8_0\tcat\npsTqTt0np_I_11\tbear\npsTqTt0np_I_3\tbear\npsTqTt0np_I_6\tbear\npsUASBNRwIE_0\tcar\npsUASBNRwIE_2\tcar\npsUASBNRwIE_4\tcar\nptCx-L_n2Yg_2\tbear\nptNC5ou_rOQ_1\tmotorcycle\npuZUIBS4Ceg_0\tcat\npuw9BfAKOHU_0\tbus\nRxiBbfFH3is_0\tknife\nRxiE2beIvjQ_0\tbear\nRyWLXS1Vrco_0\tknife\nRy4q0UokRjo_0\tmotorcycle\nRzWczJnyzmg_0\tcat\nRzWdM4_lg2c_4\tbear\nRzj5xv434WA_0\tbear\nRzrQOptkjFM_0\tmotorcycle\nRzrQOptkjFM_1\tmotorcycle\nR0hj1kAnMgs_0\tcar\nR0w6j1wmwo0_2\tknife\nR0w6j1wmwo0_3\tknife\nR1Fkwaa8CxU_0\tmotorcycle\nR2FlyNrjZBQ_2\tboat\nR2FlyNrjZBQ_1\tboat\nR2Fps165H9g_2\tknife\nR2XiIC1qbAM_0\tbear\nR2YmjDNC8oo_0\tbear\nR2duXYQhnFA_0\tcar\nR2sy6qbPc4c_0\tcar\nR23ZSmBA2Rg_0\tknife\nR3zhr1iboG0_0\tbus\nR4ktPNCb564_1\tbus\nR4vLajpLSMk_0\tcat\nR5CBlOfUL4w_0\tperson\nR5cIoEcqZ9E_1\tknife\nR5r3AIx_BoU_1\tknife\nR5r3AIx_BoU_2\tknife\nR6PuHPDiwPs_1\tcar\nR6f_t-MqO_s_0\tbus\nR6tsNuvoTus_0\tcar\nR6uZ5JpxQ88_0\tcat\nR6wk6JHQSeI_0\tknife\nR6wsV6cYN_w_1\tbus\nR7w-mdDyhG8_2\tknife\nR8TV702EIqs_0\tknife\nR8j0mjQR4lI_4\tboat\nR84Bj4PKOvE_0\tbear\nR84Bj4PKOvE_1\tbear\nR9LK4x3pO0Y_0\tcat\nR9L1I9EEE0g_0\tmotorcycle\nR9zDzUslz9g_0\tcar\nR9607CioN3U_0\tcar\nR99fGQRB6rM_1\tcar\nR-UGxl6KGoo_1\tbus\nR_LEKDTlVvs_5\tboat\nR_NxqXdz3RA_0\tcar\nR_UPR78XIvA_0\tknife\nSAFptHT-UpM_1\tboat\nSAFptHT-UpM_2\tboat\npvrO7c2imos_4\tcar\npwgqJO3yKHI_0\tcat\npwwdlKxLCqQ_1\tknife\npxBtDlmwesI_0\tcar\npxIlEGkEw5U_0\tcat\npxwl3iVkx08_0\tboat\npyAuY2v2U0I_0\tcat\npyTXP2GZRuM_0\tknife\npyTXP2GZRuM_1\tknife\npyTXP2GZRuM_2\tknife\npy0K3KEYfjA_2\tumbrella\npy0K3KEYfjA_4\tumbrella\npzZvI_g1S8M_0\tmotorcycle\np03u2BJIvyE_0\tbear\np03u2BJIvyE_1\tbear\np1p9QUFIi_8_0\tbus\np1_thBtA2-g_1\tbear\np2pRN03gXFk_0\tcat\np26eBX5AGCo_0\tboat\np3MF-uxvtWk_0\tbear\np32jOqTS5ec_0\tcat\np4MmW7gFlLI_0\tmotorcycle\np4MmW7gFlLI_1\tmotorcycle\np5NxEAfgmro_0\tmotorcycle\np5bLvlU8ua0_0\tmotorcycle\np5lUPYsz-HE_0\tcat\np5vt7l9pW-0_1\tmotorcycle\np5vt7l9pW-0_0\tperson\np5_O08ZNK_c_0\tmotorcycle\np6GkhJZsCi8_0\tcat\np6Rtu645O08_1\tmotorcycle\np6Rtu645O08_0\tmotorcycle\np6dBx3tBRr4_5\tbear\np6dCoZRaQOA_0\tboat\np6dCoZRaQOA_1\tboat\np6dCoZRaQOA_2\tboat\np7OlEbiu5to_0\tcat\np7WwUD62qfY_0\tmotorcycle\np7gjVQyX07A_0\tcat\np7pnYAaDqPI_0\tumbrella\np7sHze5SC0g_4\tbear\np8MEDllYMKg_0\tcat\np8RUtiaGu5U_0\tcat\np8ZUCNMnKpE_0\tcar\np89fuT8e_zk_0\tcat\np8-8JqAgtv0_0\tmotorcycle\np9XjLjpQX-8_0\tcat\np9by0qLqHOQ_0\tknife\nSAkHT1Ozg1c_0\tmotorcycle\nSAkHT1Ozg1c_2\tmotorcycle\nSA1Tb1XbngU_0\tcat\nSB1UBp1PVf4_2\tbus\nSDKsL-L7GbI_0\tknife\nSDbe9JVnITk_0\tknife\nSDk3Y3jzalg_0\tknife\nSEp92WMharw_0\tbus\nSExW2mVb1Mc_2\tcar\nSExW2mVb1Mc_0\tcar\nSExW2mVb1Mc_1\tcar\nSE5Rg8Qpb8c_1\tknife\nSFB2FGuZb6w_0\tmotorcycle\nSFMc-UCkcT8_0\tcat\nSF8c7EeFPPk_0\tmotorcycle\nSHcJfBJBQe4_0\tbear\nSHxyKRdKRc8_0
\tcat\nSHxyKRdKRc8_1\tcat\nSH1noq6GrKw_0\tknife\nSISqo1FBefA_0\tbus\nSIbLAYX2J_A_0\tbear\nSJAZnOnRtag_1\tbear\nSJsxWsiEuTg_0\tmotorcycle\nSKNl4frouUY_1\tknife\nSLEOr8bmm2w_0\tmotorcycle\nSLEOr8bmm2w_1\tmotorcycle\nSLzqvins4p8_0\tbear\nSMYpv_Ea3w8_0\tperson\nSM6BtnyDz5w_0\tcat\nSNZ0xGGmZvU_0\tknife\nSNhnfqJHoI4_0\tmotorcycle\nSNl4Gq_2aVQ_0\tbear\nSNrosAtwG2k_4\tbus\nSOYkQc-toMU_0\tbear\nSOYkQc-toMU_2\tbear\np-J0yyoF0lU_0\tmotorcycle\np_C9Zwt3N5c_0\tumbrella\nqAJSLnflSrQ_0\tcat\nqA5rC8MxCoA_2\tbear\nqCzILENpEWk_0\tboat\nqCz4ft26CAw_2\tknife\nqDobzjbo_aM_0\tcat\nqEcNn2_TQC8_0\tcat\nqEei5YCRiHA_0\tcar\nqEj3r8dtvKg_0\tboat\nqE5fKHWTLMw_1\tbear\nqFR-yuWiHVk_3\tknife\nqFR-yuWiHVk_4\tknife\nqFwugOO0pC0_0\tknife\nqGjYX-iNrPE_0\tboat\nqGohF2oMPS0_0\tmotorcycle\nqGxfRwBmBEc_0\tmotorcycle\nqGxfRwBmBEc_1\tmotorcycle\nqHKwI-35nNU_0\tmotorcycle\nqIIu-MIIYIE_0\tboat\nqINDYDOlPLA_0\tmotorcycle\nqIPydTwqwmI_3\tcar\nqIPydTwqwmI_0\tcar\nqIPydTwqwmI_1\tcar\nqIPydTwqwmI_2\tcar\nqIkNPwKd6ag_0\tknife\nqIkNPwKd6ag_1\tknife\nqInP3tWVtWE_0\tcat\nqJMxoAbx9YU_0\tboat\nqKxQVpaLChg_0\tbear\nqLfa8e4ffQY_0\tbus\nqL6LVXg4Vt4_0\tcat\nqMEMl1FFVIM_2\tumbrella\nSPRByN4TiFg_1\tboat\nSPsOjXxZymk_1\tboat\nSQ_ChhUwWng_0\tbus\nSRUB2kzDBTk_0\tperson\nSSFOqr1ARgI_1\tumbrella\nSSaN8vntuYs_0\tbear\nSTTRwCtQ8_8_0\tboat\nST6aA292Pos_0\tmotorcycle\nSUMc-5fiNzQ_0\tmotorcycle\nSUnPNgAE_ho_2\tboat\nSUyRs3xvc9c_0\tcat\nSVBc-W37yW0_0\tumbrella\nSVSMGxy8Z6I_0\tcat\nSVXaBPnNWO0_0\tknife\nSVXaBPnNWO0_2\tknife\nSVt7vQ8LYZU_0\tbear\nSV70cwNA6o8_0\tknife\nSWJyq_mITbE_0\tboat\nSXmy9BLHr84_0\tbus\nSXvXN3waFWs_6\tbear\nSYCg5NuWc60_0\tmotorcycle\nSaHw7yyoeJg_0\tcat\nSaSgclGWGwE_1\tmotorcycle\nSaSgclGWGwE_3\tmotorcycle\nSa1iRLR4d_c_0\tbus\nSa4L2rdyD10_0\tknife\nSbWCXCuXBqY_1\tbear\nSe3XbBA4N4o_3\tknife\nSe3wtx4DzwE_5\tbus\nSe3wtx4DzwE_1\tbus\nSe3wtx4DzwE_2\tbus\nqMlYXZy1Tow_0\tbus\nqNfS9Y5zs-Y_0\tcar\nqOaABf_zb9U_1\tboat\nqO7qHolBYj4_0\tbus\nqO8D0E7MjOI_0\tcat\nqPGkJRPae6A_0\tbus\nqPMDgkgSTnA_2\tmotorcycle\nqPaox7otsVI_0\tknife\nqPwAWEtJBqA_2\tmotorcycle\nqPwAWEtJBqA_0\tmotorcycle\nqPwAWEtJBqA_1\tmotorcycle\nqPyR7CpZ6l0_0\tknife\nqP88t7GfZc8_0\tknife\nqQaIW7IjCZo_3\tmotorcycle\nqQaIW7IjCZo_0\tmotorcycle\nqQaIW7IjCZo_1\tmotorcycle\nqQdtuBd-SgI_0\tknife\nqQlsMjenbfE_2\tknife\nqQ5tf8s7KrE_0\tbus\nqRO6U_tg6SE_0\tcat\nqR4kw8rf-FU_0\tmotorcycle\nqSQGG-K89mg_1\tknife\nqSgOYqBt_8k_0\tbus\nqSnoKy6T22k_0\tmotorcycle\nqTKtODdEZIg_0\tcat\nqTut_O_LppA_0\tbear\nqT00uOC9JpQ_0\tcar\nqUuTEKdKNNg_0\tcar\nqU7DT4ipQHw_0\tcat\nqVSnhT0Luh8_0\tcat\nqVyAlx4rMTo_2\tbear\nqV7U9CRjZGI_0\tcat\nqWN8i7sJyVg_4\tumbrella\nqWcXQWy7yw8_1\tbus\nqW-zRq8VTV0_0\tboat\nqX8RcjE0tjs_0\tmotorcycle\nqX-YEHlu0Kg_2\tknife\nqZWxhCk8AX0_0\tknife\nqZf1fw737A8_1\tcar\nqZyxILyLOv0_0\tknife\nSfZLu5uG7mc_0\tcar\nSgDdyLB3fFo_1\tmotorcycle\nSgHH9KN_nkY_2\tmotorcycle\nSgOvlqqKbEI_0\tbear\nSgSsk-eeClA_0\tcat\nShHLzcBozxo_1\tboat\nShPl28Zw1kU_8\tcar\nShPl28Zw1kU_3\tcar\nShPl28Zw1kU_7\tcar\nShPl28Zw1kU_9\tcar\nShaLoFJZv-M_1\tknife\nShhC84AwZ04_0\tbus\nSh6uHJRUnP4_0\tcat\nSiSP3Kko4VM_0\tbus\nSi3psXQA46c_0\tbus\nSjLNVLIdpbc_0\tcat\nSj0pcvct_3k_0\tmotorcycle\nSkLwUmczAMo_2\tknife\nSkLwUmczAMo_1\tknife\nSkVIH0IZI1I_0\tmotorcycle\nSlBZM22tlSU_0\tknife\nSlIzgQZ63h4_1\tknife\nSlWmnHWeqIE_0\tboat\nSlYqzpZkWho_0\tbear\nSmCvuBfyU5o_0\tmotorcycle\nSmCvuBfyU5o_1\tmotorcycle\nSn8nb_cv5K4_0\tmotorcycle\nSn8nb_cv5K4_1\tmotorcycle\nSo5dCmgNRtU_0\tbear\nSo-dFj7N07Y_0\tcar\nSpGfQe7sWIQ_0\tmotorcycle\nSpuAy2Z1ejE_0\tboat\nSpx8fHkY0Ac_0\tbear\nSqUzKvBRVmQ_0\tcat\nSqkoepvLN3c_0\tmotorcycle\nSqkoepvLN3c_1\tmotorcycle\nSq-LvVdVwhc_4\t
bear\nSrBwCHcEe4g_0\tcat\nSrPgW-L7Gps_0\tbear\nSrTxMAryank_0\tknife\nSsQb12lMU_w_1\tcar\nSsQb12lMU_w_2\tcar\nqZ0egYy10zs_0\tcat\nqaKYHGIZ8tU_0\tcat\nqantWNz3Z-k_0\tbus\nqc1U41zjMfI_0\tknife\nqeSfa-Xin3s_0\tbear\nqfZHHSjai5Q_3\tmotorcycle\nqfZHHSjai5Q_5\tmotorcycle\nqfZHHSjai5Q_0\tmotorcycle\nqfZHHSjai5Q_4\tmotorcycle\nqfZHHSjai5Q_6\tmotorcycle\nqf4dZ323eu4_0\tcat\nqf5FQP-vjpY_3\tbus\nqgYBD0GBerg_0\tknife\nqglTXvFe5vw_0\tmotorcycle\nqgr1pdkQkKM_1\tknife\nqhTOaoL2B54_0\tbus\nqhgQ0_y6Jr8_0\tmotorcycle\nqhyihSkbubs_1\tbus\nqiW4cUVZCJA_0\tmotorcycle\nqjfkIHC3sNA_0\tbus\nqj1y76m_WFg_1\tcar\nqklXdTo1CKQ_0\ttruck\nqlGmmBY7ITI_0\tcat\nqlGmmBY7ITI_1\tcat\nqlfCKWLj_xU_0\tboat\nqlvwUVksAC4_0\tcat\nqnaQOGGmyhI_1\tmotorcycle\nqo2tG-wOpLI_3\tcar\nqpBRU2SONe0_4\tbear\nqpNPlLO7Wdo_6\tbus\nSsWwZCQR8pA_1\tbus\nSs6lM7iutJ0_2\tboat\nSs-ENa079_Y_0\tcar\nStg0xs4yv5A_3\tbus\nStg0xs4yv5A_1\tbus\nStg0xs4yv5A_2\tbus\nStoHoHg6XHo_0\tmotorcycle\nSuoVrAXkHsM_1\tboat\nSv-Xsjm8Seo_0\tboat\nSwfda4hcQzo_18\tumbrella\nSwfda4hcQzo_0\tumbrella\nSwfda4hcQzo_3\tumbrella\nSwrxLGIVuNg_1\tbus\nSw01FqLPH0o_0\tmotorcycle\nSxxBAhDGWzU_1\tcar\nSybtH9db7tI_1\tboat\nSybtH9db7tI_6\tboat\nSybtH9db7tI_0\tboat\nSybtH9db7tI_4\tboat\nSybtH9db7tI_5\tboat\nSyk5Jc9_tQA_1\tboat\nSywBQoMh8Q8_1\tcar\nSzD0AW8MKxY_1\tcar\nSz3ay4xexe0_0\tmotorcycle\nSz3oWSS6V3s_0\tbus\nS0AoM2Xz64Y_0\tmotorcycle\nS09dKnW798o_0\tcat\nS12WKCebYHg_0\tboat\nS2YoTKzOHW8_0\tumbrella\nS3O_xjPQToU_0\tknife\nS4lNN0zJE4A_0\tcat\nS49Hdfpc-SI_1\tboat\nS5VjgUVKjV0_0\tcat\nS5Z4g_SORHc_3\tknife\nS5Z4g_SORHc_4\tknife\nS6crKzUWKYI_0\tumbrella\nS6ksiMdECu8_0\tumbrella\nqp11ZgRmeck_1\tmotorcycle\nqqd7FMwn5Ks_0\tcat\nqqmk0BKAubw_0\tboat\nqqo83uqRldw_0\tmotorcycle\nqqumKQ_igJQ_0\tmotorcycle\nqqumKQ_igJQ_1\tmotorcycle\nqrHPEAVq_yE_1\tboat\nqrJljeVBE-k_0\tboat\nqrJljeVBE-k_2\tboat\nqrTOqXRwHqM_1\tbear\nqrTm-7zA5FM_1\tmotorcycle\nqrU7MAMf42A_0\tmotorcycle\nqrfZoDvW7wI_2\tbus\nqsFkwL9ikBE_6\tumbrella\nqsFkwL9ikBE_0\tumbrella\nqsbpGZepU_4_0\tmotorcycle\nqs4ACjrDQvo_0\tcat\nqtEJPGYfmb0_0\tmotorcycle\nqtQNJD43Z30_0\tknife\nqthVtX1KeJY_0\tcat\nqtmXJD337Sg_0\tcat\nquMSh4JZfSE_0\tbear\nquSzbk4CkBE_0\tcar\nquZjkqmOTys_0\tcat\nqvAPzGCqVG0_0\tbus\nqvAPzGCqVG0_1\tbus\nqvCVL7reF8g_2\tbear\nqwBsDRYIhwg_0\tcat\nqwI3fCK486I_0\tcat\nqwZ_bpVY018_1\tbear\nqwcgkEVHQS4_1\tmotorcycle\nqxwgvTIA0Oc_0\tumbrella\nqykj452YYlU_0\tboat\nqzjG5RMNfB0_0\tcat\nq0tjDTtHr00_3\tknife\nq1LbqldHuM0_0\tknife\nq1QElQCedrc_0\tumbrella\nq15Lr3-V3qI_2\tmotorcycle\nq2K3ctdaVGU_0\tknife\nq2MasRNKQxI_0\tbus\nq2NfowB59fs_0\tmotorcycle\nq3J7hUfBGGQ_0\tcat\nq4EXWy685Wo_0\tperson\nq4EXWy685Wo_3\tmotorcycle\nq4EXWy685Wo_6\tmotorcycle\nq4EXWy685Wo_7\tmotorcycle\nS7SEfKdokC0_1\tbus\nS7-k1XdAR7Q_0\tcat\nS8BbQRnxfqY_0\tcat\nS8WFgIrdEyI_0\tcar\nS9LooqaA-VA_0\tcat\nS9wDiwQMla8_0\tperson\nS9wDiwQMla8_1\tmotorcycle\nS9wDiwQMla8_2\tmotorcycle\nS9xCWTCFhNc_0\tmotorcycle\nS-T-e07Bgys_0\tmotorcycle\nS_K_nwYUS2o_0\tcat\nS_09gd9e0zE_0\tboat\nS_5w6lmw0DI_0\tknife\nTAzjOrAfzFM_0\tcat\nTA1NbMN7gNo_0\tmotorcycle\nTBvuwl0phUE_0\tmotorcycle\nTBy---hD-FA_0\tbear\nTB9qJG8A-H4_0\tcar\nTCS6svwO2AE_0\tboat\nTCVj-PtxnsQ_0\tbear\nTDSmQkKnGFU_1\tcar\nTENive2WCAw_0\tcat\nTFUV5Dy2MvE_0\tmotorcycle\nTFu5bNUW02Q_0\tbus\nTIZr3Y-PLQM_1\tknife\nTIpoS2Jymv8_2\tknife\nTJJgVPay9LE_0\tbus\nq4zFevdC3-w_1\tknife\nq5D67534lFM_0\tmotorcycle\nq5ESvcujAps_0\tperson\nq5wOimcVyaI_0\tcat\nq6YyhMSTSjg_2\tbus\nq6YyhMSTSjg_3\tbus\nq65QzEDi_jo_1\tmotorcycle\nq8nG4OvfGhY_0\tcat\nq8oKL5zvWZw_0\tcat\nq9QycGD31Co_0\tcat\nq9ZSVLXRUx8_1\tcat\nq9p4QZdwQ0I_0\tboat\nq-Sw3Dx1Xb0_0\tknife\nq-lbxXK_UY8_0\
tbear\nq-nt9k61jqQ_2\tboat\nq_NnyABqOFg_3\tboat\nrAcvNOp95pA_0\tcar\nrApBsMx8ZjU_1\tumbrella\nrAtKVQ_h94Q_1\tcar\nrBLqbf-KdaY_0\tcar\nrBjCxCwLz84_0\tcar\nrBl7T312SPQ_0\tcat\nrBnSmzTRsqE_0\tcar\nrCAA1xoobto_0\tcar\nrCOxllaoO64_0\tbear\nrCrQRhaJeAA_0\tbus\nrDEW_AdTSH4_1\tcat\nrDEdeXsgOdU_0\tumbrella\nrEL7A7rKARs_3\tknife\nrFF0purpqAU_2\tknife\nrGgvqpRsaew_0\tbus\nrGlpoWppAfU_0\tcar\nrG4cDTukyNw_0\tcar\nrG4ld81Rxt8_0\tcar\nrHHUlsaTde8_2\tbus\nTKCXvzTT2ws_0\tumbrella\nTMyv9XNlPGQ_0\tbus\nTQWq_YDrKc0_2\tknife\nTQm0C-2ersM_8\tboat\nTQm0C-2ersM_10\tboat\nTQm0C-2ersM_1\tboat\nTQm0C-2ersM_5\tboat\nTQm0C-2ersM_6\tboat\nTREARdQ16GQ_0\tcar\nTREARdQ16GQ_1\tcar\nTSQwlIeADdw_0\tbear\nTSQwlIeADdw_1\tbear\nTSQwlIeADdw_2\tbear\nTSpUcayboiM_0\tcar\nTS7UuEszy9E_0\tcar\nTTQQky-HcCs_0\tknife\nTTdbV_lHq_s_0\tcat\nTUrnPZr3eXs_0\tbus\nTVjvTR7CrNE_0\tknife\nTVvo40ERO9Y_0\tcat\nTW6cU7OYa60_1\tcat\nTXrnNVUe53o_0\tboat\nTXsQGHJjWhI_2\tknife\nTX2BAlXe5IA_0\tboat\nTX2BAlXe5IA_2\tboat\nrIUepAhKVnM_0\tcat\nrIc3ZEnqjQA_0\tumbrella\nrIezbmq7N9U_3\tbear\nrI79TJwwnW4_3\tknife\nrJGGo2bI150_0\tbear\nrJGGo2bI150_1\tbear\nrJGGo2bI150_2\tbear\nrKiQjOPzf0s_0\tcat\nrKs2bGgU29k_0\tcat\nrLm1866Q28U_3\tumbrella\nrLm1866Q28U_0\tumbrella\nrLm1866Q28U_1\tumbrella\nrNlm7i1BcaQ_0\tcat\nrNw1jiERG4I_1\tcar\nrOtd7pdh-zY_0\tcat\nrO0qo7r4TTc_0\tcat\nrPCOxxRwiTM_0\tbus\nrP6vb-cxVcI_0\tbus\nrQBwAWkz3Ao_2\tboat\nrQBwAWkz3Ao_0\tboat\nrQBwAWkz3Ao_1\tboat\nrRL4f466oNQ_0\tumbrella\nrR9vwlyXtYs_0\tbus\nrSNfdcbzEhE_1\tboat\nrSNfdcbzEhE_2\tboat\nrSNfdcbzEhE_3\tboat\nrSNfdcbzEhE_6\tboat\nrSNzuWEgSeg_0\tcat\nrSWYvSf29vQ_1\tcat\nrTM-3OYHQZA_0\tbear\nrTM-3OYHQZA_9\tbear\nrTreVVS3XVg_0\tumbrella\nrUcsGq10bCk_0\tumbrella\nrWLG9njOx1k_0\tcar\nTYuoW3gezZ4_1\tcar\nTZFETDh9bQo_1\tbear\nTZFETDh9bQo_3\tbear\nTain2YW14ok_0\tumbrella\nTb943q0WnTY_0\tcar\nTcfdUbzZcIc_0\tknife\nTcnKT-jCrxQ_1\tbus\nTcnKT-jCrxQ_0\tbus\nTcnKT-jCrxQ_4\tbus\nTdmeXkKeGmE_0\tknife\nTdxsosl1CIk_0\tumbrella\nTeF2gxyzjF8_4\tknife\nTeM8oPJR8nM_2\tbus\nTeM8oPJR8nM_4\tbus\nTeM8oPJR8nM_7\tbus\nTeSMF-Tw8b8_0\tbus\nTf8ZmK4GZYU_0\tbus\nTf9piH7b4Js_1\tbus\nTihSkV4th6I_0\tumbrella\nTimXSaV1u4M_2\tbus\nTjs55_3zB_o_0\tknife\nTjvHNNlcym8_0\tknife\nTjvHNNlcym8_4\tknife\nTj-U_ZtaHe0_0\tboat\nTkmEiKe_Uto_0\tboat\nTkuUMAPSGiU_1\tcar\nTnN1RBRfLnE_0\tumbrella\nTnN1RBRfLnE_1\tumbrella\nTnXDBpRvE_U_0\tbear\nrWw_OZqgPk8_3\tbus\nrYlL6avPERw_0\tcar\nrZDchhWp8lc_1\tbus\nrZ7XejB4nyk_0\tboat\nrawi3Ka9Oew_1\tcar\nrawi3Ka9Oew_0\tcar\nrbONk59p13Q_0\tbear\nrbWOxoprQ2M_0\tbear\nrbXmAC9QV2A_0\tcar\nrbjK97ECn_A_0\tboat\nrcrE_BJU-n4_0\tknife\nrcrE_BJU-n4_2\tknife\nrfksy8z9X40_0\tcar\nrgWglS6-TTw_1\tknife\nrhIa7DWBXUM_1\tcar\nrjVLfZDg-1g_0\tboat\nrk9SO8fR7-0_1\tbus\nrk9SO8fR7-0_4\tbus\nrlBfiB0epGw_1\tknife\nrlLJTjn9vkk_0\tumbrella\nToclpwxGMe8_0\tbus\nTpKpXHgy7yw_2\tknife\nTpKpXHgy7yw_5\tknife\nTqPnQuSGm2Y_0\tbus\nTqZZfXdm7D0_0\tcar\nTqnj4qeawHg_0\tboat\nTqsQOw3CqXo_0\tbus\nTrXkieSIkII_0\tboat\nTsfcgwFff0k_0\tbear\nTsrQwMo3niY_1\tbear\nTs8Wofx6QYY_0\tcar\nTusmYht5g7o_0\tbus\nTvbiwdoAnv8_0\tboat\nTvvBAOBoHFU_1\tumbrella\nTwEihF94LGQ_0\tumbrella\nTwSkZlbuaEU_0\tbus\nTxUm-m-jFQM_0\tknife\nTyV9inNHHAE_0\tbus\nTy_FDwb_nLY_2\tcar\nT0Mp-gJmMlU_2\tbear\nT0Mp-gJmMlU_3\tbear\nT0tT7l2X1_g_0\tbus\nT1Zywya-PcI_2\tcar\nT1Zywya-PcI_3\tcar\nT1Zywya-PcI_1\tcar\nroNPRQwafcU_2\tbus\nroNPRQwafcU_5\tbus\nroW8_xIYVAk_0\tknife\nroXQ3vv08_A_0\tbear\nrqA8P346qIQ_1\tboat\nrqDqbsbIcc8_0\tbus\nrq5jwk8hqYA_0\tbus\nrq5jwk8hqYA_1\tbus\nrriv5ZJYcJI_1\tknife\nrsMmhzkVg_0_0\tboat\nrta_HO-3L_A_3\tbus\nrwH7x0MR_38_0\tboat\nrwS5mEyV7Go_1\tknife\nrwS5mEyV7Go_2\tknife\nr
wcVAIM0TvE_0\tbus\nrwcVAIM0TvE_1\tbus\nrwu0xKkvzvA_0\tknife\nrxRxMZ6DIjw_2\tumbrella\nrxSJHCdoi0c_0\tbear\nrxm15TcjWqQ_0\tknife\nryBGF3WFvsY_0\tbus\nryBGF3WFvsY_1\tbus\nry0Pnb8VkxU_0\tbus\nry0Pnb8VkxU_1\tbus\nry0Pnb8VkxU_3\tbus\nrzDa9eW_dpg_3\tcar\nrzDa9eW_dpg_5\tcar\nrzOhM6n6Amc_0\tboat\nT21Uim3jGuo_1\tbear\nT3wZwUQ_7q4_0\tumbrella\nT5ZgfFcAd94_0\tbus\nT6QiKZd4bH0_0\tknife\nT7h2fJLtABk_0\tknife\nT8C-sLfGg3A_0\tboat\nT-5AESRu0pM_0\tcar\nUAptbKXXoJI_1\tbear\nUBk45sVKl_o_0\tumbrella\nUCnTA86V3o0_0\tknife\nUDmjHWk8iRk_1\tbear\nUE1kUiVy7LA_1\tcar\nUFPrfB6_TJY_0\tbear\nUFQmHju3MrM_0\tbear\nr1JK0UIvyoM_0\tbus\nr1YNttJqXjI_1\tbear\nr2GN4IDacgM_0\tboat\nr2GN4IDacgM_1\tboat\nr2GN4IDacgM_2\tboat\nr2GN4IDacgM_3\tboat\nr2sw-3mWNEQ_1\tboat\nr4U8cMe6_Uo_0\tumbrella\nr4cneWcmGJc_0\tbear\nr4cneWcmGJc_1\tbear\nr43KKtRQNxw_0\tknife\nr5c09tdbF3U_0\tknife\nr6HzXMpwuOg_0\tboat\nr7V8M9vMX8I_0\tboat\nr8oV5neCRZc_1\tbear\nr-Wqqn-oS_0_0\tbear\nr_squ5DWzV0_0\tbus\nsAa0aLc0rvM_0\tbus\nsAo-z30biYY_0\tcar\nsAqB_9DrpiU_0\tboat\nsCGJB9oAeHo_0\tcar\nsCX1zbdQvbE_0\tboat\nUHvwjd6eSDY_0\tcar\nUH6GKx07mu0_2\tbear\nUIlo6WvfABM_0\tboat\nUJ7xasCu9yw_0\tknife\nUKdl8BrKy4g_0\tknife\nULTTzu_-eQI_2\tbus\nULgPda0ny1Q_0\tboat\nULxGPhbhuwI_0\tumbrella\nUMQ6fAZTiLo_0\tumbrella\nUNfKxOwP1V8_0\tbear\nUNyq1SNbNPk_1\tbear\nUP2WXifDFc0_0\tbus\nUQdjo1v_Hv0_0\tcar\nUQrP0Wa7bfA_0\tbus\nUQ90qkTMSes_0\tumbrella\nURiNDCZBU7E_1\tcar\nURmMAndDPfQ_0\tboat\nUSYudaDNkeU_2\tknife\nUSYudaDNkeU_3\tknife\nUTx1Fw7nQcQ_0\tbus\nUVGq9IRroYo_0\tboat\nsDSmkWE8qw4_0\tknife\nsEnhkLttWlw_0\tbus\nsFgXir9g_Os_0\tcar\nsF2EQhRNlQc_0\tumbrella\nsGpQTqemybM_0\tbear\nsGzXdAI4YSQ_0\tbear\nsG_AruJlxiw_0\tumbrella\nsJA7-N7htNo_0\tbear\nsJL716urwpY_1\tcar\nsJL716urwpY_0\tcar\nsJTLB7bgb0k_1\tknife\nsJsEpKneYMs_1\tbus\nsMm8f8vBx7c_0\tumbrella\nsOQWtx6GiR4_1\tumbrella\nsOQWtx6GiR4_0\tumbrella\nsOvnHbg6d_8_0\tumbrella\nsPDY-ey2kNA_0\tumbrella\nsPDY-ey2kNA_1\tumbrella\nsQEBpH647Mw_0\tumbrella\nsQJr7LooP_s_1\tboat\nsQftML4HXiU_1\tknife\nsQvi3OxMoKU_0\tbear\nsQvi3OxMoKU_1\tbear\nsQvi3OxMoKU_2\tbear\nUWJIq_1uAnA_0\tboat\nUXDmIQTthAE_0\tknife\nUYRhIhbuh34_0\tboat\nUanzlUcmoJY_1\tbus\nUbj2t-7KcJk_2\tcar\nUb5O76sDojg_0\tcar\nUcBLQsI3Dzs_0\tcar\nUcKyiCjpXoA_3\tbear\nUdFEBlYt9tM_0\tumbrella\nUdaAkO2f_pU_0\tbus\nUeQLdrnbe8E_1\tbear\nUeQLdrnbe8E_3\tbear\nUgHNBgeg9cY_3\tknife\nUgh33I0Qxi4_0\tumbrella\nUgkXJsrPys0_0\tumbrella\nUhgJaZWsdCQ_0\tknife\nUhupGJ7k3Q0_0\tknife\nUhvhrEMHY0E_0\tboat\nUhwOdFtF8os_0\tbus\nUiZ3tYMpOic_1\tumbrella\nUjTdR_85bTo_0\tumbrella\nsSPe9VqmSuU_2\tbear\nsS-GtompdcQ_1\tboat\nsUhpJsSmrzA_4\tboat\nsU-mmzCCGmg_0\tbus\nsVbrxAG6jtA_0\tcar\nsVkPUjUh0UQ_0\tknife\nsV9ymK-zZ8A_4\tbus\nsV9ymK-zZ8A_6\tbus\nsWfQh6SsvG0_1\tboat\nsW7n8r3vvl8_1\tknife\nsXwrjhXbAwA_0\tumbrella\nsYE45Xnof5I_3\tbear\nsY1i3-cQv70_2\tboat\nsY3G5eOlysI_0\tbus\nsY_jGNxKdYw_0\tknife\nsY_jGNxKdYw_2\tknife\nsaBAx3Xw2PE_0\tbus\nsbR26E99_A8_0\tbus\nsbmsWqsHD9M_0\tbus\nsb1unJ1sby8_0\tknife\nsb1unJ1sby8_4\tknife\nscFiRRTU5jg_1\tbear\nscJFbu3WboQ_1\tcar\nsc-BJ-WirDo_0\tbus\nsdHNJK0mfWQ_3\tbus\nsdd5ViCUDwY_1\tbus\nsfVwMcMm77E_1\tumbrella\nsfVwMcMm77E_2\tumbrella\nUjxwNRWfxBo_2\tbear\nUkBlnrNOssQ_1\tbus\nUlLwBfXpz4A_1\tbus\nUmAOVqCB6UM_0\tbear\nUmBxMf5cHV4_0\tknife\nUmewKWpE2qE_0\tcar\nUrRiUQPaxic_0\tumbrella\nUrxeEW4FBq4_1\tumbrella\nUtvo55GUNyg_1\tbear\nUutgI7H2EPc_0\tbus\nUutgI7H2EPc_2\tbus\nUutgI7H2EPc_4\tbus\nUutgI7H2EPc_5\tbus\nUutgI7H2EPc_6\tbus\nUvsMOU9XGYk_0\tcar\nUvsup5BdpLM_0\tcar\nUwlk3sF-l38_0\tknife\nUxD-6ScNF1U_0\tbus\nUx3oyD0wLig_0\tboat\nUx_-m16Ntqs_0\tbear\nsgDzqYTo0GI_0\tcar\nsgDzqYTo0GI_2\tcar\n
sghMPNg9wB0_0\tbus\nshgKQ2FcjfM_1\tknife\nsiNixoeB9Ew_0\tcar\nsi8Uk6frpqI_3\tknife\nsjBWnj8kKVs_1\tbear\nsjESht-PXb0_2\tbus\nsje-nlCBYAk_0\tbear\nsk5gj6VnXds_0\tboat\nslGCyLNlI3w_0\tumbrella\nslgsRri0IUU_0\tbus\nsli0aHrS-l4_0\tknife\nsoPkYPTLD-Q_1\tboat\nsoe3qmwZTEE_3\tknife\nsoe3qmwZTEE_4\tknife\nsplTIYA-rtY_3\tknife\nsrUGXKwzLf0_0\tbear\nU0G9nt_JMp4_3\tknife\nU1jXflUgiSo_2\tknife\nU1p1HQ3ZsUo_2\tcar\nU1tGGfRyOzY_1\tcar\nU3BQYG5-Koc_0\tbus\nU3pwXnANDgk_0\tknife\nU3pwXnANDgk_6\tknife\nU4nccTmpY0A_1\tbus\nU7N--AsibJc_1\tknife\nU7fW1r0kRYw_1\tcar\nU7-_NQlr8l0_1\tbus\nU8EGQyjwfEQ_0\tcar\nU85wCYoCIZ4_0\tknife\nU-B7Xkx_rF0_1\tknife\nsuQJeplwaco_1\tbus\nsvZPjH3EGcI_3\tcar\nswj8kdhr03w_0\tbus\nswkyfcVE17I_1\tumbrella\nsyJ4LBRPwjs_1\tknife\nsyY8MaSUvJI_0\tcar\nsyfJEZrVzqA_0\tbus\nsy9XCn-ebrE_0\tcar\nszClXDUETvQ_0\tumbrella\nszW2Gonojss_0\tknife\nszXVjlTlt3w_0\tbear\nsziUCgMKvrM_0\tbus\nsznHM_K2obc_1\tbus\nsz6Zoh7MfnA_0\tbus\ns0ABooHpZjo_0\tknife\ns09Dr7gZ5G8_0\tboat\ns1t73kIOSQU_2\tbus\ns2BVmX4vImY_0\tknife\ns2gkrcGsOxU_1\tbear\ns2nioy3J4RY_3\tboat\ns2nioy3J4RY_1\tboat\ns2nioy3J4RY_2\tboat\ns2qgkHBVQxo_0\tbear\ns2qgkHBVQxo_1\tbear\ns3lwoM0rD2U_2\tboat\ns3-sF0tSY8w_0\tumbrella\ns6BicsP9eBk_0\tknife\nVA3OWlsrD28_0\tumbrella\nVBPWsv5FfbU_0\tbus\nVBPWsv5FfbU_1\tbus\nVBr3P_OGawE_0\tknife\nVB6eUS7LSfM_1\tboat\nVCCevTa32Ng_0\tcar\nVDz1RZU6x5c_0\tbear\nVESEWamKy10_0\tcar\nVFv1UuT7klg_2\tknife\nVGAYYimByOM_0\tcar\nVGwSM3IXcJ0_0\tboat\nVG_OHq6R1AE_0\tbear\nVHiMLGyNYgQ_0\tcar\nVIASAf569_k_0\tcar\nVIxj6BV3kgM_0\tumbrella\nVJZpavOgVEo_0\tumbrella\nVLaCK3u84vI_0\tumbrella\nVMLuyFD54AQ_2\tboat\nVMXrHUjXjyQ_0\tboat\nVMXrHUjXjyQ_1\tboat\nVMi5mAdZyZI_1\tknife\nVMs0jemUzI0_0\tknife\nVNuYRPiFrus_0\tbear\nVN-BCqBlrhs_0\tcar\ns8vzssNUlOA_0\tknife\ntAOx6NFDD9I_0\tknife\ntAxbjy_edDI_0\tumbrella\ntBOSPNFbuv8_0\tumbrella\ntBQRfKeIYZc_2\tbear\ntBgtSnOMOwM_0\tbear\ntBh6HxQHmrs_0\tknife\ntCZLl-MZJp8_0\tcar\ntDYPtg0At_Y_0\tbear\ntE42n_1PW6w_0\tbus\ntFfqpeBbvr0_0\tumbrella\ntFjlTZqwoWI_0\tbear\ntGycfa97LVU_1\tbear\ntIX4eIYzfD8_0\tknife\ntIX4eIYzfD8_1\tknife\ntIs05U9pd04_3\tknife\ntIs05U9pd04_1\tknife\ntIs05U9pd04_4\tknife\ntIs05U9pd04_5\tknife\ntJXbZyaUOD4_0\tcar\ntJhfshKvRmE_1\tbus\ntJhfshKvRmE_4\tbus\ntJ01Y3R3Qmg_0\tumbrella\nVOcplsa6Gq4_2\tknife\nVOcplsa6Gq4_5\tknife\nVPI_Nm3GHHc_5\tbear\nVPI_Nm3GHHc_2\tbear\nVP0u_E6FOsY_1\tcar\nVR_V9WaFYn0_0\tumbrella\nVSj9dXwt7zI_0\tbus\nVSxoLvaJN2Q_1\tbus\nVUcCABjVSO0_0\tcar\nVU2lUX4NdkM_0\tknife\nVU2lUX4NdkM_1\tknife\nVVg7sbsw9vY_0\tbus\nVWpm6_Uhis0_2\tboat\nVX9TPrjMcOg_0\tknife\nVX9TPrjMcOg_4\tknife\nVZ5r0BHRf84_0\tboat\nVaW7Go5pX-c_0\tumbrella\nVa50KanUO94_0\tumbrella\nVbA0B1JcpNY_2\tknife\nVbeIRLOQ5pI_0\tbear\ntKCjJuulqx4_2\tbear\ntKCjJuulqx4_3\tbear\ntKCjJuulqx4_4\tbear\ntKN3Qo0oUoc_3\tknife\ntNvGTzks1yw_0\tcar\ntNvGTzks1yw_1\tcar\ntO0igm1AwqU_0\tbus\ntPae9uGqDog_2\tbear\ntPzWEC_9_H4_3\tknife\ntQpyrprwwc0_0\tumbrella\ntR2sDFGND7g_0\tbear\ntSEneDiCrqg_0\tbear\ntTFTWquOTi8_0\tbus\ntTjbx39rZMk_0\tbus\ntT2pUZ0W33A_0\tbear\ntUHf6Ynx_vI_0\tknife\ntVJE-0uNX1s_0\tboat\ntVTkAh80t5I_0\tumbrella\ntVuL82POt-I_1\tcar\ntXMBGjGduCM_2\tknife\ntXsMGHCKw7U_1\tboat\ntXwfqREzEtI_0\tboat\ntYGp2PFiAUE_0\tknife\ntYas1z25M_4_2\tknife\ntYcNeSisfpI_0\tbear\ntYdhIaTDwiE_1\tknife\nVdLohVQNC5Q_0\tknife\nVdLohVQNC5Q_1\tknife\nVdLohVQNC5Q_5\tknife\nVdLohVQNC5Q_6\tknife\nVeUIJlyGjkY_0\tcar\nVekx17G8mkk_0\tbear\nVfBqMWT6aRM_0\tknife\nVfKgW5eSGsk_0\tumbrella\nVhmj1OGGQuc_1\tbear\nVhn-8bCU70s_0\tbus\nVh21adwevRU_0\tbear\nViXmx_D5BAY_0\tknife\nViXmx_D5BAY_3\tknife\nVizxeIzWEFw_0\tcar\nVjF-G6FQooU_0\tboat\nVjS5w2pc0tA_1\
tboat\nVjvpOU349zY_0\tbear\nVkDn2-1H23o_0\tumbrella\nVkDn2-1H23o_3\tumbrella\nVk43AD4O_hc_0\tboat\nVnrw6Fjmj8I_0\tbus\nVnwwgTO4w_k_0\tumbrella\nVn4aKSlYXX4_3\tbus\nVppPgMZqfEQ_0\tboat\nVp0kah4_m6w_0\tboat\nVp0kah4_m6w_2\tboat\nVqHSuVVKfjs_0\tbus\nVqo2RiAzLnU_1\tcar\nVrnm_kf7OCs_0\tboat\nVsDgOcOWqXw_0\tbear\ntYofvh4_3K4_0\tbear\ntadYkEU5suY_1\tknife\ntbIUesoKv9Q_1\tbus\ntb_hKPkH2co_0\tknife\ntcFQ5kE3PKM_0\tcar\ntcFQ5kE3PKM_1\tcar\ntcSHrlGTFJc_0\tknife\ntc912gGdckQ_0\tboat\ntdjDSO8NFx4_0\tknife\ntdpAPPsHlDQ_1\tbear\nteJyM5tywno_1\tbus\nteQkZqDa1lw_0\tknife\nteb83RDwop4_0\tbear\ntgSfan8G7wo_0\tcar\ntgVXG7H_acI_0\tumbrella\nti3J-8aWPcw_0\tbear\ntjldcvPuif8_0\tbear\ntj4mnSXX2DM_0\tcar\ntm2bmSBR4uE_0\tknife\ntoiMoCxSyKY_2\tboat\ntos1ELGZH0M_2\tumbrella\nVs3Mi3Ch_EQ_0\tbear\nVtHzTaDh4WM_0\tbear\nVtHzTaDh4WM_1\tbear\nVt8DAmG3nHs_0\tcar\nVu4xkIEs6U8_0\tboat\nVvXxRawsOCs_1\tknife\nVvXxRawsOCs_4\tknife\nVwYEgB5HOD0_1\tbus\nVxdUG7Sinyw_0\tcar\nVyDNhpvCuc8_0\tbus\nVyfIuIcelhc_0\tumbrella\nVz3wJsLA_gI_0\tbus\nV0NnR8HLSbo_0\tumbrella\nV0o8kxcOZRc_2\tbear\nV1a9QcSegdw_2\tumbrella\nV1dqjmHNyIY_0\tboat\nV23vmoZYoVw_0\tbear\nV4o7I9cLp-g_0\tbus\nV6nKvvfzWpg_0\tboat\nV64pvhB8sKU_0\tcar\ntrAReSHvUdQ_0\tcar\ntrAReSHvUdQ_5\tcar\ntrAReSHvUdQ_6\tcar\ntrAReSHvUdQ_1\tcar\ntrAReSHvUdQ_2\tcar\ntrAReSHvUdQ_3\tcar\ntrAReSHvUdQ_4\tcar\ntsNhgDUKwHw_3\tknife\nttdTnGOIBmA_0\tumbrella\nttdTnGOIBmA_3\tumbrella\ntvVLkJ0HTQQ_3\tcar\ntvew-P2UPL4_0\tumbrella\ntwiEfNprSoE_0\tknife\ntwiEfNprSoE_1\tknife\ntw7jf9U2-kM_2\tbus\ntxpIIsM1T8U_0\tbear\ntx2dZF1Ckxk_0\tknife\ntx5tKODiGuo_0\tknife\ntx5tKODiGuo_1\tknife\ntyO37NBAS1Y_0\tbus\nt1UtwxOBGvE_1\tknife\nt1vrE0cEB80_0\tbus\nt10FRgv9o5M_0\tbear\nt10FRgv9o5M_4\tbear\nt14PUW9SINk_0\tknife\nt31z17N5skw_0\tknife\nt31z17N5skw_1\tknife\nt31z17N5skw_3\tknife\nt31z17N5skw_4\tknife\nt33TQH8-7tg_2\tboat\nV9UCv2qhsxc_0\tcar\nV9ulnUIQGJU_0\tbus\nV9ulnUIQGJU_6\tbus\nV-KNIu_PsaQ_0\tbus\nV-NvBHig1i0_0\tbear\nV-tMggTxBu4_0\tknife\nV_Bb7A55f-c_0\tcar\nV_dJ2KuqfOA_0\tboat\nV_dJ2KuqfOA_1\tboat\nV_t8pbEf8bA_1\tboat\nWB7fT2tI7Pg_5\tcar\nWCSEuwFm7KU_1\tcar\nWCfc8YGLu1o_1\tbear\nWCfc8YGLu1o_3\tbear\nWDgLmrXq4vg_0\tumbrella\nWHLIJlNh3TQ_1\tknife\nWHQXE5tuTXk_0\tcar\nWHUaoqVF57g_0\tcar\nWIdj4ovuDWQ_0\tbear\nWIdj4ovuDWQ_1\tbear\nt4oaGCoTBZc_0\tcar\nt42tnyTtYWE_0\tboat\nt7OKXKxjHls_6\tbear\nt8X-x_7pv94_0\tcar\nt_-dK1Xhg90_0\tknife\nuAjqm8B-aio_0\tknife\nuB_Hurzj4s0_0\tcar\nuGEDuDcqqvU_0\tboat\nWJ2A2XRRTw4_1\tbus\nWJ_vIH7FJsQ_0\tcar\nWKDhXr_5mbI_0\tknife\nWKKFM7oRSd0_0\tbear\nWKS6aq75gk0_3\tknife\nWKV4j8-G1Nc_0\tknife\nWKfQfA_YQTY_3\tknife\nWKubVTrND7s_1\tknife\nWKzUT3zOIU8_0\tknife\nWLxzHH6iJlk_4\tboat\nWMSu-XOQe5w_4\tbus\nWMSu-XOQe5w_0\tbus\nWMgP1z0x0Io_0\tbus\nWOVTnN-HcZ0_1\tbus\nWOxTA78OlZU_0\tknife\nWPqEyeVtih8_0\tbus\nWPuItCUuEkY_1\tknife\nWQAr1enuPKw_1\tbear\nWQX6ptTAKHg_0\tknife\nWSc0kYKLGTg_0\tbus\nWStgEyiPBBE_0\tcar\nWSvHn5XJq0Q_0\tknife\nWS0DayzAv80_1\tboat\nWS0DayzAv80_2\tboat\nWTXytzbF5lU_0\tumbrella\nWT69VoU2Hps_0\tcar\nWVx9vOoutGo_0\tbus\nWWKuCF2FuYk_0\tcar\nWWm9iMkKk-g_0\tknife\nWW7ib8XAVz0_0\tboat\nuHqj6xQGOYg_3\tbus\nuHqj6xQGOYg_4\tbus\nuHqj6xQGOYg_6\tbus\nuHqj6xQGOYg_7\tbus\nuIKZlXUoHOc_0\tbear\nuJMFDY-BKiQ_1\tbear\nuJMFDY-BKiQ_4\tbear\nuKdOuLYJjrg_0\tknife\nuK-zcpEE8nE_5\tboat\nuLdXkXRsHok_0\tumbrella\nuMK6b2TG8rc_0\tbear\nuMV37U-DNUQ_0\tcar\nuMciOwjd0GU_0\tcar\nuMciOwjd0GU_1\tcar\nuMd1DmjxAZQ_1\tcar\nuMj3V0s7mUo_0\tbus\nuM_jxm7bFp8_0\tboat\nuNDkbmlEYeQ_0\tbear\nuO7OtV3J1AY_0\tbear\nuPE1o5dCYDc_0\tbus\nuQhMkVrdghM_0\tbear\nuRLAyu-3l0A_0\tknife\nuStpLanz0fU_0\tcar\nuTAqzBGMDOc_0\tbus\nWYwRW_t4jb8_
0\tcar\nWZK5IqBtpGE_3\tknife\nWZgxjIvc2nk_0\tboat\nWaEyVBSggwQ_1\tbear\nWaaW6bElWCM_0\tcar\nWb20JaIrr8M_0\tknife\nWb20JaIrr8M_2\tknife\nWcNlbTBZM64_0\tumbrella\nWdIATjW74Pc_0\tboat\nWdYFXDv4TEo_1\tcar\nWdgTHJurLx0_0\tumbrella\nWd0xTEH2d9k_0\tboat\nWejCws8AoxE_1\tknife\nWejCws8AoxE_2\tknife\nWejCws8AoxE_3\tknife\nWe4_tuFKyGE_0\tknife\nWf6hHpxRW_Y_4\tknife\nWgx6hhiRLoA_0\tpotted plant\nWjiMUA6_CkY_0\tboat\nWlm2mLKCMlM_1\tbus\nWlsN6HURFTc_0\tbear\nWmFqo8n67Ok_0\tbus\nuWi9-84kTFQ_1\tbear\nuXHJHV0bwUk_2\tbear\nuXe9WOlTFcs_0\tbus\nuXe9WOlTFcs_1\tbus\nuZgcOYmazsw_0\tbus\nuaJ1g0xJ4QY_0\tbus\nual32V7-KJo_0\tboat\nua_5GosOa-c_1\tbear\nubFoUAh6d4g_1\tknife\nubOiomYqbNs_2\tknife\nudSE-6UkgwM_5\tumbrella\nue1CIlwhPEs_0\tumbrella\nufFT2BWh3BQ_0\tbear\nugWs4v6DbUw_0\tbear\nugsJ5cOmFTg_1\tboat\nuhXcL98XNCY_5\tumbrella\nuhXcL98XNCY_1\tumbrella\nWoxbRmDfLeI_0\tumbrella\nWoxbRmDfLeI_1\tumbrella\nWpCyx-QCMec_0\tbus\nWplsTumdQf8_0\tboat\nWqFFUvf-YJk_0\tknife\nWqxU9aIFmNY_0\tumbrella\nWr5BjrtC4Ts_1\tknife\nWsEiHZFGeFs_3\tumbrella\nWsaP8FyRUCc_0\tcar\nWses8y3NyJ4_1\tbus\nWs9V_B7mqJI_0\tknife\nWuTHL7GtG-8_3\tknife\nWvGzCV5ICZM_1\tboat\nWvuZRZqhxk4_3\tknife\nWvuZRZqhxk4_5\tknife\nWvv8cOXaAZI_0\tbus\nWv-Weuc4E1A_0\tumbrella\nWwLtxfDC7ok_0\tboat\nWxWXB9hf7n0_0\tcar\nW0kDpFkg6xU_0\tboat\nW1z3EAv-eJw_0\tbus\nujnUCtI7gzI_0\tbus\nuj4TRH5r_ww_6\tbus\nuklsFjegS-w_0\tbus\nulzto7-Hl64_3\tbus\nul__w-oqHrw_0\tbus\numjU9X1kuYg_2\tcar\numjU9X1kuYg_4\tcar\numjU9X1kuYg_1\tcar\nuoGBYfJo5Xg_0\tcar\nuo1J9BUgQmk_0\tboat\nurRNkZvzuHI_2\tknife\nurmSoxyi9Vo_0\tboat\nurmSoxyi9Vo_2\tboat\nutmsGeHFdvI_0\tboat\nuuBKDGoTmGY_1\tcar\nuu-UptVYr_A_3\tcar\nuvV7cblR4qc_5\tumbrella\nuvZOzZjBKXY_0\tbus\nuwL5LYln0EM_3\tbus\nuwL5LYln0EM_4\tbus\nuwL5LYln0EM_5\tbus\nuwL5LYln0EM_6\tbus\nuwx7UKo4jcg_1\tboat\nuwx7UKo4jcg_0\tboat\nuwzHiGF1YMM_0\tboat\nW2z3SxorVnI_0\tknife\nW2z3SxorVnI_1\tknife\nW38vB3cw2fA_2\tboat\nW4Is7CI2Sfo_1\tumbrella\nW47ZA0onzb4_0\tknife\nW5dSTfMCj-U_0\tboat\nW5zIkmZyS18_0\tbus\nW51Spbo8SQQ_0\tknife\nW6YCv9ZVVOc_3\tboat\nW6uCEMEi7_E_0\tbus\nW7JkNuRYNr0_2\tknife\nW7JkNuRYNr0_3\tknife\nW7JkNuRYNr0_4\tknife\nW7JkNuRYNr0_5\tknife\nW7yqHDA_RMU_0\tknife\nW8EKt6cG0E8_3\tbus\nW8EKt6cG0E8_7\tbus\nW8EKt6cG0E8_1\tbus\nW8xqW-QD_B4_0\tknife\nW87M2lQeWNk_0\tbear\nW87M2lQeWNk_1\tbear\nW-ZpC_K7Df8_0\tcar\nW-x__78AyrI_0\tboat\nW_Wc7lFraRg_0\tbus\nW_v5wpcibRM_0\tboat\nW_2LqiQ_ico_1\tknife\nXAa2L1v8iJM_1\tumbrella\nXBAOFn8KXFo_0\tbear\nXBn6P-IKuis_0\tperson\nXBssw3bqXL0_2\tbear\nXCZv_AjZo08_0\tknife\nXCu0Ea4zHuQ_2\tbear\nXDtfr902CVM_0\tbus\nXD1OYmmeKic_0\tumbrella\nXD1OYmmeKic_2\tumbrella\nuxFX6p61oPY_0\tknife\nuxlDad59mFc_0\tboat\nuyWVUOcgZHg_0\tbear\nu1OhTXTmuWM_5\tbear\nu1TvbkpmEbs_0\tcar\nu1vMDzyFxzI_0\tbus\nu2BVfAFQ1zU_3\tknife\nu2BVfAFQ1zU_2\tknife\nu2EDuPJijZ8_4\tboat\nu4K3jRl7Gag_0\tcar\nu4S9mlFpt0s_0\tbear\nu4uwaq4uf54_3\tcar\nu4uwaq4uf54_0\tcar\nu6XGBXhCJ18_1\tknife\nu7STs8FCy_g_0\tbus\nu-1HZJXwFHo_0\tumbrella\nXF8B5xjRCF0_0\tcar\nXF8B5xjRCF0_2\tcar\nXF_oHXRGd1o_0\tboat\nXGRZLrZC9zY_0\tboat\nXIlybSpq0mg_0\tbus\nXJmn9i57K3g_0\tbus\nXLvSaN_M6lE_0\tcar\nXL0B2niNRCw_2\tbus\nXMlEA_yRojM_0\tknife\nXMyio1ZckJc_0\tbus\nXQBtgwUzEL0_0\tcar\nXQX5y5BQykU_0\tbus\nXQ6u2yTbu_0_0\tcar\nXQ7UbbPjnDo_1\tknife\nXRenv5AHI_8_0\tboat\nXRpgkCuziGY_0\tumbrella\nXSI7M8s2Tc0_0\tbus\nXS4ow1Wcaro_0\tcar\nXTm-jN1RVHA_0\tumbrella\nu_YKLGqrMKQ_1\tknife\nu_gN-dXNRHI_0\tknife\nvARZcTna8NU_0\tboat\nvBEaeqdPsho_4\tcar\nvBEaeqdPsho_3\tcar\nvDT-DShjnjU_0\tumbrella\nvEMHY2cT6kA_0\tbear\nvEi5gkcTDGY_0\tbus\nvE9zapt1WdI_3\tcar\nvFSRrtT5AL8_0\tbus\nvGbt_XsSaVk_0\tknife\nvGi-DjriLLs_0\t
umbrella\nvHAlsHYE3mo_3\tcar\nvHAlsHYE3mo_0\tcar\nvHXM9IJdVcM_0\tumbrella\nvIQAK-4lMOc_0\tumbrella\nvIgmRBC2ayQ_0\tumbrella\nvJl9QkAbpc8_0\tcar\nvKxCl7DzJjI_0\tknife\nvK8dgvZ5B6A_0\tumbrella\nvLA-mHM7MAQ_0\tknife\nvL-6uNdrCV4_2\tknife\nvN54ADSnJmE_0\tbus\nvOKH_DIjvAU_3\tknife\nXUkTknKOdrs_4\tknife\nXVa23hmwe-E_0\tumbrella\nXVrNN52RTEs_2\tcar\nXVrNN52RTEs_3\tcar\nXV694aCXY8Q_0\tboat\nXW6BQWpl3bI_1\tboat\nXZl5Luzj6v0_6\tbear\nXaSsc3noeLs_0\tboat\nXbHWOyNM3Bw_0\tbear\nXbHeGzyGejE_0\tbear\nXbWrCVe09YA_0\tboat\nXcLl0qSs9bU_1\tknife\nXcifNE0anDo_0\tknife\nXcifNE0anDo_1\tknife\nXc1jzGFyrnE_0\tcar\nXc5LW1FIVE0_2\tknife\nXc5LW1FIVE0_3\tknife\nXdu-98BUgmA_0\tknife\nXd7VbtoAdb0_0\tcar\nXeOwt5KeVfA_2\tcar\nXeR1DgyOa9o_0\tknife\nXekvrqFtazY_0\tbus\nXeplLROyXyA_5\tumbrella\nXgBTEQN_ZxA_2\tbus\nXgBTEQN_ZxA_4\tbus\nXgBTEQN_ZxA_7\tbus\nXhSmPb3cA_A_1\tknife\nXhSmPb3cA_A_3\tknife\nXiEeY5R56EQ_0\tknife\nvOy0N09kGEE_0\tumbrella\nvO56uCHmSjg_0\tumbrella\nvPVpX6GPY5Q_0\tbus\nvPVpX6GPY5Q_1\tbus\nvQ_8ry_dx68_3\tboat\nvRhGvmXk2js_1\tboat\nvRzpk-thwA0_0\tbus\nvTvjeXsP7TM_1\tcar\nvTwSeYRU_WQ_0\tcar\nvTwSeYRU_WQ_2\tcar\nvUKk9LqKVpA_0\tboat\nvUKk9LqKVpA_1\tboat\nvUg2Sr7Jl-Y_0\tumbrella\nvVKZzTBvsF4_1\tbear\nvVNCUA8hss0_0\tboat\nvVUbZCrCqEU_1\tboat\nvV72xGim-is_5\tknife\nvWMiT73g5-k_0\tboat\nvWO0tyaGuaM_0\tumbrella\nvWUAzQ_EEJ4_0\tknife\nvW_aJr-PSvA_0\tbus\nvW_o48lG_0I_0\tbus\nvXX9FmlwVlk_1\tbus\nvXX9FmlwVlk_6\tbus\nvXX9FmlwVlk_0\tbus\nvXX9FmlwVlk_2\tbus\nvXX9FmlwVlk_4\tbus\nvXaLFnwvrX4_0\tbear\nvXvR0RiGzj4_1\tcar\nvYROjLzMqvY_1\tbus\nvYROjLzMqvY_2\tbus\nvYROjLzMqvY_3\tbus\nvYwdLoOa0Rc_0\tumbrella\nvYwdLoOa0Rc_1\tumbrella\nvY1sAfu99Es_2\tbear\nvZznldYVwGA_0\tboat\nvbfWHUjHR2k_0\tbus\nvcdEtOGEEcU_1\tbear\nvcdEtOGEEcU_0\tbear\nvcdEtOGEEcU_2\tbear\nvch6R3EO9Ec_0\tknife\nXjHJiHO6onE_5\tbear\nXmVv2wQSvjs_1\tcar\nXoJahpK73EM_0\tboat\nXoqPCnlpymI_2\tknife\nXpDVw5mS058_0\tboat\nXp591jCTBOA_0\tbear\nXqfkP1lAkyE_4\tbus\nXqfkP1lAkyE_5\tbus\nXqfkP1lAkyE_2\tbus\nXq-5DHWJ1pk_1\tbear\nXrh68BP53Gw_0\tcar\nXriRhjtrlLE_0\tcar\nXu-ZZl_L38Q_2\tboat\nXv9eEVcD2P0_0\tbus\nXwvKtur_QEk_0\tknife\nXxHnDkI1NdQ_0\tbus\nXxHnDkI1NdQ_1\tbus\nvfzGrdk_Mxo_0\tbear\nvhrRnvGSMMY_2\tboat\nvhrRnvGSMMY_5\tboat\nvhrRnvGSMMY_6\tboat\nvhrRnvGSMMY_8\tboat\nvh4BHzMwVT8_2\tboat\nvh4BHzMwVT8_3\tboat\nvi4ktD0dAD4_0\tcar\nvkfdn7gkQh8_1\tumbrella\nvknUR0K4MqM_0\tbus\nvlNLyHxz1TY_0\tboat\nvlaeAly1nZc_0\tboat\nvmr5UiZekic_1\tbear\nvo0WWdM7UCs_0\tbus\nvo6Uzhx2fcw_0\tboat\nvpItyB8epmQ_4\tboat\nvp8NiaEmk2M_0\tbus\nvqeybXtIwxE_3\tumbrella\nvrK5lDQJnmc_0\tcar\nXy1w-6sjVS0_0\tbus\nXzj_w2QkjRg_0\tumbrella\nX0iu2HmUYfY_0\tumbrella\nX0nevXM5278_0\tcar\nX1drOgA68EU_0\tbear\nX2zWe7ayseQ_1\tbear\nX3ST-FA3VS0_4\tbear\nX4YaqObAEns_1\tbus\nX4kxk4G-BOs_0\tbear\nX4kxk4G-BOs_1\tbear\nX6Y6e6qsVOc_1\tbear\nX6tuO-hL1cg_0\tboat\nX6z7yGyP3UY_0\tboat\nX7AJSe6kUz4_0\tboat\nX7PChwjgRog_0\tboat\nX7mkuAPcpg0_0\tbus\nX8Wc00FiJn8_1\tbear\nX8lHVX9uGm4_0\tcar\nX9dNz1MhFTM_0\tcar\nvtOaPYxGauU_0\tboat\nvwp5f1sTcOM_2\tboat\nvxEizaWVZ2E_0\tcar\nvx7S4ISNz90_0\tbear\nvzKEVGD3E3w_0\tboat\nvzKEVGD3E3w_1\tboat\nvzmWbtFBxb0_0\tbus\nv0DjGmLiuao_0\tcar\nv0P7DOSAooM_0\tboat\nv0Uh3fazz7A_4\tbear\nv4CWziKFAvg_0\tboat\nv4CWziKFAvg_1\tboat\nv4TWD1hSObU_0\tumbrella\nv4TWZQM-t_M_0\tboat\nv4wheqJ7qmw_0\tcar\nv4-PEShPpKo_1\tcar\nv4-PEShPpKo_0\tcar\nX_1xeuzdJII_3\tbus\nYAI5kxAVlag_0\tbus\nYAS9QgwaKuA_3\tbear\nYAacEL8GB8Y_0\tbus\nYCTBEauAnvs_0\tboat\nYCT0ue2AdNE_0\tumbrella\nYC0SWC1thDM_2\tcar\nYDxjfXnUsjA_0\tbus\nYFb4IgdgsQI_1\tboat\nYGm0A03QK-0_0\tbus\nYJklsCjPlRE_0\tcar\nYJrYjEZ4Hfo_1\tbear\nYLNAOu0nAaM_1\tbus\nYMWEbvBeA2k
_0\tcar\nYNOl5XssrmA_0\tcar\nv6RTPFSqVAo_0\tbear\nv6d52nxP9CI_0\tboat\nv6d52nxP9CI_6\tboat\nv6d52nxP9CI_2\tboat\nv7R5EfiWsMU_0\tboat\nv7mxF1u1eJA_0\tboat\nv74SVFcInoY_0\tbus\nv77um2oiCmw_1\tbear\nv8vdjpigkqA_3\tbear\nv9EO_34zhPY_0\tbus\nv9dJjyyqJ14_0\tbear\nv-_nfHjdDrM_0\tcar\nwAJI2wAjCLA_0\tcar\nwAktmcUSj0Q_0\tbear\nwAsEbrNlx-Q_0\tcar\nwBEyQdKDniA_0\tbus\nwDOuWmULTDo_0\tbus\nwDwRfk2Ka7A_2\tumbrella\nwFuYr5TAoA4_0\tcar\nwFuYr5TAoA4_2\tcar\nwGqMuP3z6nY_2\tbear\nwHdnCnPBax4_0\tumbrella\nwHrdTEho0Do_2\tbus\nwItLJ3GVPHo_0\tumbrella\nwIzhSLqL-4M_0\tboat\nYPR6uiSn_PI_0\tbus\nYPR6uiSn_PI_2\tbus\nYPWoY6sseHw_2\tbus\nYP9HVTyFrM0_0\tumbrella\nYQRaUcLNZjw_1\tcar\nYRmCe16K5EI_0\tumbrella\nYRxTciapqLc_0\tbear\nYSFyOBQNQzc_1\tumbrella\nYSOeyn1SUIc_0\tbear\nYSx79S6HsRE_0\tboat\nYSx79S6HsRE_1\tboat\nYVueKFH38pQ_0\tumbrella\nYWAY2hVlXwU_1\tboat\nYXC4y1_fd5M_1\tboat\nYYjM_RIWUWk_0\tbus\nYY-G2b46dbU_0\tbus\nYalvFPYggIo_0\tbus\nYbsAJsBizWo_0\tcar\nwJbu3nAVmh8_0\tcar\nwJ-qeIIyve0_1\tbear\nwKlqztWBWCE_0\tbus\nwLXsUww1z0Y_1\tbus\nwLXsUww1z0Y_2\tbus\nwMW3eYDAmiM_0\tcar\nwN6DTQLhQo0_0\tboat\nwOAtMDJ1DIU_1\tbus\nwOqLqQhPKNs_2\tbus\nwPCVya7FjXI_0\tbear\nwPcWihBU6Fc_0\tboat\nwPjzhuBuZ_E_0\tcar\nwPrTnHfCQy0_0\tbear\nwP83jrOriho_5\tboat\nwP83jrOriho_1\tboat\nwP83jrOriho_3\tboat\nwQY4K0ZN5RY_0\tbus\nwQY4K0ZN5RY_1\tbus\nwQY4K0ZN5RY_3\tbus\nwRJ_foSdk2g_0\tumbrella\nwRs7_Un28R0_0\tbus\nwSaf-OQyJzM_0\tboat\nwSkaSUiYB60_0\tboat\nwUG-UKf5xOM_2\tbear\nwUtwwmbus0k_0\tbear\nwVI9BeWuM68_0\tbear\nwVX6wPj2U5M_0\tbus\nYcrP36sQwVc_5\tbear\nYepGVMeHePw_1\tboat\nYe3mi53K_Oo_2\tboat\nYgouPUMM7w8_0\tbus\nYhZT5GU-dEY_0\tbear\nYiDVwrN1Djs_3\tbus\nYi8XHxZACGY_0\tbus\nYlGg5v-AWZc_2\tumbrella\nYlnMI5yk7FU_0\tboat\nYmRfW-9QwH0_0\tcar\nYodCYpx5p8o_2\tbear\nYogxE9OtHGE_0\tcar\nYogxE9OtHGE_2\tcar\nYozOMrrhBWk_0\tumbrella\nYozOMrrhBWk_5\tumbrella\nYo8IaFdsDHQ_0\tumbrella\nYo8IaFdsDHQ_1\tumbrella\nYpGGnhGqqkc_0\tcar\nYpv2bwSbJbg_0\tbus\nYpyrD-P9emk_1\tbus\nYq3H6FwjqwQ_2\tbear\nwXg6MT7--Ms_1\tbus\nwYO_Z3tO-P0_0\tcar\nwYO_Z3tO-P0_1\tcar\nwYO_Z3tO-P0_2\tcar\nwaGAoKeMDbo_2\tbus\nwaZHoBhYNXM_2\tcar\nwan2A1Zp9pg_0\tumbrella\nwa4LKNmoGCI_0\tbus\nwbBafnofeHM_1\tbus\nwcLRQ5lDklc_2\tbus\nwcRJMRP7TtY_0\tcar\nwcUHhJA9ynY_0\tumbrella\nwcUHhJA9ynY_1\tumbrella\nwc6z479m8VU_0\tknife\nwePYCAT9VWI_0\tboat\nweUGYN9mO8M_0\tcar\nwe9P1H3yM9s_0\tumbrella\nwgn5GA4Kt_w_0\tbus\nwioe2rgDFxQ_0\tbus\nwi_60seXhMg_0\tumbrella\nwkCC1-6dZZc_0\tbear\nwkRF61CxvWQ_1\tboat\nYsJGlSMV6fc_0\tbear\nYsKpyV6dNVU_0\tumbrella\nYsKpyV6dNVU_6\tumbrella\nYukb6C-FiPs_0\tbus\nYyqN8OKq7-k_0\tcar\nYy9Cj5ayVow_4\tcar\nY2esC00COVs_0\tumbrella\nwkhiKomfWwo_0\tboat\nwku7FWw9zok_6\tbear\nwmN3gF7czBE_0\tboat\nwoB4lneU8v4_2\tboat\nwoB4lneU8v4_5\tboat\nwoB4lneU8v4_3\tboat\nwonqKYd_Hkc_0\tboat\nwulomSbG8Ww_0\tboat\nwwHyMOLjtHw_0\tcar\nY8gjbHlOSpg_1\tcar\nwz-CYTAvpJA_0\tcar\nwz-CYTAvpJA_1\tcar\nw1xC4CowaVk_2\tbear\nw2d7ZPHVRsQ_0\tcar\nw4QoeqK4vN4_0\tboat\nw5KKrxi32ZU_0\tboat\nw5RAGrRh6N0_0\tboat\nw85PvG-O3JQ_3\tbear\nw-RoxIo67S8_0\tbear\nw_dzHMbP1wk_0\tcar\nxAdflusGMAM_2\tbear\nxAdflusGMAM_1\tbear\nxBQVhJr5tn4_0\tcar\nxBQVhJr5tn4_1\tcar\nxBW2dB1aHqE_1\tbear\nxE-fIbBizEc_0\tboat\nxIjuSe8NERE_0\tboat\nxIr-46lqsbs_4\tboat\nxI3wdcR9GOU_0\tbear\nxJaqlEqJIsg_0\tcar\nxKUjAAXXark_1\tcar\nxKjnn1lJsUE_0\tboat\nxLl8JlHPals_0\tbear\nxL0aucx8LjA_0\tcar\nxM1N_JeMAns_0\tcar\nxNfYVO0HOWA_0\tbear\nxNfYVO0HOWA_1\tbear\nxNqzZtEMt6A_1\tcar\nxOQ_zqhFFoQ_0\tcar\nxOQ_zqhFFoQ_1\tcar\nxOQ_zqhFFoQ_2\tcar\nxPgexGqlrpM_0\tboat\nxQ2ursLiV78_0\tboat\nxVl7ISxNOBo_1\tboat\nxWfIV6ykSZU_0\tumbrella\nxYRbcgZcjTo_0\tboat\nxZdiy-peZpE_0\tbear\nxc
C48didfYg_0\tcar\nxds7aav_WA0_0\tumbrella\nxeEFpaZutxQ_2\tcar\nxeEFpaZutxQ_0\tcar\nxemv_TG3nHo_2\tboat\nxf7e7HpnDAI_2\tumbrella\nxhLH-f-e2Ds_0\tbear\nxhLH-f-e2Ds_5\tbear\nxhLH-f-e2Ds_1\tbear\nxhLH-f-e2Ds_3\tbear\nxhLH-f-e2Ds_4\tbear\nxhYRRVSUjcI_0\tbear\nxh6_xD0_FUY_0\tumbrella\nxi1l0PNYmVU_0\tcar\nxi1l0PNYmVU_1\tcar\nxk-PCxxgLyQ_0\tcar\nxlSq_r-1VZI_0\tcar\nxlTBS98u4Xk_1\tboat\nxl03KNG3qcY_2\tbear\nxl03KNG3qcY_3\tbear\nxmXEOSj-QR8_0\tumbrella\nxm61skXJVHY_0\tbear\nxm7yMjZR_HM_0\tcar\nxniXqwdU3rM_1\tcar\nxn_6GQGdyww_0\tbear\nxoL1TWqV2UY_8\tcar\nxoL1TWqV2UY_3\tcar\nxoL1TWqV2UY_4\tcar\nxoL1TWqV2UY_6\tcar\nxo93ACxVFCE_0\tcar\nxu3hCCY1M98_0\tcar\nxvJ-vgSlRFQ_1\tbear\nxyUFBTV5sfA_1\tboat\nxyUFBTV5sfA_5\tboat\nxzFwd6rktG8_1\tbear\nx1PZyiPtcD0_2\tbear\nx1PZyiPtcD0_0\tbear\nx2MUZI0ckUs_0\tboat\nx51qh-jbh2w_0\tcar\nx8bgasvRg_0_0\tcar\nx_PtUMz2m3g_0\tumbrella\nx_yZa__92dU_0\tbear\nyE9ySV90e2U_2\tbear\nyFdbcjv2scY_0\tbear\nyFwt2mHmJQw_2\tumbrella\nyFyTQPoWKrg_0\tcar\nyGYLwBmuRVI_0\tbear\nyGYLwBmuRVI_1\tbear\nyGq_wX2hSms_0\tcar\nyHFbPuIOGec_0\tboat\nyMVPEp44IcU_1\tcar\nyNYzTl3zuSA_0\tcar\nyOeQRz1L-6w_0\tboat\nyPx8JYuB8jo_5\tbear\nyTEPer0Bvnk_0\tboat\nyTr7cqNxVw8_0\tboat\nyVwePYmRfaA_2\tboat\nyVwePYmRfaA_0\tboat\nyV3gYczZGSU_0\tboat\nyWKpg3C3HRA_0\tumbrella\nyWQT0KUXmZs_0\tcar\nyXA2s-Ylkx4_0\tumbrella\nyYt1-j5ltQg_0\tbear\nyZOWsBbP8Dw_1\tboat\nyafgzvvEBsk_0\tcar\nygqn0Cw0cJg_0\tboat\nykAF4z2vPRI_1\tcar\nynSIMn0mh5Q_0\tcar\nynuXudWT-jg_1\tboat\nyqDO3G8QSxs_2\tboat\nysudb_DYv1E_0\tbear\nytzy45KRs4k_0\tumbrella\nyy-1Eaz2SGI_4\tboat\nyy-1Eaz2SGI_5\tboat\nyy-1Eaz2SGI_6\tboat\ny26dbfVQaAI_0\tcar\ny3HDa7ZvWW4_0\tumbrella\ny5rlUzgK0z4_0\tumbrella\ny6l_Xj3A7dU_0\tbear\ny6nMm6sNieE_0\tbear\ny6oa4gTfIaw_0\tboat\ny7_Teuq-Jd4_0\tumbrella\ny-J-zu3KYKk_0\tboat\ny-lv7_3azcQ_3\tbear\ny-lv7_3azcQ_1\tbear\ny-lv7_3azcQ_2\tbear\ny_Kbef75lDk_0\tumbrella\ny_OvZEh5PxQ_1\tumbrella\nzA7rl-0pCw4_1\tbear\nzBCRUfv1YVo_0\tcar\nzBomR9gjgg4_1\tcar\nzCnqglOaM40_0\tboat\nzC1J8hrm_FI_0\tboat\nzGOI3Uds1-A_0\tcar\nzGvuvfZeouY_0\tcar\nzHwK-Ov5Dn8_1\tbear\nzIGdWP0BOPc_0\tcar\nzIoLntgax_4_0\tcar\nzIrTQvy-DtU_0\tumbrella\nzKN-t-wHfVw_0\tcar\nzOxKFs0x_-M_0\tcar\nzPUoexM4GJg_1\tbear\nzS4G-dKS3dg_0\tcar\nzUYNrm52mG8_0\tcar\nzU9O4EpnP8g_0\tboat\nzW4j5HFdFCE_1\tbear\nzW9G9_luulU_6\tboat\nzW9G9_luulU_8\tboat\nzX70EOhK1IA_4\tboat\nzX70EOhK1IA_0\tboat\nzX70EOhK1IA_2\tboat\nzX70EOhK1IA_3\tboat\nzYNSRTs7wcI_0\tboat\nzZMZCzV930Y_0\tboat\nzaXvp0LSorI_0\tumbrella\nzcIJlqUAlyQ_0\tboat\nzcdpKM2gDkA_3\tbear\nzdWOfDZyRWg_0\tcar\nzdp6LbsF3Fo_0\tcar\nzdp6LbsF3Fo_1\tcar\nzglydzoqdNw_1\tcar\nzhSMuVKY4jM_1\tboat\nzhgbbZA2jZo_0\tcar\nzj0QGbLx2Ek_0\tumbrella\nzkC1ygaZUL4_0\tcar\nzkFlovQ2F80_2\tumbrella\nzkFlovQ2F80_4\tumbrella\nzkFlovQ2F80_0\tumbrella\nzkYqOEAbTTE_0\tcar\nzk5BFmxsRfQ_1\tcar\nzmXJ3VmO_yQ_0\tbear\nzmXJ3VmO_yQ_1\tbear\nzn_LOCSgnBI_0\tcar\nzobMJDgPWmM_0\tboat\nzpW9Kjtbu7g_1\tboat\nzp4-YNYr-l8_0\tcar\nzqDdt_wpfcM_0\tbear\nzqyhnAN5qnA_0\tcar\nzq-AjPBQb3w_0\tumbrella\nzsszkZnE24M_0\tcar\nzsszkZnE24M_1\tcar\nzwKNqBmI95k_0\tumbrella\nzxfyvjQQ0QY_0\tcar\nzxuleRJc5Pw_1\tboat\nzySbpWHTUUI_2\tumbrella\nzzDlzbpuFUg_1\tcar\nzzOYV3PIwDo_1\tcar\nzzljeIZDjM8_0\tcar\nz1CT7NYPStE_0\tboat\nz1CT7NYPStE_2\tboat\nz1DFtYFOfsQ_0\tboat\nz1GcDqMXI5U_0\tbear\nz1WPNBklZbo_0\tbear\nz3V1O449zY8_0\tcar\nz3V1O449zY8_1\tcar\nz3V1O449zY8_2\tcar\nz32BNdijIPo_0\tcar\nz4C0C5AtXd8_1\tbear\nz4Nk6je-k5E_5\tbear\nz4Nk6je-k5E_6\tbear\nz4Nk6je-k5E_2\tbear\nz4Nk6je-k5E_4\tbear\nz4YdhKjeNQk_0\tcar\nz5PqRVPhGGo_0\tbear\nz56C-TtwATI_0\tcar\nz6Bzk_B2FVo_1\tumbrella\nz6gL7THeOz4_0\tcar\nz8GzZUKj04k_0\
tcar\nz8QYapjsTBo_0\tbear\nz8WzXJMRLkg_1\tbear\nz9CJpzFuqHU_0\tboat\nz-gqhqI7U10_0\tumbrella\nz-n_qZEuRko_0\tumbrella\nz_CWMOiNpzY_1\tboat\n0Ah0DHbJ6Uw_0\tbear\n0B-l9QmJK3I_0\tcar\n0DHXMcNUn60_1\tumbrella\n0EEILwHA4Dg_0\tumbrella\n0FRiwnN3Wv8_0\tbear\n0FUPhsPv9vs_0\tboat\n0FUPhsPv9vs_1\tboat\n0GR555fb7uE_1\tboat\n0GR555fb7uE_3\tboat\n0Gal36CHm94_0\tcar\n0Hf-spRN8iA_0\tbear\n0H81H-1s398_0\tcar\n0JkwSF_s82I_0\tumbrella\n0JxUW6X6VTA_1\tcar\n0JxUW6X6VTA_2\tcar\n0LY3jcKxA2E_0\tboat\n0NN0x0UcFVI_0\tcar\n0NgLxOGQPPM_1\tcar\n0Nh6NERAbQM_0\tumbrella\n0NyneL4SB78_0\tumbrella\n0O2cDoxCAhA_0\tcar\n0PqvPOqRHik_0\tbear\n0ROl0QaHTgU_0\tboat\n0ThOYMXH3Mw_0\tumbrella\n0TyHCEslM-4_0\tboat\n0UGD0u7LEPY_0\tcar\n0UVJn4oJR3I_0\tcar\n0Vu78K6ZsOk_2\tbear\n0XETGtPrUR0_1\tboat\n0XrWsyRsBYs_1\tbear\n0YWXAZlIFZE_0\tcar\n0YWXAZlIFZE_1\tcar\n0YaZ8lrPQJc_0\tboat\n0YaZ8lrPQJc_2\tboat\n0YaZ8lrPQJc_5\tboat\n0ZJeQYZxfGQ_7\tbear\n0ZJeQYZxfGQ_6\tbear\n0agrBEPe_w4_2\tbear\n0bx9mbPU7zo_0\tumbrella\n0c5dV9e0rL0_1\tcar\n0hafN9Sygek_1\tbear\n0jL3xw-Gfq8_2\tboat\n0kyg-HgBo7o_0\tboat\n0lXT8w6Nvz4_1\tcar\n0loh5Nhb32w_0\tbear\n0lyjvzKFjn0_1\tbear\n0lyjvzKFjn0_2\tbear\n0mIwwe5irHk_0\tcar\n0mSZED2I97w_0\tcar\n0mSZED2I97w_2\tcar\n0mSZED2I97w_1\tcar\n0oHtf7nx8m0_0\tcar\n0oHtf7nx8m0_1\tcar\n0peaciSDgqg_0\tboat\n0rIli5nmkus_0\tcar\n0sAim6AJwgY_0\tcar\n0sAukk-qZs8_1\tcar\n0sWjMW4aW_Y_0\tbear\n0sbXLfSaBvk_0\tumbrella\n0tapt-cyoSY_12\tbear\n0vC1j_r-gPc_1\tboat\n0vun54M7U5c_0\tumbrella\n0wXgXCqnblk_0\tumbrella\n0wzUHyuc5JE_0\tboat\n0zKI3bZagm4_2\tboat\n01aEu9jy-zA_0\tcar\n02AiKGZAu3k_2\tbear\n02bMGGTZE_M_0\tboat\n04FPpXq4qHc_0\tumbrella\n04FPpXq4qHc_5\tumbrella\n04jEe0lfdos_0\tcar\n04p58ydbAvM_0\tcar\n05VoMpLo7Cc_2\tboat\n05rSMaVX3yA_1\tboat\n06kAyBeWx5c_1\tumbrella\n08Fj_YF5X8Q_2\tbear\n0-Jhv9dONP4_0\tbear\n0-zDto8pBU4_0\tbear\n0_ByJ0bAD70_1\tbear\n0_P-fui2MeI_0\tboat\n0_soacANAc8_0\tumbrella\n0_2dsK8nudw_0\tboat\n0_2dsK8nudw_1\tboat\n0_2dsK8nudw_2\tboat\n1EIBn1zqhJA_0\tboat\n1Fv0cFr9B_Y_0\tbear\n1Gd-hUsNAsQ_0\tbear\n1Gd-hUsNAsQ_5\tbear\n1HhUsmUQmRY_0\tboat\n1KnTTBiP4ig_0\tumbrella\n1LKTvGMlL60_0\tbear\n1MVBovgEi4s_0\tbear\n1OvseXyo27E_0\tumbrella\n1PYMTwN-dl4_0\tboat\n1REcM5EtrZg_0\tboat\n1REcM5EtrZg_1\tboat\n1SQF7Tb6pUA_2\tbear\n1T4c050qGWo_0\tboat\n1UGqDCwd0TU_2\tbear\n1VziogDsYAs_1\tbear\n1WOfnEUurGM_0\tboat\n1YelAl0OQQg_0\tbear\n1anH_WthXTc_0\tumbrella\n1anH_WthXTc_1\tumbrella\n1avrrmB_Q5s_3\tbear\n1cbY1pGpdhM_0\tumbrella\n1cy1p57Z49c_0\tboat\n1dmbrwAgFuc_0\tbear\n1fPDeE9SwYI_6\tbear\n1gbd0C2wJrI_2\tbear\n1huEYUsV2ng_0\tboat\n1iD7yA3Elk4_0\tumbrella\n1iLq0PGfeCs_1\tboat\n1irtTU-RM8g_0\tboat\n1lCEFERcEKg_1\tboat\n1lSGhF2K_lM_3\tbear\n1l-NcYZKF8w_0\tumbrella\n1miy1sfneCI_0\tbear\n1qIgbCRt2C4_0\tbear\n1qknV5a5WQA_5\tbear\n1rt4XRA4RHE_0\tbear\n1rt4XRA4RHE_3\tbear\n1v8UDwaLZOk_1\tboat\n1yym4MiYTrs_0\tboat\n1yym4MiYTrs_1\tboat\n1zGry9uSuEs_0\tboat\n10oedSsXbw0_0\tbear\n14R96gxvKtU_1\tboat\n15ImffljXUs_1\tumbrella\n16BnXZheZE8_0\tboat\n18XvETJJDqA_0\tbear\n19ID_DbSclo_1\tbear\n19vhT11oPv4_0\tumbrella\n1__PWUxtAJI_0\tboat\n2Da3689mFHo_0\tboat\n2DimBSzdfPw_0\tboat\n2Fo-71zWO5Q_0\tbear\n2F9aM3isFOg_0\tboat\n2HDMk0mGW_w_0\tumbrella\n2IWPUKQEQc0_0\tboat\n2Irm_qCNQ_g_10\tbear\n2Irm_qCNQ_g_2\tbear\n2Irm_qCNQ_g_4\tbear\n2IyAOD0OkOg_0\tbear\n2I_k7e8QpWI_1\tumbrella\n2LWxx48-zmY_0\tboat\n2OYJuEnLK_w_0\tumbrella\n2O-9dVZBFm4_0\tumbrella\n2PL1rgU3jQ4_3\tbear\n2Pxvoh1PnpM_0\tumbrella\n2QOthN0H0jo_0\tboat\n2UBlre798kQ_0\tboat\n2U7mw3Z_nrI_1\tbear\n2ZeSJRQEIDg_0\tumbrella\n2huYkh1UAa8_0\tboat\n2j5p2kIFnF8_0\tboat\n2kAmyrOg2is_0\tumbrella\n2l4-4yNg4uM_0\tbear\n2
l4-4yNg4uM_1\tbear\n2nWt5S5AcdM_0\tbear\n2oAbMVTBupI_2\tboat\n2olUVemt4wc_0\tumbrella\n2rbAoA6KuZ4_0\tboat\n2rzjzIvxob0_0\tumbrella\n2sDjXjM3vuk_4\tbear\n2sgrwTqPz-Q_1\tumbrella\n2vC56ILIWK0_1\tbear\n2w5-fxqKaR0_0\tboat\n2xzgP87zGDM_0\tboat\n20nMgEiCqVs_0\tbear\n223bkVsFvUg_0\tumbrella\n23-uEh5ygBE_0\tboat\n24kbYgf2_xM_0\tboat\n27Yd0qtplBs_0\tboat\n2_VfwSLic7o_0\tboat\n3EBKN0vh_8Y_0\tumbrella\n3EQ8WatEGfM_1\tbear\n3FBfwZ1vctY_0\tboat\n3GXWmiQHAA4_0\tboat\n3Hc48OCKEaQ_0\tbear\n3ICqGhWY-HU_0\tbear\n3IOrKwocmOM_0\tbear\n3KUAz0bb87g_0\tumbrella\n3KqDceVP3xg_4\tboat\n3MqGpNqj-fo_2\tbear\n3M5VwMaIzvc_0\tbear\n3PN8pPy1PLc_1\tbear\n3PN8pPy1PLc_4\tbear\n3PuByhkRjdA_0\tbear\n3P8-bKeMTDU_0\tbear\n3P8-bKeMTDU_1\tbear\n3QQYEFonITE_0\tumbrella\n3SJI7j-hBwU_0\tumbrella\n3SbQY-gSjTI_1\tbear\n3SofVK5wM1k_0\tbear\n3T5iqGlQLn8_0\tbear\n3T5iqGlQLn8_4\tbear\n3UJ24QWw0js_0\tbear\n3UUo8exclHk_0\tumbrella\n3VZuzA8i9tI_0\tboat\n3ZWFSRxFKp8_4\tumbrella\n3ZwOfZ6mdTE_0\tumbrella\n3cBiXmqHBLE_0\tumbrella\n3eH1SNLDT7U_1\tboat\n3fiWerkBy1s_0\tboat\n3fm54fM2fh0_1\tboat\n3kOuqiigfhM_0\tumbrella\n3khbnSUKCjw_0\tumbrella\n3khbnSUKCjw_3\tumbrella\n3khbnSUKCjw_5\tumbrella\n3khbnSUKCjw_1\tumbrella\n3leEAIEn6wg_1\tbear\n3oFuTv4g5QE_0\tumbrella\n3oFuTv4g5QE_2\tumbrella\n3ohEBnBnt7o_2\tumbrella\n3pli8lLuPF0_1\tbear\n3qGBc-85DMI_1\tbear\n3q0pJjI8W5o_0\tbear\n3v6DRHFQTz0_1\tumbrella\n3yct6bNJF9c_1\tboat\n3zhjI0Cn1AM_1\tbear\n3z0lIa162ps_0\tbear\n31PMTcBL5-o_1\tumbrella\n31PMTcBL5-o_0\tumbrella\n32GDx70-6cQ_2\tboat\n351brnq0Ryk_1\tboat\n38Tbojzrw80_3\tbear\n3__l885Wkz4_0\tbear\n4A-5QKpDBFE_0\tbear\n4A-5QKpDBFE_1\tbear\n4BbVz6UbHFY_1\tbear\n4GTfq2m-SnY_0\tbear\n4K0agSc78Js_0\tumbrella\n4K0agSc78Js_1\tumbrella\n4MUu-MomyB0_1\tbear\n4N85gqVvlWU_1\tboat\n4OQGDsYtfSg_0\tboat\n4QdM0aAdf4g_3\tbear\n4Qf9iJ-IMDg_0\tbear\n4R5HjEAW6Y4_0\tboat\n4ViaowUogyA_1\tbear\n4ViaowUogyA_3\tbear\n4VxP7VQ-WtQ_0\tbear\n4XCmBo2k6Hc_1\tboat\n4h2kJG8rDAk_1\tboat\n4h8E8d4P5ms_0\tumbrella\n4iktvQjNLS8_6\tboat\n4lyoTIuPa9s_0\tumbrella\n4rxmIDjvHvo_0\tumbrella\n4td5npVxACw_0\tboat\n4td5npVxACw_2\tboat\n4td5npVxACw_3\tboat\n4td5npVxACw_1\tboat\n4u8RQi7_xUQ_1\tboat\n4zYtj8BG_ZA_0\tboat\n4z3XNRP4Qvk_0\tboat\n40Ogw6O8g2M_0\tumbrella\n42-2FjqvBRw_0\tboat\n44nxZjEYqLI_0\tboat\n45HOGdlAVq0_2\tumbrella\n45HOGdlAVq0_3\tumbrella\n45HOGdlAVq0_6\tumbrella\n46Sp7L3iKK4_1\tboat\n47mMBnGHuOE_7\tboat\n48IdCSlEHlM_0\tumbrella\n48pGfV-z-x0_0\tboat\n5AhKWEjMmUw_0\tumbrella\n5AzSuHB6_jc_0\tumbrella\n5Ce6X4i25i4_4\tumbrella\n5Ce6X4i25i4_0\tumbrella\n5EaEfiCIEcA_4\tumbrella\n5EaEfiCIEcA_3\tumbrella\n5FZykf07mxY_0\tumbrella\n5FZykf07mxY_1\tumbrella\n5FviZXBOPWk_0\tumbrella\n5H6nBOIIziQ_0\tumbrella\n5IdOF-nnOkU_6\tboat\n5I2hW9gRRwU_1\tboat\n5JubFWZKmZc_1\tumbrella\n5Kf5KxsLCmI_0\tboat\n5PxBf16_oMg_0\tumbrella\n5WUSwyO4k7A_0\tumbrella\n5XWfGTUYLbQ_6\tumbrella\n5Y3Lrgpl6s8_0\tumbrella\n5dL3vGF_-ug_0\tboat\n5e9luwmv6mU_0\tumbrella\n5g_ugz2HmKM_2\tboat\n5iYpaHYUElI_0\tboat\n5iYpaHYUElI_3\tboat\n5iYpaHYUElI_5\tboat\n5nMhK15X4R8_2\tboat\n5rT33oH7aV4_0\tboat\n5srF-BzF_go_0\tumbrella\n5suoa4TFYd4_0\tumbrella\n5vMpwDm27VM_0\tboat\n5vyqdnOWivc_3\tumbrella\n52m9SGVaiW8_0\tboat\n521jpaMoQ58_2\tboat\n537tF6-uRB4_0\tumbrella\n561s-m-0mqU_0\tumbrella\n561s-m-0mqU_2\tumbrella\n561s-m-0mqU_3\tumbrella\n582V5-HF4yg_0\tboat\n582V5-HF4yg_1\tboat\n597l2xVl9Tc_0\tumbrella\n6C42Di7bIpE_1\tboat\n6FG49plD8TQ_0\tboat\n6FQz5w7HaKg_0\tboat\n6JGioFiqwww_0\tumbrella\n6JLdACYt7D4_1\tumbrella\n6MVLpYA1t8E_1\tboat\n6MVLpYA1t8E_3\tboat\n6OEFFwKhAFw_0\tboat\n6PVjXDW7JlY_1\tboat\n6Sxb0d7xIys_0\tboat\n6Ug54vSsrio_0\tumbrella
\n6WP3KFUYTrM_0\tboat\n6XrW8Yjd16I_0\tumbrella\n6c0RAJO-AGg_0\tumbrella\n6inTfRLx_58_0\tumbrella\n6it-xMMovj4_2\tumbrella\n6khDUjxTmdo_0\tboat\n6mvP_NKlIHg_1\tumbrella\n6qpeBvh9pqs_0\tboat\n6rowMK5ERz8_2\tumbrella\n6sN56W9U7tY_2\tboat\n6tLtEuKyj1E_1\tboat\n6tQrO26kwOY_0\tumbrella\n6t0mbpnPPdg_0\tumbrella\n6t55VfdtMWE_4\tboat\n6t55VfdtMWE_7\tboat\n6t55VfdtMWE_8\tboat\n6t55VfdtMWE_0\tboat\n6uM7MFSH15g_0\tumbrella\n6uvJft-l1R0_3\tboat\n6yCsWwj87QI_0\tboat\n6zxrdodJut0_0\tumbrella\n61RreGvIPOk_1\tboat\n66WmMvvZOxI_0\tumbrella\n68C7HGRrJ8o_0\tumbrella\n68kx9VUVhzE_1\tumbrella\n6-Nh0bY1nUk_0\tumbrella\n7HD-o1yj47U_0\tumbrella\n7NXmDbHoJn0_3\tumbrella\n7NXmDbHoJn0_5\tumbrella\n7NXmDbHoJn0_6\tumbrella\n7RcyfoxqADA_0\tumbrella\n7WKzOMuf3Cg_1\tumbrella\n7a_nsGmUZNU_0\tumbrella\n7kSyhlnimb8_0\tumbrella\n7kaTL52xbiY_0\tumbrella\n7tlbytb63z4_0\tumbrella\n7uR1cEVdMDo_0\tumbrella\n7ydX3wCeOgk_0\tumbrella\n71k1TftUiYE_0\tumbrella\n76ljAryU9Bw_0\tumbrella\n78lA-eJGUn8_0\tumbrella\n7-ugeb_4vqE_0\tumbrella\n7_k6DM-PlXg_0\tumbrella\n8AZtNaOO_8A_1\tumbrella\n8FhIv4h9D3E_0\tumbrella\n8FhIv4h9D3E_1\tumbrella\n8H88MFohrUM_0\tumbrella\n8SuTrZ6xu2E_0\tumbrella\n8d_Vt2SWIvg_0\tumbrella\n8fsRltS2ul4_0\tumbrella\n8nReKSsSgGE_0\tumbrella\n8oOer9PS53g_3\tumbrella\n801xOkfqjkM_0\tumbrella\n84Ber6V3IrA_0\tumbrella\n84zKfCKtsDo_0\tumbrella\n9CGTYEUn-mo_2\tumbrella\n9JFicuESmEA_0\tumbrella\n9JiMiflDI68_0\tumbrella\n9J4O20b9qnY_0\tumbrella\n9S2mGfudahk_0\tumbrella\n9UVLb_-RbfA_0\tumbrella\n9bFrwgSSAkQ_2\tumbrella\n9bFrwgSSAkQ_4\tumbrella\n9bFrwgSSAkQ_0\tumbrella\n98OOq0Wh904_0\tumbrella\n99uO6qHrhsU_0\tumbrella\n-PaNPkpeFdI_0\tumbrella\n-PaNPkpeFdI_4\tumbrella\n-Z3_Ixwl1YY_0\tumbrella\n-bA7JdKB0LA_0\tumbrella\n-d9Vg5j5vZU_1\tumbrella\n-eJmt-GItyI_0\tumbrella\n-k8FuC01N5E_0\tumbrella\n-0y7A0GDVY8_3\tumbrella\n-0y7A0GDVY8_5\tumbrella\n-0y7A0GDVY8_7\tumbrella\n-3TIfnTSM6c_1\tumbrella\n-3TIfnTSM6c_2\tumbrella\n-98I0B3kkqw_0\tumbrella\nAAVVg5xx0p8_0\tperson\nACB01WGxOSM_0\tskateboard\nACDc6tGnXXQ_0\telephant\nADWNgv6trag_0\tperson\nADznOfGgfj8_0\tperson\nAEEVGgiuS5c_0\tperson\nAEHbOzlbmOQ_0\tdog\nAEJTsQNMkME_0\tbus\nAFlkSTJ-mF0_0\tdog\nAGRV17_1OS0_1\tbus\nAHsZ4FTQ8Ew_0\ttruck\nAIViQtfacts_2\thorse\nAJBtOVA1KSw_0\tperson\nAJbQP-rIwCY_0\tperson\nAJ9ODXcnhVo_0\tperson\nAJ9ODXcnhVo_1\tperson\nAKBq0oH8IOM_1\ttrain\nAKBq0oH8IOM_3\ttrain\nAL9dFpjFlLM_0\thorse\nAM-TjLTvBSU_5\tbear\nANA-pgSAzGI_0\thorse\nANVnK2HmZno_1\tairplane\nANVnK2HmZno_7\tairplane\nANeOKwjvX7w_0\tdog\nAPP17gURiBU_0\tbear\nAPP17gURiBU_1\tbear\nAPTYyEYJfOY_0\tbird\nAQD8YBCTSPs_0\tumbrella\nARaILMtc8fs_1\tperson\nARsokXpl07Y_1\tboat\nARsokXpl07Y_2\tboat\nASPK-ZSB9Ts_0\tperson\nASfv8cmreoA_0\tperson\nASfwyHCtnIU_0\tperson\nAS5LvQT9rrQ_0\tperson\nATy91FTiYvU_0\tperson\nAVF8lCKe6os_2\tumbrella\nAWRcJpWTPwQ_0\tperson\nAWtY9Y2mPso_0\tmotorcycle\nAWwDsm1WnKE_1\tknife\nAXjDlIFY7ww_0\tboat\nAYAkMpj_MHA_2\tbicycle\nAYAkMpj_MHA_5\tbicycle\nAYAkMpj_MHA_6\tbicycle\nAax6L0Qqgio_0\tbird\nAcYd7y_-V74_0\tperson\nAdY55Q3qVK0_2\telephant\nAgbIDWiOXQ8_0\tperson\nAgsYgmA19z4_0\tperson\nAhWU-QUzOOA_0\tperson\nAiqGEAjF6QI_0\ttrain\nAiu6EH4a8v8_0\ttrain\nAiu6EH4a8v8_1\ttrain\nAiu6EH4a8v8_6\ttrain\nAixV6QSGqto_5\tbird\nAixV6QSGqto_6\tbird\nAjj7WZLukdw_0\tmotorcycle\nAjpbAriY8rU_0\tperson\nAlab3dEYXM0_0\tperson\nAoAoH9yb6zY_11\tbear\nAoAoH9yb6zY_6\tbear\nAo7Sa2afCb4_0\tperson\nApDgLQUsEqc_0\tbicycle\nApakHefqWv0_2\tairplane\nAqIG0zk2bpg_0\tperson\nAqTXLh7DtcM_0\tperson\nAqTXLh7DtcM_1\tperson\nAqdoD9jkBFc_0\thorse\nAqj7VnXQt4s_0\tcow\nAq4dBqb2SbQ_0\tperson\nArgYRdhvlc0_0\tskateboard\nAsPXe7qUyuI_
0\tperson\nAuLrPQqrKV4_0\tmotorcycle\nAuY8vITQrsE_0\tcow\nAvBm7iHiDdI_2\tboat\nAvSgTHXgSXQ_0\tcow\nAwVdVzh1Eh0_0\tperson\nAwvDMOeS7no_0\tperson\nAwzt30r0OLQ_1\tbus\nAw2t3AalW4s_4\telephant\nAyh_2ithjCE_0\tcow\nAyh_2ithjCE_1\tcow\nAyh_2ithjCE_2\tcow\nAylQiap7dj4_2\tbear\nAylQiap7dj4_3\tbear\nAy9QToaaTGc_1\ttruck\nAy_a2OkcdEk_0\tperson\nAzVvPUazPYk_0\tmotorcycle\nAzzlFx32dQs_1\tboat\nA1RSx6j_ra0_9\telephant\nA1RSx6j_ra0_4\telephant\nA1RSx6j_ra0_6\telephant\nA27YZAfJmrc_0\tknife\nA27YZAfJmrc_1\tknife\nA3E72P24pf8_0\tperson\nA3cgW1rDOcI_0\tperson\nA32Fi06yKpU_0\thorse\nA5U6AHe9_4A_0\ttrain\nA5pUgLCQq9k_0\telephant\nA5pUgLCQq9k_2\telephant\nA5pUgLCQq9k_3\telephant\nA63BoLTUNAM_0\thorse\nZBzVnA8zj6Y_0\tperson\nZB45YyN1WUM_0\tbus\nZFYGhJKiw5w_1\tgiraffe\nZGfOCwbu-PY_0\tperson\nZHTMfW1eaW0_0\tcat\nZHURcze8rOI_0\tperson\nZIJUWQKzzsQ_0\tperson\nZJgwacILoAw_0\tperson\nZMgP2kxv5E8_1\tperson\nZM3wX5zgKOA_0\tperson\nZNXnJahaXIY_0\tperson\nZOc4wfLX2Jo_0\tcow\nZOnuSLp6asQ_0\ttrain\nZPQNucbAjBM_0\tcow\nZQITHWk17a0_0\tbicycle\nZQxmb_nVoH4_1\tcow\nZRUXj8o10Po_0\tperson\nZSnP5B6NiI8_0\ttrain\nZTqDuCZVTmM_1\tairplane\nZTqDuCZVTmM_5\tairplane\nZU3AYv2eU74_0\tmotorcycle\nZU4XQbNaYQc_0\tknife\nZVZWEWzZg50_1\tbird\nZVjep3tDJjU_0\tperson\nZWL6CshdsuY_1\tcow\nZWogXn8xs7E_0\tmotorcycle\nZXU4Uua3l0E_0\tcar\nZYOUZjfZMhk_0\tcow\nZYS0h2pAK6M_0\thorse\nZYm5iVw0YdE_0\ttruck\nZY8pG-I5Ax8_1\tbicycle\nZZBBcTBPmis_0\tperson\nZZpckGIvGTI_1\tboat\nZana4yKDGxY_3\tskateboard\nZana4yKDGxY_1\tskateboard\nZbnxzLt8FJk_1\tdog\nZbnxzLt8FJk_0\tdog\nZcXtrHkjobw_0\tperson\nZelRUJyMMkw_0\tperson\nZeqhN6ndscE_0\tperson\nZe8cOn59rW4_0\tperson\nZe8cOn59rW4_1\tperson\nZj1TAkYHlQo_0\tperson\nZj7GzCIi_9c_0\tperson\nZlEiOICCDdc_0\tperson\nZlH8Hd961FM_1\tknife\nZl30Oy50PfQ_0\tperson\nZmXKvpkfHZA_0\ttrain\nZmdvunyqJB8_0\tbus\nZqTkqkEbXEk_0\tcow\nZrPn3BODZJM_1\tperson\nZrPn3BODZJM_0\tperson\nZuBD3A8Vecs_0\tbird\nZuEbZKmjxaA_0\ttrain\nZuEbZKmjxaA_1\ttrain\nZu7udgxuUkk_5\tairplane\nZu7udgxuUkk_6\tairplane\nZu7udgxuUkk_1\tairplane\nZu7udgxuUkk_2\tairplane\nZu7udgxuUkk_3\tairplane\nZvadVS1LnQU_0\tbus\nZvadVS1LnQU_1\tbus\nZvadVS1LnQU_2\tbus\nZwLvs9JUsFY_0\tperson\nZw4-vF-vOMk_0\tperson\nZxO4Gd5fhOg_1\ttrain\nZxO4Gd5fhOg_2\ttrain\nZxX6DBopv30_0\tskateboard\nZyEA24Ud3EM_0\tperson\nZyM24-ekpz8_0\tperson\nZzBvzlzuw4M_0\tperson\nZ03ZC9qmwDc_0\tzebra\nZ1N0xBj_H3E_0\tbird\nZ1ns6XidhT8_0\telephant\nZ2S6XnfE5vI_0\tperson\nZ2kb4LiQJUU_0\ttrain\nZ2zB-gtDgOM_1\telephant\nZ22DSYtblFo_0\tbicycle\nZ5rHikLjARg_0\tperson\nZ6XKceRI1bE_0\tbus\nZ6XKceRI1bE_3\tbus\nZ6XKceRI1bE_6\tbus\nZ6XKceRI1bE_10\tbus\nZ6qQE2_jsIM_0\tskateboard\nZ68yTt3upjk_0\tmotorcycle\nZ8SxFPbnptI_0\tperson\nZ8pujku9bPw_0\tperson\nZ9vZk0io0fw_0\ttruck\nZ9vZk0io0fw_1\ttruck\nZ-R7-Ww03t8_0\tknife\nZ_kKBbIzdXM_0\tperson\nZ_pwMCnOdk4_0\tknife\nZ_pwMCnOdk4_3\tknife\nZ_0227AsAvk_0\tbus\nA_a1H0EO64s_0\tperson\nA_a1H0EO64s_1\tperson\nA_pc9ov1cT4_0\tperson\nA_weMKVolQM_3\tbear\nBBC4Jmlky4Y_0\thorse\nBBHBoewIXhw_1\tumbrella\nBBHBoewIXhw_3\tumbrella\nBBHBoewIXhw_4\tumbrella\nBCKR989ZYyM_0\tcar\nBCKR989ZYyM_2\tcar\nBCpaJ-tEv-0_0\tcar\nBFP7MT8RM8U_0\telephant\nBF7cTjrTSwY_0\tcow\nBF8d91cJS3o_0\tperson\nBGcAVF0Zi_o_0\tperson\nBGzetX8Dz-M_0\tcow\nBHurVVjld8Y_0\tperson\nBIUeggZa3SU_2\tperson\nBIUeggZa3SU_0\tperson\nBIUeggZa3SU_1\tperson\nBIfedkd3HEg_0\tboat\nBJaAlMv6b_U_1\tmotorcycle\nBKKSiAed9CI_0\thorse\nBKtAnbXVk1E_0\tperson\nBLCEb_seyUs_0\tairplane\nBLCEb_seyUs_1\tairplane\nBL8o-tdhlxs_2\ttrain\nBL8o-tdhlxs_3\ttrain\nBMhmY9_ltFc_0\tperson\nBO7KZKb9bkQ_0\tcow\nBQRwIXopDJw_0\tperson\nBQRwIXopDJw_1\tperson\nBQswg--xiy8
_1\thorse\nBRd8dUMN0a4_0\tknife\nBRmtavy2ZEo_0\tperson\nBR0NNg6gLLo_0\tperson\nBSo8wjoZ7zc_0\tskateboard\nBTSUQrxC6l4_1\tbus\nBUHULgt_7DA_2\telephant\nBU3iU3zJnDI_0\tperson\nBU8sEPifL08_0\tperson\nBVTVHHm7vkA_0\tboat\nBWNTXqGixw8_0\tbird\nBZUE0vDhMvk_1\tknife\nBb2fkGYxp2E_0\tperson\nBckXjb2o93U_0\tperson\nBdHNtn10UKE_1\thorse\nBeXziIDAJDc_0\tperson\nBgHV_87CxNI_0\tumbrella\nBgXr-bSqMIo_0\ttrain\nBhO0SwB8Ee4_0\tperson\nBh4m74dLZaM_0\tperson\nBlYWgnhwvkM_0\telephant\nBlYWgnhwvkM_2\telephant\nBmZNFBFj-ws_0\tperson\nBm2yaWXwgjY_0\tknife\nBpXhq5Awd3U_0\tdog\nBrC6VbCzRGc_1\tknife\nBrHslMc3UMQ_0\ttruck\nBscLJpi3AJc_0\tperson\nBv8WeZ_zrJc_2\tbear\nBzEC1EEC2ts_0\tperson\nBzXWK-LODVo_0\tperson\nBzbzymdK_TM_0\tperson\nBz6Od4GfW6A_0\ttruck\nB0DRHTdmeK4_0\tknife\nB31JkzyQDkg_0\tbear\nB5GVudI81dM_0\tdog\nB6nArbkcRek_0\tmotorcycle\nB6sR2aqScR4_1\tbus\nB7IP-2uNuWs_0\tskateboard\nB7yxjI6dz4s_0\tmotorcycle\nB8iZGZlQcsg_0\tperson\nB8opNd6uzmY_1\tperson\nB9GQwzI2Eqk_0\tdog\nB92X9Xn1P2s_0\tperson\nB-CJ8miJKPs_2\tcow\nB-n15EytPtQ_0\tperson\nB_WnXKd-oZk_0\tperson\nCADW3z8x4AU_0\tskateboard\nCADyh6laNA0_0\tmotorcycle\nCA3wWkrNnRs_0\tperson\nCBSNFKeTnpA_0\tbird\nCCyZAt2Js0U_0\tcar\nCE-LfFDfGKQ_0\tperson\nCE-LfFDfGKQ_1\tperson\nCFN40hxKxM8_1\tairplane\nCFPhXPCobFg_0\tperson\nCGg2FXjvvOA_0\tperson\nCH3phgDW5Fc_0\tperson\nCINfsd8LiOU_3\thorse\nCINfsd8LiOU_0\thorse\nCINfsd8LiOU_2\thorse\nCIqkbJoJhBI_0\ttrain\nCKmnpW6gboU_1\tboat\nCKmnpW6gboU_0\tboat\nCLtQxCqTzcY_1\tknife\nCMgYFnnxQUU_0\thorse\nCOcbSVCp4ig_0\tbicycle\nCOcbSVCp4ig_3\tbicycle\nCOcbSVCp4ig_4\tbicycle\nCOcbSVCp4ig_5\tbicycle\nCRF7PcgB2yQ_2\tbus\nCSnhpel7FTA_0\tperson\nCSriNtLepLs_1\tskateboard\nCVmBocpXeTc_0\tbus\nCWCfCeYh2bA_1\ttrain\nCWvjAYt5eR4_0\tbus\nCW9n8Gahfgg_0\tcow\nCXT98GHNtRU_0\tperson\nCZ-Sh-SXaRQ_0\tperson\nCan5eao1S3Y_0\tbus\nCbB-71R_n9M_1\tmotorcycle\nCbpAv8c2Vsg_2\tcar\nCbpAv8c2Vsg_3\tcar\nCb3iufTFMEU_0\tperson\nCc2vs8vuPmU_1\tbird\nCc8E7aTdEVM_0\tperson\nCdain96L-q0_0\tbus\nCd7g3ZoA5tQ_0\tbus\nCeN22koBQRM_0\tperson\nCe2jOHHBDLk_0\tmotorcycle\nCe7IPtXkNcs_0\tperson\nCfqkbrB0Yy8_0\tperson\nCf2jOSj7eRg_2\ttrain\nCjbhKc3Vjpo_0\tperson\nCkEVvGqgVkQ_1\tknife\nCl13SbLP0hE_2\thorse\nCl13SbLP0hE_3\thorse\nCl13SbLP0hE_0\thorse\nCl13SbLP0hE_1\thorse\nCl-lB_jS8Wg_1\tbear\nCnMMdc6syXM_2\tumbrella\nCoxzc_S3ID0_1\tknife\nCpLMLRdeJJ0_0\ttrain\nCpN-qOO6Qm4_2\tairplane\nCpyK9j001RY_0\tperson\nCqNEwP8PwS4_0\tbear\nCqNEwP8PwS4_1\tbear\nCqYiAanNpo4_0\tperson\nCqbu8vOsszI_0\tcat\nCr5p4NYIR44_0\tperson\nCttKQip6B2E_0\tperson\nCuGu45Z4lt8_0\tknife\nCvszgVrLsgA_0\tperson\nCwYG2Hf6-NY_1\tcow\nCwvR1fjMeSU_1\thorse\nCyuollntwZ8_0\tdog\nC1dCZ9W6WIM_0\tperson\nC2x3rdWMAyg_0\tdog\nC3lwMd_rlG0_0\tperson\nC5MrhYouFTc_0\tcow\nC5SKibJTnR4_0\tcat\nC6dANICzCcg_0\tperson\nC6xJeHO8XSE_0\tperson\nC7NXymSnEFw_0\tbird\nC8ExRKjU1vY_0\ttruck\nC8V2-wEjv5A_1\tcow\nC8sUABBP0Jc_1\tbicycle\nC8sUABBP0Jc_2\tbicycle\nC80bmA0XrjM_0\tperson\nC886JwUWvxw_0\tskateboard\nC-Tal1XUc8o_2\tperson\nC-zp91eJqtk_3\tbird\nDApDao4fUqQ_3\thorse\nDApDao4fUqQ_1\thorse\nDApauH43Ivo_0\tbicycle\nDBArY7gHuoY_0\tcow\nDBsBTVJNxS8_0\tdog\nDBsBTVJNxS8_1\tdog\naCNvyXSuG6w_0\tperson\naCVmJCtuPeg_0\tbird\naCVmJCtuPeg_1\tbird\naDMk7CwLIxM_0\ttrain\naERiDkn_gkY_1\telephant\naEwD6TC8S4w_1\tbicycle\naFEOvm-1KvA_0\thorse\naHM4Dj-2y8o_0\tairplane\naI0y0wY4LQw_1\tperson\naI0y0wY4LQw_2\tperson\naJAd-MiEsfk_1\tperson\naJWETVChAE8_0\tperson\naJoKSWtqs0g_0\ttruck\naLYtaO_J2_U_0\tperson\naLbjxTwAV7o_0\tperson\naMDD0PenhaM_0\tcow\naMgj1BUBexw_0\tperson\naNgAUBTbUUM_0\tperson\naNmgrcJxdw8_0\tmotorcycle\naN2a-rDAYDQ_0\tdog\naN2a-rDAY
DQ_1\tdog\naOhumbyx05c_0\tcat\naQcTwMVs1Zk_0\tskateboard\naQcTwMVs1Zk_1\tskateboard\naQx68fklEXA_1\tdog\naSGod2MJ5ww_1\thorse\naSq5ZqH_K7E_0\ttruck\naTAXvSNkuvc_0\tbus\naUFxg301s68_1\tskateboard\naUsTtvWAzAc_0\tperson\naV8S5HLSI_o_0\tperson\naWHaR4ExDpk_0\ttruck\naWIZBHwtII8_0\tmotorcycle\naWgH9T2sGkE_0\tboat\naWmC8Tbgy9A_0\ttrain\naXa5YE_AmKg_0\tperson\naYAuay_bTaw_0\tcat\naYVEZrX4mE0_2\tbear\naZRYQJd-5CQ_0\ttrain\naZRYQJd-5CQ_4\ttrain\naZRYQJd-5CQ_3\ttrain\naZRYQJd-5CQ_6\ttrain\naaZxOcHxPec_0\tperson\nab_RTkwBG_4_0\tperson\nacy4aJnh9SU_0\tperson\nac68trlkEnw_1\thorse\nadsmRxlAJo4_0\tdog\nafE4YqgaPlw_0\tskateboard\nafU2vHgUvaw_7\ttrain\nafU2vHgUvaw_2\ttrain\nafU2vHgUvaw_3\ttrain\nafkiqhwTeRQ_0\tperson\naiOHs3hApm0_0\tskateboard\naiOHs3hApm0_1\tskateboard\naij190b9wtM_4\tbear\nakWe9oXeKzA_0\tperson\nak1XT_Nl7VU_0\tairplane\nak4CfFF9Bpk_0\tperson\nalbeyJBtKD8_0\tperson\nalp0ImrbacI_0\tdog\nal12VKid_P8_0\tperson\namyr6d2Ns6M_0\thorse\namyr6d2Ns6M_4\thorse\namyr6d2Ns6M_6\thorse\nao9LHpxNCqY_0\thorse\napLT3-LKJgE_1\ttruck\napXNcHROKyY_0\thorse\naqp_quyEngw_0\tairplane\naspR9ca28CY_0\tperson\nas3DGRDezaA_0\tperson\natElNgnFvlk_0\tperson\nat-Ex-CnRX4_0\tairplane\nat-Ex-CnRX4_1\tairplane\nau_kgqsZlMU_0\ttruck\navRC7M3_kuA_0\tbird\nawnORAEMUIg_0\tperson\naytqFnOdBLA_0\tperson\nazLbVm88Dzc_3\tairplane\nazLbVm88Dzc_2\tairplane\nazXlb1cxVGQ_1\telephant\na1qoB1eERn0_0\tperson\na2-lZhKXx9E_0\ttruck\na3In51YCqMg_0\tdog\na3T8T1R2wAc_0\tbear\na45XOJQaDQI_0\tperson\na5dffDLeZsI_0\tairplane\na7hjIfPGJqI_0\tcat\na74_tj_B-YA_2\tknife\na74_tj_B-YA_1\tknife\na8v0k4Bz_QA_0\tperson\na9jgDU5THOU_0\tperson\na97S4U5ezQw_0\ttruck\na97S4U5ezQw_1\ttruck\na-M2_3j67qI_4\tknife\na-M2_3j67qI_5\tknife\na-M2_3j67qI_6\tknife\na-NeSgN26Zo_0\tbicycle\nbAKQZ0F7LFw_0\tperson\nbA10PjxgV3w_1\telephant\nbBPKh_BPJ50_4\tbear\nbBPKh_BPJ50_1\tbear\nbBW4swLrEHE_0\tperson\nbB6tIraYEaI_0\tskateboard\nbCDw1dn7M1Y_0\tcar\nbCDw1dn7M1Y_1\tcar\nbCWM39xLsYs_0\tskateboard\nbDFkztSgMko_0\tskateboard\nbD6xZhJfhMU_0\ttruck\nbFnzGS_doNQ_0\tperson\nbGFRHhc7zUI_1\tperson\nbGZtGWULlF0_0\tskateboard\nbGZtGWULlF0_1\tskateboard\nbIOpYFVLesY_0\tperson\nbJviDDrUSwA_0\tmotorcycle\nbKB6ESqkOic_1\ttruck\nbKRAinEnagU_1\tmotorcycle\nbKRAinEnagU_0\tmotorcycle\nbNXcPzWMXsw_0\tcar\nbN43crdYDJE_2\tbus\nbOL9YHt5u-o_0\tskateboard\nbOL9YHt5u-o_1\tskateboard\nbOofbwD246U_0\tperson\nbPKew4jsGkE_0\ttruck\nbPRVRL4x5T0_0\ttruck\nbQkneVc9gaA_0\tairplane\nbQ64JFsWSf0_0\tbicycle\nbRWbXGRwlVY_0\tperson\nbS1Z1k6laqY_0\tperson\nbUqFsPoDKBE_0\ttrain\nbVP58EONEm4_0\tcow\nbW4nHswGFPo_0\tmotorcycle\nbW5IvSesbV0_0\telephant\nbXR-iz0NfrA_0\tcat\nbZDsNeqNn9I_0\tcar\nbZDsNeqNn9I_2\tcar\nbZDsNeqNn9I_3\tcar\nbZDsNeqNn9I_5\tcar\nbZIU-ajwk6Q_0\tbicycle\nbZIU-ajwk6Q_1\tbicycle\nbZ6Tq0KWSsU_0\ttruck\nbZ6Tq0KWSsU_2\ttruck\nbanaB07Fu9c_0\tbear\nbcKUeyEaRPw_6\tbicycle\nbdhq0SKEqe4_0\tperson\nbd3b9R30l-E_0\tperson\nbeDuTpy1tg4_2\thorse\nbeDuTpy1tg4_0\thorse\nbeLkXAaP78Y_0\ttrain\nbe30TAE-gq4_0\tperson\nbfQSyBsTmE4_0\tumbrella\nbgSSzKax51E_1\tmotorcycle\nbgSSzKax51E_0\tmotorcycle\nbhoUxK8FSqc_0\tperson\nbhuPA9toCGY_0\tperson\nbiIFNnX2Nl4_0\tskateboard\nbiu2ssO3dRg_0\tbus\nbjRPge2oFgU_0\tknife\nbjV04dzuqhk_1\telephant\nbjdIG6B5zn0_0\tperson\nbjdIG6B5zn0_1\tperson\nblPLp16K1XY_2\tbicycle\nbmJ_QDIRS2U_1\ttrain\nbmJ_QDIRS2U_2\ttrain\nbmJ_QDIRS2U_3\ttrain\nbmLsrJHQQ14_4\tknife\nbnBORorLvmk_0\tperson\nbnBORorLvmk_1\tperson\nbnVGsydNrg8_0\tairplane\nbnVGsydNrg8_1\tairplane\nbnZbj1dD0qs_0\tumbrella\nbn0I2aJB5Ps_0\thorse\nboMU1mjUSDw_0\tskateboard\nbo8M-OTk4J0_0\tperson\nbpw3BCxYYU4_0\thorse\nbqoDChNwIYY_0\tu
mbrella\nbrJqQ_iH2VE_0\tperson\nbrMVhyEZLfo_0\tperson\nbs5AY2jipno_0\ttrain\nbtL-vruELoA_0\tperson\nbtq7gMuqMuo_1\tperson\nbtq7gMuqMuo_0\tperson\nbvEJDHpRNoI_0\telephant\nbvVfFv57gN4_0\tbus\nbvVfFv57gN4_4\tbus\nbwhPTEvGmIo_0\tperson\nbydgNyGwoys_0\tperson\nbziUK-7O0lY_0\tdog\nb0Z6qKhuldo_0\tskateboard\nb0sKQDUFTos_0\tperson\nb1s-jYD36GQ_0\tperson\nb4Wua_98Y9U_0\tperson\nb4d_9Yc0MwY_0\tbicycle\nb4qC2fctnLU_0\thorse\nb4zSrjPtOfs_0\tbicycle\nb5CJtpeG1Lc_0\ttrain\nb5CJtpeG1Lc_2\ttrain\nb5CJtpeG1Lc_1\ttrain\nb5mOcLykYeQ_0\tcow\nb9VOmo_86Ds_1\tperson\nb_W4BWH1i_A_1\tperson\nb_W4BWH1i_A_0\tperson\ncBxo9bPINJc_0\tskateboard\ncCEImigNo38_1\ttrain\ncDHZtfsI_gM_0\ttrain\ncDHZtfsI_gM_1\ttrain\ncDmkhESohro_0\tboat\ncEcTernKOqU_0\tperson\ncEcTernKOqU_1\tperson\ncGJLuwZIG5s_0\tgiraffe\ncGJLuwZIG5s_1\tgiraffe\ncGJLuwZIG5s_2\tgiraffe\ncGwjfCPO-7k_0\tcar\ncH0sXpOxvy0_2\tbird\ncH9u1pCWp2U_0\tperson\ncH_SL9CR8y4_3\tdog\ncIxdxFkZ7y8_0\tdog\ncIxdxFkZ7y8_1\tdog\ncJvh4GqZn-s_0\tperson\ncKQQVTnOzBk_0\thorse\ncLULEYFoBPc_2\tcow\ncMdjRuUhBIs_0\tmotorcycle\ncMdjRuUhBIs_1\tmotorcycle\ncMwa9cC304w_0\tcow\ncMwa9cC304w_1\tcow\ncNDYJRBsIOY_0\tdog\ncPlqWSd2TUc_0\tperson\ncP-p4R-JZxY_1\tbird\ncRBw9lx-EKA_1\tbus\ncR2-4m174EM_0\tbird\ncR-AWpc5zTs_0\tperson\ncTujx-TutbA_1\thorse\ncUrajeQPzpQ_0\tumbrella\ncUrf-ZwPzxI_0\tperson\ncUwPVOboe0k_0\tperson\ncVng1vleWNY_0\tperson\ncVrxfV0w29w_0\tperson\ncXZ7JY7YQmE_3\tbird\ncYdqN1oPRdY_0\tperson\ncagT3K3Ep3s_0\tskateboard\ncagT3K3Ep3s_1\tskateboard\nca8rEbHYMXg_0\tcow\nca-ko46j2fQ_6\tairplane\ncbL66gVAa5Y_0\tcow\ncctYyTO8OtU_0\tperson\ncc3mBIHi-GU_0\telephant\ncdNz1OLa1tU_0\tcar\ncf_U0G5W8BI_0\tperson\ncggX7PRYUh0_0\tperson\ncg_5uaJjLHk_0\tperson\nch_23jXJ_vA_2\tdog\nciCfkv5831Y_0\tairplane\ncih9W0SPGYA_0\tbird\nciwNB-l9a88_0\tperson\ncjHlHkhg0z0_0\tperson\nckFwzL1Ot94_0\ttruck\nckV9ay1lm7A_0\tairplane\nclZo-o5v1EA_0\telephant\nclvCQPta7y0_2\tbird\nclvCQPta7y0_0\tbird\nclvCQPta7y0_1\tbird\ncmTPsZ9x3PE_0\tcat\ncmW0Y4KGI7g_0\tgiraffe\ncnhhgh_z5NU_0\tcow\ncnqT4u0k3sM_0\tumbrella\ncpK8K6JD_GM_0\tairplane\ncpK8K6JD_GM_2\tairplane\ncprvb4cW5x4_0\tmotorcycle\ncqd8PRxMakA_0\ttruck\ncqvjKRFEi8M_1\tcar\ncrys7VEeUgU_0\tperson\ncskBHjsDXEs_0\tcow\ncso6B_84BFA_0\thorse\nctm9x2MaZuk_0\tcat\ncxu1qpzXobY_1\tbird\ncxu1qpzXobY_12\tbird\ncxu1qpzXobY_0\tbird\ncxu1qpzXobY_2\tbird\ncxu1qpzXobY_4\tbird\ncxu1qpzXobY_5\tbird\ncxu1qpzXobY_6\tbird\ncxu1qpzXobY_7\tbird\ncxu1qpzXobY_8\tbird\ncxu1qpzXobY_9\tbird\ncxu1qpzXobY_10\tbird\ncxu1qpzXobY_11\tbird\nczO8IPcAO1A_0\tperson\nc1FBptbYp3I_0\tperson\nc1FBptbYp3I_1\thorse\nc2T3VDriTaY_0\tknife\nc39xfJcSlxk_0\tdog\nc4kbPHdCIE8_1\telephant\nc43mnrjx2MU_0\tbus\nc5fPKbV5cAM_0\tperson\nc53j9l_w3Cg_3\tdog\nc7gnf6G7Jpw_0\tskateboard\nc7oqQy2Fvlw_0\ttruck\nc8JhzKh1i7s_0\tperson\nc8JhzKh1i7s_1\tperson\nc8gBv0b5g9w_1\telephant\nc8iU4McayiU_0\tperson\nc8iU4McayiU_1\thorse\nc8u5Y95o7jE_0\tskateboard\nc84BjBiic4s_0\tmotorcycle\nc93WuBjZeRk_0\tperson\nc-nMPinePds_0\tcat\nc_aupqZy-14_0\tairplane\nc_o91IPAB-c_0\tumbrella\ndAHCPltzogA_0\tbird\ndAP6fuArseQ_5\telephant\ndAtQR4dHPgE_0\tperson\ndA0WQ_RubaI_0\ttruck\ndBzXNQJRzls_0\tcat\ndCJFMDQBPb4_0\tboat\ndEIuy8LjAxc_0\tcar\ndElaQ10vYqg_1\tmotorcycle\ndHMFcv4UnmU_1\tbus\ndIP3FoGUXDQ_0\tperson\ndJYqTnxujb0_0\tperson\ndJnLznNE29w_0\ttrain\ndJnLznNE29w_1\ttrain\ndJ9qJezt6do_0\tcar\ndJ9qJezt6do_1\tcar\ndKmrUcJ9rJY_0\tperson\ndKmrUcJ9rJY_1\tperson\ndK3_HiQMH4o_0\tdog\ndMFsGGvkSVU_7\tairplane\ndMFsGGvkSVU_0\tairplane\ndMFsGGvkSVU_3\tairplane\ndMFsGGvkSVU_5\tairplane\ndMFsGGvkSVU_6\tairplane\ndNByeKh4gnA_0\tperson\ndNJ0q9QKzmY_0\tboat\ndNQYo7
REyBU_0\tperson\ndOkb5WhLZGU_0\tperson\ndO0uu_fVUVI_0\tcar\ndO0uu_fVUVI_1\tcar\ndO4Jxsf987s_0\tbus\ndO-OrWse3dA_0\tcar\ndPCSntP-29E_0\tperson\ndPCSntP-29E_1\tperson\ndP7je2qU_QA_0\tdog\ndQIlnQxMIKo_0\ttrain\ndQIlnQxMIKo_4\ttrain\ndQIlnQxMIKo_5\ttrain\ndSAlTJeDlfQ_0\tperson\ndTvJyUKKshw_1\tperson\ndTzaYePj1gY_1\tcow\ndT5gXQAE-Qk_0\ttrain\ndT5gXQAE-Qk_2\ttrain\ndT5gXQAE-Qk_3\ttrain\ndUpoYuxpKPM_0\tperson\ndVTCCi__Z4Y_1\tperson\ndVte44AGoEE_0\tknife\ndW4RjdpTaJo_0\tperson\ndXYYgzjwm8w_0\tperson\ndXf-d5rkqdA_0\thorse\ndZv4xXpV6js_0\tboat\ndaeBFAZFQhU_0\tperson\ndbXKW9_L9sE_0\tbird\ndbwBzQuj1uA_0\tperson\ndc5oaWIkfwg_0\tcat\ndc-iaCwezlU_0\ttrain\ndeO0aj59T8o_0\tperson\ndfU8DcWDX8U_0\thorse\ndfU8DcWDX8U_4\thorse\ndgcW3TkPLmk_0\tboat\ndilCe3bivVk_0\tbus\ndi59PG3l25w_0\tbicycle\ndi59PG3l25w_1\tbicycle\ndjsh1r_W6ko_0\tperson\ndjt1lzJn7ak_2\tbird\ndlYwqfTRqoo_0\tperson\ndl-bg8WPGZs_0\tperson\ndmk3Cedj6g0_0\tperson\ndn006hdarCg_5\telephant\ndn006hdarCg_4\telephant\ndn006hdarCg_6\telephant\ndn006hdarCg_7\telephant\ndn006hdarCg_10\telephant\ndn7iBi1t7UI_0\tcow\ndn83BrM71W4_1\tboat\ndoOsOyiHItw_0\tperson\ndpqVH2tgA3E_0\tperson\ndqlk6F07Cxw_0\tmotorcycle\ndrohCN_vwC8_0\tmotorcycle\nds7JGeImFXo_0\thorse\ndtsLwaO2des_0\ttrain\ndt5TzAZByk0_0\tperson\nduROYI-AZlk_0\tperson\nduROYI-AZlk_1\tperson\ndutryxrzRjE_0\tumbrella\ndvDxOc2VWhc_0\tperson\ndvP5Dsp8EZA_2\tdog\ndvTIkEA7rOc_0\tperson\ndvvoKcQ5OOQ_3\tbear\ndvx9-0cVEYc_0\tperson\ndwQuyR9XFVM_0\tskateboard\ndxcnKYynkEY_1\tcow\ndxmxpyj3WVk_0\tknife\ndxmxpyj3WVk_3\tknife\ndyUVa3ZQVFg_0\thorse\ndzitRPrX410_0\tcow\ndzpcdtcQLfY_0\tmotorcycle\nDEnqBEwPykc_0\tperson\nDFCqlvY5OFY_1\tbus\nDFXptvzN9V8_3\tumbrella\nDFqSvoSh-qA_0\tcat\nDHEtea1hPBc_0\tperson\nDHwUCu0rrvc_0\tboat\nDJ_neeMWAuw_2\tdog\nDLsYDXqthiY_0\tskateboard\nDMBbH5HyOME_0\tperson\nDMn3ruRAObI_0\tperson\nDMyjVWCLbes_0\tperson\nDM6e1vEjYeM_0\tbicycle\nDM6e1vEjYeM_6\tbicycle\nDND0C3XD7mQ_0\thorse\nDOQilAKERwk_0\tumbrella\nDOmE1dA6CoQ_0\tperson\nDQJ4cPhVhFg_0\tairplane\nDT895n1nqqY_5\tbicycle\nDT895n1nqqY_4\tbicycle\nDUO7S4ma320_1\tcow\nDUO7S4ma320_0\tcow\nDU9GDCN25lI_0\tperson\nDV4bDUzPAIU_0\ttrain\nDWxidp6TWlg_0\tairplane\nDXhV8uXKo7w_0\tcow\nDXxF81ZJ_Jo_0\tcow\nDX1_rKFVugE_0\tdog\nDYBLqnRCo7g_0\tcat\nDZ2-5rYAUVk_0\ttrain\nDasqUqgdRv0_0\tdog\nDbNVb8C-Au8_0\tperson\nDbcdvAsVI48_0\tperson\nDcZSisTgSJs_0\tairplane\nDc9pWTcUNXY_5\tbear\nDeVQ3mr19Sw_2\tskateboard\nDeYmal3wAoE_2\tdog\nDeYmal3wAoE_0\tdog\nDfOuxNA9lro_1\tgiraffe\nDfXOTMc9IyM_1\tdog\nDfbPDcLTZEo_0\tairplane\nDf89T9IxDvc_0\tperson\nDf93ocrYlyY_0\tperson\nDgBuwqAbIkI_0\tskateboard\nDgBuwqAbIkI_1\tskateboard\nDhA0S7lPFVw_9\telephant\nDhA0S7lPFVw_0\telephant\nDhA0S7lPFVw_1\telephant\nDhA0S7lPFVw_2\telephant\nDhA0S7lPFVw_4\telephant\nDhA0S7lPFVw_5\telephant\nDhA0S7lPFVw_6\telephant\nDhA0S7lPFVw_7\telephant\nDhA0S7lPFVw_8\telephant\nDhEO4MuDBOc_0\tdog\nDhJAQCycHJs_0\telephant\nDhU-e-L13WM_0\tperson\nDhU-e-L13WM_1\tperson\nDhU-e-L13WM_2\tperson\nDiLGyNCykDE_0\tskateboard\nDjQx_qEnXko_0\tairplane\nDkMltyvC5l4_0\tperson\nDmPTbBo32qI_0\tbear\nDmzlB4KBLN4_0\tbird\nDm-XQKFA-BQ_0\ttruck\nDni4lPw5oH0_0\tperson\nDnzZd_9JlAA_0\tcat\nDoB18AvtSxQ_0\ttrain\nDofzMEokur0_0\tperson\nDonLBf92rMc_0\tdog\nDpp4k_BzZY8_1\tairplane\nDqcEAexhJ10_0\tcar\nDr6LfvQ_qKo_0\tcar\nDs_4eRyQDPo_2\tboat\nDuLk58XzeyA_0\ttrain\nDuv1XrdytdE_0\tcow\nDu4jlCLKZds_0\tperson\nDvjMMfcCq3U_0\tperson\nDvuTkGshMjA_2\tcow\nDvx0WVMuXVw_3\tboat\nDw4--8weqIA_0\tperson\nDx0LbiFgvPI_0\ttruck\nDyY1MPuGf5w_3\tdog\nDzUJVl_Pej0_0\tperson\nDzV-LWU5GoY_0\tperson\nD0b7xYmwl-M_0\tskateboard\nD0fhKhpAhJM_0\tzebra\nD
0jRA5TKT-o_0\tperson\nD1vTDW7YDTk_0\tperson\nD2hRnCm0JtM_0\tperson\nD2oV8BC0iq8_0\tperson\nD21mLV716vI_0\tperson\nD32GncZb51Y_3\ttruck\nD4Jcg1u1Z-o_0\tperson\nD5maMxzZBe0_0\tperson\nD5m40zCfU8E_0\tperson\nD6E0xgBBquU_0\tperson\nD68oMT6tpc4_0\tperson\nD7H1UQbgDOw_0\tcow\nD9RGgV3fKds_0\tbird\nD_a5TQmLY-Y_1\tperson\nEBJ5jExrVqY_0\tcow\nEBLJ9v0QSrU_0\tcar\nEBUmagxsoV8_0\tperson\nEC8ftAGy2qA_2\tskateboard\nEDBDHaRqToc_0\tdog\nEEZKnzcn-v0_0\tcat\nEEfiTwozdM0_0\tcow\nEExHYyuWa-o_6\tbird\nEExHYyuWa-o_2\tbird\nEExHYyuWa-o_5\tbird\nEFRywDKULxc_1\ttrain\nEIl3WAxkNwc_0\ttrain\nEJJXpIiBEuw_0\tcow\nEJrj49l1N8k_0\tairplane\nELPjTNVxWfM_0\tperson\nEL-2TiSSQJg_0\tbear\nENPh0zyq2wo_0\tmotorcycle\nEOAADsR4IpM_0\tcow\nEP3xfG5_2i8_0\tcow\nEQN5hODdb6o_0\tskateboard\nEQ09ewMQn8Q_2\tbird\nEQ09ewMQn8Q_0\tbird\nEQ09ewMQn8Q_1\tbird\nEQ9vXT_IFYQ_7\tbird\nEQ9vXT_IFYQ_3\tbird\nESxRPsxVX-U_0\tcar\nETxRky6I39w_0\tperson\nEVD8F2ZOBbI_0\telephant\nEVYb5simSY0_0\tumbrella\nEWOehvvAvqU_0\tperson\nEXK2mcPIoBI_3\tskateboard\nEXK2mcPIoBI_0\tskateboard\nEXK2mcPIoBI_1\tskateboard\nEXK2mcPIoBI_2\tskateboard\nEXeKX_vOTvc_1\tcar\nEd-cfsA3BsU_0\thorse\nEeQOKiPASgY_0\tperson\nEfAYg1FMY-4_0\tbear\nEfAYg1FMY-4_5\tbear\nEfAYg1FMY-4_4\tbear\nEfSd4ucOXKs_0\ttruck\nEfbKwoMA6Kk_3\thorse\nEgpujPNldhs_0\ttrain\nEhQXwVQsngU_0\tboat\nEj0A86Eu1p8_0\tperson\nElHgkP_L8Eg_0\tairplane\nElTbW5itOAs_0\tcar\nElTbW5itOAs_3\tcar\nElTbW5itOAs_4\tcar\nElTbW5itOAs_7\tcar\nEmvEUer4CVc_0\tumbrella\nEnIkH0jrzaI_0\tskateboard\nEn6a3Ed7fvk_0\tperson\nEo5s8ykuzbU_0\tperson\nEpBZ77zmngM_0\thorse\nEpPw2JoHiTQ_0\tperson\nEqPK8xdf8hQ_0\tperson\nEqdBE21XAks_2\tumbrella\nEqdBE21XAks_3\tumbrella\nEqdBE21XAks_4\tumbrella\nEqz3xG4mWTs_0\tperson\nErN8-oTPkq0_1\tperson\nEr-RnWQrUac_0\tcat\nEsvPqOf-zEA_0\tperson\nEtIj5IUtn-g_0\tairplane\nEtIj5IUtn-g_1\tairplane\nEtIj5IUtn-g_2\tairplane\nEtMlgBveP58_0\tdog\nEtMlgBveP58_1\tdog\nEtkDITl8mEM_0\tperson\nEwlCKB77dYo_4\telephant\nEwlCKB77dYo_2\telephant\nEwlCKB77dYo_3\telephant\nEwqkMKutzBE_1\tknife\nEw-67eGgZAI_1\tmotorcycle\nExRpjMcFoBY_0\tdog\nEzRrohN-4ss_0\tskateboard\nEzZW0lM284U_0\tskateboard\nE2DbbyoqLg0_0\tperson\nE2DxfZPPu5Y_0\thorse\nE2DxfZPPu5Y_1\thorse\nE2DxfZPPu5Y_2\thorse\nE5erp1mhTzk_2\tbear\nE7CsRpWElOo_0\thorse\nE76rAl8oksk_0\tdog\nE9ARkaJcz2M_0\tperson\nE9J03vUxTZQ_0\ttruck\nE9w2-Y4d3MM_2\ttruck\nE9w2-Y4d3MM_0\ttruck\nE-ea5keAG3Y_0\tperson\nE-jpkZw_MdU_0\tmotorcycle\nE_cxlc0vrMg_0\thorse\nFBA18EyY2eI_2\tboat\nFBQpWJPC5pQ_0\tperson\nFBQpWJPC5pQ_1\tperson\nFBo954IqOlo_1\tbicycle\nFBo954IqOlo_5\tbicycle\nFBo954IqOlo_0\tbicycle\nFBo954IqOlo_2\tbicycle\nFBo954IqOlo_3\tbicycle\nFCICeCD4dKc_0\tperson\nFCypWBdHWb8_0\telephant\nFDKvBZH5LZE_0\thorse\nFD89Oq7BclA_0\tskateboard\nFETKMmV7P70_0\tmotorcycle\nFETKMmV7P70_1\tmotorcycle\nFEbVjS5-4ps_0\tperson\nFEsMY2y49d0_0\tperson\nFFuW_UWBVpU_0\ttrain\nFHRrYqTZExQ_0\tperson\nFID77dKUAU8_0\tcat\nFITKtv4tf7w_0\tcow\nFIi2mEV5dfQ_0\tskateboard\nFIi2mEV5dfQ_1\tskateboard\nFIvujc5oqIY_0\ttrain\nFJDKoEDLbNc_0\tairplane\nFLsLXPchOx0_0\tknife\nFMV_-mdKV8U_0\thorse\nFNNrfAuIQmo_1\thorse\nFNpd4DJ9LBA_0\thorse\nFPrcQJh9INg_0\tperson\nFQMXzPIoL14_2\tbird\nFQ-_p0lM-FM_1\telephant\nFRxSISi7wV4_0\tbicycle\nFSFW4QxV8-0_1\ttruck\nFUlVrltDAOk_0\tbird\nFWNxjmydNdU_0\tperson\nFYVNE1zYmyA_0\tperson\nFZrXRU5CxC8_0\tboat\nFaG9RreeG6M_6\tbicycle\nFaG9RreeG6M_2\tbicycle\nFbF-nKQx0WI_0\tperson\nFcP50mFdaYM_0\ttrain\nFdPApnQkBVQ_0\tbird\nFdPApnQkBVQ_1\tbird\nFdlDAmvsrR0_0\thorse\nFd1uYmMhzPE_0\thorse\nFedOlGadIYU_0\tbird\nFgd7fHxPhBs_0\ttruck\nFhQLl40AANQ_0\tbicycle\nFhvdS8wJkrI_5\tbicycle\nFhvdS8wJkrI_1\tbic
ycle\nFhvdS8wJkrI_2\tbicycle\nFhvdS8wJkrI_3\tbicycle\nFiCIZpT08B0_0\tcow\nFiD6UZuDr1M_0\tperson\nFjFwrTEJK1U_0\tperson\nFjmcQfLBpvQ_0\tperson\nFkSfwpb1Gss_0\tperson\nFkhru_XyPSU_4\tbicycle\nFkhru_XyPSU_1\tbicycle\nFlOaA91Qa2M_0\tcow\nFm7Z44jVp_A_1\tperson\nFm7Z44jVp_A_0\tperson\nFnIpAhpGTps_0\tperson\nFn0IWwSVPlk_0\tperson\nFotm2Ewrdr8_0\tdog\nFphk_JpP4JY_2\tbus\nFp2WKSG1qGw_0\tperson\nFrFv1rYtAws_0\ttrain\nFr298zXE9O8_0\tumbrella\nFshCFVUSBXY_0\tperson\nFsiLiUl9I10_1\tdog\nFs0LVU4qKSs_0\tskateboard\nFtEi5TPqRiA_0\tdog\nFuWY9thbtxw_0\tairplane\nFu9EsTmh8z0_0\tperson\nFvCCkxW3sv8_0\tperson\nFvDNYPmcXjQ_0\tbear\nFvDNYPmcXjQ_5\tbear\nFvDNYPmcXjQ_1\tbear\nFvDNYPmcXjQ_3\tbear\nFvHW0PyfZ_Q_1\tskateboard\nFvHW0PyfZ_Q_4\tskateboard\nFvHW0PyfZ_Q_5\tskateboard\nFv542o8y6aE_0\tperson\nFyEliJtlQIY_0\tperson\nF0PPPvVTNnE_3\tbear\nF3iJ9TqS-lE_1\tbear\nF3iJ9TqS-lE_0\tbear\nF39H1yTLerI_1\ttrain\nF4xCJHUMGsE_1\telephant\nF47hXNWC3K8_0\tcat\nF48wdm2YukQ_0\tbicycle\nF48wdm2YukQ_5\tbicycle\nF5Cc5wQJvhI_0\tperson\nF5Tm5BM0oaM_0\ttrain\nF5unbOiULNM_0\tmotorcycle\nF5unbOiULNM_1\tmotorcycle\nF9B5cLZb3T4_4\tbicycle\nF-OWsiGzRg0_0\tperson\nF_bZObIr47Y_0\tbicycle\nF_bZObIr47Y_1\tbicycle\nF_dg4Hi5ZU0_0\tcar\nF_xLwEhMPdY_0\tperson\nF_8rnxkAIgQ_0\tperson\nF_88eTR1pKU_0\ttrain\nGAMoEnodBZ8_1\tbicycle\nGAZx8145Hkk_1\tperson\nGAZx8145Hkk_0\tperson\nGCW28zxN9vk_0\tperson\nGDM2ctXPkmg_0\tperson\nGD5lsE86vOA_0\tcar\nGE2nS7Zbkrc_0\tairplane\nGE6JO6nrE2A_0\tperson\nGF9unI6hEMI_0\tairplane\nGGULYyv3_eY_0\telephant\nGGULYyv3_eY_1\telephant\nGGVYYc0KNWc_0\ttruck\nGHTZcjImEqk_0\tperson\nGIJMEjX04dI_0\tperson\nGIM6FHDMp0A_0\tperson\nGJTjlO1FJpo_3\tbear\nGJTjlO1FJpo_5\tbear\nGKyxtLTjXUU_1\tmotorcycle\nGLG6II1JYko_0\tbird\nGLpNrOwqNXc_0\tperson\nGLvmwdOjsHE_0\tcow\nGOEqT5_bhls_1\telephant\nGOVFUFYsINQ_2\telephant\nGOfP3fxCTvw_0\tperson\nGPPKPFCI-Kc_0\tperson\nGPSXltbv0f4_0\tmotorcycle\nGP5anr-xMfw_0\tperson\nGRluMAZzu8c_0\tairplane\nGSlWcX28sLk_0\tperson\nGUMAgiab8bg_0\tperson\nGUQmoD1aWhw_0\ttruck\nGUS7BLoHHPk_0\tairplane\nGVNmuLeQ6pA_1\tairplane\nGVNmuLeQ6pA_2\tairplane\nGWBEjzdOLjI_0\tgiraffe\nGWBEjzdOLjI_1\tgiraffe\nGWBEjzdOLjI_4\tgiraffe\nGXMBH6OujvQ_0\tperson\nGYM460lVV-k_0\thorse\nGYQO-VevHpI_0\tperson\nGYYxgR_VGFQ_0\tdog\nGZSlxtl9bj4_0\thorse\nGZSnngz0VX4_4\tdog\nGZhWdIsibfs_2\tbear\nGaierMnR4Xk_1\telephant\nGbe74-OWIo4_0\tperson\nGbwJhzDrFtI_0\tairplane\nGceLsS4AwH8_1\thorse\nGcjSF4Uyl74_0\tperson\nGdoD65Qn6kE_0\tcat\nGeOos0BFCSY_0\tbus\nGf_4plKc8tw_7\thorse\nGk8oy0G3dRU_0\tperson\nGlAH7-Rf8gc_1\ttruck\nGm9yMiay9Is_2\tskateboard\nGm9yMiay9Is_3\tskateboard\nGnTFmN4UNrI_0\tmotorcycle\nGn6ltyIKgcs_0\tperson\nGoXxeDaopwo_1\tperson\nGokzf7T4oVU_0\tcat\nGpE5cmO_2kQ_0\tskateboard\nGpE5cmO_2kQ_1\tskateboard\nGq7NQWGviWU_0\ttrain\nGsLJXtf6RC0_0\tperson\nGuMiw_OwxlM_0\tknife\nGubE6GTKTVc_0\tperson\nGubjV1tFrVA_1\tumbrella\nGvRQ4QZHPGc_8\tbicycle\nGvjv4DJftts_1\tcat\nGv5P6ORl-1M_0\tperson\nGwAGS0xPZDQ_0\tperson\nGwY5WqLjTcM_1\tcow\nGwY5WqLjTcM_0\tcow\nG0C4XEsjKGU_1\tbird\nG0i_9qeBwm8_0\tairplane\nG0sAxRZi6m4_0\tcar\nG1doEZFbv70_0\tairplane\nG1gPj-UK_gw_0\tcow\nG107tKapVcQ_0\tgiraffe\nG16fmAfdp9A_1\tzebra\nG16fmAfdp9A_2\tzebra\nG2gyuboBt-E_0\telephant\nG2gyuboBt-E_1\telephant\nG3jqix8WiYE_0\tperson\nG5jg_wMMXmU_0\tperson\nG6iN1OKj_eE_0\telephant\nd0G8DzwenzU_0\tperson\nd2ugQO5Z8M8_0\tairplane\nd3_3kfZ7rkc_0\tboat\nd3_3kfZ7rkc_2\tboat\nd4cTjVsUbIA_0\tperson\nd44bp_UDYOQ_0\tcow\nd6vOtyrW2eQ_0\tmotorcycle\nd6vOtyrW2eQ_1\tmotorcycle\nd6vTXY--7zw_6\ttruck\nd6xRfIz84Og_1\tcat\nd8GWgCsv0fo_0\tperson\nd8kSiPkTvek_1\tbus\nd9IW6kCjfmA_0\tknife\nd9
IW6kCjfmA_1\tknife\nd9YRdtwcTOo_0\tmotorcycle\nd-CkujEJl24_0\tzebra\nd-6-T4gkBTk_1\tcow\nd_eu3LZxECY_0\tmotorcycle\nd_eu3LZxECY_1\tmotorcycle\neBIZSQg7pV8_0\tairplane\neBSijengaq4_0\tperson\neBVE2h6i3Do_0\tperson\neByIZzEh-DA_1\tdog\neByIZzEh-DA_2\tdog\neCzDpCe6xvc_0\thorse\neDUR6UTxYhk_0\tperson\neFXZRDC38No_0\tbird\neGVUtZXFcmY_1\tcat\neJn0yGDjytc_0\tcat\neKcJ2alScW8_0\tcow\neL4uMBEG4gE_0\tbus\neMsvM8G2Z0s_0\ttruck\neM0KTbh6EZE_0\tperson\neN0JRkzxVPw_0\telephant\neOeuY4ZbTt8_0\tbird\nePiG-qPeJ6c_1\telephant\nePiG-qPeJ6c_3\telephant\neQEBmp37ZMQ_0\tperson\neQ6zyKVuU2s_0\tperson\neROdacH1GEk_1\thorse\neRsf1_omRf4_2\telephant\neRsf1_omRf4_5\telephant\neRsf1_omRf4_6\telephant\neRsf1_omRf4_9\telephant\neRsf1_omRf4_12\telephant\neRsf1_omRf4_13\telephant\neRsf1_omRf4_14\telephant\neRsf1_omRf4_15\telephant\neTfXd1DQ6mc_0\tdog\neU_B2dXyBkI_0\telephant\neVAEQdogSqk_1\tperson\neVLFX7RZOJM_0\tperson\neVnnuxmvpM8_0\tperson\neVnnuxmvpM8_1\tperson\neVnnuxmvpM8_2\tperson\neWU6Kk9K6lI_0\tairplane\neWZHute7e6Q_0\tperson\neXAJwsjltWs_1\tairplane\neXAJwsjltWs_7\tairplane\neXvofXrEuU8_0\tperson\neZFqrD8MAKk_0\thorse\neZFqrD8MAKk_1\thorse\neZc2BPYt4rU_0\tperson\neZ9Qy0zfLb8_1\tdog\neaoH4_TdTt8_0\tperson\nea2xP5nm53M_2\tknife\nea_yr_40TRY_0\tairplane\nebc-oEY_eDM_0\tcow\necksf6PLvhw_1\tdog\nedx1TW6jRFg_0\tperson\nee6Zcz8Pyfk_1\tcow\nee6Zcz8Pyfk_2\tcow\nefczZtAK28w_1\tdog\negbQbEuLDlE_0\tcat\negfoTu4gtZo_0\tbicycle\negg1WCEyuTw_0\tperson\negmCEe7OgiE_0\tperson\nehxHGWKtaAg_0\tperson\neh9YpbAcMZE_0\tperson\nejRwmx3kUI8_0\tperson\nej0xIcEXWiU_0\thorse\nekfKlK5w3Lg_0\tperson\nekwoV0dpRwI_0\tperson\nekwoV0dpRwI_1\tperson\nek7bnCHGZq0_0\tskateboard\nelB6RfDJA6M_1\tdog\neljiGrMEYiQ_0\tperson\neljiGrMEYiQ_1\tperson\nemISA6YzHZ4_0\tbus\nemISA6YzHZ4_2\tbus\neoIk6xjgQ-4_3\tbicycle\neomNxgG_ivE_1\tumbrella\neomNxgG_ivE_2\tumbrella\neomNxgG_ivE_3\tumbrella\ner7oQRfciJ8_1\tperson\neuESct6MMNg_0\tperson\neuU-dtl6yyA_0\tperson\nevyGgkwoEpU_1\thorse\nex_t3nR28rg_0\tbird\nex_t3nR28rg_1\tbird\nex_t3nR28rg_2\tbird\nezrZuVfbOPs_0\tperson\nezyFfdIkCCQ_0\tcow\nez5RcUDpMoI_0\tbear\nez5RcUDpMoI_4\tbear\ne0cc8KmRgDE_0\tperson\ne0cc8KmRgDE_1\tperson\ne1VJlGQGYTA_0\tumbrella\ne37RxtyP9nk_2\tperson\ne37RxtyP9nk_1\tperson\ne5Q4wIVJR40_0\tperson\ne5a3Z_wlpUU_0\tperson\ne6FwS_DOE-U_1\thorse\ne6FwS_DOE-U_0\thorse\ne6xVrcpMa9Y_0\tcat\ne8Bc9zwTFnE_0\tperson\ne9G1bOd8GlA_0\tcar\ne9QeTOo4XBE_0\tperson\nfBYtizIh0wc_0\tcow\nfCVsRanBID8_0\tperson\nfDWKYttA3fM_1\tumbrella\nfEA-xCaKqfI_0\ttrain\nfEWxV64teMY_0\tdog\nfEpH1AFdSqs_0\tperson\nfFGF5gVW6UU_2\tbicycle\nfFGF5gVW6UU_0\tbicycle\nfFGF5gVW6UU_1\tbicycle\nfFIVNddMFuc_0\tperson\nfFT1LpdsEhQ_1\tcow\nfFmghP5NQVA_1\thorse\nfFw23dFiBDs_0\tperson\nfGJKT5ttUQw_0\tperson\nfHFCYOUh3vU_0\ttruck\nfJJuwfeoaWI_0\tcat\nfJnC2nKYQVQ_0\tmotorcycle\nfMl60_fkMfc_0\tknife\nfMu0OmctSTI_1\tairplane\nfNTptXtpsoo_0\tcow\nfOyaDea7Al4_0\tperson\nfPA_KgXi5v8_0\tbird\nfPA_KgXi5v8_2\tbird\nfP7EpJzJt0A_0\thorse\nfQRAi5pN1Fg_0\tbicycle\nfQRAi5pN1Fg_1\tbicycle\nfRB4jD1Uecw_0\tperson\nfRSu9-lyuaU_0\ttruck\nfRoEX_9tHtM_0\tperson\nfSB_aY8HhJI_0\tperson\nfSFjxB1XU2E_0\tperson\nfTd-8VbsXus_1\tairplane\nfUNAhHKf_OA_0\tcow\nfUva5AKNiPE_0\tperson\nfUva5AKNiPE_1\tperson\nfU8NxbaMKu0_0\tbus\nfWD8TEXWtek_0\tbear\nfYBeigFqN7Q_0\ttrain\nfYBeigFqN7Q_1\ttrain\nfYWFh5BSEyg_1\tcow\nfYup3iPmtHc_0\tperson\nfbAOGfYPur0_0\tperson\nfcFwbcMNdUo_0\tbird\nfcFwbcMNdUo_1\tbird\nfdMa18fwj14_0\tperson\nfdQFJz9IOso_0\tumbrella\nfd73v3-Qjqk_0\tknife\nfeMxoQY38A8_0\tperson\nfeMxoQY38A8_1\tperson\nfeNEI7bD5HI_0\tbus\nfeO8Ip4MOn4_0\tcat\nffQKiGKTDaA_0\tbird\nffr6_
q8liAc_0\tperson\nffr6_q8liAc_1\thorse\nfhVVVY5XhDI_1\tknife\nfhWE0XDoxjM_0\tairplane\nfh9tibERtYI_0\tperson\nfiKs6mdtsmM_0\tcow\nfiVKh-Q-iY0_0\tmotorcycle\nfkGWb9_HVsA_0\telephant\nfk85Ace_-LM_0\tdog\nfmE9seWSDfs_0\tumbrella\nfmosIu7__Wc_1\tperson\nfmrqs2YvNCQ_0\tperson\nfm4syrPib5M_0\tperson\nfnKNDlQq-JY_0\tperson\nfoWPkPNDqyU_0\tbird\nfoWPkPNDqyU_1\tbird\nfojim3ViD7Y_0\tperson\nfpI0N9Lv5V8_0\thorse\nfpv4fALQXpQ_0\tperson\nfqWa-DUPAGw_0\tperson\nG8IUU0gjlEI_3\tboat\nG88QbXTQ6LI_0\tskateboard\nG9Sdd3czaTk_0\tdog\nG-kF2D98oms_1\telephant\nG-2yXvawYec_0\tperson\nG-5iXA4ERtM_0\ttrain\nG__uy4I0Kzw_0\tperson\nHAOmPeNNjNc_0\tbus\nHBUeO1WOFFk_0\tmotorcycle\nHBbWtsju37w_0\tboat\nHBw-J_3WlCY_0\tcat\nHF8ZrMgnyo8_0\tdog\nHJYmTdBHVvU_1\telephant\nHJYmTdBHVvU_2\telephant\nHJ08tJU-IIA_0\tdog\nHKNkm0t39B4_0\tcow\nHKRKZksEGro_0\tperson\nHMfFCe-og9A_1\tbus\nHMt7kgP0MC0_0\tperson\nHM8XKdebDvI_0\tboat\nHNBF7AppAQQ_0\tdog\nHNheLARZ64w_0\tbicycle\nHNheLARZ64w_2\tbicycle\nHN-3LaZVuCs_0\tcar\nHONOO3gmDec_1\tperson\nHP6UlpPulc8_0\tbicycle\nHQ3nHqG24O0_1\tcow\nHRF40e3Tbvw_0\tbicycle\nHRF40e3Tbvw_2\tbicycle\nHRRhkyr7U5E_2\ttrain\nHRcVM9md3Xg_0\tcow\nHTrUPWOXlvI_1\tperson\nHTrUPWOXlvI_0\tperson\nHULLjmpSRUI_0\tcow\nHUssZ9c2Qvs_0\ttruck\nHW8Z7IdfuIg_0\tperson\nHYCFQjnuXBI_0\ttruck\nHY4XBjJWJYg_0\ttruck\nHY9NQ2zNtGc_0\tcat\nHZVvEd_Tg_g_0\tperson\nHZngEEoQWDA_0\tperson\nHaMmo5SdpUo_0\tperson\nHaVnQ_P5HdQ_0\ttrain\nHacYwonTy6w_1\tskateboard\nHbWinZWeK2U_1\tdog\nHbhmAMorGaw_0\tperson\nHeOWa0NNB0g_0\tperson\nHg0fRYqZQ3U_0\tperson\nHi384VDSwXw_1\tbird\nHjo95Vo38qU_0\tperson\nHksncw-BlKU_0\tgiraffe\nHlWb7xQHFKI_0\tdog\nHmH4hitBoc4_0\tperson\nHoSTe-9VUJA_0\tcow\nHpdyNV4GqbM_0\tperson\nHpdyNV4GqbM_1\tperson\nHsGPGwN7vSk_0\tperson\nHugie4Q6leo_0\tbicycle\nHvKC4fLwUYw_1\tperson\nHvKC4fLwUYw_0\tperson\nHvOisoEmjKg_1\tairplane\nHvU4Jz4Gd1k_0\tcow\nHv_d6KPoSgA_0\tskateboard\nHwZUDp7yxxk_0\tperson\nHxPskaUPSXg_0\tcow\nHyHQRrpWhpk_0\tboat\nHylH7-rD0wA_0\tbird\nHzEm2GlGzhc_1\ttruck\nHzTD_opfrqI_0\tcar\nH0QTCKxJmLY_1\ttrain\nH1Oxjm0NqCg_0\tperson\nH2GwgpAKbzY_0\tdog\nH3HrWs1HITE_0\tcow\nH3S_DkPBWtw_0\telephant\nH3S_DkPBWtw_7\telephant\nH3S_DkPBWtw_1\telephant\nH3S_DkPBWtw_2\telephant\nH3S_DkPBWtw_3\telephant\nH3S_DkPBWtw_4\telephant\nH3S_DkPBWtw_5\telephant\nH3S_DkPBWtw_6\telephant\nH3XF5rAtuJA_2\tperson\nH3XF5rAtuJA_0\tperson\nH3a-C6RRYyo_0\tperson\nH5mmSHRHeOA_0\tperson\nH6TuJxifX64_0\ttrain\nH6w4nf5H4U4_0\tbird\nH6y9C6Ndy2A_0\tbird\nH6y9C6Ndy2A_1\tbird\nH7XZ5716KnI_0\tperson\nH7z05uOIPRM_1\ttrain\nH92s5sHsotk_0\tairplane\nH-4EZAh3ZiE_0\tbus\nIA1FFP5WN-4_0\tbear\nIA1FFP5WN-4_2\tbear\nICj693xC5DY_2\tairplane\nICj693xC5DY_0\tairplane\nICj693xC5DY_1\tairplane\nICxHfkE0XCo_0\tperson\nIDx8_34ETTQ_0\tperson\nIEyymbAxp24_0\tdog\nIFS0QSfnbaM_4\tknife\nIFS3ILjlHkY_2\ttruck\nIF_auR-0fxM_0\tknife\nIGv9j-RQi0k_0\tdog\nIG0UmL5bvEo_0\tcat\nIHFF7DOpF4Q_0\tmotorcycle\nIHmYV5ymU08_0\tcow\nIKEUMXjIyTQ_0\tcar\nILZvGBKYYrE_4\tbus\nILZvGBKYYrE_0\tbus\nILZvGBKYYrE_1\tbus\nILZvGBKYYrE_3\tbus\nIMTbwAOJNIc_1\ttrain\nIMh4AHUZ2HQ_0\tperson\nIM4EBlgTTOg_0\tbus\nINlrdk7hgl4_0\tknife\nIOQt3fFTSVc_0\thorse\nIO7-lFsWvl0_0\tbicycle\nIO7-lFsWvl0_2\tbicycle\nIPEJs-vLCV4_0\ttruck\nIPEJs-vLCV4_1\ttruck\nIRpgjSP4pLI_0\tperson\nIUJGm3Iu0Bs_1\tbicycle\nIUgsoj74aWQ_0\tperson\nIVlnjlVA5rc_1\tbicycle\nIXP1ML1tdZQ_0\tbus\nIXRxjnkOJeo_1\tmotorcycle\nIXenlPUsqrc_0\tperson\nIZvOv7tCr00_1\ttrain\nIcRjjKSX5uc_1\tperson\nIcRjjKSX5uc_0\tperson\nIcnle27cmMM_0\tbicycle\nIdVZJW1HC9E_0\tairplane\nIdVkEz2IF7w_0\tcar\nIeb9oZ9eB8I_0\tdog\nIfWSlkR8DbU_0\thorse\nIf1zPOV0idg_0\thorse\nIf1zPOV
0idg_1\thorse\nIh2gG0269H8_0\tbus\nIjQXXK4uYVY_0\tdog\nIlMHPX2VcGw_0\telephant\nIluTkrIqsVg_1\telephant\nIluTkrIqsVg_3\telephant\nIluTkrIqsVg_6\telephant\nIo7bj1jNpPU_0\tcar\nIpjQJZ42zyQ_0\telephant\nIpjQJZ42zyQ_1\telephant\nIpjQJZ42zyQ_2\telephant\nIpjQJZ42zyQ_3\telephant\nIpwI5VTWHLc_0\thorse\nIpwI5VTWHLc_2\thorse\nIqy4PPX-Tlc_0\tperson\nIsHTpd2cnvI_0\ttrain\nIthz7KSWCxU_0\tbus\nIudK7ch_IIg_1\tairplane\nIvRDw_IA0_s_0\tcow\nIwve-3lTmMk_0\tperson\nIyLshk4jlyo_0\tcat\nIygCvE4_amo_2\tbird\nIygCvE4_amo_3\tbird\nIyjFl1Hhk3Q_0\tperson\nIz4XK2zNDUU_0\tperson\nI1wuUCQbXLc_0\tumbrella\nI2DkTg8wPnI_0\tperson\nI2WoCDTXONA_0\tperson\nI2WoCDTXONA_1\tperson\nI2lh579NY2s_0\tbird\nI45pfwCBczo_0\tperson\nI6ESaCg4z_8_0\tperson\nI6TvXxQTtZQ_1\thorse\nI6TvXxQTtZQ_0\thorse\nI6TvXxQTtZQ_2\thorse\nI8OfOokt6YU_0\tperson\nI8XhyDacLtU_1\tbird\nI8m0QjcQlSo_3\tbicycle\nI8m0QjcQlSo_4\tbicycle\nI9ivT_P5G18_0\tperson\nI_k5qXHxb0Y_2\tknife\nI_k5qXHxb0Y_0\tknife\nJBkwLPruJe0_0\tperson\nJBlDwXJFbQc_1\tumbrella\nJDZiLsus2es_1\tskateboard\nJDvfPX9cFDg_0\tdog\nJEpTSJRO3co_0\tperson\nJG2tVzjxhao_0\tbird\nfsAEg5w8xTg_0\tperson\nfsCwAYYI4js_0\tperson\nfsKTO8ksQ90_0\tperson\nftMQOwvHDF8_1\tcar\nftns38_MSTM_0\tcow\nfvxc7ruCiYk_0\tcow\nfvxc7ruCiYk_3\tcow\nfv8aFklHmko_0\tskateboard\nfwEvL-luHlw_0\tairplane\nfwEvL-luHlw_1\tairplane\nfwt8LzF8Mic_0\tperson\nfyZImQFj_Y8_0\tcow\nfycK7kJWV1I_0\tumbrella\nfzr3kw3BDDo_1\tairplane\nfz6ONSUlvNY_0\tperson\nf0i5E4DOFc8_0\tbus\nf2SctRCBZQc_0\tcar\nf3Z5d9I7rIw_0\tknife\nf4fxmsxPzrg_2\telephant\nf5LEkr56Efg_0\tperson\nf5Uz-TuMQ0Y_0\thorse\nf5ZpGBYuJ7o_0\tboat\nf5kAHBPObsw_1\tcow\nf6fZjMRJgoM_0\thorse\nf63aow5BRAI_5\tbus\nf65rTlprptk_0\thorse\nf7yNS6ltUFk_0\tperson\nf8H7Ns8cw-c_1\ttrain\nf8rXEKktSCg_0\telephant\nf_VqZJyJ4GM_0\tmotorcycle\ngAHcWn06srk_0\tperson\ngB0-eGpMj50_0\tperson\ngB2asNpe3zY_0\tperson\ngB7jSQgkcMM_1\thorse\ngCDC8R7IB7k_0\tperson\ngCwe-o1nqBc_0\tmotorcycle\ngCwe-o1nqBc_1\tmotorcycle\ngC9z8IzG83s_2\tbicycle\ngDEk1TWuZug_2\tperson\ngDG5Xr2p2y8_0\telephant\ngDHnBnqogX0_1\tairplane\ngDHnBnqogX0_0\tairplane\ngDbZj1O36VU_0\tairplane\ngDihz5aZLyA_0\tbus\ngDihz5aZLyA_2\tbus\ngEkiX2yFQm0_0\tcat\ngEnLlmMhxfE_0\tperson\ngGNmKI2M8i4_0\tperson\ngGd6hYCKdEs_0\tbird\ngHMCfvdZzMM_1\tperson\ngHYzGPx8f_4_0\tzebra\ngHYzGPx8f_4_1\tzebra\ngIx12Q8A3p8_1\tperson\ngJwtAwSqEow_0\ttrain\ngKAPbj9esXI_0\tskateboard\ngLqb3YuVttM_0\tumbrella\ngMRigFNGMeY_0\tperson\ngNfQargrILo_1\tcar\ngOFgWsujZaI_0\tcat\ngOWc7VBEwMo_0\tcar\ngPEMf91dil8_1\thorse\ngPSB23kv5Uc_0\tperson\ngPhL52Mj1_A_1\tmotorcycle\ngQ1qmNZzaTo_0\tboat\ngRDFlfzM_iI_4\telephant\ngRDFlfzM_iI_6\telephant\ngRDFlfzM_iI_1\telephant\ngRDFlfzM_iI_3\telephant\ngRMJhsEuiAc_0\tmotorcycle\ngRMJhsEuiAc_1\tmotorcycle\ngRMJhsEuiAc_6\tmotorcycle\ngR29_U82QeE_1\thorse\ngSJbrV0vy8M_0\tperson\ngSz16yrF9yA_0\tperson\ngT0yjYUmf90_0\tcow\ngUGlSiBvfOs_1\tmotorcycle\ngU8s5nxyBDk_0\tairplane\ngU8s5nxyBDk_1\tairplane\ngV3CcNeVZcY_0\telephant\ngV3CcNeVZcY_1\telephant\ngWkTSRUqxoo_0\tperson\ngW6HdCsty0U_0\tknife\ngYLohMps12s_0\telephant\ngYLohMps12s_3\telephant\ngYLohMps12s_4\telephant\ngYLohMps12s_1\telephant\ngYLohMps12s_2\telephant\ngaKGYmLxJVU_3\tbicycle\ngagJEV--3Pw_0\tperson\ngdAVi92ZfSc_0\thorse\ngdx96NpU6BY_6\ttrain\ngd4UfPes3YI_0\tcow\ngeEXytMwfq0_0\tperson\ngePAI8wYSdw_0\tperson\ngfTVuceAzNs_0\telephant\ngg8YzsSulrQ_0\ttruck\nghciPMerSc0_0\ttruck\ngiWDg00GIDw_1\tskateboard\ngig9B4ecK3w_0\tperson\ngiy_SOmkBY8_0\tumbrella\ngjnyg97XwnA_0\tperson\ngk-cycr3xjo_0\tperson\ngmVDmxVI7n0_0\telephant\ngpV4Qlx6YrA_6\tbus\ngqLSqmK3m74_0\tmotorcycle\ngqZYY0m_TuM_0\tmotorcycle\n
gsrvWcnpNP4_1\tmotorcycle\ngsrvWcnpNP4_0\tmotorcycle\ngtVr7urU8c8_0\tperson\nguDQk0hVgU0_0\tbird\nguFTeFvjr9Y_0\tbird\ngu3DTnVjNQM_0\tknife\ngwXwH2Cs3BY_0\tknife\ngxHGnBrpPZs_1\tairplane\ngxHGnBrpPZs_2\tairplane\ngxKuLTUNhp4_0\thorse\ngx7PFNpHd_A_0\tperson\ngyaP7qiRxfY_0\tcow\ng1OZWFLSspQ_0\tmotorcycle\ng1rQZNA6yyo_6\tcow\ng1rQZNA6yyo_0\tcow\ng1rQZNA6yyo_1\tcow\ng1rQZNA6yyo_2\tcow\ng1rQZNA6yyo_3\tcow\ng1rQZNA6yyo_4\tcow\ng1rQZNA6yyo_5\tcow\ng3HXJNMlAsM_0\tairplane\ng3oqxu4AhBw_0\tperson\ng3swsx-acTI_1\tdog\ng3swsx-acTI_0\tdog\ng3vbaqnLXn8_0\tcow\ng4bayrAEhIU_0\tumbrella\ng5rUJOptHXQ_0\thorse\ng5ty_7So5Dw_0\tcow\ng51pzrSssl4_0\tperson\ng8M5d--ghFM_0\tperson\ng8vKB3IU1JY_0\thorse\ng8wHQVpij-I_0\tperson\ng9eN0FHn4-E_0\tdog\ng-EAZ6gVcic_0\tmotorcycle\ng-pVcRyPQG8_0\tcow\ng-yHAyCA2KI_1\thorse\ng_C47ek7TmI_1\tknife\ng_C47ek7TmI_4\tknife\ng_C47ek7TmI_5\tknife\ng_QHWoQgmFQ_0\tperson\ng_QHWoQgmFQ_1\tperson\ng_Tk-SESaYI_0\tperson\nhBHt6mnfUeo_0\tbus\nhBMZHx3_cTs_0\ttrain\nhC69bGTvLBo_0\tskateboard\nhD3Bn03GXNQ_1\tdog\nhFNAxcRpGBM_0\tskateboard\nhFSygfNIY_Y_0\tskateboard\nhFex_TS-aUo_0\tperson\nhGnscWmehTI_0\tcar\nhG9efPyerw4_1\thorse\nhHdBCtElIQg_0\tboat\nhHlqyr11RiI_0\tperson\nhIWM6v4zcSM_0\telephant\nhKoGkl1wyCU_0\tperson\nhON0t9Dzay4_0\tmotorcycle\nhP1ViN_WadY_0\tcow\nhR-utsUhYSg_0\tperson\nhSAUbt6-Yjc_0\tknife\nhSAUbt6-Yjc_1\tknife\nhSeHymINF98_1\tbus\nhTaEY4YCVqM_0\tairplane\nhUjzfhyM30Q_0\tairplane\nhUjzfhyM30Q_4\tairplane\nhUxguQsLvcs_4\tknife\nhUxguQsLvcs_5\tknife\nhUyAVmRxAzM_0\tperson\nhU_dAA1A0X0_0\tperson\nhU_9cs_qw1w_0\tperson\nhVjyHhYH6Ss_1\tairplane\nhVjyHhYH6Ss_2\tairplane\nhVowH5-Ss4I_0\ttrain\nhV4tEsm-F5s_0\tairplane\nhZdxBk4cjmg_0\tbus\nhaiW7jpl3wY_0\tperson\nhcJBaxNIvE4_1\tperson\nhcJBaxNIvE4_0\tperson\nhcV4RZPeRbo_0\tairplane\nhcuLD1cn9GA_0\tperson\nhdUc4uUYh0E_0\tboat\nhfWfYFG2O94_0\tperson\nhgagtwzScGQ_0\tperson\nhhFOwnYOLl0_0\tgiraffe\nhhLyE41H8nE_0\tmotorcycle\nhhNlg3Ws9Dc_0\tperson\nhhyVc2wsXVk_0\thorse\nhhyVc2wsXVk_1\thorse\nhh432zDMgPo_0\ttrain\nhiKbm0rqEb4_3\tskateboard\nhiN_kULL84o_5\tumbrella\nhiN_kULL84o_4\tumbrella\nhkEV_E85Jzw_0\tcar\nhkSv_YxmN7w_0\tperson\nhlZDJrpJzPU_0\tperson\nhljwk2WbXGY_0\tperson\nhmSeUlyLLak_0\ttrain\nhnZvUHrA3CY_0\tperson\nho6sg-47RD0_0\tairplane\nhqNhKf3a69Q_2\ttruck\nhqYyvTeOvas_0\tbear\nhqaNlwG0DNU_1\tperson\nhqrmbVw_EwQ_0\tcat\nJIuyqZCU5zY_0\tcow\nJKiG_pk4lSE_0\tperson\nJKmvEldBeEQ_0\tcow\nJKsodtdUW-o_0\tboat\nJMLFZcONQAs_2\tskateboard\nJMLFZcONQAs_5\tskateboard\nJMMci7hryUQ_0\tmotorcycle\nJMMci7hryUQ_1\tmotorcycle\nJMMci7hryUQ_2\tmotorcycle\nJNUhCGqPlFg_0\tbicycle\nJPHPd13gaL8_0\tcar\nJQrDalAaP4w_0\tperson\nJQrDalAaP4w_1\tperson\nJQz6IarIr4E_1\tperson\nJRAVv2LgiGo_0\tskateboard\nJRUvqZtBMrM_1\tknife\nJR0QfXOOmaA_0\tperson\nJSml3dguiUk_0\tmotorcycle\nJTFT_iJGFUE_0\tperson\nJUdUxjC2LRE_0\tbus\nJWU6vdEt_OU_0\tperson\nJWgjcmMh62o_0\ttrain\nJWgjcmMh62o_3\ttrain\nJW0-hEA4v9A_0\tperson\nJXIh3fJ4Jv0_0\tperson\nJX8ODdMUi7g_0\tbird\nJZC15tOV-eg_0\thorse\nJZMOzYwcTA0_0\tperson\nJasH0KtinHY_0\tairplane\nJasH0KtinHY_3\tairplane\nJa5jdE_8qio_0\tperson\nJbyTZ-esDXM_0\ttruck\nJbyTZ-esDXM_1\ttruck\nJb93SMKg5-k_0\tperson\nJcVOyLTTvKA_0\tperson\nJc18AfXzLZU_0\tperson\nJc18AfXzLZU_1\tperson\nJd7uOTcPvY8_1\tcar\nJeWRfjjRMQk_0\tperson\nJerVzlWZwac_0\tbus\nJe-lnjK_8fk_0\tperson\nJfjkltN0lZc_2\thorse\nJfobA6aKaas_0\tdog\nJftQEHHdO5w_0\ttruck\nJgaE8KDwg7k_1\tbird\nJgaE8KDwg7k_2\tbird\nJgc2PQ8Swbo_0\tcow\nJgkj9pj3-tc_1\thorse\nJhdyYrqxn_g_0\tmotorcycle\nJh7o2iR-lRg_0\tperson\nJijsSnHthXE_0\ttrain\nJio_xBodQxY_0\tperson\nJjQ8bdq_eXk_0\tperson\nJjtkwX4npyw_0\tperson
\nJlG7Wzz4uU8_0\tcar\nJlG7Wzz4uU8_2\tcar\nJmkUuTj-Nks_0\tumbrella\nJmtuhGXlqmY_1\tairplane\nJnNJksYeB18_0\tcar\nJoKod4XDE6o_3\tbird\nJoKod4XDE6o_0\tbird\nJoKod4XDE6o_2\tbird\nJp6_g7oF2lQ_0\tcow\nJqEprl56N4I_0\tskateboard\nJrIoaRmcs6o_0\tcow\nJrNq6Z5YSoc_0\tperson\nJrUHo8zVwpo_0\tbus\nJsjz8hiE_iU_0\tperson\nJt7Ojtx0TMs_1\tcar\nJt7Ojtx0TMs_3\tcar\nJwBYrXUHdZ8_1\thorse\nJxTKws5Dx_8_0\tcat\nJxjXZYfiem4_0\tdog\nJx9mLWFxpnc_0\tdog\nJyYBZBogBvs_1\tboat\nJyduNnkZOiY_0\tperson\nJyrP5u2MuSo_0\tmotorcycle\nJzcc0pjgA5c_0\tperson\nJzjRC1xYwy8_0\tdog\nJ02u46SlewE_0\tperson\nJ1GtEDNcsHQ_1\thorse\nJ2JOoOxaJdw_0\tperson\nJ2bB5BgR-5Q_0\tbus\nJ2hdK_vuyyw_0\tmotorcycle\nJ2ycUTr0lJQ_0\tcat\nJ4T_QA6J7kw_0\tboat\nJ4T_QA6J7kw_1\tboat\nJ4T_QA6J7kw_2\tboat\nJ40neYxbEYA_0\tskateboard\nJ5-Z9tNISPw_0\tcar\nJ6klPNMhLKc_0\tcow\nJ7I-QXddTIk_0\tperson\nJ7hnNI0jtws_0\tperson\nJ8ITxacusCI_1\tperson\nJ8ITxacusCI_0\tperson\nJ9-8Qe3BWoI_0\tbicycle\nKARqX_agLpU_0\tknife\nKAgU6SrQTlQ_0\tumbrella\nKAgU6SrQTlQ_1\tumbrella\nKArVkjxSGpM_0\tperson\nKBCIbwknDew_1\tbicycle\nKCeuwWEv3ZU_0\tperson\nKCi4f4Hp6oA_0\tairplane\nKC5ECqMiTLU_0\tskateboard\nKD84e88aqHU_0\tperson\nKD84e88aqHU_1\tperson\nKEpHRYH8r28_0\tgiraffe\nKGdIJzBVugY_0\ttruck\nKHqFOBeHCwU_0\tboat\nKIOilXstQLY_0\tperson\nKIOilXstQLY_1\tperson\nKJ2kEj3C5HU_0\tairplane\nKKWUDcCI6yU_0\tcat\nKML2msVr5mE_2\telephant\nKMNAnjpGqv4_2\ttruck\nKNIVWRv3awA_0\ttruck\nKOmUta2sIgk_0\tperson\nKOsm1GUs46s_0\tmotorcycle\nKOza4PGcE0M_1\tbear\nKPLDdfk8hIg_0\ttrain\nKPLDdfk8hIg_1\ttrain\nKP7RzxyTTAU_1\tairplane\nKRKxqkfpetI_0\tperson\nKRNWPLnvZz4_0\tperson\nKR7Ah1hw5gA_0\tperson\nKS8S3STq2W4_0\tbird\nKS8S3STq2W4_1\tbird\nKTkhMglNlCE_0\tperson\nKTpwnsz498Q_4\thorse\nKTpwnsz498Q_6\thorse\nKWYD2iyUmgk_0\thorse\nKXIJLUzQi5Q_0\tperson\nKXMlBQiVeEg_0\ttrain\nKXPGShfFlU8_0\tperson\nKX9MjIikBU8_3\tbicycle\nKYc-vKtN0DI_0\tperson\nKY4mXNDM8I0_6\telephant\nKZdOpoUJ3Nk_0\tperson\nKcg7gY3WD7M_0\tperson\nKcg7gY3WD7M_1\tperson\nKeJWqAV0EgA_4\tumbrella\nKeJWqAV0EgA_6\tumbrella\nKedkADy9tBc_2\tknife\nKedkADy9tBc_4\tknife\nKgDguip9mZM_1\thorse\nKgDguip9mZM_2\thorse\nKg0XH4mez1A_0\tcow\nKho8jpdZzTs_0\tskateboard\nKjd7D98QULc_0\tairplane\nKkdLE8EkzQ8_0\tcat\nKkw7ZPCEz5w_0\tperson\nKk-2ajLfeh8_0\tcat\nKk_LtYOgQXA_0\tboat\nKmLYFD7xykY_1\tcar\nKmwqg1uRPRE_0\tperson\nKnQuff1ffzM_0\tskateboard\nKoRqIzHBQks_0\ttrain\nKoq5YYiN1tc_0\ttrain\nKpHpGcL_jEc_4\tbird\nKpHpGcL_jEc_3\tbird\nKpfTioA2qKw_4\telephant\nKpfTioA2qKw_5\telephant\nKpfTioA2qKw_0\telephant\nKpfTioA2qKw_1\telephant\nKpfTioA2qKw_2\telephant\nKpfTioA2qKw_3\telephant\nKppX5i4QRZ0_0\tumbrella\nKqsBJAhU_Dc_0\tcat\nKrRVwTPG26w_3\tdog\nKsE43Lli_3U_2\thorse\nKsE43Lli_3U_3\thorse\nKskL-dN784o_0\tairplane\nKtfQRtfJQ8s_2\tskateboard\nKxDh7a8_AmU_0\tperson\nKy4ahEexJUc_0\tairplane\nKzDLvBPcQew_2\tknife\nKzMFSHS4xVs_0\tbird\nKzOxVUsduDY_3\tknife\nKzt2eSUr1rY_0\tdog\nK0IvSLIQbgQ_0\tbird\nK0SktTNMXQU_0\tmotorcycle\nK2WsSTHs45g_1\telephant\nK2WsSTHs45g_3\telephant\nK2oIvJd-d-A_0\tperson\nK4IN8pNA--U_1\tperson\nK5C2Y3JvXCU_0\tskateboard\nK7TOmJ6RB_8_0\tskateboard\nK89ScUqJx5E_0\tperson\nK8_u8_NkoAk_1\ttrain\nK9L-BYQcepo_0\tbear\nK9pgB6KH-EY_0\tcow\nK-laAofNBgs_0\thorse\nK-xigT3f2VA_0\thorse\nK-0pug6xNEI_3\ttrain\nhuFyV9NBOBY_0\tperson\nhua1XfGRDoc_0\thorse\nhulGMGXPaBE_1\telephant\nhvXgMKsetW8_0\telephant\nhxBjbg6s174_0\tperson\nhyNwXcKelY0_1\ttrain\nhyNwXcKelY0_0\ttrain\nhzUpr73wZz0_0\tairplane\nh0jkFTI3qmI_1\thorse\nh1Hv9HnMe70_0\tcar\nh1zuISckIeI_0\tbus\nh10iwpJO4pQ_0\ttrain\nh2vHhQ7_MT4_0\tskateboard\nh3Fo82UBMRY_0\tdog\nh3IHNdoTXT0_0\tperson\nh3PBWibdVUc_0\ttrain\nh3RgUc0
oY-c_1\tknife\nh3RgUc0oY-c_2\tknife\nh3t75PNg778_0\tperson\nh3uSlke3koc_0\tmotorcycle\nh4qpt2FEbC0_1\telephant\nh5JnAInpuSo_0\tmotorcycle\nh5JnAInpuSo_1\tmotorcycle\nh7_4qHh7Vas_1\ttruck\nh8TnGCoSVeQ_0\tairplane\nh8fKxUGKz8k_0\tmotorcycle\nh8fKxUGKz8k_1\tmotorcycle\nh-pm7wD31Ss_3\ttrain\nh-pm7wD31Ss_0\ttrain\nh-pm7wD31Ss_1\ttrain\nh-pm7wD31Ss_2\ttrain\nh_VG9OpleKc_0\tmotorcycle\nh_VG9OpleKc_1\tmotorcycle\niAZV9nCf3RE_0\tmotorcycle\niA7evYzMygE_2\tknife\niDBpYSvahjE_0\tperson\niDHjOnhAKA8_1\tskateboard\niE75sptNwbs_1\ttruck\niE75sptNwbs_2\ttruck\niFVwtlc6IYE_0\thorse\niFdOAHM4xDg_0\tperson\niFwPDZE4778_0\tskateboard\niG4PvtWoxG8_3\tcow\niH6Vlg0k330_3\tdog\niH6Vlg0k330_5\tdog\niH6Vlg0k330_6\tdog\niIWFuFa7Z4M_2\tperson\niIWFuFa7Z4M_0\tperson\niIWFuFa7Z4M_1\tperson\niIzXR3qRt48_0\tperson\niI08dGJAOMs_4\telephant\niI08dGJAOMs_3\telephant\niJcf4PhS_SQ_0\tperson\niKzpo0D7b_8_0\tcat\niK-7fByPADo_0\tperson\niMdJ5Xlz0hU_0\tknife\niMeNXU67sVg_1\tskateboard\niNiiX6P-kqA_1\tdog\niOxVi3Tq4ts_0\ttrain\niPlXCYJ6F7w_0\tskateboard\niQ-tckw9_uk_0\ttruck\niRzm-CyyW-E_0\tperson\niSNNmpWe3LA_0\tperson\niS7wej_vrvM_0\tperson\niVBDQ5wm-0w_4\tairplane\niVTAxc633DE_0\tperson\niXrLhQgf8HM_0\telephant\niXrLhQgf8HM_1\telephant\niX4gVag7ShI_0\tperson\niYL_l0MxgMY_0\tbird\niYlgi1z6nYI_0\ttruck\niavLgJ3_05c_3\thorse\nicVQnqL0xPI_2\tboat\nidkGZQeYvJ0_0\tskateboard\nigbftnGj4-o_0\tbicycle\nigg-y1toBvA_0\ttruck\nihkqhIpO_hw_0\tperson\nijbDg16cIC8_1\tbus\nik4t0sIEmTI_0\tperson\niltKgr5JKI0_0\tperson\nil5UMLzlQts_0\tbus\nimDfH3So8XU_0\tcar\nimDfH3So8XU_1\tcar\nim4bCIqpJns_1\tbicycle\nim4bCIqpJns_2\tbicycle\nim4bCIqpJns_0\tbicycle\nip1Y5qjDYfQ_0\tairplane\nip_oGEZ6zMw_1\tperson\nirvGAW8bqAw_2\tbus\nisbtQ06yVM8_0\ttruck\nitNqceL9dLM_0\tcow\niuii5XHcAYA_1\tdog\niulQVUJanzg_0\tskateboard\nivGBks6evlo_2\tdog\nivSQWqs_u1I_0\tbear\nivpPLs-cqxA_0\tcar\niwHJDgGVuCA_0\tairplane\niwHJDgGVuCA_1\tairplane\niw7zrlRPMo4_2\thorse\nixgGTHdobNI_0\tperson\niyDedQNhiYI_0\tcat\niyaI71EqLsg_0\tperson\nizHN9JUwtJ8_0\tboat\nizQ74nq9zh4_0\tcow\ni0QLe6YR7yo_0\tperson\ni1OlP2Sq0a0_2\ttruck\ni1xqjStfSsc_0\tperson\ni2SgjtgmsE0_0\tperson\ni5DfO7_n0q8_0\tcow\ni5GkqX44npg_0\tcar\ni5JWZKdNOac_0\tmotorcycle\ni5JWZKdNOac_1\tmotorcycle\ni6mzD2HGWOA_0\tairplane\ni6sR2IY4-Ck_0\tcow\ni8JA178zd0s_0\tcow\ni8Z9-KSMCTA_0\tbicycle\ni8syjc7Erco_0\tmotorcycle\ni-EijejS9Oc_0\tperson\ni-eCNLw3hVU_0\tbird\ni_l48nIXjxw_0\thorse\njBYa-gqwSeY_1\tcow\njCiTA9oIryk_0\telephant\njCuDdMn9sYA_0\tperson\njDGrgBt83DU_7\tcar\njD33e45nuRw_0\tbear\njD5K1zGLtvc_0\tskateboard\njEE_ZlDJ4cc_0\tcow\njEzxW8ylxK8_5\tairplane\njEzxW8ylxK8_1\tairplane\njEz3EToUAg8_0\tperson\njGCLsWhdTds_0\tumbrella\njHhJLxyr960_0\tbicycle\njIqTFAgBLpc_0\tdog\njJkZrKOehcQ_0\tperson\njKD0oOyMl2g_0\tperson\njLO5kFd36OY_0\tbird\njMLgjCQWQY0_0\tperson\njMmH8xfY1kw_0\tcow\njMyxNu6YkEQ_4\tboat\njN5jdXmBv2Y_0\tbird\njN5jdXmBv2Y_1\tbird\njN5jdXmBv2Y_2\tbird\njN5jdXmBv2Y_4\tbird\njN5jdXmBv2Y_6\tbird\njPouarzO-e4_0\tcat\njQPz-9OfXRM_0\tzebra\njRQuCIsXz1c_0\tairplane\njRUeQo3V1bk_0\tperson\njR366TYYsuo_0\tperson\njSkwPkAAiFM_0\tperson\njTNzSUl_zOQ_2\telephant\njUzhGHE_jgE_0\tperson\njVYzDs5YRM4_0\tcat\njVoxxEKEOFo_0\tmotorcycle\njX_taNw8FFg_0\tskateboard\njY4Dh-UAAaY_8\tskateboard\njZBMDKFS5D0_0\tperson\njbp8mHJfHGI_0\tperson\njcYNP_FWkA0_0\tperson\njcne18p2r2c_0\tcat\njdttJqwg_3o_0\tmotorcycle\njfSY_UCtq-w_0\tmotorcycle\njfTXT98Naic_0\tcow\njgQiUggCu7A_0\tcow\njjTgUBAd4D0_0\tcow\njjq2PAHcLiA_1\tperson\njjq2PAHcLiA_0\tperson\njlBGbg_CJz0_5\ttrain\njlBGbg_CJz0_6\ttrain\njlOOUqYlNNY_0\tmotorcycle\njlgECDznb0g_0\tbear\njl7oYVm0X
34_0\tbird\njnU2n55I_LU_0\tdog\njouq30Wmqxg_0\tmotorcycle\njouq30Wmqxg_2\tmotorcycle\njo6o9BwKsUQ_1\telephant\njqPPsrUULY8_0\thorse\njtWUSSp-JiY_0\ttruck\njuS7DvjMPoo_0\tperson\nLCzQs5ybibU_0\thorse\nLDwE_VIc9Zc_0\tcow\nLEL3OcoqV8k_1\tknife\nLEPsxGhXYxY_2\ttruck\nLEPsxGhXYxY_3\ttruck\nLEXpJRLTRak_1\tbear\nLFWlRG2B-w0_0\tbus\nLFWlRG2B-w0_2\tbus\nLFWlRG2B-w0_3\tbus\nLGvjU4PVcC0_0\tboat\nLGvjU4PVcC0_1\tboat\nLGvjU4PVcC0_2\tboat\nLIC3D63R3HU_0\tperson\nLIhhU9j6MI4_1\tcow\nLLD46pbwbiU_0\tperson\nLLiy-k-4-OM_0\ttrain\nLLvpoIlozKU_0\thorse\nLLvpoIlozKU_1\thorse\nLO0IsJZeXhU_0\telephant\nLO0IsJZeXhU_1\telephant\nLPzXMvYB97A_0\tperson\nLTh-XAE8m3M_2\ttrain\nLURSawdSS9k_0\tdog\nLUsb9vk1q6U_0\tknife\nLU539OYJ_z8_0\tperson\nLXHO99b-uAQ_0\thorse\nLXHO99b-uAQ_5\thorse\nLX0HL9qztic_1\tumbrella\nLYPeAbFVTQw_0\tperson\nLZCq31MG3yY_0\tperson\nLZEMKs6H53w_0\tperson\nLZNlxXE0_2s_1\tskateboard\nLZNlxXE0_2s_2\tskateboard\nLZNlxXE0_2s_3\tskateboard\nLZ3S39QfkKA_3\tbicycle\nLbHrVQR9f24_0\tcow\nLcvMMvrPIug_1\tcow\nLdNi4yjT3yE_0\tperson\nLdusiqJFR6I_0\tperson\nLesCJsHdAU0_0\tcat\nLe2725PKYQk_0\tdog\nLfUSKsg8JoQ_0\tcat\nLfhPiqIDAcI_0\tperson\nLgbwFATbwhs_0\tcat\nLhF7TJOwt8o_0\tmotorcycle\nLhOMGvkzP28_0\tperson\nLhOMGvkzP28_1\tperson\nLhkFN7f676g_0\tairplane\nLh1QrEwtBxU_0\tskateboard\nLiS31CevvvA_0\tperson\nLiS31CevvvA_1\tperson\nLjRWmJThZrA_0\tperson\nLjyZ7Djyq1U_0\tperson\nLkP8lgpmCJc_0\tairplane\nLkfML7bjGg8_0\tperson\nLmCzQ6WrePM_0\tbus\nLnYz8cQsrWk_0\tcow\nLpTBcxby8_U_0\tcat\nLpT4VBLapqM_0\tcar\nLpjbdSyW__A_1\ttruck\nLqm0JTDlIaU_0\ttruck\nLtIW9sP55N4_0\tperson\nLuC8ON_75l4_0\tperson\nLuRLF2TroVk_1\tairplane\nLunFMJp3_Uc_0\tcat\nLup2fypzuD4_0\tperson\nLurlbycI8WQ_0\tperson\nLvd7WBHnDpk_0\ttruck\nLwxi57QRroE_0\tperson\nLyPkKroSsaU_0\tbird\nLyPkKroSsaU_2\tbird\nLyPkKroSsaU_7\tbird\nLz7uf7cmfAU_0\thorse\nL0Y9j9DtU1o_0\tdog\nL0mqjqU7pmw_0\tperson\nL1C1GJZuI6U_0\thorse\nL1TihVYcfII_0\tbear\nL1xr5gaSzeQ_0\tbicycle\nL2lJenTKrLU_0\ttruck\nL2lJenTKrLU_3\ttruck\nL2lJenTKrLU_5\ttruck\nL22pyXEUjv8_0\tbird\nL22pyXEUjv8_1\tbird\nL5px8rMqxRY_0\tmotorcycle\nL8Q0lJgaUi4_0\tzebra\nL-3-1978GvI_0\tknife\nL-6R2vuKWhc_1\ttruck\nL--TMS61Zvw_1\tboat\nL--TMS61Zvw_5\tboat\nL_dOv3wd1ZM_0\tperson\nL_nI4_2RbTU_0\tknife\nMAmHLoJdmc8_0\tcow\nMENNFokPNbU_0\tairplane\nMG8-IGrKVxc_0\ttruck\nMG8-IGrKVxc_2\ttruck\nMG8-IGrKVxc_3\ttruck\nMG8-IGrKVxc_5\ttruck\nMH1GdFqE_lo_0\thorse\nMH1GdFqE_lo_2\thorse\nMH1Kct5RCRg_6\tairplane\nMH1Kct5RCRg_10\tairplane\nMIkxezmilfY_0\tperson\nMI6x6FrXJqs_0\tknife\nMI9BIgkOBjI_0\thorse\nMJ9vJFTTV5c_0\tperson\nMKiCrBXtflw_0\tcat\nMK8Jm3I4In4_0\tdog\nMK8Jm3I4In4_4\tdog\nMMiSt9MNne8_0\ttrain\nMNve0XPgcGA_1\tbird\nMN1A5E3jNSE_0\thorse\nMPJu68gBGfI_0\tperson\nMPMudxdiIds_0\ttrain\nMPfgu6-snaM_0\tbird\nMQ1o_7gpp5E_0\tperson\nMQ1u8IEmFSA_0\tperson\nMQ3HhLmsCik_0\tperson\nMRNJmLLkjPc_1\tmotorcycle\nMRNJmLLkjPc_2\tmotorcycle\nMRqfEOhWW48_0\tperson\nMSItPvVCUN8_0\tcow\nMSd5Ecl5-W0_0\tperson\nMSnEnQ0psW8_0\tcar\nMV6MGXhQwFQ_0\tcat\nMWbnSN-7WG0_0\tcow\nMWt4P6HWxMM_0\thorse\nMXEcQSFwng0_0\tcat\nMXTzea4MeHc_1\tcar\nMXoVDyewPBE_0\tperson\nMYFPnJIKK5k_0\tperson\nMYpdq9KvK8o_1\tumbrella\nMasaNQLCMGE_0\tperson\nMbRvEKuvR04_0\tskateboard\nMb6r1es0AbU_0\tcat\nMcdl3s6oQrc_3\tbear\nMcdl3s6oQrc_1\tbear\nMe-clc6PGkA_2\thorse\nMe-clc6PGkA_3\thorse\nMfYpMzLWST8_0\tcat\nMgFhoihDD1U_0\tperson\nMkmpoid1BvA_1\ttrain\nMokOHR3wImM_0\tcat\nMqBTk3ITQ8c_5\telephant\nMqBTk3ITQ8c_3\telephant\nMrWZEUtDBq8_0\tdog\nMuyIuhdszH0_0\tperson\nMuyIuhdszH0_2\tmotorcycle\nMvKMtFVP5NU_0\tperson\nMvbZEiffy8s_0\tperson\nMvuGj1qR4Ic_0\tmotorcycle\nMvxUj_Du2IY_0\thorse\n
Mw6Cu1mPanU_1\tcow\nMxtJwd0GBkA_0\tairplane\nMzTsjMauBH8_0\ttruck\nMzrv2OCC2GE_1\tperson\nM0TTCr9jjgc_0\thorse\nM12KvkF1Nec_0\tperson\nM40gbbuNuL4_0\ttruck\nM5p7jyvEgPk_1\tknife\nM52oDxJEXk4_2\thorse\nM52oDxJEXk4_0\thorse\nM7Kcv9fUrhA_0\tcow\nM9CCnnc8m8k_0\tgiraffe\nNAInb4dMC_E_0\tairplane\nNAInb4dMC_E_3\tairplane\nNAsDBYDNhwY_0\tcat\nNDxs_vxhhME_1\tperson\nND-VrJY7mU0_0\tperson\nNEsCBcZFajg_2\tairplane\nNEsCBcZFajg_5\tairplane\njvlyXCBSuCk_0\tperson\njwYviTYbJYs_0\tcow\njxL3F-iB2S8_0\tbus\njxmsNv20V50_0\ttrain\njyrY4oyyA7M_0\tperson\njzNOBsi5TtQ_0\tcow\njzeFDGEt_iQ_0\tperson\nj4UJ80q_s3c_4\tskateboard\nj4UJ80q_s3c_5\tskateboard\nj4t-Otp9ES8_0\tperson\nj6XmNyG8nYE_0\tbear\nj8SM6uLadmU_0\tmotorcycle\nj8aX3NuEnxc_1\tairplane\nj8aX3NuEnxc_0\tairplane\nj93wwDC_a2I_0\tskateboard\nj_tT90ISNnc_0\tskateboard\nj_6ZWhyOOcA_0\tperson\nkBKG0SaNbdw_2\tcow\nkBYFlPJJx-s_0\tperson\nkCHOoDF-pXo_0\tcat\nkCQIRLEi88s_0\tperson\nkCefZaEK9M4_0\tperson\nkCt3G72NjyY_0\tmotorcycle\nkEx2sgiyKpY_0\tdog\nkG5vclMyg7w_0\tskateboard\nkHIZAi1E9gU_0\tcow\nkH3Hwla_MUM_0\tperson\nkI7523l1Tu4_0\thorse\nkI7523l1Tu4_1\thorse\nkLwsGbEsMjs_5\telephant\nkLwsGbEsMjs_1\telephant\nkL52zPMgsXM_0\ttruck\nkMIRREOoSt0_0\telephant\nkOqKBgGRd_c_0\tboat\nkQu7xcJmp6w_0\tairplane\nkRLl2HLijWc_0\telephant\nkRqsESioKVM_0\tperson\nkSWUU8Ef-Rg_0\tcow\nkSXkd4PYX9M_0\tbear\nkSm9E8WwGYY_0\tperson\nkTT6onfYUug_0\tbicycle\nkZcfsku1oJ4_1\tbicycle\nkarZg0Iifks_0\tskateboard\nkavU8zKXrEY_0\telephant\nkbD6iXQ3P6M_0\tcow\nkb4GuHpwuSw_1\tcow\nkdPgKSrjVYQ_0\ttrain\nkd9Tn_hyeb4_0\tdog\nkeka7aToy_E_0\tperson\nke2Ap6Zvq64_0\tcow\nke2uXJrB9WQ_1\tbird\nkfL1KEY53AM_0\tperson\nkfMMMSNZWeM_0\tgiraffe\nkgcb2y-aw8s_1\ttruck\nkhicinfB1nY_0\tperson\nkhr1-lWZOOw_0\tbicycle\nkixX1ga8yrw_0\tperson\nki51QTz_6iw_0\tbus\nkjhcR5ljaDU_0\tcar\nkksfStf04pc_0\tperson\nkk41Jjw-BpQ_0\thorse\nklxQpVdft5E_1\tbicycle\nkmIUPZSNl5A_0\tairplane\nknFBzlhmDMk_2\tskateboard\nknFBzlhmDMk_3\tskateboard\nkoomOoaIF0Q_0\tmotorcycle\nko4el3e0QFI_0\tbird\nkqE2rNzUnvU_0\tcow\nkqJJ6_2vGtU_0\tmotorcycle\nkqiHy-EzdcQ_0\tairplane\nkqiHy-EzdcQ_1\tairplane\nkqiHy-EzdcQ_2\tairplane\nkrD5WtdljCc_0\tbird\nkrR-lFUTXHo_0\tcow\nksbdMzGs-gs_0\tperson\nksbdMzGs-gs_1\tperson\nktCRlGt6408_0\ttrain\nktcXRj-Vz6c_0\tbus\nktcXRj-Vz6c_1\tbus\nktvaX1ALzwE_0\tmotorcycle\nkwMNSTE0h8U_0\tbus\nkwMNSTE0h8U_1\tbus\nkwyn-eed9l4_1\tbird\nkx2jH9V7vYM_0\ttrain\nkz0gVW9uWkc_0\tskateboard\nk1C25MTUso4_0\tperson\nk1Y6Y1yocF0_1\tknife\nk1qT5GtPmQo_0\tbear\nk2fCUP9H4cw_0\tskateboard\nk24lvYKkK5g_0\tboat\nk3hYFu55iGE_0\tperson\nk3hYFu55iGE_1\tperson\nk3pTU4KNdvE_0\ttrain\nk4tqy4pdlNs_0\thorse\nk5MmpG9afSM_2\tbear\nk5UoGZZb_RY_0\tcat\nk5oey7bw5kA_0\tperson\nk5-IPGgeCPc_0\tperson\nk5-IPGgeCPc_1\tperson\nk8OboASs470_0\tskateboard\nk8OboASs470_1\tskateboard\nk9COlD7u1tI_0\tknife\nk-tdE0VAFkc_1\tperson\nk-tdE0VAFkc_0\tperson\nk_E-cIymiis_0\ttrain\nlAZQZSK_9bk_0\tcat\nlCc5-WmCZJk_3\tdog\nlCc5-WmCZJk_5\tdog\nlDWAsuKkv5Y_1\tbird\nlFObiVRO-BQ_3\tairplane\nlGAGodreVjQ_0\ttrain\nlGJB2hhw5pI_0\tcat\nlIbOGzXhSW8_2\thorse\nlI-A6pFtkLQ_0\ttrain\nlI_jxWxWivM_0\tdog\nlJXfbIuwTIQ_1\tcow\nlJccP5OJjZ8_0\ttrain\nlKBO-dakd8w_0\ttrain\nlLyfm0vbHrw_0\ttrain\nlL_4QscWdx4_0\tperson\nlM0yKqnWblw_0\tperson\nlNJbOSFK9N4_1\tskateboard\nlOFTlhNmKD8_0\tbus\nlOQf3A_3lPI_0\thorse\nlOWmL3mpSeA_0\ttrain\nlOvB2zlHw8w_0\tdog\nlO-XTKPQb5I_0\ttrain\nlPapZHOAdzk_0\tbicycle\nlP5lgBlsH0U_4\tairplane\nlP5lgBlsH0U_1\tairplane\nlP5lgBlsH0U_2\tairplane\nlQDy9Mri-18_0\tperson\nlQsTpo0uOIw_1\tboat\nlQuFC-E7VUM_0\tperson\nlQuzpkDKFQ8_0\tperson\nlRuif4Zc7CI_0\tboat\nlSZa4pAHgV8_0\thorse
\nlS-5gEkB0_o_0\tmotorcycle\nlTTquh-jLwM_0\tcar\nlThBPb6HI1U_0\tcat\nlVeIr8AFTjY_0\tperson\nlWT2t48q164_0\tmotorcycle\nlYSpeuL7-oo_0\tumbrella\nlZOTAg9Fofw_3\tbird\nlZVwQoLPjBU_0\tgiraffe\nlZVwQoLPjBU_1\tgiraffe\nlahDGDRe7X8_0\thorse\nlcKDCt1eWqg_1\tknife\nldQGB8gzRjA_1\tcow\nldhdyBduVoU_1\tcow\nlf_tYVzrap0_0\tperson\nlge9f_bgAOk_0\tperson\nlgzIpgcvPvU_0\tperson\nlhNv9zDa1ug_0\tcar\nlhadIxHkaVg_1\tperson\nlhadIxHkaVg_0\tperson\nlhnQuOIF-2c_1\tperson\nljLO1myCfoA_1\tknife\nljayNZQpp-I_1\thorse\nljayNZQpp-I_5\thorse\nljeTwRM6DWE_0\tperson\nlkvdy3Hejpw_0\tperson\nll6gTyUguMY_0\thorse\nll6m5MTpf4o_0\tperson\nlmpKSF0cXSc_0\ttrain\nlnfEV2dRfm4_0\tmotorcycle\nln0_FGR8B08_0\tperson\nloVlMj9Dhkk_0\ttruck\nlotZh71qMks_0\tperson\nlpcqEaZD_Xk_5\tbicycle\nlpcqEaZD_Xk_0\tbicycle\nlpcqEaZD_Xk_1\tbicycle\nlpcqEaZD_Xk_2\tbicycle\nlpcqEaZD_Xk_3\tbicycle\nlpcqEaZD_Xk_4\tbicycle\nlqu4tjd3Zg4_12\tbear\nNE9AhZPTVFY_0\tmotorcycle\nNFF4UemeH8g_0\ttruck\nNFSj66emNbM_0\tcat\nNGS9BrtLJ0I_1\tboat\nNGvpnRrWSKc_1\tbear\nNHLBjlX2jeg_0\tperson\nNHgh88y4e80_1\tcar\nNHpM-oBMIRk_0\tdog\nNHrjnZsJWOw_0\tperson\nNID_0E0tn_g_0\tcow\nNJQNZ36lsvw_2\ttruck\nNJm81cIGO98_0\tskateboard\nNJ22Hynv9s4_0\tumbrella\nNJ22Hynv9s4_1\tumbrella\nNJ7MXR2AaoY_0\tcow\nNKQfFcfr6Ko_0\tperson\nNL1iy1TKtRI_5\tcar\nNL1iy1TKtRI_1\tcar\nNL1iy1TKtRI_2\tcar\nNL1iy1TKtRI_3\tcar\nNL1iy1TKtRI_4\tcar\nNMCijcIa_XU_2\tknife\nNMhR_Z4Rq7g_0\tperson\nNNbRF02KnGM_1\tskateboard\nNQiMeD83sMw_0\ttruck\nNQiMeD83sMw_1\ttruck\nNQsnyZmQoPw_0\telephant\nNQsnyZmQoPw_2\telephant\nNQve9Yujb14_0\tperson\nNRaAEznVIxQ_0\tperson\nNTGqC7kOGAw_1\tbird\nNTRX6gLV_04_0\tbus\nNUSnWbhvmQs_0\tcow\nNVzCor2-ZpI_1\tzebra\nNV-p8Vp-bdA_0\thorse\nNWAQ1is2w98_0\tairplane\nNYIqB-l8eKk_0\ttrain\nNZ5OIYTIoYQ_0\tperson\nNaCksn1bbv4_0\tairplane\nNaCksn1bbv4_2\tairplane\nNaEokN7Nh-U_2\tknife\nNadzcUmXDTk_0\tperson\nNbJ2gM5KJTM_0\tcat\nNbJ2gM5KJTM_1\tcat\nNdXmkm9jcPA_1\tairplane\nNd6ceCmRYBI_0\tbird\nNeXVfNsggZw_0\tcow\nNfEzlo6-i_4_0\ttrain\nNfEzlo6-i_4_2\ttrain\nNfEzlo6-i_4_3\ttrain\nNhi9730yIzM_0\tdog\nNhskHQ9bqlo_0\tcat\nNhvr0y1tqjk_0\tperson\nNiP4AEjiwxs_1\tboat\nNio43-cQPh0_0\ttrain\nNi_TSyCk1Ak_0\tcat\nNjknyzAAQpM_0\tperson\nNlOjGoYPj9Y_0\ttruck\nNlTLvOcpoEA_0\telephant\nNlVEu_8kdoI_0\thorse\nNlVEu_8kdoI_1\thorse\nNljV4UjnFJc_0\tmotorcycle\nNnRWY12wxUk_0\tperson\nNnVFfTO9-q8_0\tperson\nNo84NOV3Pwk_1\tskateboard\nNpZj-n9_STU_1\tbird\nNqwxEAASrCo_1\tairplane\nNr9t7GeBwQY_2\tskateboard\nNsbG9FcyTFk_1\telephant\nNsbG9FcyTFk_4\telephant\nNsbG9FcyTFk_2\telephant\nNsbG9FcyTFk_3\telephant\nNuKyL_c3YcQ_0\tcow\nNulXMVhoGhU_0\tknife\nNuutxSJHULc_1\tcow\nNvkF9R1HsJc_0\tcar\nNxTnPIBFKdE_0\tairplane\nNxjnp7dqCdc_0\tcow\nNxqGplqsmNk_0\tperson\nNyKq-nq-KlQ_0\tperson\nNzAEnNO5-fo_0\tbicycle\nNzAEnNO5-fo_3\tbicycle\nNzAEnNO5-fo_4\tbicycle\nNzAEnNO5-fo_5\tbicycle\nN0LEywKxW9o_0\tcat\nN0e8A9q9tyU_0\ttrain\nN1OYtZSKdKQ_0\ttrain\nN1OYtZSKdKQ_3\ttrain\nN1pTdHcekjU_0\tcar\nN28sspen6dM_3\tbird\nN28sspen6dM_1\tbird\nN3ffRSq8s7M_2\tcow\nN6nP6NLTaG0_0\tmotorcycle\nN7Bv6ZMyBrU_0\tskateboard\nN9vkS7ish9k_0\tcow\nN_5Xf4hpanE_1\tdog\nN_5Xf4hpanE_0\tdog\nOBQQMo8mWLE_0\tperson\nOCA5rhgrl48_0\tperson\nOCLVaKMFCZg_1\tbicycle\nODI8kcB_dSs_0\ttruck\nODJSlRRM1Uo_0\tcat\nOD4XsgCwIKk_0\tperson\nOD9vhbbeBAE_0\thorse\nOEhrO1p2agU_0\tperson\nOGOf9vbNJB8_0\tperson\nOG8Nfns4uh0_0\tcat\nOHEyq1pCfZ8_0\ttruck\nOIV8ASYsqZc_0\tskateboard\nOIV8ASYsqZc_1\tskateboard\nOImLl2ufWqI_0\tcow\nOJktr2-sJmY_0\tmotorcycle\nOJktr2-sJmY_2\tmotorcycle\nOKbNtRotT5w_2\thorse\nOKbNtRotT5w_5\thorse\nOKbNtRotT5w_7\thorse\nOK-2ALhNWts_0\tbird\nOLpvIpNUgY4_0\tperson\n
OLyGncmosSs_1\thorse\nOL_lZw3lqE4_0\tperson\nOMm3ReCUyGA_0\tperson\nONlvohUS-io_0\tcow\nOOC45SMJl6M_0\tbus\nOPIxLQwJLaM_1\tcow\nOPbyoGG-M_E_0\thorse\nOPm_iAWIO2o_1\tknife\nOR4OEYlOndk_0\tmotorcycle\nOSRtFznjiro_0\tmotorcycle\nOSUOKZdfiXQ_0\tperson\nOS6SXRjK0rU_0\thorse\nOUeSqgMRLUg_0\tbird\nOUrVDMMYK-4_0\tperson\nOWBXMvAtmcA_0\tcow\nOWqaj3O-u6E_0\ttrain\nOWqaj3O-u6E_1\ttrain\nOWqaj3O-u6E_3\ttrain\nOWvRHFQJ-5g_1\ttrain\nOXjc7JlWYwk_1\tbird\nOXpPVrdEoko_0\telephant\nOXpPVrdEoko_1\telephant\nOYCDyQPt5rU_0\ttruck\nOYRmTydmqZo_0\tcow\nOYugCmogPD8_0\tbear\nOZver3igS6U_1\tzebra\nOZy-0MSWC7o_0\tperson\nOZ5z2K-vIYg_0\tmotorcycle\nOb4ur_FS9xM_0\tdog\nOdLj2La07lM_0\tboat\nOdnylLd12pU_0\tskateboard\nOdsXUxBBISo_0\tairplane\nOePFLxtDg7k_0\thorse\nOflyVi689KA_0\tskateboard\nOg9LiinXMtw_0\tbus\nOjx6OtSIA3k_0\tperson\nOmdbd0YsB2o_0\tairplane\nOmdbd0YsB2o_1\tairplane\nOnRL69PzM4I_0\tbicycle\nOo3Uhz6L-cs_0\tperson\nOpEMSVRTyxk_0\tdog\nOpJl0GUiLQI_0\tperson\nOptQqflXY_g_9\telephant\nOptQqflXY_g_0\telephant\nOptQqflXY_g_4\telephant\nOptQqflXY_g_5\telephant\nOptQqflXY_g_8\telephant\nOqmbWcekMxo_0\tperson\nOrPfakDZX64_0\tperson\nOrwr1k0mKho_0\tperson\nOrwr1k0mKho_1\tperson\nOtHHLfag4xg_2\tknife\nOumTAMPogf4_0\tperson\nOvQFDkMjctE_0\tperson\nOyDNx0iCGUM_0\ttruck\nOyKi2PGJERI_0\tperson\nOyKi2PGJERI_1\tperson\nOyhAS52bQMA_1\tperson\nOyhAS52bQMA_0\tperson\nOzORAIgrZOg_1\tknife\nOzQFkM92we8_1\tdog\nO0o_u_t5Y6w_0\tbus\nO2TgLtQU7PI_0\tknife\nO3GPSL92hYw_0\telephant\nO4UhXpMuxJI_0\tperson\nO5PlzlxQuPc_0\tdog\nO5796OHwBy8_0\tbear\nO6cWlrockUQ_2\thorse\nO8s1bsDJrwc_0\tperson\nO9dxeSLiF9A_0\tskateboard\nO9dxeSLiF9A_1\tskateboard\nO90WVIgQwww_0\tperson\nO9_riOoIpKo_4\ttrain\nO9_riOoIpKo_6\ttrain\nO9_riOoIpKo_10\ttrain\nO_hypcyZCFo_0\tairplane\nlryNU4SKncc_0\tcow\nlrzxlHguluE_0\tbird\nlr7T4YcCWSU_0\telephant\nlr7T9GuNUMY_0\tcat\nlskWmTPa9Gk_0\tperson\nls34lS6fGzw_0\tperson\nlt7kXXW5D-c_0\tbus\nlvdU2uEdpnA_0\tboat\nlv6aYZguv6k_0\tperson\nlxXwMvanqo4_1\tboat\nlznoTW8tuLI_0\tbus\nlznoTW8tuLI_1\tbus\nlznoTW8tuLI_2\tbus\nl0J9Km2lk2I_0\tperson\nl0TirY4L7Es_1\thorse\nl0TirY4L7Es_3\thorse\nl3yFwpak_LA_1\thorse\nl38pNVKwDeo_0\tbird\nl4sdxYUAiJQ_0\tperson\nl4_P74HRriU_0\tperson\nl5GlzRyX39s_0\tperson\nl5GlzRyX39s_1\tperson\nl5WawiGWVxg_0\tperson\nl6cEGnOtFZg_0\tairplane\nl682n6ZmpNk_0\tperson\nl7Mmo3ow8qo_0\tperson\nl7kq2yqxPQc_4\thorse\nl7kq2yqxPQc_2\thorse\nl8r-mOc3-3U_1\tperson\nl9QgZQGtQWI_0\tmotorcycle\nl-4jrxgMGTQ_0\tskateboard\nmAEnlKe67pQ_0\tbicycle\nmAhzB1TH8mU_0\ttruck\nmAj62XUNkIM_0\thorse\nmBgSYaKydZY_0\tperson\nmC5X6MO2y9A_0\tperson\nmDf5zsFFweg_2\tknife\nmDf5zsFFweg_1\tknife\nmFbUnWMAreQ_0\tperson\nmGDfepYDRRE_0\tperson\nmHFxPudSk8c_0\tmotorcycle\nmIFnGYdf0po_0\tperson\nmJm2UYBiD8w_0\tcat\nmJo7aqOfRww_0\tairplane\nmJ6qCcS_-AQ_0\tperson\nmJ-DsFbUPUg_0\tmotorcycle\nmKBs2L-xwdU_0\tperson\nmLVHfKExUNU_0\tboat\nmMdGNbPpLKQ_0\ttruck\nmMy70TxInmA_0\tperson\nmNpEoUW_OPI_0\tknife\nmOFqvrGzJiE_1\telephant\nmOFqvrGzJiE_2\telephant\nmOkmKyBZoXI_0\tperson\nmP6-RR-Vuv0_3\ttruck\nmR1y0XlZhQ4_0\tperson\nmTeNKWTwFcs_0\tperson\nmU7E6pi9PFU_0\tbear\nmU7E6pi9PFU_2\tbear\nmWeNwTJwEmo_0\tperson\nmWhw719wEH4_0\tperson\nmXBKJjrxqmc_0\tknife\nmXekeIascCc_0\tperson\nmX_4T1I2ux4_0\tdog\nmYwEvpKN2-Q_0\ttrain\nmZ0VxiELg9A_2\tmotorcycle\nmZ0VxiELg9A_0\tmotorcycle\nmaiqraHgwgg_0\tskateboard\nmbZZ48h5pnY_0\tperson\nmboIIChd8tY_0\tbicycle\nmcR2Fi6wQj8_1\ttrain\nmcR2Fi6wQj8_0\ttrain\nmciQ3fR1QTE_0\ttruck\nmeAfvCGeyyU_0\tperson\nme-WjezBU4U_0\tmotorcycle\nmflX-nwtpzs_0\tskateboard\nmgSJL9uL49w_0\tbus\nmgSJL9uL49w_1\tbus\nmhDnVhRMCHc_5\tcow\nmhDnVhRMCHc_0\
tcow\nmhDnVhRMCHc_1\tcow\nmhDnVhRMCHc_2\tcow\nmhDnVhRMCHc_3\tcow\nmhDnVhRMCHc_4\tcow\nmhIULm3ssFk_2\tairplane\nmiJ1b0bNn9M_0\tperson\nmiLapj3u_5g_0\tcat\nmiR8Xeb7SM0_0\tumbrella\nmi4j0PrR-Gs_0\ttruck\nmi4j0PrR-Gs_1\ttruck\nmjSUb46nTjs_0\thorse\nmj2ClgQE_Q0_3\tskateboard\nmj2ClgQE_Q0_2\tskateboard\nmj_R3ENyiKM_0\tperson\nmnOoqy7I3L8_0\tskateboard\nmns4vFzs4_8_1\tskateboard\nmns4vFzs4_8_0\tskateboard\nmnwyrMq92so_0\tperson\nmoBNY2JjuEQ_0\tcow\nmoc2yPvW_JU_1\tperson\nmpA3PWbdVWc_1\tbus\nmp-cHp44pXo_0\tbird\nmp-cHp44pXo_1\tbird\nmqI9CDpsCDE_0\tcat\nmqYD18pFqm8_0\tperson\nmrnDERbyZcM_0\tskateboard\nmtO9ioY8AHY_0\tperson\nmuk5R25UV1A_0\tperson\nmungFWJMSsg_0\tdog\nmwRNyFvem8g_3\ttruck\nmyYMS85ltwo_0\tskateboard\nmyiCWmM3XN4_1\tdog\nmziKTFuKVco_0\tperson\nmznC1uLm_j8_0\tskateboard\nm0z25TJV2vU_0\tperson\nm1VAqMAJ-Lw_0\telephant\nm2DUDsR4tWA_1\tbus\nm2Sr_Q8JpcI_0\thorse\nm2Sr_Q8JpcI_2\thorse\nm2Sr_Q8JpcI_3\thorse\nm2-nK6oZ08E_0\thorse\nm2-nK6oZ08E_1\thorse\nm3u_pETGaMw_0\ttrain\nm4Ozpr8E1EE_1\ttrain\nm5mSFt43spE_4\tmotorcycle\nm7VhCUoV_Dw_0\tperson\nm77tPf0Ulb0_0\tperson\nm8THukZrE7w_0\tperson\nm86BSOvJvS8_0\tperson\nm9hdxJE9HQE_2\ttrain\nm95nb4Vl_R0_0\telephant\nm-Ry10-IgWg_0\thorse\nm-sLdoVujlI_1\tbird\nm_25GAJYGHE_1\tcar\nnAO2Y4kF7b8_0\tbicycle\nnBllCINiO-4_0\ttrain\nnF_NlCSUpFo_0\tcat\nnIO0ZNZi6n0_0\tperson\nnIiXsRSLxZI_0\tperson\nnIiXsRSLxZI_1\tperson\nnJO5eQXPS0M_1\thorse\nnKfhxWUyc4I_0\telephant\nnKfhxWUyc4I_2\telephant\nnLUyCQwkCds_1\tmotorcycle\nnMW7WsVKd_E_0\ttruck\nnO14Z3ggnZs_0\ttruck\nnO16C5NBMQQ_0\tperson\nnO16C5NBMQQ_1\tperson\nnPJJOI4j3UQ_0\tperson\nnQAqVHkffhY_6\ttrain\nnQAqVHkffhY_7\ttrain\nnQAqVHkffhY_1\ttrain\nnQAqVHkffhY_5\ttrain\nnQrJJZvmF74_0\tcat\nnRu8IVZXzCU_0\tairplane\nnR1Ng3PnYoU_0\tcow\nnSUBF0RYH1o_1\tbicycle\nnTfgyYqyO_Y_0\tperson\nnTtqkLze7eY_0\thorse\nnTtqkLze7eY_3\thorse\nnTtqkLze7eY_4\thorse\nnW4sAWZ6dHQ_0\tbicycle\nnXYeq3IDOFo_0\ttruck\nnXgq-W7J6ho_0\tperson\nnYGQy8peDYk_0\tperson\nnYHjMb7HoK8_3\tbird\nnYIUSRVmY30_0\tperson\nnaMdRxX0924_0\ttrain\nna6hNW8gSx8_0\tbus\nnbojUStyLvY_1\tperson\nnbojUStyLvY_0\tperson\nncZiTQHehfk_0\tperson\nnefS_k9oFMI_0\tperson\nngE_mlmsaqY_0\tperson\nnh4AR9Mjwmo_0\tbicycle\nniQ2DNNlBSM_0\tperson\nniUnVyYTszc_0\tperson\nnjOQqZ1pBGM_2\tboat\nnjP6uuU-G6o_6\tbear\nnjcuqdNTGfM_0\tperson\nnj8ALe3wC9c_0\thorse\nnki1SdWtdCI_0\tcow\nnk6FezKWYSY_0\tbird\nnmNSM48p094_0\tknife\nnmRZQdp3xRk_0\tperson\nnn8WcALmZ7c_3\tbear\nnoTnh5A2OHo_4\tboat\nnoTnh5A2OHo_1\tboat\nnoWsAcioI8g_0\ttrain\nnoe-qNQfJBo_0\tbird\nno-b9_3kXiQ_1\tdog\nnpAPemisdEI_3\tboat\nnpGL0Kl16f0_0\tperson\nnpGL0Kl16f0_1\tperson\nnqZya6Vk3iY_0\tcat\nPAdHnsQ5png_0\tcat\nPAi_eJ_z59w_0\tskateboard\nPBPViL9vBZQ_0\tmotorcycle\nPBS3-SzLV2A_1\thorse\nPBwR_Jdod_g_0\tknife\nPCJWOz32Js8_0\tperson\nPDmAbS9Afkc_0\ttruck\nPE8yxnkayr0_0\tperson\nPE8yxnkayr0_1\tperson\nPFKrDvQuKII_1\tcar\nPFb83m0smRg_0\tperson\nPHunbTKqKwk_0\ttrain\nPH5VqmGrnXs_0\tcat\nPIG9w10uliw_0\tbus\nPIo5FlB1sf4_3\tbear\nPIzyVPr2kvQ_0\tperson\nPI_spS2t57M_1\thorse\nPI_spS2t57M_0\thorse\nPJK-c0HQksg_0\tbear\nPJUvXC0Eumw_0\tairplane\nPJsCV-lA78A_0\telephant\nPJ0Y1xQ7ZJo_0\thorse\nPJ2kZmkL25Y_0\tperson\nPKGRn71TQGQ_6\tairplane\nPKGRn71TQGQ_1\tairplane\nPKtLlpi00cM_1\tskateboard\nPK_UdRSa36U_0\tmotorcycle\nPMDSUC0_Ytg_0\tbus\nPNxobv7rkRU_0\tperson\nPOWngj1oBhQ_1\ttrain\nPOpePYwyHWY_0\tbus\nPOu1oPwNd4g_0\tumbrella\nPPeaYnqzi9g_0\tperson\nPPjAhD3i-v4_0\tbus\nPPqkkhaUIdE_3\tbus\nPPqkkhaUIdE_0\tbus\nPPqkkhaUIdE_1\tbus\nPRaq5kZmO2A_0\tbus\nPRyc4Vp0s00_0\tbird\nPSyuR_D5C2c_0\tcat\nPTLtv0VJ0_s_0\tperson\nPTM6VrBcP80_0\tdog\nPTewrgfas9o_1\ttr
ain\nPT6u63wHOhs_0\tdog\nPT_tMCTzlSc_0\tperson\nPV_FZhj_0hI_0\tcar\nPWZIO2hdNRU_0\tperson\nPWiyz8b24es_0\tairplane\nPXxs6Hzx7Pk_1\tzebra\nPZ3X20r0oVc_1\tbird\nPZ3X20r0oVc_0\tbird\nPdPZkfHUOq0_0\tperson\nPd9bh2hiWAk_0\tperson\nPeA8729U1jg_0\tboat\nPeJxY7YFBTA_0\tknife\nPgFIqGCjnc0_0\thorse\nPgNvdw3Zges_0\tumbrella\nPgjeF-iHzLk_0\tperson\nPgyVMv-RRL8_0\ttruck\nPiI1e3aKeos_0\tperson\nPkju9RRBRAU_0\tperson\nPn01hUEOICo_0\tbicycle\nPoI-RFl6jqU_0\tbird\nPoI-RFl6jqU_2\tbird\nPpX6lJOP6ng_0\tperson\nPq1kVNudVJo_0\tboat\nPsPMm45bDZA_0\tbird\nPskTcGACgjw_0\tperson\nPsrCCNATJd0_1\telephant\nPs9peKxde4U_0\tdog\nPvCZZzw4FKw_0\tperson\nPvQVqhtqTVk_1\tperson\nPvQVqhtqTVk_0\tperson\nPv3IqqHid-w_0\tairplane\nPv3IqqHid-w_1\tairplane\nPw7zlPV9yh4_0\tmotorcycle\nPytUHdEhipQ_0\tairplane\nP0FylASL6h4_0\tperson\nP06NLpHGLb8_0\ttruck\nP06NLpHGLb8_1\ttruck\nP1FTUN2gJkY_0\tperson\nP3JAtlf2-VA_0\tcat\nP3MhJa_p-dU_1\ttruck\nP5MpdcJgQrI_0\tskateboard\nP5NEco_Rqas_0\tmotorcycle\nP5NEco_Rqas_1\tmotorcycle\nP5v3n_5s-F8_0\thorse\nP7i0pgLo9kg_1\tcar\nP8E7gprJa1s_1\tskateboard\nP8_7-uFl2Go_0\tbicycle\nP9dDbodBY8s_2\tmotorcycle\nP9dDbodBY8s_0\tmotorcycle\nP9dDbodBY8s_1\tmotorcycle\nP91LJh-_E0Y_0\tcow\nP-FrYGR7Bf0_0\tperson\nP-phCIDPeWw_0\thorse\nP-27cmR3CZE_0\tknife\nP-_MzAIxz2E_1\tknife\nQBAxag8dq6Q_0\tcow\nQBfotDmdDkk_1\tskateboard\nQBrAST1Q2iE_0\tperson\nQCCt8ooY4qg_0\tperson\nQCjqG8908mY_0\tcow\nQEDWauqnaSk_0\tskateboard\nQEGY7Dq2x9s_0\thorse\nQE0MjXjSFjU_0\tboat\nQFS35qERdLE_0\tperson\nQFeMKKxurVg_2\thorse\nQFxep-yih-s_0\ttruck\nQFxep-yih-s_1\ttruck\nQGN2-Iqa4QQ_0\tperson\nQHPYpnJSf2s_0\tcat\nQHhkx3CSiWk_0\tperson\nQJ1W4Pajbv0_0\tperson\nQLmFsJCZy_o_3\tknife\nQMRFisCEGQc_0\tperson\nQM9Kddu2XcQ_0\ttrain\nQObG-uf4v68_0\tmotorcycle\nQOjAwmQ_7vA_0\tperson\nQPtMbvxzFuE_2\tbear\nQQC7AIIJg2Y_0\telephant\nQQLrVBS8VSo_0\tperson\nQQLrVBS8VSo_1\tperson\nQSTf92HwJS0_1\tdog\nQSTf92HwJS0_0\tdog\nQTjiYkMuDGI_0\tknife\nQTqvJZS8ZNo_0\telephant\nQUIxOZH8N8c_0\tperson\nQUUgu5YvS1c_0\tperson\nQU7X6RkjKPE_1\tboat\nQVUI5ZkkDsA_0\tperson\nQVnam2Ma6mY_0\tperson\nQY1rz6k86s0_1\tperson\nQZS3V-7xnAA_0\tperson\nQZWqiN4OA_A_0\tperson\nQZk1HSA90KA_0\tknife\nQaUHYb5os4U_0\tperson\nQahBgQXhNfo_0\tcat\nQbOvfWFyPzg_0\tdog\nQbOvfWFyPzg_1\tdog\nQbPvdKEmnrI_0\tperson\nQb4RNeQYfPc_0\tboat\nQcLa-GP2ITc_0\tperson\nQcLa-GP2ITc_1\tperson\nQdeUvHCiXwc_1\thorse\nQd0chk9vUQ0_0\tbear\nQeISQLJERxg_0\tperson\nQfJeJLieLew_0\tcow\nQfJk-eDxmKE_0\tperson\nQfkb-gc72qg_0\tcow\nQgPao5AkXFU_0\tskateboard\nQgiX6-1aN-4_0\tbus\nQhGx_MwYnWs_0\tperson\nQhIp71nr7Vk_0\tdog\nQk_VhG5lt1Q_0\tcat\nQmRFPW81gZc_1\ttruck\nQmfJmQuF1-I_0\tbus\nQmuLT1MpdP8_0\tperson\nQm2yaeiexlI_2\tmotorcycle\nQrd-Q3XrT3A_0\ttrain\nQszBg-eN7F8_0\tcat\nQtBYK8AxWCw_1\tperson\nQtpKcTyf4n4_0\tknife\nQtq2m-MV2q4_0\tcow\nQvY9ysq30EI_3\telephant\nQvY9ysq30EI_5\telephant\nQvY9ysq30EI_0\telephant\nQvY9ysq30EI_2\telephant\nQwJNOYFZ3W8_1\telephant\nQwTIODgGfOM_0\tperson\nQxLFtmn_Igw_2\tbear\nQyyPl-aCFUs_0\tcat\nQzETtzOBUaY_0\tperson\nQ0HpPvC0bKA_0\tperson\nQ0M_Fog02Yw_1\thorse\nQ0UrlXLNioY_1\tumbrella\nQ0tQtb1npx4_0\tcar\nQ0x55aCCNxA_0\tperson\nQ31q8b3CSN8_1\tskateboard\nQ4rAM1058Z4_0\thorse\nQ4rAM1058Z4_1\thorse\nQ5G2n-3zXX8_1\tperson\nQ5G2n-3zXX8_0\tperson\nQ5X1kisU8Qo_0\tperson\nQ6hwtMw2jkU_4\tskateboard\nQ6hwtMw2jkU_3\tskateboard\nQ7SViqj0bEg_0\tdog\nQ83xNK10WK0_0\tbear\nQ-lTGQgTOEg_0\tperson\nQ_rsZh5VqdY_0\tperson\nRANBJV7BN3k_0\tperson\nRAmxGTzr25A_0\tperson\nRBccU2wq7Qs_0\tknife\nRBclSX-7rYQ_0\tperson\nRDiehz1pFVA_0\tknife\nRD7nVPZTGEw_0\tskateboard\nREBfrgEC_3U_0\tknife\nREh7f-__WqU_0\tcat\nRE40E9-qdHE_0\thorse\n
RFO8tA6rfbo_0\ttruck\nRFbhEQ4qN-A_0\tperson\nRIfxXKT-_88_1\tskateboard\nRJZgo3_JEPs_0\tperson\nRJi5ZRGQb-A_0\tperson\nRJxPTuKUKjk_0\thorse\nRKFpQfRSYIc_2\tmotorcycle\nRKFpQfRSYIc_3\tmotorcycle\nRKFpQfRSYIc_4\tmotorcycle\nRKFpQfRSYIc_6\tmotorcycle\nRKFpQfRSYIc_7\tmotorcycle\nRKFpQfRSYIc_8\tmotorcycle\nRKFpQfRSYIc_9\tmotorcycle\nRKFpQfRSYIc_10\tmotorcycle\nRKFpQfRSYIc_11\tmotorcycle\nRKFpQfRSYIc_0\tmotorcycle\nRKFpQfRSYIc_1\tmotorcycle\nRLcZcFP03fA_0\tperson\nRN6TzMbUlyg_0\tairplane\nROdg8e5a0Fk_1\tcow\nRPwZjkygYo4_1\telephant\nRR-fksDmQTU_0\tdog\nRSLwmLbf3No_0\thorse\nRSO2IDZGDus_0\tperson\nRSQ7pHT5sU4_1\tcow\nRSWyviTCTqk_0\tcat\nRTAQO62dbRo_0\thorse\nRTONY5PqRUo_0\tskateboard\nRT0mh9U0YDc_0\tperson\nRUAbb66fW18_0\tbicycle\nRUW8xYh84q4_0\tdog\nRU0u42rf0Hw_2\ttruck\nRU0u42rf0Hw_3\ttruck\nRU_8ryQNxC0_1\tbird\nRWJfJx1nXNQ_0\tbicycle\nRWo2zaceWcc_0\tbird\nRahqzUIhIkc_0\tcow\nRawtpxzAbmM_0\tperson\nRdZGVs8pH40_2\tskateboard\nRdZGVs8pH40_1\tskateboard\nRdge7lmfdc8_0\tperson\nRfVv6ECZ78Y_3\tbear\nRfa2If7RJTY_0\tknife\nRfa2If7RJTY_1\tknife\nRfvNPPjs-bw_0\tboat\nRi3O4rz5S2o_0\tboat\nRoMemRfbKkc_0\tperson\nRoNJ0fP0VUU_0\tperson\nRqAANAYxYz0_0\tperson\nRqqaUsDM-aI_0\tperson\nRrnixlsQyn8_0\tperson\nRr6AsTlUNKQ_0\tperson\nRspILw0UAM8_0\tperson\nRsyjwcMkRrY_1\tknife\nRsyjwcMkRrY_2\tknife\nRt1reRy5GVY_0\tperson\nRuFIanBmYzM_0\tbicycle\nRu9ksAvNYc0_2\tcow\nRwVTAYsyWMo_0\tperson\nRxWOvD9i9Ig_0\tcar\nRxtS3kGOYoc_0\tbicycle\nRxtS3kGOYoc_2\tbicycle\nRxtS3kGOYoc_4\tbicycle\nRxtS3kGOYoc_6\tbicycle\nRxtS3kGOYoc_9\tbicycle\nRxtS3kGOYoc_12\tbicycle\nRx9YjtdgOEI_0\tperson\nRyVdNK-PCyg_0\tperson\nRylJTxUTfF0_0\tskateboard\nRzdsXt87bVE_0\tdog\nR0biK134LTQ_0\tperson\nR0n9cqLQE4E_0\tskateboard\nR3rDAaPE_s4_3\ttruck\nR45uCINxuVY_0\tperson\nR7IE_IohaIk_1\tairplane\nR7IE_IohaIk_6\tairplane\nR7IE_IohaIk_0\tairplane\nR8Zg4uo1QpM_0\tperson\nR9d1vlii7cs_8\ttruck\nR9hRCG8pAHM_0\thorse\nR9hRCG8pAHM_1\thorse\nR_xLhXpHgp0_4\tskateboard\nSAeiSpeFynU_1\tbus\nSBmb0VU07rs_0\tboat\nSCLi5OFtzQk_0\tskateboard\nSCaWHsWzxqY_0\tperson\nSC18zgZ9Diw_0\tbus\nSDCTiDVOdW0_0\tbear\nSFA4mVjImxk_0\tperson\nSFoil_6CvbI_0\tbird\nSGsRwH8YxQg_1\tairplane\nSGsRwH8YxQg_11\tairplane\nSHSsDGmwywY_0\tcow\nSIZ3AYCr7PQ_0\tperson\nSIv3Hcq1ge8_0\telephant\nSIv3Hcq1ge8_1\telephant\nSJkZwyPxUTg_0\tcow\nSJqduSR9h4g_0\telephant\nSJwgIeOkfTM_1\thorse\nSKoDZimqLV0_4\tbus\nSMF8aDGwELI_0\tgiraffe\nSNbBUZtngzM_0\tperson\nSNnofRkUk8w_2\tboat\nSNqtno2pOzc_1\tdog\nSNqtno2pOzc_2\tdog\nSQn8ueHVBWc_4\telephant\nSQn8ueHVBWc_6\telephant\nSQn8ueHVBWc_1\telephant\nSQn8ueHVBWc_3\telephant\nSQ4tDbbdzr8_0\ttrain\nSQ4tDbbdzr8_2\ttrain\nSSjgAjilS8g_0\tperson\nSSwA_nC9rr0_0\tperson\nSThjw6JeBnQ_0\tperson\nSTuEo8vap08_0\tperson\nSUHEgX-8bo0_0\tperson\nSUwLfCebumU_1\tbear\nSUwLfCebumU_2\tbear\nSVUAFI7bHqQ_0\tperson\nSWedQv5UnQo_0\tperson\nSXWo-zKZICs_0\tperson\nSYT4odK3Dwo_1\tbird\nSc_CAareVEI_1\telephant\nSc_CAareVEI_6\telephant\nSc_CAareVEI_7\telephant\nSc_CAareVEI_2\telephant\nSc_CAareVEI_3\telephant\nSdzIWTR-rkc_0\tperson\nSeBOeRzwqrQ_0\tskateboard\nSeU_71ydaeA_0\telephant\nSehCD9wP-Pk_0\tperson\nSf9OdV3i3I4_0\tperson\nSgglaVke5lo_3\tboat\nSgySshdgJrQ_0\tmotorcycle\nShves64RCp4_0\tcat\nSiotcXGUwAs_0\tperson\nSj56u4dFe4k_2\tperson\nSlR9qCk_m9k_0\tmotorcycle\nSlR9qCk_m9k_1\tmotorcycle\nSlZZmtOGyeE_0\tairplane\nSlZZmtOGyeE_1\tairplane\nSndDcPzB8Hc_0\tcat\nSn2SGmheI-Q_0\tperson\nSn9gOBw9bf4_0\tperson\nSoiA6jtejG4_0\tdog\nSpbyBYH0OjI_0\tperson\nSph2g6B-X2M_0\tcat\nSpjssmEyc_o_0\tairplane\nSqHtdCP5Oao_1\thorse\nSqHtdCP5Oao_2\thorse\nSqLiHZHzp9w_0\tperson\nSqoR7vKYzCY_0\thorse\nSq-Xok-ea7U_0
\tperson\nSreiPFJ6vBw_1\tboat\nSsMS0eIy2Ws_0\tperson\nSse7vXMMO6E_0\tperson\nSuaush4Da4s_0\tperson\nSvPL8gOREaU_0\tknife\nSwaILKCtBVA_0\ttruck\nSw4B_VFic3M_0\tskateboard\nSw7L3wImbSA_0\tperson\nSyldRIQbAGU_0\tperson\nSzVyFmQ28Xo_0\tcar\nSzkobSwGTMk_1\tbird\nSz2bTIe9kTo_0\tairplane\nnrEv-Plh45s_0\tbear\nnt_BXwq_xhA_0\tgiraffe\nnuCdww9iIOs_0\thorse\nnuMeNIi1MPY_0\tperson\nnuMeNIi1MPY_1\tperson\nnui8beXjUlU_0\telephant\nnui8beXjUlU_1\telephant\nnvMXQKwroRY_0\tperson\nnvaO13WFhos_0\tperson\nnxBkP48NgKY_0\tmotorcycle\nnxclZ6iCf7o_0\tcow\nnyogtZp3kIk_1\tairplane\nnzf12QyuD4E_0\ttruck\nn0tx4V2rF3I_1\tgiraffe\nn09NxJcTEYQ_0\tperson\nn12ITkwyzvM_0\tcow\nn15n46culQU_0\tperson\nn19nqH4078Y_0\tbear\nn2F8uNrgh1U_1\telephant\nn2daSQR_dTI_0\tmotorcycle\nn3Eb6Cf77Vg_0\tairplane\nn3aHtfCo_aw_0\tperson\nn3fhSGUvtH8_2\tknife\nn5alwWwFPb0_0\tmotorcycle\nn5osSY0_BSo_0\tperson\nn5-RrJI-Lxw_0\tperson\nn6I0k52pV18_0\tbear\nn8xNf-PRHnc_0\ttruck\nn9AUV2KuhLo_0\tcow\nn9zSAZMj2Mk_0\tknife\nn-I-WnLfnqE_0\thorse\nn-QBM6yD7RI_0\tbird\nn-eDiuWYJUc_0\tperson\nn-1FhryZboM_0\tperson\nn_Cv1LzGol0_0\tperson\noBixVhXVcmY_0\tperson\noBjIRWu_BWA_0\ttruck\noCCV0-mP2R4_0\tbus\noDlSzIkDJGM_1\tcar\noDnobYn8maE_0\tperson\noDrYXyIN9xs_2\tdog\noEcyeE0kNFc_0\thorse\noElAgrukyOk_0\tperson\noE0bjG0z-nk_0\tperson\noGDp2b_LvDA_0\tbicycle\noHu9fCIhAjs_0\tperson\noIQuiXJzEUI_0\tperson\noIYCDBqfT6I_1\telephant\noIZHf-r5C3w_2\tbird\noI3ETWYxCi8_2\tperson\noI3ETWYxCi8_1\tperson\noJAivZwYxDE_0\tperson\noLTHGMleOxk_0\tcar\noLTHGMleOxk_1\tcar\noMZczwLgR1Q_0\tboat\noMZczwLgR1Q_3\tboat\noMZczwLgR1Q_1\tboat\noMZczwLgR1Q_2\tboat\noNMf32fzYvo_0\tperson\noOi9E4se4ww_0\tperson\noOp7fTxc8qY_0\tperson\noOp7fTxc8qY_1\tperson\noQcVQukPVdA_0\thorse\noRacxmfNaSM_0\tcat\noSwwku39aC0_0\tskateboard\noXHr2yBfL3Y_0\tcat\noXfOERZ2kMs_0\tcow\noXlK1t1qisA_0\tperson\noYw8UE0VSFk_10\telephant\noYw8UE0VSFk_1\telephant\noYw8UE0VSFk_5\telephant\noYw8UE0VSFk_8\telephant\noY5CyHk-QEo_0\tperson\noaHCd7KI_Fc_0\tairplane\noaK_EfFOb7o_2\tskateboard\noaK_EfFOb7o_0\tskateboard\noa5NT5mX--c_0\tperson\noa838tg7QCk_2\telephant\noa838tg7QCk_3\telephant\nocJUmpBIBOo_0\tperson\noc7XeYj7dOE_0\tskateboard\nodjK5W70JaE_0\tperson\noeYHzAMgoQ4_0\tskateboard\nofynEJHRTz4_1\tperson\nof1ISNDelz4_0\tcat\nogJGxnVqTWY_0\tcow\nogNqc-uHzQ4_0\tumbrella\nohkrDDXUwjY_0\tperson\nohrYGLaImow_0\tcow\nohxeFH800SE_0\tskateboard\noiftoNj28hs_0\telephant\noiwU7UpO9S4_0\tperson\noi4GfdQBxyc_0\tperson\nojiIyU5ibT0_0\tperson\nokPcGR4BRQM_0\tperson\nomsmPSC4u3A_0\tairplane\nonH8ELLteHg_0\tmotorcycle\noo3eTJKpErU_1\telephant\noo3eTJKpErU_2\telephant\nopWm4bW5B9k_2\ttruck\nopYiNVXmySg_0\tskateboard\nopkxXg1s8ZQ_0\thorse\nopkxXg1s8ZQ_2\thorse\nopkxXg1s8ZQ_3\thorse\nopkxXg1s8ZQ_4\thorse\nosYXdQYkiPQ_0\tperson\notKNUa-KgUg_0\tcar\notKNUa-KgUg_1\tcar\notOxAXKskbI_0\tboat\notU4Zd1n65g_0\tbear\notqOLpbz4LQ_0\tairplane\nouK26Crplso_1\tcar\nouSUKHZs1Dc_0\tperson\nousG5WHZq8I_0\telephant\nouwAzKpUG7k_0\ttrain\novQiwCBG8Eg_4\telephant\novZ4In0kLUg_7\tbear\novZ4In0kLUg_2\tbear\novZ4In0kLUg_6\tbear\nowW-da7Tdls_0\tperson\nowtKQFT_gNk_0\tperson\nox0mlEooWI0_0\tskateboard\noyuMudJ9EM8_0\tperson\nozRJI9h3tks_0\thorse\nozRJI9h3tks_1\thorse\nozvxKPrfdo8_0\tdog\noz11xvTIbvM_0\tperson\no0QRA7gPhBI_0\tgiraffe\no02m7tfad28_0\tperson\no02m7tfad28_1\tperson\no09Ks_UmmkY_1\ttrain\no3eHOnTMxnU_0\tairplane\no4PVsZPaxOM_0\ttrain\no4PVsZPaxOM_1\ttrain\no4VOx1SeRKY_0\tbicycle\no4VOx1SeRKY_2\tbicycle\no4VOx1SeRKY_4\tbicycle\no4VOx1SeRKY_5\tbicycle\no4VOx1SeRKY_1\tbicycle\no4VOx1SeRKY_3\tbicycle\no7wb_t8x0D8_0\tperson\no8KS5SYj0GE_6\tbird\no8YfQD0
GA00_0\tperson\no9gD7-MVkJ4_1\tbus\no-IwJTgdr_A_4\tbird\no_sONKO9OMk_0\tperson\no_7RumsdAcE_0\tmotorcycle\npAVwx70oxIc_0\tperson\npAthLZfnXaM_0\tperson\npAthLZfnXaM_1\tperson\npBWgDW8f6II_0\tperson\npB5-haagdS8_2\tbird\npEUCkpfCcaw_0\tboat\npEtOW-iQZCA_0\tperson\npE-OFVB2lzo_0\ttrain\npHsAHiqdb-c_0\tbird\npIHbW9IMV2E_1\tairplane\npIHbW9IMV2E_0\tairplane\npINK56mkS-E_0\tcat\npJBMnX2HBFo_0\ttrain\npJ6wkaE8-iY_3\telephant\npKFd8IXz4K4_0\tboat\npKnRcv--qEI_0\tcat\npLvGIJc0ETk_1\tcat\npMKMeBQzCC8_0\tdog\npMKMeBQzCC8_1\tdog\npMgX9KscZSg_1\ttrain\npNG0qeNr-Vo_0\tperson\npNWXXO380uQ_4\tdog\npNWXXO380uQ_10\tdog\npNWXXO380uQ_1\tdog\npNWXXO380uQ_2\tdog\npNWXXO380uQ_6\tdog\npP84ZurhiFY_0\tumbrella\npQAJTPvkPj4_0\tbird\npRArAdUzaKg_0\tperson\npRVlgxVhtuA_0\tcat\npRy6kU2p41E_0\tcat\npS5AzmSvRPY_0\thorse\npU9s744_T6o_0\ttruck\npVR9b-qG1Ig_0\tgiraffe\npVR9b-qG1Ig_6\tgiraffe\npVR9b-qG1Ig_7\tgiraffe\npVR9b-qG1Ig_1\tgiraffe\npXLbIBluyAQ_0\tbus\npXfO7xO-99w_0\tcat\npYXDml6lcAY_0\tmotorcycle\npZCCPMu42GA_0\tperson\npbFuk0oX6a8_0\tbicycle\npbFuk0oX6a8_1\tbicycle\npbFuk0oX6a8_2\tbicycle\npb3p83fw9bg_0\tperson\npcUV4ja1VRc_0\ttruck\npceUU6aj_ao_0\tcat\npdyhFh6-rCo_0\tbear\npeBxgn7gXlw_1\tmotorcycle\npeHZd4qdOMI_3\tboat\npe00hbvqjDI_0\tperson\npe_73GR1-NI_1\tairplane\npfED6WafVwQ_0\tbear\npfpKoO-GjGI_3\ttruck\npfpKoO-GjGI_1\ttruck\nphXjZ1yxWD0_0\tbus\nphec6_yC2HY_0\tperson\nphjJhuKxT5Y_0\ttrain\npiGT-hRYHHQ_0\thorse\npiN1RiueJhY_0\thorse\npjLei6UAHsE_0\tairplane\npjLei6UAHsE_1\tairplane\npjZqJuEX1ow_0\tairplane\nS2FTgueR-80_0\tperson\nS2FTgueR-80_1\tperson\nS3U383sqlRs_0\tbicycle\nS4UDIyyqmlY_2\tmotorcycle\nS6h6E0IKO6Y_0\tdog\nS73sRU7b2dk_0\tperson\nS9QmlxGGxGM_4\tknife\nS9goDsKFXAg_0\tperson\nS-qgaqzenIE_0\tperson\nTBpnes8Z-3s_0\tperson\nTCtRzPGrwls_0\thorse\nTCycfRWpg0s_0\telephant\nTDKDtLliMhg_0\tperson\nTDlLgW8Fjes_0\tperson\nTFcak4kNd2c_0\tperson\nTGFSBSitWNw_0\tcow\nTISjnLr1r-k_4\tgiraffe\nTISjnLr1r-k_5\tgiraffe\nTISjnLr1r-k_3\tgiraffe\nTJsLSuQcb7E_0\thorse\nTKadOIk-uPI_5\ttruck\nTK61mJMHqTE_0\ttrain\nTK61mJMHqTE_1\ttrain\nTLxcXucOpWw_0\tskateboard\nTMaLrtjFU34_3\tcow\nTNNXwm3Bt5I_0\tbicycle\nTOLyNcTSGPA_0\tperson\nTPglVxQN85I_0\tdog\nTRH4PZkAkiE_0\tperson\nTSl3wSreplo_2\tbird\nTSl3wSreplo_0\tbird\nTVuX76wWzwY_0\tperson\nTW9LBSqxNWo_0\tbicycle\nTW9LBSqxNWo_2\tbicycle\nTW9LBSqxNWo_6\tbicycle\nTXD-idarfhU_0\tperson\nTYsJu2G5WVY_2\tknife\nTZdDUMDyozA_0\tdog\nTZfFEYUY5_0_0\tboat\nTZsigdW7Qfs_0\tairplane\nTaL6ssJD8z4_0\tairplane\nTalhQQ9B7vc_0\tzebra\nTa-JBO0InZk_0\thorse\nTa-JBO0InZk_1\thorse\nTa-JBO0InZk_2\thorse\nTbm_BFLOPic_0\ttrain\nTcRl6wotFw4_0\thorse\nTcR9fR_SWLg_0\tbicycle\nTeiC-tObc4o_0\tbicycle\nTgRRY3Mn0Ro_0\tperson\nTi411VXWtAc_0\tdog\nTjCiDUNoDi0_0\tskateboard\nTkktEeCiSAo_4\tknife\nTkktEeCiSAo_5\tknife\nTlXSJmmN3dc_0\tmotorcycle\nTnB8G7eZm24_0\tperson\nTnY1qP0YQQ8_0\tperson\nTnc7CCuk78Y_0\tperson\nTn4trDBJAqE_0\tperson\nTo8VzjtX70s_1\tperson\nTo-lnvpzIKY_0\tperson\nTqKcS4Cx7wc_0\tbird\nTqvuyyM_x4E_0\tbird\nTqvuyyM_x4E_1\tbird\nTsM45PkaTj0_1\tbird\nTs4iqmKVRy4_0\tknife\nTtI1W2xFQ5k_0\tperson\nTtI1W2xFQ5k_1\tperson\nTtnuIzV01ek_2\ttrain\nTtyfhN-jWcc_0\tperson\nTuEArk4EFWg_0\tperson\nTuEwZSEUe5A_0\tperson\nTuOnAlE6TRs_0\tairplane\nTubHgt_FxYo_0\tperson\nTufSi0uSU8M_0\tperson\nTvUmQi32j08_0\tperson\nTvUmQi32j08_1\tperson\nTvuhORVyaL4_0\tperson\nTvuhORVyaL4_1\tperson\nTwH6hv5zVIU_0\tairplane\nTwSnlq5Kma0_0\tskateboard\nTxV4qpdgJ3Y_0\tairplane\nTxV4qpdgJ3Y_1\tairplane\nTyIzjLHGvjo_0\tperson\nTzUMxAOWWcc_0\tbicycle\nTzVawH7veiM_0\tbicycle\nT0WCoXgklkw_0\tperson\nT0r5yfzMs4g_1\tbicycle\nT24d3EHv2GE_0\tbird\
nT406qi8vIlk_5\tairplane\nT406qi8vIlk_2\tairplane\nT6XxSbeAl6Q_0\tmotorcycle\nT8e9Qi4dcNY_1\tbear\nT95G52MuPFU_0\thorse\nT-PL14w9TV4_0\tcat\nT-cOBQACeAw_1\tbird\nT_2A3L49ah4_0\tdog\nT_2A3L49ah4_2\tdog\nT_2A3L49ah4_3\tdog\nT_2A3L49ah4_5\tdog\nUANkhHNWM-M_0\tperson\nUAnl6TGZhxs_0\tcow\nUA5VCImEZ2Y_0\tdog\nUBdNIuCPaZ4_0\tcar\nUBdNIuCPaZ4_2\tcar\nUBsG3-ocU64_1\tboat\nUE40h6VhUaU_1\tbicycle\nUF8l_MU2rj8_0\tperson\nUGCPxfU7FKM_0\tperson\nUG5FFY29OV0_0\tcat\nUHO129a_p0U_0\tairplane\nUHYwdGF9W-0_1\thorse\nUHYwdGF9W-0_0\thorse\nUIvJPTYu6Hc_0\ttrain\nUI4IvmmFIPQ_0\tperson\nUKExOybWiRM_0\tmotorcycle\nUKExOybWiRM_1\tmotorcycle\nUKkr05PKrb0_0\tbicycle\nUKlB9mDIXss_0\tperson\nULdZGJs5ta8_0\tmotorcycle\nUMsR07JXCYs_0\tcow\nUM446G0Lud4_0\tknife\nUOUaveJ_TWA_0\tperson\nUO_zNFtEt3Q_0\tperson\nUPkEE2dnlkU_0\telephant\nUPkEE2dnlkU_1\telephant\nUQAJPD_gH7g_0\tcat\nUQDXdgIlpDg_0\tknife\nUQibn_ZNp9Y_0\tskateboard\nUQibn_ZNp9Y_1\tskateboard\nUSAjeRaDlJ0_0\tperson\nUTqlz0i9KIo_0\tperson\nUVTPHohbCV0_0\tperson\nUX4dpwv6qWE_0\tdog\nUYAtAlnvVy4_0\tskateboard\nUYc0lVVxayQ_0\tdog\nUcCtmXy5F4g_0\tdog\nUcbWaG8GwRs_3\tairplane\nUcbWaG8GwRs_2\tairplane\nUceYFW8-zZM_0\ttrain\nUcse975FqUA_0\telephant\nUc5PAhXhIzk_0\tumbrella\nUgsSu7wC28w_0\tbird\nUhj0HRMHPXY_0\tperson\nUhsh3JUb_aI_0\tbicycle\nUisVwousE8g_0\tcat\nUi8yPflhqHs_0\tperson\nUjMTd3LCxyQ_0\tperson\nUjMTd3LCxyQ_1\tperson\nUlFA0xDQcS4_0\tskateboard\nUlhZSONgFCI_1\tcow\nUlhZSONgFCI_2\tcow\nUmvp1XgX6Qc_0\tperson\nUm-FzEOyncc_0\tperson\nUnUlhJaHWlA_0\tbear\nUnyyMjT0BCc_0\thorse\nUsCJdEa7tq4_0\tdog\nUsCJdEa7tq4_1\tdog\nUsrv7_ONvi0_0\thorse\nUs6dL_WD7xg_0\ttruck\nUtyaA_QRIrQ_0\ttruck\nUu9k1VohpvA_0\thorse\nUvptsJcl_ms_0\tperson\nUwtHiozuyRs_0\tperson\nUxPh-hnwal4_0\ttruck\nU2LvNquzuZ0_0\tbicycle\nU2LvNquzuZ0_2\tbicycle\nU4LhReaGH70_0\tperson\nU64eMon0R9w_1\tperson\nU74o2HGsFeI_0\tdog\nU853uMV0qAY_0\tperson\nU86p5VtUC6c_0\tknife\nU9YbGyTBb5k_0\tperson\nU99ENpOmVGI_0\tairplane\npmrTy1xQ5kI_0\tperson\nprJIAYsv8bQ_0\ttruck\npramqy_Y1gA_0\tboat\nprlcpxzCoyc_0\tbus\nps-nNC6Equg_0\tcat\nptF2Hqj7DGk_1\tmotorcycle\nptF2Hqj7DGk_0\tmotorcycle\nptPi712LDq0_3\tbear\nptU4EDudgg8_1\tbus\npt6v3JZFi4c_0\tbird\npuifEp7W50E_0\tmotorcycle\npuifEp7W50E_1\tmotorcycle\npu0G99aVryc_2\tcar\npu0G99aVryc_0\tcar\npwFqv42foTM_0\tperson\npye4y8sPr9I_0\tperson\npy0U90-ZTkI_0\tcat\npy2dhJjpOaI_0\tbear\np19EU6tw9oM_0\tperson\np2DntTqvGT4_3\tcar\np2DntTqvGT4_1\tcar\np2QsmFuYxdI_0\ttrain\np2TTKNDiGv0_1\tbicycle\np4pf9W4qt8s_0\tperson\np40Oqh_akS4_3\tbird\np43GludvR_g_0\tbicycle\np5F9hHDkbKc_0\ttrain\np7UAl7_bv4s_0\tbus\np8KQvF1DyLg_0\tperson\np8YhfWsz1JY_1\tperson\np8YhfWsz1JY_0\tperson\np8gE3VpTAR4_0\tperson\np84Z-poVaAw_0\tmotorcycle\np9ixpjYEEag_0\tmotorcycle\np-J_LbVq7CU_0\tperson\np-SJ_Ym5pTA_0\tcow\np-XasPaki0k_0\tcow\np-cJamorAiY_0\tperson\np-2rgSte1DI_1\tbus\np-2rgSte1DI_2\tbus\np-6u3d8YV70_0\tperson\np_YVPahadQ4_0\telephant\np_YVPahadQ4_1\telephant\nqDP6_m4bDRA_0\thorse\nqD8NS4r2Gd8_1\ttrain\nqEjyhyeCIR8_0\tcow\nqEjyhyeCIR8_3\tcow\nqEjyhyeCIR8_1\tcow\nqEjyhyeCIR8_2\tcow\nqGiLjP8-EVQ_0\tperson\nqHYuGyp8_HU_0\tbear\nqHZsnSLmqEY_0\tperson\nqIJo1R3rHmQ_0\tperson\nqJI7mnjOp0A_1\tumbrella\nqJOaXM8s-Yo_0\tknife\nqJOaXM8s-Yo_1\tknife\nqJugj62heF8_0\tairplane\nqKqEqxMZHVg_0\tperson\nqM566R4U4Ug_0\tbird\nqQbEwbtvdRg_0\tperson\nqSR2E4eqjqI_0\tskateboard\nqSiMwC5e5_I_0\tperson\nqUGXSXCXUbw_1\tperson\nqVCH1ozivyk_0\tperson\nqV9Ll-N_rpc_0\tdog\nqWpIdTdBIQU_0\tboat\nqWpIdTdBIQU_2\tboat\nqWpIdTdBIQU_3\tboat\nqXaS7daelL4_0\tperson\nqXfnmaLtO-M_0\tairplane\nqXwXdnrUo5w_0\ttrain\nqXx4Vj-HwkU_2\tbus\nqYf_XBAUa_o_2\tele
phant\nqZFwurCX4DM_0\ttrain\nqZH-IY7bBzg_0\tperson\nqZQcY5PTh10_0\tcat\nqZVUho1xBlo_1\ttruck\nqZVUho1xBlo_2\ttruck\nqZVUho1xBlo_0\ttruck\nqbYjOWN6n70_0\thorse\nqceiUxIt1VE_0\tcar\nqcjVVDAbHUI_0\tperson\nqcmbCgcy3co_0\tperson\nqdNXPwWD9_Q_1\tperson\nqdzu1EFDYUE_0\tcow\nqel4U0nmQOI_1\tperson\nqfp7BvAtQa8_0\tperson\nqgKnno5T6f0_0\tmotorcycle\nqguyMwcAj4M_0\tperson\nqhb1bts1fSM_0\tbear\nqheo-lRVpfk_4\tknife\nqheo-lRVpfk_0\tknife\nqheo-lRVpfk_1\tknife\nqheo-lRVpfk_2\tknife\nqheo-lRVpfk_3\tknife\nqhmscyJC8dM_0\telephant\nqh8xnvGfllE_1\tbird\nqh8xnvGfllE_2\tbird\nqipZi2kaQyA_3\tperson\nqi3hoxEao_g_1\tperson\nqi3hoxEao_g_2\tperson\nqptB3_MZagA_1\thorse\nqp5tJGAi9h0_0\tairplane\nqqL9gnwx87g_0\tcow\nqqL9gnwx87g_1\tcow\nqq4_m1S3AOI_0\tperson\nqt6FFVa8DGM_0\tperson\nquoX4193twY_0\tdog\nqvMRVm660LM_0\tperson\nqvZGFb3CbxA_0\tbird\nqvcNxorHqCc_0\tperson\nqx647iZCsoE_5\tumbrella\nqyQFBM_7mBw_0\tbird\nqywYqT8IzaQ_0\tskateboard\nqz4S2Tn1Jkk_0\tperson\nq2qEXqY43ws_0\tcow\nq2v3AmGBH-M_4\ttrain\nq2v3AmGBH-M_1\ttrain\nq2v3AmGBH-M_5\ttrain\nq2v3AmGBH-M_6\ttrain\nq3TB2Rnymkg_1\ttruck\nq3pYgC4-lrs_0\telephant\nq35X7FnaiGw_2\tbear\nq5BC4AVKV4c_0\tperson\nq6nXZqEmQGQ_0\tperson\nq9MXoyUF-BU_0\tperson\nq9d2hPrip6k_0\tdog\nq_dqx0-AtKk_0\tperson\nrA595TIyUgY_0\tbird\nrBko9NgVOX4_0\tperson\nrB2323YW1iA_0\tcow\nrDQ2hcIWoBY_1\ttrain\nrEXtAqxJj8c_0\tperson\nrGVf1BsLfng_0\tcow\nrHvp_Dghuho_0\tperson\nrH33U6qgd9M_1\tumbrella\nrIqhuv94Zuc_0\tperson\nrKN5E25jozk_1\tperson\nrKN5E25jozk_0\tperson\nrLbBCTSGdzc_0\tperson\nrOoxhMEKcgc_0\tbear\nrPEIT9eAAMY_2\tbicycle\nrPEIT9eAAMY_3\tbicycle\nrPUzTjaLdkk_0\tcat\nrPuPm0ctC3s_11\ttrain\nrQHtu5_Piv4_1\tcat\nrQKV6GBQuag_0\tairplane\nrRH0VLQDJZQ_0\tperson\nrSF1UQ01lZc_0\tperson\nrSSbdX8817Q_3\tdog\nrSu82skaMJQ_2\tskateboard\nrSu82skaMJQ_5\tskateboard\nrTIN784f0CM_0\ttrain\nrTIN784f0CM_1\ttrain\nrTIN784f0CM_3\ttrain\nrTV3ev-xyuk_0\ttrain\nrTYmEM2Lhew_0\tbus\nrT4P9ZJeBG8_0\ttrain\nrT4P9ZJeBG8_1\ttrain\nrT4crgFLycE_5\tbicycle\nrUJ7zeax1zY_0\tperson\nrV1Baq6-C6Q_0\telephant\nrWyf2iqpfng_0\thorse\nrXf2T3VO-kI_1\tcow\nrYkLuW5NLic_0\ttrain\nrZi9k9F8S1w_1\tperson\nrZi9k9F8S1w_0\tperson\nrbIYpEELMQc_3\thorse\nrbIYpEELMQc_2\thorse\nrbMVAO2mJiY_0\tperson\nrbn7_DeuItc_0\telephant\nrcF4-O7o_Qk_0\tperson\nrcF4-O7o_Qk_1\tperson\nrc96rbja6VI_5\tskateboard\nrc-e_NDrZDM_0\tperson\nrdBSfuG2KBA_2\tboat\nrdBSfuG2KBA_0\tboat\nrdQvGZDUDJA_1\tperson\nrdhiEKvYF0w_0\tcar\nrdnDsUHCZSY_1\tcat\nrePM3_x9tqw_7\tperson\nrePM3_x9tqw_4\tperson\nrePM3_x9tqw_5\tperson\nrfL51BZGldc_6\ttruck\nVCkpd_d1z4U_0\tairplane\nVE-3PfVw5-Y_1\tairplane\nVG2QbeXEwec_0\telephant\nVIQGgTWrg00_0\tperson\nVIr_rdbfvQQ_0\thorse\nVJVWk9wyMjI_0\tcow\nVJmgPBopcB4_0\thorse\nVJ0by87MRoI_4\tbicycle\nVJ0by87MRoI_7\tbicycle\nVLSeTnShp54_0\tmotorcycle\nVLSeTnShp54_1\tmotorcycle\nVLSol2tA9WY_0\telephant\nVLcSoFR7qBw_0\tcar\nVMDBBz7G-Pg_0\tmotorcycle\nVMmtrv5OtMQ_0\tboat\nVMxS4op_OBg_0\tperson\nVNCLtdahLmI_0\tbear\nVNCLtdahLmI_3\tbear\nVNHGw5Sj0Qc_0\tperson\nVN8_N7Ceofk_0\tcow\nVP0WD1miM00_0\thorse\nVP20LIiI9S4_3\thorse\nVP20LIiI9S4_7\thorse\nVP20LIiI9S4_1\thorse\nVP20LIiI9S4_2\thorse\nVP20LIiI9S4_5\thorse\nVQWxUc9QOjU_4\tbear\nVRtl4gAWELM_0\tskateboard\nVRt9s3OQPzo_0\tperson\nVSLdNogDia0_0\tbird\nVSrmwgo-veI_1\tboat\nVTqoizpYNeI_0\tcar\nVTqoizpYNeI_1\tcar\nVTqoizpYNeI_2\tcar\nVTqoizpYNeI_3\tcar\nVT11p8szxZY_0\tcow\nVUVAbtGJbuE_0\tperson\nVUh5jCDWj08_0\tcat\nVUl6vkX7PRU_0\tairplane\nVVn3XeSqijk_2\tmotorcycle\nVWTes_MfrOc_0\tknife\nVXNEqQb5C4Y_0\tmotorcycle\nVXT0TH9jfZo_0\telephant\nVXZscyYzxqw_1\tperson\nVYYS45KWEgo_1\tdog\nVYr49ml0uaE_0\tperson\nVZj4RHsnOW
U_0\tperson\nVZqdzb_qI2g_0\tperson\nVa81siK4zeI_0\tumbrella\nVdLqI43E7eY_0\tcow\nVd5pCJuOoDM_0\tcar\nVfBrelUfLFg_0\tcow\nVgpm6fwLIns_0\tmotorcycle\nVhc7DKkRHOo_0\tdog\nViQIgBdCkh8_0\tcar\nVlBlBgxUa-U_0\thorse\nVlq4fYmrr6g_0\tcar\nVmVN4E_qtfM_0\tperson\nVm9-f0pXycc_2\tbicycle\nVngapMBo560_0\tcow\nVou-Sfzlpu8_2\ttrain\nVqdeO4pa_rc_0\telephant\nVqj-Qv5bVyE_0\tperson\nVr1Wqz5_UA0_1\tcow\nVr1Wqz5_UA0_2\tcow\nVr1Wqz5_UA0_0\tcow\nVsAo8VBzDTM_0\tperson\nVsOw_U6hYRY_0\tmotorcycle\nVsOw_U6hYRY_1\tmotorcycle\nVsyd7-_CUA0_0\tperson\nVs2JphYinjk_0\tgiraffe\nVtdrYDJFw-Y_0\tperson\nVtkV11WZWEc_0\tcow\nVuDA6sPAa9U_0\tperson\nVuLf3ZTqniM_0\tdog\nVuW2wDK-uZI_0\tmotorcycle\nVv-z9_l8_ms_0\tbird\nVwdZHZPjlT0_0\tcat\nVwkf0U9PZvI_0\tairplane\nVwppYMiCI1g_0\tumbrella\nVwvER7iR2YI_0\tperson\nVxG5gvk1mfo_2\telephant\nVxH52JoUd0I_0\tperson\nVxyq13mC_uk_0\tperson\nVxyq13mC_uk_1\tperson\nVyf_VJEQ1jE_0\tairplane\nV0CjVa5_1P0_0\thorse\nV0sliERbCxI_0\tperson\nV0sliERbCxI_1\tmotorcycle\nV0w_hBBqe-g_0\tperson\nV1ufPW4ictQ_0\tskateboard\nV25H8smvzbM_0\tdog\nV56RVnEPG54_0\tmotorcycle\nV6rg5et7Q14_0\tcat\nV6rg5et7Q14_1\tcat\nV6_XA2w3sTs_0\tboat\nV7CVQjk9-Xc_0\tskateboard\nV8Pv-I4ovPs_0\tperson\nV9m1dMbXxug_0\ttruck\nV9qvycn1a3E_0\ttrain\nV-ZKLxW5cuM_5\thorse\nV-ZKLxW5cuM_2\thorse\nV-ZKLxW5cuM_4\thorse\nV-iFCgvAuCg_0\tperson\nWBcYTIQ65Ow_0\tperson\nWB6uQ708AxE_0\tbird\nWCNpGdfG8nk_0\tperson\nWCZ4ZQ5ohf4_0\tmotorcycle\nWGw94BtHxYE_0\tbird\nWGw94BtHxYE_1\tbird\nWG1DuTb70bQ_0\tcat\nWItuBm7azO0_0\tcat\nWKpjUNNgKG0_1\tperson\nWLZkZ-4Y9fY_0\tcow\nWN5u1Y1yGkA_0\tairplane\nWP5JXCVRe9g_0\tperson\nWP5JXCVRe9g_1\tperson\nWQ603pEp_1k_5\tairplane\nWTEO_Ywn9AI_0\tumbrella\nWTw46mBWjOw_1\tairplane\nWUvTKLEimNw_2\ttruck\nWWcVr4lbq3E_0\tperson\nWXETP4eMyD0_0\tcow\nWZWh1M3qGAc_0\ttruck\nWbXmf511q4E_0\thorse\nWb9i7jssQsY_0\tmotorcycle\nWcUFxXISmb0_1\tmotorcycle\nWcUFxXISmb0_2\tmotorcycle\nWcgQXl6I-Ks_0\tcar\nWc6RwJ_8yts_0\tperson\nWc_-Q9ba0zs_0\tairplane\nWdh2SMcRQ2M_0\thorse\nWdh2SMcRQ2M_1\thorse\nWfZR-VRmSB0_3\tboat\nWfl0LOShC_I_0\tbus\nWh9avYClECA_0\tperson\nWixZlWbnBdM_0\tperson\nWkvpcaxQTSg_0\tdog\nWlFD1z5akJc_0\tperson\nWlK6sU21od0_1\tdog\nWlP5_pcua1U_1\ttruck\nWl1vbjfAxeA_0\tdog\nWl1vbjfAxeA_1\tdog\nWmNKtcf5iLM_0\tperson\nWpxEmYBfqSU_0\telephant\nWqb84sv1P68_0\tcat\nWrClMyPxaDk_0\tperson\nWrSS3nc07hE_0\tcat\nWsFZj4Bgtwc_0\tbicycle\nWvGCvwHutAc_1\tairplane\nWvUiJ8ZRRfc_0\tbird\nWvUziN47FfY_4\thorse\nWwx2Vce-1oM_0\tcar\nWx0zNFqSUZo_0\thorse\nWx1qid26zsw_0\tdog\nWzCI6AqY7cg_0\tbus\nWzrI82-Ak4I_1\tmotorcycle\nW1juH0nZ8v0_0\tairplane\nW1yEDHYLG1Y_0\ttruck\nW14Nt0_EGQg_0\tperson\nW17CFtB5Oy4_0\ttruck\nW1-9iBLd1lg_0\tperson\nW23FACVBLgI_0\tperson\nW3Bv11o03TQ_0\tcat\nW4cKlmHvXZ4_0\tknife\nW4gR7_z77A0_0\tperson\nW4iSCn6ILJs_0\tmotorcycle\nW7xlWK7cuEI_1\tskateboard\nW8U3FkkaVbc_0\tperson\nW8d2hNOMHpQ_1\thorse\nW8yL4Qnuo4k_0\telephant\nW86rN6nrllQ_0\tperson\nW9lLrNUFQ9M_0\tperson\nW975mcNRX7c_0\tboat\nW-sCMBY47ck_0\thorse\nW_QxijO2VBw_0\tzebra\nrftE7M9tNqI_0\tperson\nrftE7M9tNqI_1\tperson\nrhWLgPl3lt8_0\tperson\nrhjcRHB4crY_1\tbicycle\nriNqBOlFCuw_3\tdog\nriVZCbT4LDE_2\tperson\nrih7ECmHfRs_1\tcat\nrkIzABhjHkA_0\tperson\nrk1ByqQSwtI_1\telephant\nrlWlgyP-3-s_1\tumbrella\nrlWlgyP-3-s_2\tumbrella\nrlWlgyP-3-s_4\tumbrella\nrlqtE0bF9nk_0\tbicycle\nrmVxFro55IQ_0\tskateboard\nrmxx9X1ytcA_0\tairplane\nrm4XeENehOU_0\tskateboard\nrn9-fIMYEkA_2\tmotorcycle\nrn9-fIMYEkA_0\tmotorcycle\nroUwF9YU21U_0\tperson\nrsne3z-CaDw_1\ttrain\nrtjlk_iOmdE_2\ttrain\nrtjlk_iOmdE_0\ttrain\nrt4Qm6HPVTY_1\tboat\nrvBm-SnbjVI_0\tcow\nrwQl_jKPcyM_0\tperson\nrww5DvtCsG4_0\thorse\nrwzjQ
STLmhk_0\tperson\nryUMZWWwJUk_0\tperson\nr0P-2rp1Hpk_1\tbus\nr0vIwhp5RLo_0\tknife\nr03Za0dP0d8_0\tperson\nr09YKBrwa8M_0\thorse\nr3PUq_cy6Mc_0\ttruck\nr3cOrAN6BI8_0\ttrain\nr3cOrAN6BI8_1\ttrain\nr7WW1Fl-s6s_5\tbus\nr7WW1Fl-s6s_4\tbus\nr7WW1Fl-s6s_6\tbus\nr7WW1Fl-s6s_7\tbus\nr7WW1Fl-s6s_1\tbus\nr7xw4qHLKIY_2\thorse\nr7xw4qHLKIY_1\thorse\nr7yOsosLuHI_0\tcow\nr8NwODfEuhI_0\tdog\nr8NwODfEuhI_2\tdog\nr9LAMeOEcsI_0\tperson\nr9jyOtbfWs8_0\tperson\nr9osF8drSbo_0\tperson\nr-Dva6GT-a0_1\tdog\nr-tFy30HVCw_0\tperson\nr-0UD9KQhvY_0\tcar\nr_sRdP_5WaM_0\tskateboard\nsByCUshWhWs_0\tdog\nsB613NHl89g_0\telephant\nsB8zpg-GrRo_0\tperson\nsD_9McrL3UQ_0\tskateboard\nsD_9McrL3UQ_1\tskateboard\nsEzZ3JnSzaM_0\tbird\nsFxTS449nUg_0\tperson\nsG0q9rphsoY_0\tcat\nsIIFHk89TT0_0\tperson\nsI17jkxX6tE_3\tskateboard\nsJyknuUaIOg_0\tskateboard\nsKCW1p03okE_0\tperson\nsKD6TBNqy6s_0\tperson\nsKD6TBNqy6s_1\tperson\nsKJ0JtWZeWw_1\tcow\nsKJ0JtWZeWw_3\tcow\nsLZh8XaxoYw_0\tperson\nsLfyo1VrX3g_3\tknife\nsLfyo1VrX3g_2\tknife\nsLnYAS4LAY8_1\tperson\nsLnYAS4LAY8_2\tperson\nsMVMaH9aWHw_0\thorse\nsNV29dtSqYs_1\tumbrella\nsOfNz788QiQ_2\thorse\nsP4jeoUjHZM_1\tmotorcycle\nsRb7OHsI6s4_0\tbird\nsV9L8gpGDmA_0\tmotorcycle\nsWbk2Sw9Rew_0\tperson\nsWfMpwviOCA_0\tcar\nsXSjs2EV61Y_2\tknife\nsXw73oA1Tq0_0\thorse\nsX5GCwZG8d8_1\tbus\nsbkHA-DWPSI_0\tperson\nscyRfbyCzJU_0\tcat\nsc15m4_lcvw_0\tperson\nsdAAObJErSA_0\tmotorcycle\nsezamC2zGqg_0\tbird\nsf76JIFYKB0_1\tcat\nsgHdQYSWPXg_0\tcar\nsgU4wTZ6k5s_1\tperson\nshXeONsfVmU_0\tperson\nshiIdcOonRs_0\tperson\nsiFucH6jjIs_0\tboat\nsiFucH6jjIs_1\tboat\nsj7NOYq8KBA_0\tperson\nskEWWsL6k9g_0\thorse\nskl1lsZUG4k_0\tperson\nsm346w9J4zA_0\tknife\nsnZjH03fjVk_1\tperson\nsoNDR07vxhQ_1\tperson\nsoNDR07vxhQ_0\tperson\nsofKbpbuX84_0\tperson\nsofKbpbuX84_1\tperson\nspVw0PNXErs_0\tdog\nsqLiQtbkEO4_0\tcow\nsqv-uPhtxwk_0\tairplane\nsq-wqsIw5hw_0\ttrain\nssspgc75B08_0\tgiraffe\nsteKGH-8MZw_0\thorse\nsteKGH-8MZw_2\thorse\nsts2vAv4BQo_0\tperson\nsuERIXWx_z0_1\tperson\nsvCBYM2zl80_0\thorse\nswuFjNkTmQY_0\tdog\nsyZTh043BkQ_0\thorse\ns0YqBVjRDyU_0\tperson\ns1Pd7evRn0U_2\tdog\ns2PyqAoOqrY_0\tcow\ns2x8llFphNY_0\telephant\ns3WiR_wFUBE_0\tcat\ns3ijyNmvxpE_0\tperson\ns4rr5OrSI4k_0\tskateboard\ns5I219neN7c_0\tperson\ns5jmkD6lkbU_0\tdog\ns5n7L55KpWE_1\tskateboard\ns7or9ZhEyXE_0\tperson\ns74eu-v6aqA_0\tperson\ns8W4NK7dWe0_0\tperson\ns83wzR7ySyM_0\tskateboard\ns9G4llLAJiU_0\tskateboard\ns9OmvmQH9hA_0\telephant\ns94ng_sG6Dg_0\tboat\ns-Jnbfjkmak_0\tskateboard\ns-Jnbfjkmak_1\tskateboard\ns-guJTrtfSU_0\tskateboard\ns-yjgHx_YWg_0\ttrain\ntAGvlfgdOsI_0\tskateboard\ntAGvlfgdOsI_2\tskateboard\ntBlPdyu-syw_0\tbird\ntBlPdyu-syw_2\tbird\ntBryhvKADFQ_0\tdog\ntGyP_SbWsVA_0\tperson\ntHA_VdGe90Y_0\tairplane\ntHA_VdGe90Y_1\tairplane\ntHcqw8Cejs8_0\tperson\ntHfOMcj62SY_0\tzebra\ntI2i9_rBdwo_1\tbird\ntI2i9_rBdwo_3\tbird\ntKpbcnqu6bY_0\tbird\ntK0pl2_wbWU_2\telephant\ntLJpuELQgxY_0\tperson\ntLa4F5ekKW0_0\tcat\ntLzUBeOwhyM_1\tbicycle\ntMojfxB-9zA_0\tperson\ntMp5Y1zucfI_1\ttrain\ntMp5Y1zucfI_0\ttrain\ntM3FYC5IVPo_0\tmotorcycle\ntNiu2o7-KPY_1\tcar\ntOK5TnF8eHQ_2\tbird\ntOL0kPV03Uw_0\ttrain\ntOlXErF8Z4o_0\thorse\ntPCRXfE_aGo_0\tbus\ntQj85vHtmeE_0\tbus\ntQnUccPTkck_1\ttruck\ntQ_Vy-9pvoQ_0\tskateboard\ntSlXTInFXss_0\tperson\ntTSVU8IU10c_0\tmotorcycle\ntUdWqmNDeY8_0\tperson\ntUm_oehvEpM_1\tperson\ntVOS6wht6oQ_1\thorse\ntV17SBx-oqE_0\tperson\ntXBDRj1c-Uc_0\tperson\ntXf9xVs5ZGk_0\ttrain\ntYKrjpIMYb0_1\tskateboard\ntYciFvRQuec_1\ttruck\ntYciFvRQuec_0\ttruck\ntY-4fAv_YRU_0\thorse\ntY-4fAv_YRU_1\thorse\nXA65Kh83GmE_0\tcow\nXA65Kh83GmE_1\tcow\nXBNPaOqVqds_0\tbird\nXBUvxtvK
WM0_0\tcat\nXByg_hQRQDM_2\tbird\nXDNVcbDkafM_2\tairplane\nXDNVcbDkafM_3\tairplane\nXDNVcbDkafM_4\tairplane\nXD0ydIAwgGM_0\tcow\nXD_iMe4m2vQ_1\tperson\nXGX6SRd3ZkE_0\tbird\nXHu9PxuBpXg_0\tairplane\nXIzQLXQTsRo_0\tcow\nXI3_0lXrnfY_0\tcow\nXJq9qp3jhq0_0\tmotorcycle\nXJq9qp3jhq0_2\tmotorcycle\nXJq9qp3jhq0_1\tmotorcycle\nXLgI0VgtzEw_0\tcow\nXL50qkg4qdA_2\telephant\nXL50qkg4qdA_0\telephant\nXMIsf8xuMh4_0\ttrain\nXPi83QmsR90_0\tcat\nXQliC40rP9M_0\tperson\nXRKZRwdqhNo_0\tbird\nXSMGAlakHWY_0\tperson\nXS5wfvz6XZI_0\tbird\nXTWeBFPqdh0_0\tperson\nXT0t6ims_FI_2\tskateboard\nXVabRVMuX4Q_0\tmotorcycle\nXVabRVMuX4Q_1\tmotorcycle\nXVabRVMuX4Q_2\tmotorcycle\nXVabRVMuX4Q_3\tmotorcycle\nXVabRVMuX4Q_4\tmotorcycle\nXYA6HKrVVQQ_0\tcow\nXZBFfRl6DkA_0\tperson\nXaVZr4HPh2M_0\tcat\nXalkAzccT5I_0\tperson\nXa6tjMVGH2I_0\tmotorcycle\nXa6tjMVGH2I_2\tmotorcycle\nXd9tLIFo_7E_0\tcow\nXeIssB-JkcU_1\tbicycle\nXeIssB-JkcU_2\tbicycle\nXevq2dskQWo_0\ttruck\nXfUIrHPVj-s_0\tcat\nXf09qM8SYBc_0\ttruck\nXgDJ16iRhxs_0\telephant\nXgDJ16iRhxs_1\telephant\nXgDJ16iRhxs_2\telephant\nXgFaXb7Vb58_0\telephant\nXgxYznR79R0_0\tdog\nXhOx4rgdI-8_0\tbird\nXhTWW9CwFzM_0\tmotorcycle\nXiSjHcHG5IU_1\tbird\nXjXFktrwSOk_0\tbear\nXkpxlUwx4oc_5\ttruck\nXkpxlUwx4oc_1\ttruck\nXkpxlUwx4oc_2\ttruck\nXkr3OHSz_CA_1\tperson\nXkr3OHSz_CA_0\tperson\nXlIxLJTiphI_1\tairplane\nXlSvIczm3JA_0\tperson\nXlcJsAWbsyA_0\tdog\nXmwv-NZZat8_0\tperson\nXm_CKSNQE3E_0\tbird\nXnfAvhHnH6M_0\ttrain\nXnfAvhHnH6M_1\ttrain\nXoWHAeOAXg0_0\tmotorcycle\nXoXMpm6Yxfs_0\tperson\nXoa_dCJDiTE_0\tmotorcycle\nXocaP_gyqJU_0\tperson\nXopbyM2SJbc_0\tbicycle\nXopbyM2SJbc_1\tbicycle\nXr_3UPISgT0_0\tskateboard\nXsK5KxttYBA_0\tperson\nXtTLGRBrm3I_0\tskateboard\nXtVTdegdzvI_0\tmotorcycle\nXu6xzBcJySk_0\tperson\nXu6xzBcJySk_1\tperson\nXvvA9Zc1TMA_0\tperson\nXvwOXlVdehA_1\tperson\nXwqm_wzZDQI_0\tcow\nXxkkXeLqqu8_2\tairplane\nXxkkXeLqqu8_0\tairplane\nXxmNQjB1D_Y_0\tcat\nXyldpxZmUN8_0\tdog\nX0CZDjRqcKg_0\thorse\nX02e7Fj9BLM_0\tumbrella\nX0-n3maCrZU_1\tdog\nX2uXOY9J_UU_0\tperson\nX3HCAEcRaW8_0\tbicycle\nX3qbUW_qT7k_2\tairplane\nX4SbOXRpo0A_1\tdog\nX7xm2nZL7jc_0\tbear\nX79vSvy6SOQ_0\tskateboard\nX9L-jwA6Ozg_1\ttrain\nX9L-jwA6Ozg_0\ttrain\nX9a5wEDFXc8_0\tboat\nX_TnIuY27eM_8\tbird\nYA4-rm-dcsw_0\tperson\nYA-N841dD-0_0\tperson\nYB1trUAUzhg_0\tperson\nYB2wzBLh7MU_0\tzebra\nYCU3daBCWsU_0\tumbrella\nYCXHNoYaQRc_3\tskateboard\nYCXHNoYaQRc_4\tskateboard\nYDd_skWNTMs_0\tskateboard\nYDyc1Yv9j_s_0\tperson\nYEPfw3k3vEw_0\tperson\nYEvBzZ5KBYY_1\thorse\nYEz7v7toUwM_0\ttruck\nYFQlAc3qTBQ_0\tmotorcycle\nYIHcQxH9e1o_0\ttrain\nYIzqB2G1UvY_0\tperson\nYI4lmC3imb4_0\thorse\nYJiqdRcs_gU_1\tperson\nYKlWROFtcxc_1\tskateboard\nYKlWROFtcxc_0\tskateboard\nYKoT-GgRSw0_0\telephant\nYKoT-GgRSw0_1\telephant\nYKrdwZe1vq8_0\tdog\nYL97h6yps6w_1\tknife\nYMbqULxZJpg_1\thorse\nYMbqULxZJpg_2\thorse\nYMkOJNatD88_0\tperson\nYNEDPsAWm5I_0\tperson\nYQXwRsP0zvE_1\tperson\nYQgUV8TrYcw_0\tperson\nYRWC7Tdc5oI_0\tperson\nYTD8j8z44qQ_0\tperson\nYTd8Rxtpt1E_0\ttrain\nYTd8Rxtpt1E_3\ttrain\nYTd8Rxtpt1E_4\ttrain\nYTd8Rxtpt1E_6\ttrain\nYTd8Rxtpt1E_7\ttrain\nYTd8Rxtpt1E_8\ttrain\nYTd8Rxtpt1E_9\ttrain\nYTzuVYGpDhA_0\tmotorcycle\nYUhgrCNuMGQ_3\tbear\nYVDCTyDcjjA_1\tcow\nYWRbi_v93Mo_0\tperson\nYWhwljQ3efA_3\ttrain\nYWhwljQ3efA_4\ttrain\nYXeaiwTZ3ZE_0\tcow\nYXz7CDJ11jY_0\tbird\nYYUo7EkkJeg_0\tbicycle\nYYUo7EkkJeg_1\tbicycle\nYZmhYkqgBi0_0\tskateboard\nYZmhYkqgBi0_1\tskateboard\nYZmhYkqgBi0_2\tskateboard\nYZ3kcrHk4N8_1\thorse\nYZ3kcrHk4N8_0\tbicycle\nYax1xdgRbt4_0\tperson\nYa2zfpe-_ro_0\tbus\nYcjMrWCSRSA_0\tperson\nYdooYDhKq00_0\tperson\nYeTYMiaLkWY_1\tcow\nYfvvO_T8j
8k_0\tskateboard\nYf9jBSXQTLo_0\tcar\nYf9jBSXQTLo_1\tcar\nYf9jBSXQTLo_2\tcar\nYf9jBSXQTLo_6\tcar\nYf-okdUBk9g_1\tbird\nYgM058nmMnQ_0\tperson\nYjZoPTjqDGw_0\tskateboard\nYj6XWsgomO0_0\tcat\nYluDona_474_2\tbus\nYmlQVVQx4SA_0\tperson\nYm3lE2u4vxE_3\tskateboard\nYm3lE2u4vxE_1\tskateboard\nYm37vW7b0U0_0\tcow\nYnZU-Qa6yeI_2\tbus\nYnyd8SBB5Wg_0\tknife\nYoFfsRgrNeY_0\tperson\nYof6XFKNuNY_2\thorse\nYorREGtes1I_0\tperson\nYo9XVrgl_GM_0\tcat\nYpDsXa1kNZU_0\ttruck\nYpb0U6Ga5pk_3\ttrain\nYpb0U6Ga5pk_1\ttrain\nYpb0U6Ga5pk_2\ttrain\nYp1kl6xU-Og_0\tperson\nYqvGb_tDI38_1\tbird\nYrhvCSxifRc_0\tcar\nYtrNZ4mlMw4_0\telephant\nYvAlZo3quqE_0\tperson\nYvwW9T4Qpek_0\tmotorcycle\nYv3YH0nImQI_3\ttruck\nYxRG0JQrpwI_0\tperson\nYxia21K4O6I_3\ttruck\nYy0lIDbLxQ8_0\telephant\nYy0lIDbLxQ8_3\telephant\nYy0lIDbLxQ8_1\telephant\nYy0lIDbLxQ8_2\telephant\nYzTl0Nf0Kpw_0\tcow\nYzT_UsE8Mhs_0\tairplane\nY0Hz5Hw1AiM_0\tperson\nY1lKSppJhdI_0\tcow\nY16c_yGYw1M_0\telephant\nY16c_yGYw1M_1\telephant\nY2jXJzRVhMI_0\tperson\nY2x6ow80IkQ_0\tperson\nY3TtBVfW6gs_0\tperson\nY3ZDfyDvFi4_0\telephant\nY3c_6Zv0dxg_1\tknife\nY3mx4jYyagQ_0\ttrain\nY5Atu2VWemQ_0\ttrain\nY5BEvakwvuM_0\tdog\nY64ky0LNHko_2\telephant\nY-YU80ccuXg_0\telephant\nZBJsNXYIQ4o_0\tperson\ntaPyucc_cOU_0\tperson\ntaPyucc_cOU_1\tperson\ntafdN9GXP0g_2\tskateboard\ntbLnjlX1xF8_2\tbird\ntbuu2U3o02Y_0\tperson\ntcOx8KjmHPo_0\tperson\ntc98WTYT-VI_0\telephant\ntdIWlg4_01E_1\tbird\ntgRYkhC-gJU_0\tperson\nthZqLw7IxVw_0\tknife\ntj2-fSeuMRI_0\tbird\ntmch--OGZhY_0\tgiraffe\ntmsInTqqzHI_0\tzebra\ntof4QiBHPQQ_0\tperson\ntowJyxwm3wE_0\tbird\nto8OyPMfkaI_0\tperson\ntpQv6Sn5z3o_0\tmotorcycle\ntpcuQY4eNaI_1\tbus\ntpeBIe69wr0_1\tbus\ntpeBIe69wr0_3\tbus\ntpwUnqxQYjo_0\ttrain\ntqy3XprB11s_1\thorse\ntqy3XprB11s_2\thorse\ntq9WP-2U1QM_0\tperson\ntsMTiOeM52E_0\tcat\ntsg-S4Hk2go_0\tperson\nttzJbLLAR34_0\tcat\ntvSJKUR21UM_0\ttrain\ntwewRZpG7Fs_0\tcow\ntwxvNeK9FZo_1\tbear\ntxDhTthoXSk_0\tmotorcycle\ntx0mtmimu0k_1\tperson\ntx2PSvwf7FU_1\tcow\ntyem40ZMKGE_0\tperson\ntygG1C5DURU_0\tperson\nty3iURJku9k_0\tperson\ntzH_tvBDeJA_0\tskateboard\ntzPForR9Ejs_1\ttrain\ntzvKjCoHBMI_0\tbird\nt0TW8zZxCWQ_0\tperson\nt1N1ijCr5NE_0\tbicycle\nt1N1ijCr5NE_1\tbicycle\nt4FZmjCINtw_0\tbus\nt4naVz1a0sg_0\ttrain\nt4zuUZQozs8_0\thorse\nt5B7vIbyRNQ_0\tperson\nt5kzdnId2sI_0\thorse\nt5s4Fs07WLM_0\tdog\nt50QLEhcZCE_0\tperson\nt6C6ukC_zEA_1\tbird\nt6C6ukC_zEA_2\tbird\nt6C6ukC_zEA_0\tbird\nt7YFOxuWxtg_0\tumbrella\nt7YFOxuWxtg_3\tumbrella\nt7s424DNznk_0\tcat\nt8MqK7LWqs8_0\tairplane\nt8mVwobdP40_0\tboat\nt_qvtoXbLRI_0\tperson\nuAWXGcWWgSU_0\tperson\nuAZF38u6SOo_0\tumbrella\nuAzws057QjE_0\tskateboard\nuA1sb8QyXuU_0\tskateboard\nuCZi19CC7rk_1\ttrain\nuCZi19CC7rk_2\ttrain\nuCZi19CC7rk_3\ttrain\nuE5rIJoAafE_0\tbird\nuE5rIJoAafE_1\tbird\nuH0jKXHq7Lw_0\thorse\nuH35b2DEXFw_1\tskateboard\nuH9vcwYxL2s_1\tperson\nuIu2jQswp94_0\tperson\nuJcu-YlAtbc_0\tbird\nuKJqU3gtIWM_0\tumbrella\nuLPuf056wH4_0\thorse\nuMAkaCYTDuc_0\ttruck\nuMYGWhLdrlc_0\tboat\nuMiNpG3NcEw_0\tperson\nuMpufBdwRn8_0\tgiraffe\nuNpHGE63PdQ_2\ttruck\nuNpHGE63PdQ_8\ttruck\nuOmCLzEMPGc_0\ttrain\nuRFXE4UfdTE_0\tcow\nuR8MqB3VgSI_0\ttruck\nuS1QmKXc0uY_0\tperson\nuTsfiR5FPdM_0\tperson\nuT9uk3mtt98_0\tbird\nuUU-VpxxSiM_0\tcow\nuVrW8Mm2xGY_0\tperson\nuWyTGtedEqU_1\tperson\nuWyTGtedEqU_0\tperson\nuarSTtaV_Ps_4\tboat\nua6Xyj9aWT4_0\tbear\nua6Xyj9aWT4_1\tbear\nua6Xyj9aWT4_2\tbear\nubHgpaAseuo_1\telephant\nubijaVodfKg_0\tperson\nubijaVodfKg_1\tperson\nubsr27_dQOk_0\telephant\nubsr27_dQOk_2\telephant\nubsr27_dQOk_3\telephant\nubsr27_dQOk_1\telephant\nucUearjcPHk_1\tairplane\nucfXE6fw3go_0\tcow\nudlyGS
CujUU_0\ttruck\nufB4EORClps_1\tknife\nufMXT_CmtK4_0\tairplane\nuhm0JnSA-kQ_0\tperson\nuiLBqX72k4k_7\tboat\nuiM-lDuYaeY_0\tperson\nujoJwRvjEdI_0\tperson\nujz4u55Tp1U_0\tcat\nul47aFS8dQE_1\tmotorcycle\nul47aFS8dQE_2\tmotorcycle\nul47aFS8dQE_3\tmotorcycle\numkNI2_0Lqc_0\tperson\numxZfostBlE_0\ttrain\num22CD4bkqo_0\tcow\nun6QDPagbfo_1\tcow\nun6QDPagbfo_0\tcow\nup6VT6l38-A_1\tskateboard\nuqn85v1WM7A_0\tmotorcycle\nurAYVS5Lz7k_0\tperson\nusAsP-m-qs4_0\tdog\nuuhWeHmlvt4_0\tperson\nuu3KluYuhc0_0\tperson\nuu3pH95cmtk_0\tperson\nuwXhzSsAIJw_0\tperson\nuw9TxuXeiP0_0\ttrain\nuxgUbys1eD8_1\tbus\nuzMFzDPfsws_0\tknife\nuzsdMqrgiL8_0\tperson\nu14Sp3wCQew_0\tcar\nu2BHvsjQGjw_0\tperson\nu25Jazd2yJM_0\tperson\nu4KPFsw5W5c_0\tmotorcycle\nu4oma0FVycA_8\tknife\nu69KRu61wXM_0\tperson\nu7xTeWelI-U_3\tknife\nu8mmwwrdNb0_4\tairplane\nu8mmwwrdNb0_5\tairplane\nu8mmwwrdNb0_9\tairplane\nu80Y4lA5xT0_0\tdog\nu85tUrDgmOQ_0\tbus\nu9HkSfjYpnA_0\tmotorcycle\nu9rfXD33UIM_0\tperson\nu9_P9HFh_NY_0\tdog\nu-_A36Ha04o_0\tcow\nu_D1eyd8AOM_0\tcar\nvAUSfFO5UI4_1\tdog\nvFMzMNDlnBs_0\tperson\nvGIYDcnNTvA_0\tknife\nvHQkxg7kPUk_0\tdog\nvH0ZiiuSQzU_2\tperson\nvH7sKynwjD4_0\tperson\nvJypzwSdyN4_0\ttrain\nvMt5AD41SKM_0\tperson\nvMt5AD41SKM_1\tperson\nvOY2IRNsjYg_1\tperson\nvOY2IRNsjYg_0\tperson\nvQ6eOB8rxUE_0\tperson\nvRjErSbQNNY_0\tperson\nvTa2zdbIyUw_0\tperson\nvT2JpCnT6rg_0\tboat\nvWqexY1OdWg_1\tskateboard\nvXbTARLug3M_0\tperson\nvYN_Gy6fUbI_0\tbus\nvYhPihwivZs_0\tperson\nvaaqJVWoSf0_0\tperson\nvadASNfLl9I_0\tdog\nvas3iNRcsK8_0\telephant\nvas3iNRcsK8_1\telephant\nvbLhfzHqEKc_2\thorse\nvbSnjtc3vIs_0\tcat\nvcALsxetYU4_0\tairplane\nvc-_aAQAXs0_0\tknife\nvdXD-HTzyFM_0\tcat\nvfeKOPKE6l8_0\tperson\nvf7NtV1T5Jc_0\ttrain\nvf7NtV1T5Jc_1\ttrain\nvjb_l1_hEXk_0\tperson\nvjojFy4rPeo_3\tcar\nvjojFy4rPeo_1\tcar\nvj_BAwFKqtQ_0\tumbrella\nvklwqjQis8Y_1\tcat\nvlPgSny76H8_0\tperson\nvlflI5iuszQ_0\tperson\nvnD3gELVAq8_0\tperson\nvnyBVn70QLY_0\tcat\nvnzsKpfAS_M_1\thorse\nvpBxBDjiJxw_1\tdog\nvvamB_-Z0so_0\thorse\nvv3gfxFz2zw_0\tperson\nvwe8ZaV-4z8_0\tbicycle\nvwtokH03eW0_0\tskateboard\nvwxzh1lJ7iw_5\tmotorcycle\nvxmdsyEpU6A_2\tbus\nvx0oKJcOQb0_0\ttrain\nvx0oKJcOQb0_3\ttrain\nvx0oKJcOQb0_4\ttrain\nvyLqolkoVIM_0\tperson\nvzBbUEwED60_0\tperson\nvzBbUEwED60_1\tperson\nvzU0GH4cZM4_0\tcow\nv0tUEeE4RGc_1\ttruck\nv0xTNbrYZY0_0\tgiraffe\nv01IvIxWXTo_0\tperson\nv1iIhTWRjg8_0\tboat\nv1-PGfS1YCY_0\tboat\nv3LIQHdveBA_0\tperson\nv4H5VwQyKEU_0\ttrain\nv4H5VwQyKEU_1\ttrain\nv4QYOX-FHhY_1\tmotorcycle\nv40pc8KBg0I_2\thorse\nv5YzVj25_hs_0\ttruck\nv5lUHsxx0mc_1\tskateboard\nv50Qa_KMCzQ_0\ttruck\nv51CdpETaug_0\tbird\nv6UDfM50GIM_1\ttruck\nv7XVyg16ens_0\tcat\nv8Kp0jhKsKk_0\tperson\nv8ceKkKdqrE_1\tknife\nv8hOOgLXRjg_0\tperson\nv8kyeMoFLqk_0\thorse\nv8rj3jIndSE_0\tdog\nv8tktR3aE38_0\tairplane\nv_yEG5_Qm8Y_0\tperson\nwCu6xsT18qo_0\tperson\nwDHRro9mXuM_0\thorse\nwDcnUJFHguE_0\thorse\nwE8LYkzcq0o_1\thorse\nwE8LYkzcq0o_0\thorse\nwGPW8I8nGmc_0\ttrain\nwGWIrs5ja0Y_0\tbicycle\nwGyJeWBe8VA_0\tumbrella\nwIapUcRvgTM_0\tbear\nwIapUcRvgTM_5\tbear\nwI0a0fzgy3w_0\thorse\nwJdfgWlSY5M_0\tperson\nwJdfgWlSY5M_1\tperson\nwK7yIg1qfZ4_0\tperson\nwLA244rmq6g_0\tcat\nwLHLSvMwmjM_0\tskateboard\nwL0z6-jkCcc_0\tdog\nwL0z6-jkCcc_3\tdog\nwL0z6-jkCcc_1\tdog\nwL9iOnWhckI_1\tskateboard\nwL9iOnWhckI_3\tskateboard\nwMShicf3N_E_0\tperson\nwMyAEfVE_u4_1\telephant\nwNKWZ43SioQ_0\tairplane\nwNKWZ43SioQ_2\tairplane\nwNWW59wDinQ_1\ttrain\nwNcjU9-ck10_0\tperson\nwODzPBxcT0A_0\tmotorcycle\nwODzPBxcT0A_2\tmotorcycle\nwOLrGAo0vFo_0\thorse\nwOSL7OPRBXM_1\tdog\nwPRCf3v0EfI_0\tmotorcycle\nwQtHgysmmFg_1\tboat\nwQvPlByUvB0_1\tknif
e\nwSSTL6uuM9Y_0\ttrain\nwSmVgAahSUw_0\tskateboard\nwSmVgAahSUw_1\tskateboard\nwSmVgAahSUw_2\tskateboard\nwTMj2Gp8wz4_1\tbird\nwTMj2Gp8wz4_0\tbird\nwTtXB0Z2eMk_0\tcar\nwV1VMLQfTYo_0\tskateboard\nwWpNKbsF6q8_0\tbear\nwa1KdARQXXg_0\ttruck\nwa3jVRzsWGo_2\ttruck\nwbmT4LB3lVQ_2\tknife\nwb9x3QDpcYA_0\tperson\nwb9x3QDpcYA_1\tperson\nwcOuc6Y3Gek_0\ttrain\nwcjnFIBHoc8_0\tbear\nwdb2-oX7HqU_0\tboat\nwdhqMpQcsjc_0\tdog\nwdhqMpQcsjc_2\tdog\nweH4PvRo2GU_1\tbear\nwgZbNzu2Mdw_0\tperson\nwguspvl5Ioo_0\tperson\nwg1ZFP15W8U_0\thorse\nwg6XS3q4Vg8_0\ttrain\nwifl75i2zGw_0\tperson\nwiiV9QdYsYM_3\tbus\nwjfHYr4lXU0_0\tcow\nwmfJAE6gu7w_0\tperson\nwmjfHsCs1CE_0\tperson\nwmn4YG9rirU_1\tbird\nwmn4YG9rirU_0\tbird\nwmx0UeWsPyU_0\tperson\nwoEUh2mzEkE_0\thorse\nwqD1WkfidVw_1\tbear\nwr5b8Op3LUM_2\tbear\nwuAwZ_wX7jk_0\tknife\nwuFVuJjgpLk_0\tairplane\nwvadJ-1Ls80_0\tperson\nwymDvXB08SM_0\tperson\nwzBmon2jJxI_2\tbird\nwzlA0qMLDV8_1\tcow\nwzlA0qMLDV8_2\tcow\nwzlA0qMLDV8_3\tcow\nwzuQhwWLllk_2\tbird\nw0JzCkELpj8_0\tcat\nw0bfVrI7CPQ_0\tbear\nw1j-YVcZpfc_0\tperson\nw2WW3bYmA7s_0\ttruck\nw247rqoLoGg_0\tbear\nw3F_8A8kY7o_3\telephant\nw3F_8A8kY7o_5\telephant\nw3F_8A8kY7o_6\telephant\nw3adXMIxupk_0\tcat\nw35-xR0Vn_0_0\tzebra\nw5Pb_ORVLKI_0\tairplane\nw6A2W9VQeZk_0\tcar\nw6JEUZI5Vh8_2\tskateboard\nw6JEUZI5Vh8_0\tskateboard\nw6JEUZI5Vh8_3\tskateboard\nw7IKxGLuaQA_0\thorse\nw7g5pDCGteg_0\tperson\nw8zrFmMpPmc_0\tmotorcycle\nw8-ovxjadNo_0\ttrain\nw93q7lv9In8_0\tperson\nw-eAEp0TUi0_0\thorse\nw-eAEp0TUi0_1\thorse\nw_euwPW5ukA_0\tbicycle\nxAUupk4sGI0_0\tperson\nxAedjC0r5KY_0\tperson\nxAfxJQL2_aY_0\tzebra\nxDgoaE-g50s_2\tbear\nxFnFWM8KXcE_0\tperson\nxFzsK94M68U_1\tperson\nxGbFeCuGypE_0\tperson\nxHOcerZTZxM_0\tperson\nxIUJ8zlr0TU_0\tbear\nxIizuktSVrM_0\ttruck\nxJ_xdRV9lzo_0\tcat\nxKd8dHsveKg_0\tperson\nxMiQuC8eKGU_0\tperson\nxMp4dCjzI08_0\tcat\nxMuQzm__4bo_1\tperson\nxMuQzm__4bo_0\tperson\nxNBT-PZEMH0_0\tbicycle\nxOLvPvBg-8U_1\thorse\nxOtxf0cmHyA_2\thorse\nxPDDIKF9T3A_0\tperson\nxRJNEyms-F8_0\ttrain\nxSIjCyHBypw_0\tumbrella\nxSIjCyHBypw_1\tumbrella\nxSL4NZUmhW4_0\tperson\nxUB3mR57tLE_0\tbicycle\nxUtGzUu5Ryc_0\tumbrella\nxU_2MZdWfxM_0\tcow\nxVuNCF2vbXs_0\tperson\nxWWnn5OWp4I_0\tairplane\nxYVriT4YV0M_0\tperson\nxZLHtt1yjYk_0\ttruck\nxZZ_W6fRi8E_0\tknife\nxbL4hiu8qh0_0\thorse\nxbQZucd8eu0_0\tbicycle\nxbQZucd8eu0_3\tbicycle\nxbQZucd8eu0_2\tbicycle\nxcY11ewiUMM_1\thorse\nxd_raY9PCHM_0\tbus\nxd_raY9PCHM_1\tbus\nxeAkz6Kg108_0\tbird\nxeBhbPbmS8w_0\tperson\nxfzxTuJ85A4_0\tairplane\nxfzxTuJ85A4_1\tairplane\nxitZyv8gMgQ_1\thorse\nxjdEiJ_z4T8_0\tmotorcycle\nxj3FKNXP-cw_0\tbird\nxkKoATbAX0w_0\tdog\nxkeTuOlBIMM_0\tcat\nxlT93OXr3uc_0\tperson\nxlT93OXr3uc_1\tperson\nxlfOatU3OyY_0\tboat\nxljqBqpwIHo_0\tperson\nxl110TqE0kQ_0\tcat\nxmWAmSXnWCY_0\tcar\nxo54E-kQcoA_1\tboat\nxpGDfRYqtSE_0\tcow\nxpcNJG8acpU_0\tdog\nxp_ShmZCoDw_2\tairplane\nxqNQIYHzAGk_0\tperson\nxrGm-1D2Zqk_1\ttrain\nxsrHSco3Zcs_0\tperson\nxsrNtKa0oZg_1\tperson\nxs1kBHxDpxU_0\ttrain\nxs1kBHxDpxU_1\ttrain\nxs1kBHxDpxU_2\ttrain\nxtHE1-GIP_w_0\tperson\nxtXt8Vm3Qps_2\tdog\nxuAm_BWnXRc_1\tmotorcycle\nxuAm_BWnXRc_0\tmotorcycle\nxucBFquWbi8_1\tbear\nxv4fy9zyuNE_0\tperson\nxv6NQvvvIhk_1\tbicycle\nxxEtEzi7YiY_0\tbus\nxxcJJA7hCQY_0\tperson\nxxdOVyEU-c4_0\tperson\nxyg1xFLohGI_0\tcow\nxyyz5QJ7wi8_0\tdog\nxzC5_r9raeY_0\tperson\nxzFcPnglQf4_0\tperson\nx0RxwpR4wIc_0\tbird\nx0RxwpR4wIc_1\tbird\nx0nlchdJVJw_0\tbear\nx0nlchdJVJw_1\tbear\nx0q0JMiiw1A_0\tcat\nx0xsHmQGaB8_0\tdog\nx1RBYEheBRQ_0\tperson\nx2MJ_zDJY3k_0\tperson\nx2Tfa1fMOyE_0\tperson\nx29EcPsdK1Q_0\tdog\nx29EcPsdK1Q_1\tdog\nx4h9pGwdSMU_0\thorse\nx4r2tx9_9wQ_1
\tperson\nx4r2tx9_9wQ_0\tperson\nx4uX_33GiJk_1\ttruck\nx48Ogx7C31g_0\tperson\nx4-I_EckNls_0\tbus\nx4-I_EckNls_1\tbus\nx4-I_EckNls_2\tbus\nx4-I_EckNls_3\tbus\nx5nImw1YH94_0\tperson\nx6sZc4EoI8o_0\tperson\nx6298plJ-7M_0\tcow\nx7jo9uCmWA0_0\tbear\nx8VC2CXIDBI_0\tperson\nx96LXIEQ3SM_1\tcow\nx96LXIEQ3SM_0\tcow\nx-2AUxPCkVM_0\tperson\nx-26Z1zy1-E_1\tperson\nx-26Z1zy1-E_2\tperson\nx-26Z1zy1-E_3\tperson\nx-26Z1zy1-E_0\tperson\nx_CImXdwsg4_0\ttruck\nx_XV2Y3pwDA_1\tbicycle\nx_XV2Y3pwDA_0\tbicycle\nyCYtcDx1zzE_0\tumbrella\nyCaJQKIGAjg_0\tmotorcycle\nyCz3VdCGZMA_0\tperson\nyDw-9GLrYj0_0\tperson\nyF0X9hui-Go_0\tperson\nyGD_BY9mQlM_0\tboat\nyIkwS9Vkq-k_0\telephant\nyJOGbyQ8qs8_0\tperson\nyJZU3h3_06M_1\tcat\nyLFd8GdaqBg_0\tperson\nyLL5Dv2F1rs_1\telephant\nyLL5Dv2F1rs_5\telephant\nyLL5Dv2F1rs_0\telephant\nyLNuhB7I5iI_1\tknife\nyLNuhB7I5iI_2\tknife\nyLkMk9nMaos_0\ttrain\nyLkMk9nMaos_7\ttrain\nyLkMk9nMaos_1\ttrain\nyLkMk9nMaos_2\ttrain\nyM9_GnJpXsM_0\tairplane\nyNnOUMUIIno_0\tbicycle\nyOrqtKYEfNs_0\ttrain\nyOrqtKYEfNs_1\ttrain\nyOrqtKYEfNs_2\ttrain\nyPscRV8ebRg_0\tperson\nyQLGypU_WiY_0\tknife\nyTZekxz2awI_4\tairplane\nyTZekxz2awI_1\tairplane\nyT-tBu_wqEo_0\tcat\nyVO-nlNYxrU_0\tperson\nyV1EsNcE3kY_0\tairplane\nyYIY-K1Hk-0_0\tcat\nyYUnGStTnHE_0\ttrain\nyYUnGStTnHE_1\ttrain\nyYr5tuCEb3w_0\tcat\nyY6S-xTKWGc_1\tperson\nyaNT5d8H3ho_0\tperson\nyahVo8Nqxks_0\tperson\nybCbkJl7tog_0\tperson\nybt9EtMfrdI_0\tperson\nydxMYuiOJAI_0\tperson\nygK39Pz1tKw_1\tmotorcycle\nyhp30idsPKU_0\tboat\nyiCMaealOnQ_0\tcow\nyiujj_fUOg8_0\tperson\nyjOTRS1-3Is_0\tcow\nyjUDTPRe-tg_1\tperson\nyjnR7dP-hxE_1\tbird\nykQnvD35jxs_0\tbus\nymoggco-rpw_1\telephant\nynHMWKjfsNk_0\tcar\nynYz6f5FCOk_0\tmotorcycle\nyoTs9WxR0mI_0\tperson\nyo3wwD8VMLA_0\tperson\nyo9gwC7gpEk_0\tboat\nypC9L5um-ic_0\tperson\nyp9kACFk9KU_0\tcar\nyqWKo_T-YsM_0\tperson\nysb6LLJ0t-c_0\tperson\nyssYMx-tQs4_0\thorse\nyu2v206waMs_0\tperson\nyvDdzmW5jGs_0\tcat\nyxURDHgvWrs_0\ttrain\nyxURDHgvWrs_7\ttrain\nyyMtxTJNnUM_0\tskateboard\nyzE2GgYffew_0\tperson\ny0HZlHGSvHk_0\thorse\ny0ptIotKNVU_1\thorse\ny0qGszhFtUc_0\tbird\ny2BOVk7bg7k_0\tcow\ny2BOVk7bg7k_1\tcow\ny2xzls--cC4_0\tperson\ny2_iaWWx-C0_1\tzebra\ny3VNGZBlDb0_0\tcat\ny3hSeUaVwAY_0\tbus\ny34cSfArQnM_0\tcat\ny6nBJ0OUtDs_0\tperson\ny6nBJ0OUtDs_2\tperson\ny67A9YHKh1U_0\tperson\ny8ib31rVZA0_0\tbicycle\ny8ib31rVZA0_1\tbicycle\ny8r2SJltJ1M_0\tdog\ny9hu6CyRi5s_0\tairplane\ny_O1AiuRLGA_0\tumbrella\ny_5uacneFuc_0\thorse\nzAvoyJ0_PSA_0\tcow\nzBtuA6r8o0M_0\tcat\nzCG95maa310_0\tperson\nzCnZg9VP1xw_0\ttruck\nzDs4lXFLJuM_1\thorse\nzD59UHvdpmY_0\tperson\nzESRFobSQMU_0\ttruck\nzESRFobSQMU_1\ttruck\nzHRsZ9HlcBk_0\tperson\nzIDehNZ1yiM_0\tperson\nzIvzY3cVVbM_0\tperson\nzI5cBWlyAMo_0\tdog\nzI5cBWlyAMo_1\tdog\nzJdOWFEL_CQ_0\tperson\nzLflV_7noSM_1\tairplane\nzMhr8GZ1QeY_1\tairplane\nzMjW-G29IRA_3\tbear\nzMjW-G29IRA_1\tbear\nzMjW-G29IRA_2\tbear\nzMjW-G29IRA_4\tbear\nzNFb--FJ2A4_0\tperson\nzNF5YxfaNTk_0\tcat\nzNfVxQPGrvM_1\telephant\nzN8rF-AchY0_1\tmotorcycle\nzN9Tz6jp7AY_0\tperson\nzOLTybhsJ5s_0\tcat\nzORNq_7nmVQ_1\tgiraffe\nzORNq_7nmVQ_0\tgiraffe\nzOoxYmqzDyc_1\tdog\nzPvrRc94j6s_0\tperson\nzP2DkEcgJFo_0\tperson\nzP8Recx-KgA_0\tboat\nzQbeiOf9ljM_0\tperson\nzU0g6JCyxAs_2\telephant\nzVVQ63dPpe4_2\tbicycle\nzWQQBElMPYI_0\tperson\nzX9OX5I2574_0\tperson\nzYvjN5ShZDI_0\tperson\nzYzASiLjHgY_0\tperson\nzZ8f7oFIg_c_0\tperson\nzbtsVe8RQqI_0\tperson\nzb8-yrB5SlI_1\tbird\nzcgArp_fmjc_5\tskateboard\nzcsREBhC1Rc_0\tdog\nzdWtCunlv1c_0\tcow\nzdqJTtHvwk4_0\tperson\nzd3rNWQ-OUQ_0\tperson\nzgJHKszSf2o_0\tperson\nzgJHKszSf2o_1\tperson\nzgRxry9FvEk_1\thorse\nzgSx8Y5FaPI_1\tk
nife\nzhDC_SqN7lQ_0\tbear\nzhNNahIXxC8_0\tbear\nzjQG5PadkFQ_0\tperson\nzj4cs0_VpTk_0\ttruck\nzkSIG3AE7tY_0\telephant\nzmDkkM7Buuo_0\tcow\nzmEU5n2Dy8Y_0\tdog\nzmdKmfMPuvA_0\tbird\nznTYxWfU2XM_0\ttruck\nzpEtPFxxD5M_0\thorse\nzqE3Jnn6_gw_0\tperson\nzqYLN7vCqcw_0\ttrain\nzqq508NRpOY_0\tperson\nztMFfJj7jb0_0\tknife\nzt3ojCKnIYM_0\tcat\nzwSnaqQ-5UU_0\tperson\nzxiZnbMo3io_0\tmotorcycle\nzxiZnbMo3io_1\tmotorcycle\nzxzApvuo8Lg_0\tperson\nzx0RzA6ts8U_0\tcow\nzyXxWBoTuww_0\tperson\nzyXxWBoTuww_1\tperson\nzyftQz018g0_0\tbus\nzy0lNSoVB0A_0\tcat\nzzRnX2EiOYU_0\tcat\nz0Tl2FDG69g_0\telephant\nz1kOi92oBDI_0\ttruck\nz1kOi92oBDI_1\ttruck\nz1qQ7Ma5C5U_1\ttruck\nz1qQ7Ma5C5U_0\ttruck\nz18s4h6yW2A_0\tbird\nz2M6XJGE1QM_0\tdog\nz2RqakqNnIM_1\tskateboard\nz29ijVd-dvc_0\tairplane\nz3rcLKwHCxM_1\ttruck\nz5-nsuFvaR8_0\tmotorcycle\nz7FTg1R3Hik_0\thorse\nz7mLqljZMP8_0\tperson\nz709zOu3tM8_0\tcar\nz9HO__A5ryw_0\tdog\nz9wpJN1R63w_0\tperson\nz-iM0zVi7a4_0\tbus\nz_CQX_gwU_o_0\tperson\nz_w1gsSfZhQ_0\tperson\n0AroA_SBRtQ_0\tperson\n0BUPQDR99KY_0\tbear\n0DDYOUzExSY_0\tperson\n0DGPzzGhUgI_0\tperson\n0DHLS1VDcnA_1\tbear\n0EeBXB53BQE_0\tairplane\n0EnI7ZqJvqI_1\tcar\n0EnI7ZqJvqI_2\tcar\n0GzrKbW6Reo_0\tperson\n0G0mSrzOZ2M_8\tbus\n0G0mSrzOZ2M_9\tbus\n0G0mSrzOZ2M_10\tbus\n0IHYTCKh8HM_0\tperson\n0KWfi9m1uZg_0\thorse\n0KWfi9m1uZg_2\thorse\n0KWfi9m1uZg_1\thorse\n0L0JFDbAEZg_0\tknife\n0Neg9vT08to_0\tcow\n0NtpuqPU3YI_0\tairplane\n0N7yCdf7DPs_0\ttruck\n0ORpOxJZo-Y_1\tbear\n0OqnKMwSULM_0\tskateboard\n0OqnKMwSULM_1\tskateboard\n0Pk8OLmmqrM_0\tmotorcycle\n0Pu-_5lNYZM_0\tbird\n0QKe3M6GiT4_0\tperson\n0Tu3KWEm4SE_0\tcow\n0Tu3KWEm4SE_1\tcow\n0TwpPpqiVQ8_0\tcow\n0U6SmZC1j40_0\tperson\n0VKozmEWjZ4_0\tperson\n0VaX_g70BaY_0\tmotorcycle\n0ZGdpgF-bGI_0\tbus\n0ZQ_-4ia7z0_0\tperson\n0c-Cwr5rI_A_0\telephant\n0c-Cwr5rI_A_1\telephant\n0fyRjxenSfY_0\tbear\n0fyRjxenSfY_1\tbear\n0f4alYlvEQw_0\tperson\n0gelRcDsNio_0\tairplane\n0ghRNQFgHow_0\tbicycle\n0gl1mPRzCqo_0\tperson\n0h9x35zsnyo_0\tbird\n0iLR3BtDujk_0\ttrain\n0iYm4g4D2wY_0\tperson\n0iv0Xw_u-sc_0\tbicycle\n0i-Nv28lRT0_0\tbicycle\n0kZSWqFOr0c_0\tperson\n0kidYsWSVvc_0\tperson\n0mbZJnNhckg_0\tperson\n0omh-B4giqI_0\tumbrella\n0owf_YERias_0\tskateboard\n0pAMIiK_RDo_0\tperson\n0pm7YRiUKTc_0\thorse\n0qVc1Whb3GA_0\tperson\n0qwRoiWnwmQ_0\tperson\n0rQzfr4WVKc_0\tcat\n0sA23Q_HQr8_2\tzebra\n0sA23Q_HQr8_1\tgiraffe\n0sA23Q_HQr8_0\tgiraffe\n0sfu67JuBFg_0\tperson\n0ss0_Sgy72g_1\tskateboard\n0tNuUAe5sNE_1\tperson\n0tNuUAe5sNE_0\tperson\n0txAuEdZYTI_0\tmotorcycle\n0uJKDzuaiys_0\ttrain\n0urYbdFc55k_0\ttrain\n0utGbb5enqA_2\tdog\n0utGbb5enqA_1\tdog\n0vQFT9tfq40_0\tperson\n0viKlMZRKdk_0\tperson\n0v7GMl2k-Sk_3\ttrain\n0yCCEL3tl24_0\telephant\n0zmzEkQWyps_0\tboat\n0zraBBQY8ew_0\tumbrella\n0zyhohOeIM4_0\ttrain\n00xcm8_ZTBc_0\tperson\n01CYScp2Yc0_1\thorse\n01mkUffAvo8_0\tperson\n02zor_ScZfo_1\tperson\n02zor_ScZfo_0\tperson\n03p9Ao9JvpY_0\ttrain\n03p9Ao9JvpY_2\ttrain\n03u5BWTYiRg_0\ttrain\n04Sh9tJvOAc_0\tairplane\n04UO1jSx2p4_0\tperson\n04gNIg-kFI8_0\tperson\n057f0LfDVoA_1\ttrain\n08Nunz5Qngc_0\tbus\n09jyC-o18uU_3\telephant\n09kq3b7cMwc_0\tcat\n1AcsNm2kiok_0\thorse\n1BfbSv9ZCu4_0\tknife\n1BfbSv9ZCu4_3\tknife\n1BiqFD2BD7Y_0\thorse\n1C3_qaiKlwo_0\ttruck\n1DHXDdSkk0s_0\tbicycle\n1DeIbpIRrAc_0\tknife\n1Dfkbv8bi9k_0\tperson\n1Dz4x50F-RQ_0\tdog\n1EYL4Mm3dfA_0\tbear\n1EiH3PTqhLE_0\tperson\n1ExRnJBXYP4_0\tknife\n1FVN3QOPlR0_0\tperson\n1FVN3QOPlR0_1\tperson\n1GJ0iwyNHIc_0\tairplane\n1JWHb6FAbmI_0\tperson\n1Knz9s55vjc_0\tcar\n1Knz9s55vjc_1\tcar\n1Knz9s55vjc_2\tcar\n1Knz9s55vjc_3\tcar\n1LmCkh8Dd-o_0\tdog\n1MmlnQKtd6g_0\tumbrella\n1M6GhIT94zE_0\
tcow\n1M6GhIT94zE_2\tcow\n1NThnoBEkmc_0\tperson\n1ONRbj8GKJ4_1\tbear\n1ONRbj8GKJ4_2\tbear\n1ONRbj8GKJ4_8\tbear\n1ONRbj8GKJ4_10\tbear\n1ONptqLyHxQ_0\tdog\n1OSa1ptYmzE_0\ttrain\n1OSa1ptYmzE_1\ttrain\n1Ob23hwFaDg_0\tmotorcycle\n1PSIOY62FBg_1\tbear\n1Pe9JpKgjGY_0\tcar\n1P8yUGru9R4_0\tknife\n1RCZCLIZzc4_0\tboat\n1RGxleB_Ezk_0\tperson\n1RKOWfpa5Dc_0\tknife\n1RuPxpqNjBI_0\thorse\n1Tpmsev8onw_0\tcat\n1TsLUvJiluI_1\tperson\n1TsLUvJiluI_0\tperson\n1UhZKsDTuQs_2\tboat\n1V-7ErZ83ZY_0\tbus\n1ZN9xVmQojU_0\tumbrella\n1ZbSl9tPtbA_0\tbird\n1Z7CVnRjVT0_0\tperson\n1as5iG4PPas_0\tbus\n1bFvYEA0U3U_1\telephant\n1bveGPhOKuU_0\tcow\n1cKjzUG0YCQ_0\tbicycle\n1ceprZO-VEU_2\ttrain\n1ecpkwMLabI_0\tperson\n1fOM-kkuRsw_0\tcar\n1ggOn5NDRco_0\tcat\n1hUe5E9cjiU_0\tmotorcycle\n1iQKKup2m3I_0\ttruck\n1iQKKup2m3I_1\ttruck\n1iSjb4IlqfU_0\tperson\n1i7lugA55RU_0\tbicycle\n1i7lugA55RU_1\tbicycle\n1kZMlCvKoe8_0\tskateboard\n1kZMlCvKoe8_1\tskateboard\n1kZMlCvKoe8_2\tskateboard\n1ksBabVqkMY_0\tcar\n1ltK_3kkqfg_4\telephant\n1l7LOpfDmXY_0\tperson\n1ohoCoKJLDU_0\tmotorcycle\n1oyjAtaWDZA_0\ttruck\n1sQ3EL13Vqo_0\tperson\n1tK31PAVNJM_5\telephant\n1tK31PAVNJM_0\telephant\n1tK31PAVNJM_2\telephant\n1tK31PAVNJM_3\telephant\n1v2enBiUcqA_0\tbus\n1wIGd0H1CUo_0\tperson\n1xSI36nguW0_0\tbear\n1xs-ibIaMMU_0\tperson\n1xyKgJUu0lM_0\tskateboard\n1zVWBQWZxV0_0\tperson\n1zVWBQWZxV0_1\tperson\n1zqpqKWhr1Y_0\tperson\n10la9pvd-pk_0\tknife\n11kfBYxzlFA_0\tperson\n12f1R5wMVPs_0\tperson\n12_S_8HkAvA_0\tperson\n1462k8mwVB0_0\telephant\n15Lx-nGngUo_0\tskateboard\n18WxVaz5Ue4_1\tskateboard\n19A2XM5NIWs_0\tperson\n19UmUpkjRbs_0\tperson\n19oZ30mOTkU_0\tboat\n1-p8vd0PFQ4_0\tdog\n1_6ymF7z_iM_0\ttruck\n2ASHEEgYHcU_0\tcat\n2CF0oQ38cBQ_0\tmotorcycle\n2DM1oM4HFjI_0\tmotorcycle\n2FXE_xO8Mb4_0\tbus\n2FvnQne8he8_0\ttrain\n2GTexq12sBY_0\tperson\n2GTtMvLQqio_4\ttruck\n2GZphW1DkS4_0\tperson\n2HvVFwq85n0_0\tperson\n2Hwu-YpHKw0_0\telephant\n2H8AZ00ONQE_0\telephant\n2IJ4H46ZxEE_0\tperson\n2INYBScuPM8_0\tcar\n2IqEaQ0oyQg_0\tairplane\n2JN_uMTDa9I_0\tskateboard\n2KWlj_ZAw94_0\thorse\n2KWlj_ZAw94_1\thorse\n2KWlj_ZAw94_2\thorse\n2K2gLrhP9AU_1\tairplane\n2K2gLrhP9AU_2\tairplane\n2K6iDBPdcHk_0\tmotorcycle\n2LBHZoJ5skk_0\tperson\n2L3uwdhZtV0_0\tcar\n2MJHsLxKUBg_0\tperson\n2MiqTBWBlEc_0\tumbrella\n2NjC1r6v4IQ_0\tperson\n2O-2zfQxbnA_0\tperson\n2PaTs4s2Ybw_1\tbear\n2PaTs4s2Ybw_7\tbear\n2PaTs4s2Ybw_4\tbear\n2Pa1anwpeKE_0\tperson\n2Q3_TaV8vcg_0\tdog\n2Rc-oAwMJBs_0\thorse\n2Tp0YJi7JwQ_0\tgiraffe\n2UpHhiQWzD4_0\ttruck\n2VZlkg5HjME_0\tcow\n2WTwzNufol8_0\tdog\n2WTwzNufol8_1\tdog\n2WtNxQ0RBfc_0\tperson\n2ZXlS-GRWAw_0\tknife\n2Z6wSOr0jLI_1\tperson\n2a5TUccpQ08_0\tdog\n2a_-AyOXTXg_0\tskateboard\n2cFRz-musVA_0\tairplane\n2cFRz-musVA_1\tairplane\n2cFRz-musVA_2\tairplane\n2cFRz-musVA_3\tairplane\n2dZFWL9XGmw_0\tcow\n2fCH7TpvtlM_0\ttrain\n2fCH7TpvtlM_1\ttrain\n2fJ1hPXpiQc_3\tknife\n2fJ1hPXpiQc_0\tknife\n2gGuKs-4t94_0\tboat\n2i45n6p8AT8_0\tperson\n2i_wjgk6DiA_0\thorse\n2lK0mmHTvB8_3\ttrain\n2lK0mmHTvB8_1\ttrain\n2lqlNq6aII0_0\tskateboard\n2lxPwFW5YQo_0\tumbrella\n2l2gnrYWuWQ_0\ttruck\n2l7MPXzF64M_0\tcat\n2l7TuAfDgO8_0\ttruck\n2mO7-ybapaQ_1\tumbrella\n2nqGkC9ebf8_0\tboat\n2oA7J6HSmt8_6\tbicycle\n2oA7J6HSmt8_9\tbicycle\n2tSpb14o7SA_0\tperson\n2vF8Va9DGSM_5\tbicycle\n2vF8Va9DGSM_4\tbicycle\n2vF8Va9DGSM_14\tbicycle\n2vF8Va9DGSM_15\tbicycle\n2vF8Va9DGSM_2\tbicycle\n2vrbssf2sDM_0\ttruck\n2v808Hn8_do_0\tperson\n2v808Hn8_do_1\tperson\n2yEUVUqYMPc_0\tgiraffe\n2ya3SN5pLyU_0\tcar\n2065vf90oIM_0\tperson\n2065vf90oIM_1\tperson\n21GQbN_4k9M_0\tcow\n21Hp5g5RrOc_1\tperson\n21Hp5g5RrOc_0\tperson\n22iFltXYCcQ_0\tcow\n22ztSt
Wwd8g_0\ttrain\n22ztStWwd8g_2\ttrain\n22ztStWwd8g_3\ttrain\n23qU2q5u0OE_6\tbird\n24Zxq5TuxzI_0\tcow\n26kWe8Ikgxk_0\tbird\n28AecePdVok_0\ttruck\n281z-ZLrI3g_7\tbicycle\n281z-ZLrI3g_4\tbicycle\n29bWSLuiEl0_1\tperson\n2_R2wz82ugQ_0\tumbrella\n3A4oCDgMkHw_0\tcow\n3A-dEIjnmyE_1\tskateboard\n3Bag9o-z-Ks_4\tbear\n3DN2iQJzM-k_0\ttrain\n3DaASBRARLQ_0\tcow\n3D8wwibqkYo_0\tcow\n3EtIKWgGaKY_0\tperson\n3FJ4ZWRq_S0_0\tperson\n3GLXlSuXWcs_1\tcow\n3GQxmRKhMMY_1\tairplane\n3GQxmRKhMMY_2\tairplane\n3GQxmRKhMMY_3\tairplane\n3GQxmRKhMMY_4\tairplane\n3GULyU-IOhA_0\tperson\n3HFqP9a97kA_0\tbird\n3IgOwKkKALw_0\tcat\n3LruhG4SULI_1\ttruck\n3LruhG4SULI_2\ttruck\n3LruhG4SULI_7\ttruck\n3LxUuC1C4y8_0\tbird\n3L7LWpMShiw_0\tskateboard\n3L759GhRx6M_0\tperson\n3MiM8HSul5A_0\tcow\n3MiM8HSul5A_2\tcow\n3MiM8HSul5A_4\tcow\n3M9T5RFr_9s_0\tperson\n3OmdALGspY8_0\tperson\n3O4ynxtRIDk_5\ttrain\n3O4ynxtRIDk_2\ttrain\n3RLrjX-XB98_0\tperson\n3RhgYReCxjo_0\tbus\n3S-lQgiUWVU_1\thorse\n3S-lQgiUWVU_0\thorse\n3UDEQElT2yQ_0\ttrain\n3WhmVhG1ZwU_0\tboat\n3WrB7zPpcHU_0\tcow\n3XDvXaNmGpM_0\tdog\n3XDvXaNmGpM_1\tdog\n3X29L9uQCqc_0\ttrain\n3X29L9uQCqc_1\ttrain\n3Y7-acGE4Wc_0\tperson\n3ZBYYBUfT6E_0\ttrain\n3Zwa4XoeZcA_0\tperson\n3bSWlbx1o3I_2\tbear\n3cOMDXFxcOQ_0\tcat\n3dvUlr2yxz4_0\ttrain\n3g4c88ocJ38_0\tskateboard\n3hMszgfh_qA_0\tbicycle\n3hR78-EVNEE_0\ttruck\n3jdK8UPhpO8_1\tskateboard\n3jdK8UPhpO8_0\tskateboard\n3kdpeeQ1Jnc_0\tcar\n3kd_QEZRUWc_1\ttruck\n3kd_QEZRUWc_5\ttruck\n3lHqsoi5cgo_0\tperson\n3liK-2EflUk_0\tcar\n3mIRDwcY1Lg_1\tperson\n3m5eMVv4z6w_1\tbear\n3nD6nhJtxIU_1\tskateboard\n3nbim5nlANI_1\thorse\n3q6LFZBelUs_0\tperson\n3rSUjqH5Wlw_0\ttruck\n3sEpU7UoQP8_0\tperson\n3sg9txiHCp0_0\tbear\n3szPqA1S6P0_0\tperson\n3tv_dUR84cE_1\tairplane\n3tv_dUR84cE_0\tairplane\n3uG4S1gvMxs_0\tbird\n3uVS_DAYfvY_3\tcar\n3vuykX663QA_0\tperson\n3wI_ureHDBY_0\ttrain\n3xLvnY9w5y0_0\tperson\n3xy8Fz8Nsgk_0\tbear\n3zV0wmpiS78_0\tperson\n3zccg30U6vs_0\tperson\n30AwDyYIr7o_0\tskateboard\n325FEWXtOYw_0\tperson\n3293hM-lzx8_0\tperson\n32_1y90B5eQ_0\tperson\n34L4iiCFTXM_0\tairplane\n34Pma_R21A8_2\tperson\n34jFMRay1zg_0\tperson\n35-MplWeZYQ_0\tmotorcycle\n36zopo-HS48_0\tperson\n38fx_nvlYDE_0\ttruck\n39yxd86tGLU_1\tboat\n3-ugxoEDuFY_0\tperson\n3_DeqcBRuwE_1\telephant\n3_DeqcBRuwE_3\telephant\n3_w3NNPGotM_0\tperson\n4ARhlapmEmI_0\tdog\n4Ac5edN3qIA_0\telephant\n4Ac5edN3qIA_1\telephant\n4BItGVIP3_w_0\tcow\n4BItGVIP3_w_1\tcow\n4BO3P7E3NDE_0\ttruck\n4BO3P7E3NDE_1\ttruck\n4BO3P7E3NDE_2\ttruck\n4Bw4gKDBQCM_1\tdog\n4C8rmAORSg8_0\tperson\n4Dcg1W7RRmQ_1\ttrain\n4ENxW7OPynQ_1\tcar\n4ExA1FWRfMM_0\tdog\n4FVfzA07rVs_0\tperson\n4FVfzA07rVs_1\tperson\n4GgzQqhrTmA_0\ttrain\n4GrMZIyjUdo_0\tperson\n4IUjw1DfTd4_0\tcow\n4ItJTYAUV3Q_0\tcat\n4IxmhmTsSRM_0\tperson\n4I72WJJrc1o_0\tperson\n4I72WJJrc1o_1\tperson\n4KFEzxXCjmw_0\tcar\n4KYtNfb0-64_0\tperson\n4KqP6ylUZpI_0\tumbrella\n4LHOLAPnjV8_0\tboat\n4LXlXP1epJE_0\tperson\n4MFPOb36tfo_2\tbear\n4MFPOb36tfo_1\tbear\n4MZrjdSF01s_1\tboat\n4Me3lyNuZ7k_0\tperson\n4M9sKAzevzo_0\ttrain\n4NI5ycFo2TA_0\tairplane\n4NI5ycFo2TA_1\tairplane\n4NKnUR1OMGo_0\thorse\n4NKnUR1OMGo_1\thorse\n4Ng6OxFQ9RY_3\tbear\n4Nx45ho9gSg_0\tperson\n4PNJ3ZV4f8E_0\tairplane\n4PNJ3ZV4f8E_1\tairplane\n4PNvdZPZIdM_0\ttrain\n4PhakAK74GE_1\tmotorcycle\n4PxLGSy75rk_2\tknife\n4QOhfEMrhzU_0\tairplane\n4Q0M6mWNDiU_0\thorse\n4RhaYtFsnGY_0\tperson\n4SrP2aSHoRk_0\tperson\n4TyWpb19rk4_0\tumbrella\n4U9sm_eqKTM_1\tcar\n4U9sm_eqKTM_2\tcar\n4Xd_k2REw4I_3\tbear\n4YRd-9lHLko_0\ttruck\n4ZIgGDQB_R0_0\tairplane\n4ZYWcd-Fdzg_0\tperson\n4Zxsg6aJ9tA_0\tperson\n4aOWHpM7rOM_0\tskateboard\n4avaoLry8L0_2\tskateboa
rd\n4bHGieqZfUk_1\tknife\n4duFrAfYG8k_0\tperson\n4d6P5umc9j0_0\tbird\n4fIznTWAFRw_0\thorse\n4fIznTWAFRw_1\thorse\n4fIznTWAFRw_2\thorse\n4f_X4WbQu4M_0\telephant\n4hCLCX2lLGk_0\tperson\n4iBMfS5mIt8_0\tbird\n4ibKNzoA1tQ_0\ttruck\n4igLFns238c_0\tmotorcycle\n4kGNxHIXcUA_0\tperson\n4kLhVZ9UGDE_0\tskateboard\n4lC7BU1eHxc_0\tbus\n4l683stlRno_0\tknife\n4mv1Nx0j3k4_0\tperson\n4nz8CN4XlBE_0\tdog\n4oWXZIsPnEg_4\telephant\n4ofuHARhFlQ_0\tperson\n4pYH5Cm7Vkg_1\tboat\n4p3JGxvfiNE_4\tbicycle\n4p3JGxvfiNE_8\tbicycle\n4p3JGxvfiNE_10\tbicycle\n4qBYTh0AcfM_0\ttrain\n4qIx-9Qs3Zs_0\tairplane\n4qIx-9Qs3Zs_2\tairplane\n4qRkIra0ARM_0\tperson\n4rhkfDV0QC8_1\ttruck\n4ry_MJjFDUA_0\tcat\n4skAfQd8nX8_0\tperson\n4t79zNxVi0Y_0\telephant\n4t79zNxVi0Y_1\telephant\n4uFHcf-qpkU_0\thorse\n4uwly-P5oxg_0\tperson\n4uwly-P5oxg_1\tperson\n4u7pm-h8fiE_0\tperson\n4wox28JkSKY_1\tperson\n4w3ykGq-Q_E_0\tbicycle\n4w3ykGq-Q_E_2\tbicycle\n4w5q5RdJ5g4_0\thorse\n4w5q5RdJ5g4_2\thorse\n4w5q5RdJ5g4_4\thorse\n4x80RbpjCPM_0\tbear\n4x80RbpjCPM_4\tbear\n4yFIyyevEVY_1\tairplane\n4ycylGSteiU_0\ttruck\n4yjvwunpMKI_0\tcar\n4yjvwunpMKI_1\tcar\n4yjvwunpMKI_2\tcar\n4yw2hFyx47Q_0\tperson\n4y3qJAq5ap0_0\tcar\n40QgDL4dxrc_0\tairplane\n40deMboVqPI_1\tbird\n44FNsfkuWOI_0\telephant\n44hlNbUHL2c_0\tperson\n44672wUoOwM_0\tperson\n46NXMVbpzZw_1\tboat\n468w3XkLHwc_1\tboat\n47Nn3ywWOlU_1\tperson\n47cBD-Sq9mw_1\tperson\n48ujtCaCdX0_0\tperson\n49CwzbRIUpI_1\tbird\n49a6EgDu-ZU_0\ttruck\n4-GpBan9Z8s_0\thorse\n4_A8f6NAa3w_0\tperson\n5BHekdOG9JA_0\telephant\n5Bw22C4nsb4_0\ttrain\n5CPZUe4hn0M_0\tairplane\n5DS23LkFit8_0\tcow\n5DVU9wTDzN8_0\tskateboard\n5DjSsYt5N4Q_0\tskateboard\n5FAbvaslTQE_0\tmotorcycle\n5FXOzzaKrcw_0\tairplane\n5Fro7Bo628Y_0\tboat\n5FxLl3jd7I0_0\tskateboard\n5F5fgLUXow8_3\tcar\n5F5fgLUXow8_7\tcar\n5F5fgLUXow8_8\tcar\n5F5fgLUXow8_0\tcar\n5F5fgLUXow8_1\tcar\n5F5fgLUXow8_2\tcar\n5F5fgLUXow8_4\tcar\n5GMISyAZA9o_0\thorse\n5GpziDmwRTc_0\tcow\n5JPqrGj3CgM_0\tgiraffe\n5Ko6ZHOz4IY_0\tperson\n5Lbguv7FGLM_1\tbird\n5M7Wx_HJ_XQ_0\tperson\n5Nz4g-YykuI_0\tperson\n5O41yfenxMM_1\tcow\n5PeDI6XI7is_3\thorse\n5Qd986abGHo_0\tperson\n5Tza7UHp3xE_0\ttrain\n5WTw98UVUCo_1\thorse\n5WpjuP9uJrI_2\tbird\n5W8Hg8uhxgQ_0\tcar\n5W8Hg8uhxgQ_1\tcar\n5XEAIdyb_ng_0\tperson\n5XcopMzRch4_0\tskateboard\n5YbA5Uw-5xQ_0\tperson\n5YbA5Uw-5xQ_1\tperson\n5bIO0Gl25u0_1\tboat\n5bIO0Gl25u0_0\tboat\n5dGbxAkTDPM_1\tcow\n5dRnssv_jug_0\tcow\n5eRQh3Rv1Lk_0\thorse\n5eak0nLYZC0_0\tairplane\n5enKNMe1Dpg_0\tperson\n5eq6WBGMyME_0\tgiraffe\n5eum6r7kxbw_1\tgiraffe\n5eum6r7kxbw_4\tgiraffe\n5e84K5OEIj4_0\tperson\n5fXoyIBk_gI_0\tperson\n5gNgZQ0nDW8_4\tknife\n5gNgZQ0nDW8_5\tknife\n5gNhZJMFmis_0\tbear\n5gNhZJMFmis_1\tbear\n5gbLo2hItTs_0\tperson\n5geZjQ9qAJU_0\tmotorcycle\n5iDhgUX1kdc_0\tperson\n5iwoWJK4GGo_0\tcar\n5ll8fjNhIzg_0\tperson\n5lv2GCs3_E0_0\tperson\n5l9rlcuS7pE_0\tbus\n5mocfP3c3JE_0\tbear\n5mqvNWXtMCU_0\tcat\n5nAuDbKmWLY_0\telephant\n5nC2ZXfE-sg_0\ttrain\n5nkh3PK6lBs_0\tcow\n5of5t38DQL4_0\tcow\n5okxoIw3cJI_0\tskateboard\n5ovlgihl130_0\tknife\n5phhj08_8hI_0\tdog\n5psIBlFu-yQ_0\tperson\n5rh7nf5z_O0_1\tcow\n5rkM4mLsQoU_0\tknife\n5sIj93XnVc0_1\tmotorcycle\n5sjUnvABkko_0\tairplane\n5s4kqURLLo4_0\tperson\n5toRpAYrY_4_0\tperson\n5uYObEyAbCQ_0\thorse\n5ukcjpXOopg_0\tperson\n5vPXxAEGTrw_0\tairplane\n5vUtusnPXXs_0\tbird\n5vaBUAh4HkU_0\tairplane\n5yMeqHPiJgY_1\thorse\n5yMeqHPiJgY_2\thorse\n5yMeqHPiJgY_3\thorse\n5yeSANffSRk_0\tperson\n5yeSANffSRk_1\tperson\n5zJuhMtO1F8_0\tbird\n5zKtWxffw-0_0\tboat\n51rDJW0FO8w_0\thorse\n51yQTVmaMXw_1\tmotorcycle\n52UjkVxSSHg_0\tperson\n52VFNDCXUHg_0\tperson\n52pNzl4wrxs_0\tperson\n52wdqvY
rGv4_0\tperson\n522wkm19sH0_0\tbus\n54icMYqqx_w_1\tbus\n55H1IVgQj3E_0\tboat\n56BI7lH0z1g_0\tperson\n56bgv0J-cXw_1\tknife\n56bgv0J-cXw_4\tknife\n56r2wDCnuQQ_0\thorse\n57BY7QjcYbQ_0\tperson\n574FA_5qp-s_0\tbus\n58K_ZPS7U8M_0\tperson\n58gdyHWU6do_1\ttruck\n5802XdQdAkU_0\tcow\n59JJGcB2jRE_0\thorse\n59JJGcB2jRE_4\thorse\n59JJGcB2jRE_2\thorse\n59cXOQc39JI_1\tzebra\n5928Zhy26yI_1\tgiraffe\n5-Oeo8tmauc_0\tbus\n5-Oeo8tmauc_1\tbus\n5-Oeo8tmauc_2\tbus\n5-O2xma48Tw_0\tbird\n5-y_Rrr8shw_2\tperson\n5_njhyGAXdE_0\ttruck\n5_njhyGAXdE_1\ttruck\n5_njhyGAXdE_2\ttruck\n5_2sGSrZblY_0\tperson\n6AD9GHHEVkE_1\tboat\n6AYkCla5Oak_0\tcar\n6A2LC4_gts4_0\tperson\n6A2LC4_gts4_1\tperson\n6BB65BA-pS0_1\tknife\n6CKS3WJRpHI_0\tperson\n6C1C-L7L6CE_0\tperson\n6DQ-H73b62Y_0\tperson\n6EHcwJiML3g_2\tperson\n6GlBa-DUEqc_0\tperson\n6HlTwF1ZDkc_0\tperson\n6HrWOx9GfzI_0\tperson\n6JrhpITR8po_1\tcow\n6JrhpITR8po_0\tcow\n6KpKxtwB1Ww_0\tperson\n6LiW0KF3fME_0\tperson\n6Meaw8zK8sU_0\tperson\n6M3wDWZDZJ8_0\tcar\n6M4oJG9NsRM_0\tperson\n6Nc1z3BVzlI_0\tbear\n6OlxDr5vZuI_2\thorse\n6Ona04rOyZk_0\tcat\n6PBKPTCkWOo_0\tperson\n6PH-mFChsi0_0\tairplane\n6PwE6q6pebc_1\tperson\n6QFs4uNsSt4_0\tperson\n6RIFox7kLqY_0\tcat\n6SBj14dkVPM_0\tcow\n6SdX0oE9Qm8_0\tcat\n6SizSdOT9_k_0\thorse\n6TEQ098RfzE_0\tcow\n6TQ8X9G4BAY_0\tdog\n6UQbOOWv_ws_0\tcow\n6UQbOOWv_ws_2\tcow\n6XUe2u2YWkQ_2\tumbrella\n6bJPo4tzJvQ_0\tperson\n6bco275PcUs_0\ttruck\n6bco275PcUs_1\ttruck\n6gwBOlfJ34I_1\tskateboard\n6gww5ltOLQY_0\tbird\n6gww5ltOLQY_1\tbird\n6hAG7632JjA_0\tcat\n6htKDjHsXPQ_0\tcow\n6id5A0aiJbE_0\ttrain\n6jwTUZocHXY_0\thorse\n6j07-PcNv70_0\ttruck\n6kjb3q8EygI_0\telephant\n6lAxaY4AYB8_0\tperson\n6lPPfWdeBvU_0\tcat\n6l3SpVgqJY0_0\tperson\n6mYi-vXre4Q_0\ttruck\n6med3JZ2k40_0\tperson\n6miVJWDTBCY_1\ttrain\n6n6fVeWD_m0_0\tknife\n6o61j0KZ9cA_0\tperson\n6pPjKIlVlfY_0\tbicycle\n6pnenPlFGIc_0\tmotorcycle\n6pnenPlFGIc_1\tmotorcycle\n6pny8Td3Lvs_0\thorse\n6qRIuIHqJco_0\ttrain\n6qSDUh2ES7Q_0\tperson\n6qVpY1VC2hU_1\tcat\n6qhp1FiVbBQ_0\tknife\n6rlBtCRp25g_0\tcat\n6r0rYZCL4Qc_0\tperson\n6r0rYZCL4Qc_1\tperson\n6uMmknjq0mg_0\tbicycle\n6uSZqFsKMGI_0\tcow\n6um2PoiKfT4_0\tmotorcycle\n6vAGEaKFuyY_1\tbus\n6vAGEaKFuyY_2\tbus\n6vafM_LKdhA_0\tumbrella\n6vc8u4MPWkY_0\tbird\n6v_NKAM10sA_5\tbicycle\n6v_NKAM10sA_9\tbicycle\n6v_NKAM10sA_10\tbicycle\n6v_NKAM10sA_11\tbicycle\n6v_NKAM10sA_12\tbicycle\n6v_NKAM10sA_0\tbicycle\n6v_NKAM10sA_1\tbicycle\n6w-nwNFVYm8_0\tmotorcycle\n6y78kiGuIAk_0\tperson\n6zPET0HFVaM_3\ttrain\n6zPgsocp4bY_1\tbicycle\n6zPgsocp4bY_2\tbicycle\n6zPgsocp4bY_3\tbicycle\n6zPgsocp4bY_7\tbicycle\n6zPgsocp4bY_9\tbicycle\n6zW1omjPFRs_0\telephant\n6zW1omjPFRs_1\telephant\n62MEsd3U1aQ_0\tperson\n62PpG0cOcbU_0\tperson\n63vKOQ-SCBw_0\tairplane\n63_kFJCm2pQ_0\tperson\n64yGcACuF0g_0\tcat\n64yZxDGH92I_0\tperson\n64-njkqyF7k_0\tbus\n65u4BXZ10RY_0\tdog\n65u4BXZ10RY_1\tdog\n654ylXfWndU_0\tboat\n66HPgc7Up3o_6\thorse\n66HPgc7Up3o_3\thorse\n66HPgc7Up3o_4\thorse\n66HPgc7Up3o_7\thorse\n66N_Ju8hg2U_0\tknife\n665JKK-JrTc_0\tperson\n67kix34dj7A_0\ttruck\n67wgEifQYpg_0\tperson\n68KnEa1hVf8_0\tbicycle\n6-Z9S0qy8ys_1\tdog\n6-7x1BQGuQE_0\tperson\n6_nq4o_21CY_0\telephant\n7BBHz6wfABM_0\tperson\n7CYm8WQftfw_0\tbus\n7DIXCjEBWLw_0\tairplane\n7D-ypPzaTDI_0\tperson\n7GvsFRhnxWc_1\tbird\n7G2sXxpbA-0_0\tmotorcycle\n7HXox1j1X2A_0\tperson\n7Hthj7LhsoI_1\telephant\n7H1AhHiyip0_0\tperson\n7JXhfaNTsUQ_2\tbird\n7K61aiu3UsM_0\tperson\n7K61aiu3UsM_1\tperson\n7LKG4ReUlZA_0\tperson\n7LTKFUY3Xo8_0\tbird\n7MQZWaHzUOo_0\tcow\n7Mb_dcvNENM_7\tbicycle\n7Mb_dcvNENM_3\tbicycle\n7Mb_dcvNENM_4\tbicycle\n7Mb_dcvNENM_5\tbicycle\n7Mb_dcvNENM_6\tbi
cycle\n7NDhXBp57BY_0\tperson\n7NFMDZwqdw4_0\tperson\n7Ng49Wed4Y4_0\tcow\n7Ng49Wed4Y4_2\tcow\n7NxvW5DSQrI_0\tcat\n7O8grUKQopY_0\tperson\n7PeZgsBNi5g_0\tcar\n7QauV6mvt98_0\tcar\n7RxzfGFIxSg_0\tcat\n7Strg7qJtW0_0\telephant\n7Strg7qJtW0_7\telephant\n7Strg7qJtW0_1\telephant\n7Strg7qJtW0_2\telephant\n7Strg7qJtW0_3\telephant\n7VQ8QZRnxD8_0\tcow\n7Vcfkjk--Fc_1\tdog\n7V5Q7Te4KNI_0\tbus\n7WZRhdW3Ysw_0\telephant\n7XQ-ufhX7gc_0\tcow\n7XQ-ufhX7gc_1\tcow\n7YCox5adS-U_0\tperson\n7YQM-nFSHW4_0\tknife\n7Ya_jh9VO9U_0\tperson\n7aTla4KAK_U_1\tknife\n7bqlApH5GwI_1\tbicycle\n7dFEYp-1Hgo_0\tperson\n7e8WNmzDHUQ_0\tperson\n7fF7heSCMTw_0\tmotorcycle\n7fRxyCT-Wao_0\tgiraffe\n7fRxyCT-Wao_2\tgiraffe\n7fSMUG5W8vk_2\tbicycle\n7g8SI9aAn70_1\tumbrella\n7hIJP5KExbE_1\telephant\n7hjOcuaQm7I_0\telephant\n7kPsaqRQBCk_0\tknife\n7kl1hNW3aVs_0\tmotorcycle\n7k7H9RKhOF8_1\tskateboard\n7k7H9RKhOF8_3\tskateboard\n7ledBa3nuVs_0\ttrain\n7ledBa3nuVs_2\ttrain\n7m98zjjFHbU_0\tperson\n7ntsSm-LFZA_0\tperson\n7ntsSm-LFZA_1\tperson\n7nzY38tPTM0_0\tperson\n7nzY38tPTM0_1\tperson\n7n8C_td0Th8_0\thorse\n7p4RxRFB_Eg_0\thorse\n7rE5dIroJwQ_0\tperson\n7rifGM-TuPA_0\thorse\n7trl2U6nLPc_0\thorse\n7vyHv7_GxbQ_0\tperson\n7wte1pPBwQ0_1\tbear\n7w616uMnI_8_0\telephant\n7w616uMnI_8_1\telephant\n7x8K4JervhE_0\tbus\n7y0joj813H0_3\tbus\n7zRaB-2B7B0_0\ttrain\n72RzEHZFYtM_2\tairplane\n72RzEHZFYtM_1\tairplane\n73Wonc3xnLI_0\tperson\n73Z4KnnAMlU_0\tperson\n74gRlu6vJLY_0\tperson\n747bRdBUPSw_0\tperson\n76LU6w1a7UA_1\tairplane\n76PIBEC3WVo_0\tskateboard\n77GychcVDRI_0\tperson\n77dvi_3OU4M_0\tperson\n79MY0qku9uc_1\thorse\n8AgZqrCi9no_0\thorse\n8BK44tI3ACo_0\tperson\n8BQJVHpHFsU_1\tdog\n8BQJVHpHFsU_2\tdog\n8B3bbakza_Q_0\tperson\n8CJRCoA1Rps_0\tperson\n8ClOgfNAjXs_0\tgiraffe\n8DlXcc1IXlw_0\tcar\n8EwDzFi34nA_0\tcow\n8FEp5ORJ27g_0\ttruck\n8FyuS809d24_0\tdog\n8FyuS809d24_1\tdog\n8GGi0BXLCaM_0\tperson\n8G_vBzM-Ws4_1\tumbrella\n8HcyzPUv5ag_0\tperson\n8JIpa6tfWzo_0\tairplane\n8JKJnuN_UTI_0\tcow\n8JhHIO_7m-0_0\tcow\n8LGnOH6nDbc_0\tdog\n8LGnOH6nDbc_1\tdog\n8Lx004yCltY_6\telephant\n8Lx004yCltY_12\telephant\n8Lx004yCltY_18\telephant\n8MO_kng7L-s_0\tperson\n8MO_kng7L-s_1\tperson\n8NlznvdsNJQ_2\tboat\n8N8hB2Au4JE_0\tperson\n8Pbd3dd3v5E_0\tperson\n8Pz3xq3KFo0_6\telephant\n8Pz3xq3KFo0_4\telephant\n8Qr-5_567tI_1\ttruck\n8Q8g9z-DNF8_0\tmotorcycle\n8RZsKbffdqI_0\tcat\n8Sbz2MGzhp4_0\tperson\n8UcqXCLmq-M_1\telephant\n8UcqXCLmq-M_3\telephant\n8UcqXCLmq-M_6\telephant\n8UcqXCLmq-M_7\telephant\n8Ul_lS0g_RU_0\tskateboard\n8UmKRVMR08g_2\tbird\n8U7BmrkcgcU_2\ttruck\n8VkbfdMQrR8_0\tperson\n8VzjERSpeS4_1\telephant\n8VzjERSpeS4_0\telephant\n8WcBoYh-IMg_0\tbird\n8X27eyH-tx0_0\tcar\n8Zi2bsTpMeY_0\tperson\n8ZmfZDMaVhg_0\tcat\n8Z1GvAHPEnU_0\tcat\n8a1bD-UgfKE_0\ttruck\n8bD-aqWPxwM_0\tmotorcycle\n8bE_FhrjBuM_2\tskateboard\n8bE_FhrjBuM_0\tskateboard\n8bE_FhrjBuM_1\tskateboard\n8bypIjdKgEI_0\tperson\n8b5fedIr-WQ_0\tperson\n8cNzCe26dSM_0\tperson\n8cSOpd9gaPE_0\tcow\n8c8TJ_Jzngk_0\thorse\n8d6950aGpD8_0\tdog\n8eK3ktD9j5o_0\thorse\n8eK3ktD9j5o_1\thorse\n8ewNcrMhg-w_0\tperson\n8gsiG2Wu3YM_0\tgiraffe\n8hFEJz0GvfU_0\telephant\n8hwa44VMdLs_0\tperson\n8h8Cpkugo-Y_0\telephant\n8h_eY7zEIqk_3\ttruck\n8iBiHoA_OJk_0\tperson\n8jRFQ8RKZ0s_1\tcar\n8kTREwiI1-8_0\tcow\n8kn6PJbtsyA_0\tbicycle\n8kn6PJbtsyA_1\tbicycle\n8kn6PJbtsyA_2\tbicycle\n8kn6PJbtsyA_3\tbicycle\n8kn6PJbtsyA_4\tbicycle\n8lKXEr2W3yM_0\tknife\n8lMRKCKyBwk_0\tperson\n8lonNtE99PI_1\tperson\n8l7UmXXnAJs_0\ttruck\n8mlHevSC8cc_0\tcar\n8m-GtOBjbzY_1\tbicycle\n8nWSGwlJyPQ_0\tcat\n8nsl-r_i0AI_0\tperson\n8n3A8io4GNU_0\tperson\n8okfUuO0Pvc_1\tbird\n8poWB-6q4xk_1\tbicycl
e\n8p2saqn2kiQ_0\tperson\n8qFJg_AoKeY_0\tcow\n8qulLm8MYrM_0\tbus\n8rBxRMDJEFY_0\tperson\n8sOWPIfWpCM_0\thorse\n8tKto2zQWUg_0\telephant\n8uoYlmdJlAo_1\tknife\n8wdvLn40CTk_5\tbus\n8wdvLn40CTk_0\tbus\n8wdvLn40CTk_1\tbus\n8wv3WJBJmog_1\tdog\n8yFZUTSjpos_0\tmotorcycle\n8zBx-nHUqBY_0\tperson\n8zUAF30Hu6c_1\ttrain\n8zUAF30Hu6c_2\ttrain\n8zftjn0I9TQ_0\ttruck\n8zftjn0I9TQ_2\ttruck\n8zjgYuK3nVY_0\tperson\n8z-YLOzAxb4_2\tbicycle\n8z-YLOzAxb4_4\tbicycle\n8z-sTr28AWk_0\tskateboard\n80CcMFD-Rcw_1\tperson\n80CcMFD-Rcw_0\tperson\n81cNVk8boEM_0\tperson\n82lK9rB-e08_1\tmotorcycle\n84P6L_HrN48_0\tbird\n88N5__h7Zdo_0\tbicycle\n89a461_gh2o_0\tbicycle\n89mGhzBokZ8_1\tbear\n89qfsC77BYk_0\tperson\n8_oUj2cuPdo_0\tdog\n9A-VO1zCZJ4_1\tmotorcycle\n9BVgbNz-bi8_0\tperson\n9BVgbNz-bi8_1\tperson\n9BpvtvUGG5g_0\tperson\n9DGpFjuUVBk_0\tperson\n9DY0dTRH5xI_0\tbird\n9D5ORdC7BuQ_6\tbus\n9ELQq5BMR1U_0\tperson\n9E8VBIYmTGY_1\tcow\n9E8VBIYmTGY_0\tcow\n9FAB9BrcQls_0\tperson\n9FTOvdcnzDQ_0\tairplane\n9GdhKEBm0pA_6\tbicycle\n9GdhKEBm0pA_1\tbicycle\n9GdhKEBm0pA_3\tbicycle\n9HqapwdLVzk_4\tknife\n9KfdTsjy53o_0\ttruck\n9LHbQA-pT0U_2\thorse\n9LJRUmW_AII_0\tboat\n9LOpNoTFWKg_0\ttruck\n9LOpNoTFWKg_4\ttruck\n9LOpNoTFWKg_1\ttruck\n9LOpNoTFWKg_2\ttruck\n9LqExSHe9y8_0\tknife\n9Ls7gSZQt1w_2\tbear\n9NsmnTdRiik_0\tairplane\n9PsezNNV0Jc_1\tairplane\n9PsezNNV0Jc_2\tairplane\n9PsezNNV0Jc_0\tairplane\n9Q3srzApSJU_0\tperson\n9RGlWjTKvE0_0\tbus\n9RZCK24Shec_0\tcat\n9ScZtgWAJZA_1\tperson\n9SgrA5Q1d94_0\tperson\n9ShZpsmuvc4_2\tskateboard\n9ShZpsmuvc4_1\tskateboard\n9UU2h6M8DJk_2\ttruck\n9UwLiWKOIGY_0\tperson\n9U-tccGetsk_0\tknife\n9VwSYjCCRYk_1\ttruck\n9VwSYjCCRYk_2\ttruck\n9WDPvYpnrfU_1\ttruck\n9WDt0JjOFIA_0\tperson\n9YVkZ7QxD5E_0\tperson\n9Y6XZFO31JU_0\tcow\n9ZpZZoTtySo_1\tbear\n9Z0Jz1tesQ4_4\tcow\n9Z0Jz1tesQ4_1\tcow\n9Z0Jz1tesQ4_2\tcow\n9Z0Jz1tesQ4_3\tcow\n9aQOAnspXGo_1\tbird\n9bYPYgMQVjU_0\tperson\n9bzmQFGK8m8_0\tperson\n9dOPPvgyMqk_0\tperson\n9eI_0DoOE08_0\tperson\n9eI_0DoOE08_1\tperson\n9g8o260G10k_0\tbird\n9hAU80xKWy0_0\ttruck\n9jS5MThAtmo_0\tperson\n9kGuuCx39JA_0\tmotorcycle\n9lsXenPJ-X8_1\tbird\n9ltdzlYXfp8_0\tcow\n9ltdzlYXfp8_3\tcow\n9muklrcigJY_0\tdog\n9nqU8e9IUPU_0\tskateboard\n9pEB8cjvPSQ_1\thorse\n9qamzN9bwxw_0\tperson\n9rvVWyyuud0_0\tperson\n9r1FvK19XV8_0\tperson\n9uhZRDsQKnc_0\tperson\n9yt1if13PHk_0\telephant\n9y5txKR57mc_0\tbird\n9zBCjCtH3Eg_0\thorse\n9zqk5w8Qx1Q_1\tbicycle\n9zroWMwZHGI_1\tperson\n907A5I4-LpA_0\tmotorcycle\n91SWvU-5TcI_0\tperson\n92MaWPuO8PI_0\tboat\n92560YiwSP0_0\tperson\n93gyPa_dPGU_0\ttruck\n946wiAK4Seg_1\tperson\n95CV_olHtcI_0\tperson\n96WWGXa4QrI_0\tcar\n96akJFw5SPU_0\ttruck\n96iqXHgOXKY_0\tperson\n98XiF-Z__aI_0\tcat\n99Tb7HSFn3I_0\tperson\n9_bFE0FUq_c_1\tknife\n-A-tBuMjU8s_0\tcat\n-B4YQQLrOfI_2\tskateboard\n-C0rYHhL_x4_0\tmotorcycle\n-DYf49hlRSE_0\tperson\n-Ebcfmg0-eE_0\tperson\n-E05a-eQSwY_0\tumbrella\n-FMaVn21dYU_1\thorse\n-Fu9coX9J-A_0\tperson\n-Fu9coX9J-A_1\tperson\n-Gk4iMiEMCc_0\tperson\n-LVtIbelA3M_0\thorse\n-LXr7LdXtrk_0\tboat\n-LjAFTF5WP4_0\tbicycle\n-LjAFTF5WP4_1\tbicycle\n-LjAFTF5WP4_3\tbicycle\n-MpLPuviQ00_0\tperson\n-M_jT3EYgcc_0\tperson\n-NWvB2g952Q_2\tbird\n-OZt785bbpY_0\tairplane\n-P37Y1G6oHk_2\tairplane\n-P37Y1G6oHk_3\tairplane\n-P37Y1G6oHk_0\tairplane\n-QBeUV_OkJg_1\tdog\n-QQCINzsXpw_0\tperson\n-Q6g2xZ0PxY_1\tairplane\n-RjxMfaV-Vo_1\tknife\n-RjxMfaV-Vo_2\tknife\n-SPHavKGd3M_0\tskateboard\n-S8L2HACCPE_12\telephant\n-S8L2HACCPE_1\telephant\n-S8L2HACCPE_10\telephant\n-TKKOo1FfAI_0\tbird\n-VgWHKeRRjs_0\tairplane\n-VgWHKeRRjs_1\tairplane\n-WyEyKxdZOQ_0\tperson\n-XWeGpACKwc_0\tskateboard\n-Xj6MiGVWt0_0\t
person\n-XwZnoNm0FU_0\tdog\n-ZDO95E0pl8_0\tperson\n-anX-ad_gHQ_0\tperson\n-avz2OsPIq4_2\tbicycle\n-bJkl4q5f-A_0\tbird\n-c1b7nHzGn4_0\tairplane\n-dQnNlBQp3o_0\tperson\n-db_SToBhkg_2\tmotorcycle\n-eZUdm8ERQQ_0\tperson\n-e42Pb0YeOY_0\tcat\n-fnhznKC3CU_0\tperson\n-f0JLwuyuTM_0\tperson\n-jL0HOXwYls_0\tperson\n-kLIF2a7yeU_0\tperson\n-k1TxEpOgnA_0\tperson\n-l9NS6DuRPI_0\tperson\n-mgNwLW3ODc_0\tperson\n-mwDgqLpu-k_0\tskateboard\n-nOfuA8B7As_1\tbicycle\n-nzXunuZac4_0\tcat\n-oG6YVPhC_I_0\thorse\n-o28rb1UnYA_0\tcar\n-sJOJNjOCBI_0\tmotorcycle\n-sJOJNjOCBI_1\tmotorcycle\n-sWch1rnO10_0\tperson\n-th9NS9hl6s_0\tcow\n-uP01llwXFY_5\tboat\n-uP01llwXFY_1\tboat\n-u5MNR-9ClU_0\tperson\n-vkMKVuweFA_0\tperson\n-v7FXEhgwtE_0\tperson\n-y652b4w3Ss_0\tbird\n-zqHD6Jthqg_0\tperson\n-0U1vm6LIi8_0\tperson\n-1je1K1ihbk_2\tskateboard\n-2iw3MzUP2Y_0\tmotorcycle\n-3OvKcu5P2U_0\tcar\n-3fzr21Ov5w_0\tperson\n-6vJDV8XnWE_0\tboat\n-7Im8MyvaXU_0\tcat\n--8shIp3t0I_0\tknife\n-_iBuJTwjw8_1\thorse\n-_xag4X_Do0_0\tbird\n_ATEx5gbBEQ_0\tknife\n_ATEx5gbBEQ_1\tknife\n_AcvI8VF5ig_0\tcow\n_Ae4vmwt8uA_0\tperson\n_Auvs-o5Pck_0\ttruck\n_A8nA25Tq8c_1\tperson\n_C_yvxdjVGA_2\thorse\n_C_yvxdjVGA_0\thorse\n_DXAxnPIiBU_0\tcow\n_D-9w3aSX50_0\tperson\n_GyE3cPQ6U8_0\tcar\n_HN1_MjnjWo_2\telephant\n_HYaLoOKE84_1\tcow\n_IhkqtAQHBw_0\ttrain\n_InrHPE8Umw_0\tmotorcycle\n_IpUnYit3Pg_0\tdog\n_JNG6qK6INs_3\tbear\n_KzDIvt0cCk_0\tperson\n_K6jYgDC1JU_0\tairplane\n_NZ4o-omJLE_0\tumbrella\n_NtOMcyVAp4_1\tdog\n_OmnjH4t-IY_0\tperson\n_QF0A9B-xB8_0\tperson\n_QRy9nd4kcg_0\tairplane\n_Q9M8QAjSMk_0\tperson\n_Rd-wEO2r10_0\tperson\n_R6nlDzh6Tc_0\tperson\n_R6nlDzh6Tc_2\tperson\n_T0O1BlYjaU_1\tbear\n_VegkTdhrQE_0\tmotorcycle\n_WKJaPPBz8Q_0\tumbrella\n_WcqTpLKkww_1\ttruck\n_Y6_E1l4blQ_1\tknife\n_ZDU4qi4lcI_2\tcow\n_ZDU4qi4lcI_0\tcow\n_ZDU4qi4lcI_1\tcow\n_ZHmkH59bCQ_0\tperson\n_ZXqLyRe4n0_0\telephant\n_ZsogS9uPJQ_0\tperson\n_akq_DieEWE_0\tperson\n_akq_DieEWE_1\tperson\n_bO2sdIelLY_0\tperson\n_dC_upYbxWI_0\tknife\n_eCb7mFYyIg_0\tmotorcycle\n_egWujmdZtw_0\tperson\n_epdfuB0qRM_0\tcar\n_e5Vvy9DJ9E_4\tbear\n_e5Vvy9DJ9E_0\tbear\n_foK5Dvj1As_0\tbird\n_hryEVGKNuw_0\thorse\n_iY4AnGfq0Y_0\ttrain\n_jBzwdg0QRA_1\tbus\n_jci9tIBIB4_5\ttruck\n_kdhlRke8uI_0\tperson\n_kfdh_5bI-Q_0\tperson\n_lmD-useijU_0\tperson\n_mJBwuCegJ0_12\ttruck\n_mJBwuCegJ0_1\ttruck\n_mJBwuCegJ0_2\ttruck\n_mJBwuCegJ0_8\ttruck\n_mJBwuCegJ0_9\ttruck\n_oRtPVRmtwo_0\tdog\n_pEHwWe2seA_5\telephant\n_sV1Jd1uiYg_0\tperson\n_tZU1XTOML4_0\tboat\n_usyDpllGBo_0\thorse\n_vBAv8cBoqE_0\tskateboard\n_vV0wdWq0cU_0\tperson\n_xMVx44FbT4_0\thorse\n_xQn3TupjYs_0\tcat\n_xy58m6yCko_0\tmotorcycle\n_yQQjARqD1s_0\tboat\n_yfoe4GCA0Q_4\tairplane\n_yfoe4GCA0Q_2\tairplane\n_yv5Cwbm9EA_0\tperson\n_zIDofZkgS4_1\ttruck\n_zQt1CSSKyA_1\tbicycle\n_0eR2vQAEqE_0\telephant\n_0eR2vQAEqE_1\telephant\n_17u-cPTYt0_0\tcar\n_17u-cPTYt0_1\tcar\n_2mIWIhbDPY_0\tbus\n_37U5Elgnck_0\tperson\n_5fE6dP48FM_0\tcow\n_5sIT4l5izM_0\tknife\n_6qUuUUYvUQ_0\tperson\n_7zbbqEa3nw_1\ttrain\n_7zbbqEa3nw_4\ttrain\n_8VTthFkvS0_0\tbird\n_8iyumFI4sQ_1\telephant\n_8iyumFI4sQ_2\telephant\n_8iyumFI4sQ_3\telephant\n_81FImml2gk_0\tdog\n_9bypka_Q4c_0\tbus\n_-CvwC7H730_0\tperson\n_-XcxnQLKPM_0\tdog\n__Q5A7gExpI_0\tperson\n"
  },
  {
    "path": "lib/train/dataset/COCO_tool.py",
    "content": "__author__ = 'tylin'\n__version__ = '2.0'\n# Interface for accessing the Microsoft COCO dataset.\n\n# Microsoft COCO is a large image dataset designed for object detection,\n# segmentation, and caption generation. pycocotools is a Python API that\n# assists in loading, parsing and visualizing the annotations in COCO.\n# Please visit http://mscoco.org/ for more information on COCO, including\n# for the data, paper, and tutorials. The exact format of the annotations\n# is also described on the COCO website. For example usage of the pycocotools\n# please see pycocotools_demo.ipynb. In addition to this API, please download both\n# the COCO images and annotations in order to run the demo.\n\n# An alternative to using the API is to load the annotations directly\n# into Python dictionary\n# Using the API provides additional utility functions. Note that this API\n# supports both *instance* and *caption* annotations. In the case of\n# captions not all functions are defined (e.g. categories are undefined).\n\n# The following API functions are defined:\n#  COCO       - COCO api class that loads COCO annotation file and prepare data structures.\n#  decodeMask - Decode binary mask M encoded via run-length encoding.\n#  encodeMask - Encode binary mask M using run-length encoding.\n#  getAnnIds  - Get ann ids that satisfy given filter conditions.\n#  getCatIds  - Get cat ids that satisfy given filter conditions.\n#  getImgIds  - Get img ids that satisfy given filter conditions.\n#  loadAnns   - Load anns with the specified ids.\n#  loadCats   - Load cats with the specified ids.\n#  loadImgs   - Load imgs with the specified ids.\n#  annToMask  - Convert segmentation in an annotation to binary mask.\n#  showAnns   - Display the specified annotations.\n#  loadRes    - Load algorithm results and create API for accessing them.\n#  download   - Download COCO images from mscoco.org server.\n# Throughout the API \"ann\"=annotation, \"cat\"=category, and \"img\"=image.\n# Help on each functions can be accessed by: \"help COCO>function\".\n\n# See also COCO>decodeMask,\n# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,\n# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,\n# COCO>loadImgs, COCO>annToMask, COCO>showAnns\n\n# Microsoft COCO Toolbox.      
version 2.0\n# Data, paper, and tutorials available at:  http://mscoco.org/\n# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.\n# Licensed under the Simplified BSD License [see bsd.txt]\n\nimport json\nimport time\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Polygon\nimport numpy as np\nimport copy\nimport itertools\nfrom pycocotools import mask as maskUtils\nimport os\nfrom collections import defaultdict\nimport sys\nPYTHON_VERSION = sys.version_info[0]\nif PYTHON_VERSION == 2:\n    from urllib import urlretrieve\nelif PYTHON_VERSION == 3:\n    from urllib.request import urlretrieve\n\n\ndef _isArrayLike(obj):\n    return hasattr(obj, '__iter__') and hasattr(obj, '__len__')\n\n\nclass COCO:\n    def __init__(self, dataset):\n        \"\"\"\n        Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n        :param annotation_file (str): location of annotation file\n        :param image_folder (str): location to the folder that hosts images.\n        :return:\n        \"\"\"\n        # load dataset\n        self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()\n        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)\n        assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))\n        self.dataset = dataset\n        self.createIndex()\n\n    def createIndex(self):\n        # create index\n        print('creating index...')\n        anns, cats, imgs = {}, {}, {}\n        imgToAnns,catToImgs = defaultdict(list),defaultdict(list)\n        if 'annotations' in self.dataset:\n            for ann in self.dataset['annotations']:\n                imgToAnns[ann['image_id']].append(ann)\n                anns[ann['id']] = ann\n\n        if 'images' in self.dataset:\n            for img in self.dataset['images']:\n                imgs[img['id']] = img\n\n        if 'categories' in self.dataset:\n            for cat in self.dataset['categories']:\n                cats[cat['id']] = cat\n\n        if 'annotations' in self.dataset and 'categories' in self.dataset:\n            for ann in self.dataset['annotations']:\n                catToImgs[ann['category_id']].append(ann['image_id'])\n\n        print('index created!')\n\n        # create class members\n        self.anns = anns\n        self.imgToAnns = imgToAnns\n        self.catToImgs = catToImgs\n        self.imgs = imgs\n        self.cats = cats\n\n    def info(self):\n        \"\"\"\n        Print information about the annotation file.\n        :return:\n        \"\"\"\n        for key, value in self.dataset['info'].items():\n            print('{}: {}'.format(key, value))\n\n    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):\n        \"\"\"\n        Get ann ids that satisfy given filter conditions. default skips that filter\n        :param imgIds  (int array)     : get anns for given imgs\n               catIds  (int array)     : get anns for given cats\n               areaRng (float array)   : get anns for given area range (e.g. 
[0 inf])\n               iscrowd (boolean)       : get anns for given crowd label (False or True)\n        :return: ids (int array)       : integer array of ann ids\n        \"\"\"\n        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]\n        catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n        if len(imgIds) == len(catIds) == len(areaRng) == 0:\n            anns = self.dataset['annotations']\n        else:\n            if not len(imgIds) == 0:\n                lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]\n                anns = list(itertools.chain.from_iterable(lists))\n            else:\n                anns = self.dataset['annotations']\n            anns = anns if len(catIds)  == 0 else [ann for ann in anns if ann['category_id'] in catIds]\n            anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]\n        if not iscrowd == None:\n            ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]\n        else:\n            ids = [ann['id'] for ann in anns]\n        return ids\n\n    def getCatIds(self, catNms=[], supNms=[], catIds=[]):\n        \"\"\"\n        filtering parameters. default skips that filter.\n        :param catNms (str array)  : get cats for given cat names\n        :param supNms (str array)  : get cats for given supercategory names\n        :param catIds (int array)  : get cats for given cat ids\n        :return: ids (int array)   : integer array of cat ids\n        \"\"\"\n        catNms = catNms if _isArrayLike(catNms) else [catNms]\n        supNms = supNms if _isArrayLike(supNms) else [supNms]\n        catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n        if len(catNms) == len(supNms) == len(catIds) == 0:\n            cats = self.dataset['categories']\n        else:\n            cats = self.dataset['categories']\n            cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name']          in catNms]\n            cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]\n            cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id']            in catIds]\n        ids = [cat['id'] for cat in cats]\n        return ids\n\n    def getImgIds(self, imgIds=[], catIds=[]):\n        '''\n        Get img ids that satisfy given filter conditions.\n        :param imgIds (int array) : get imgs for given ids\n        :param catIds (int array) : get imgs with all given cats\n        :return: ids (int array)  : integer array of img ids\n        '''\n        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]\n        catIds = catIds if _isArrayLike(catIds) else [catIds]\n\n        if len(imgIds) == len(catIds) == 0:\n            ids = self.imgs.keys()\n        else:\n            ids = set(imgIds)\n            for i, catId in enumerate(catIds):\n                if i == 0 and len(ids) == 0:\n                    ids = set(self.catToImgs[catId])\n                else:\n                    ids &= set(self.catToImgs[catId])\n        return list(ids)\n\n    def loadAnns(self, ids=[]):\n        \"\"\"\n        Load anns with the specified ids.\n        :param ids (int array)       : integer ids specifying anns\n        :return: anns (object array) : loaded ann objects\n        \"\"\"\n        if _isArrayLike(ids):\n            return [self.anns[id] for id in ids]\n        elif type(ids) == int:\n            return [self.anns[ids]]\n\n    def loadCats(self, ids=[]):\n       
 \"\"\"\n        Load cats with the specified ids.\n        :param ids (int array)       : integer ids specifying cats\n        :return: cats (object array) : loaded cat objects\n        \"\"\"\n        if _isArrayLike(ids):\n            return [self.cats[id] for id in ids]\n        elif type(ids) == int:\n            return [self.cats[ids]]\n\n    def loadImgs(self, ids=[]):\n        \"\"\"\n        Load anns with the specified ids.\n        :param ids (int array)       : integer ids specifying img\n        :return: imgs (object array) : loaded img objects\n        \"\"\"\n        if _isArrayLike(ids):\n            return [self.imgs[id] for id in ids]\n        elif type(ids) == int:\n            return [self.imgs[ids]]\n\n    def showAnns(self, anns, draw_bbox=False):\n        \"\"\"\n        Display the specified annotations.\n        :param anns (array of object): annotations to display\n        :return: None\n        \"\"\"\n        if len(anns) == 0:\n            return 0\n        if 'segmentation' in anns[0] or 'keypoints' in anns[0]:\n            datasetType = 'instances'\n        elif 'caption' in anns[0]:\n            datasetType = 'captions'\n        else:\n            raise Exception('datasetType not supported')\n        if datasetType == 'instances':\n            ax = plt.gca()\n            ax.set_autoscale_on(False)\n            polygons = []\n            color = []\n            for ann in anns:\n                c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]\n                if 'segmentation' in ann:\n                    if type(ann['segmentation']) == list:\n                        # polygon\n                        for seg in ann['segmentation']:\n                            poly = np.array(seg).reshape((int(len(seg)/2), 2))\n                            polygons.append(Polygon(poly))\n                            color.append(c)\n                    else:\n                        # mask\n                        t = self.imgs[ann['image_id']]\n                        if type(ann['segmentation']['counts']) == list:\n                            rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])\n                        else:\n                            rle = [ann['segmentation']]\n                        m = maskUtils.decode(rle)\n                        img = np.ones( (m.shape[0], m.shape[1], 3) )\n                        if ann['iscrowd'] == 1:\n                            color_mask = np.array([2.0,166.0,101.0])/255\n                        if ann['iscrowd'] == 0:\n                            color_mask = np.random.random((1, 3)).tolist()[0]\n                        for i in range(3):\n                            img[:,:,i] = color_mask[i]\n                        ax.imshow(np.dstack( (img, m*0.5) ))\n                if 'keypoints' in ann and type(ann['keypoints']) == list:\n                    # turn skeleton into zero-based index\n                    sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1\n                    kp = np.array(ann['keypoints'])\n                    x = kp[0::3]\n                    y = kp[1::3]\n                    v = kp[2::3]\n                    for sk in sks:\n                        if np.all(v[sk]>0):\n                            plt.plot(x[sk],y[sk], linewidth=3, color=c)\n                    plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)\n                    plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, 
markeredgewidth=2)\n\n                if draw_bbox:\n                    [bbox_x, bbox_y, bbox_w, bbox_h] = ann['bbox']\n                    poly = [[bbox_x, bbox_y], [bbox_x, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y]]\n                    np_poly = np.array(poly).reshape((4,2))\n                    polygons.append(Polygon(np_poly))\n                    color.append(c)\n\n            p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)\n            ax.add_collection(p)\n            p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)\n            ax.add_collection(p)\n        elif datasetType == 'captions':\n            for ann in anns:\n                print(ann['caption'])\n\n    def loadRes(self, resFile):\n        \"\"\"\n        Load result file and return a result api object.\n        :param   resFile (str)     : file name of result file\n        :return: res (obj)         : result api object\n        \"\"\"\n        res = COCO(dict())  # this COCO variant takes an annotation dict, so start from an empty one\n        res.dataset['images'] = [img for img in self.dataset['images']]\n\n        print('Loading and preparing results...')\n        tic = time.time()\n        if type(resFile) == str or (PYTHON_VERSION == 2 and type(resFile) == unicode):\n            with open(resFile) as f:\n                anns = json.load(f)\n        elif type(resFile) == np.ndarray:\n            anns = self.loadNumpyAnnotations(resFile)\n        else:\n            anns = resFile\n        assert type(anns) == list, 'results is not an array of objects'\n        annsImgIds = [ann['image_id'] for ann in anns]\n        assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n               'Results do not correspond to current coco set'\n        if 'caption' in anns[0]:\n            imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\n            res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\n            for id, ann in enumerate(anns):\n                ann['id'] = id+1\n        elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n            for id, ann in enumerate(anns):\n                bb = ann['bbox']\n                x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]\n                if not 'segmentation' in ann:\n                    ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n                ann['area'] = bb[2]*bb[3]\n                ann['id'] = id+1\n                ann['iscrowd'] = 0\n        elif 'segmentation' in anns[0]:\n            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n            for id, ann in enumerate(anns):\n                # now only support compressed RLE format as segmentation results\n                ann['area'] = maskUtils.area(ann['segmentation'])\n                if not 'bbox' in ann:\n                    ann['bbox'] = maskUtils.toBbox(ann['segmentation'])\n                ann['id'] = id+1\n                ann['iscrowd'] = 0\n        elif 'keypoints' in anns[0]:\n            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n            for id, ann in enumerate(anns):\n                s = ann['keypoints']\n                x = s[0::3]\n                y = s[1::3]\n                x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)\n                ann['area'] = (x1-x0)*(y1-y0)\n                ann['id'] = id + 1\n                ann['bbox'] = [x0,y0,x1-x0,y1-y0]\n        print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n        res.dataset['annotations'] = anns\n        res.createIndex()\n        return res\n\n    def download(self, tarDir = None, imgIds = [] ):\n        '''\n        Download COCO images from mscoco.org server.\n        :param tarDir (str): COCO results directory name\n               imgIds (list): images to be downloaded\n        :return:\n        '''\n        if tarDir is None:\n            print('Please specify target directory')\n            return -1\n        if len(imgIds) == 0:\n            imgs = self.imgs.values()\n        else:\n            imgs = self.loadImgs(imgIds)\n        N = len(imgs)\n        if not os.path.exists(tarDir):\n            os.makedirs(tarDir)\n        for i, img in enumerate(imgs):\n            tic = time.time()\n            fname = os.path.join(tarDir, img['file_name'])\n            if not os.path.exists(fname):\n                urlretrieve(img['coco_url'], fname)\n            print('downloaded {}/{} images (t={:0.1f}s)'.format(i+1, N, time.time()- tic))\n\n    def loadNumpyAnnotations(self, data):\n        \"\"\"\n        Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}\n        :param  data (numpy.ndarray)\n        :return: annotations (python nested list)\n        \"\"\"\n        print('Converting ndarray to lists...')\n        assert(type(data) == np.ndarray)\n        print(data.shape)\n        assert(data.shape[1] == 7)\n        N = data.shape[0]\n        ann = []\n        for i in range(N):\n            if i % 1000000 == 0:\n                print('{}/{}'.format(i,N))\n            ann += [{\n                'image_id'  : int(data[i, 0]),\n                'bbox'  : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],\n                'score' : data[i, 5],\n                'category_id': int(data[i, 6]),\n                }]\n        return ann\n\n    def annToRLE(self, ann):\n        \"\"\"\n        Convert annotation which can be polygons, uncompressed RLE to RLE.\n        :return: rle (dict) : run-length encoding of the mask\n        \"\"\"\n        t = self.imgs[ann['image_id']]\n        h, w = t['height'], t['width']\n        segm = ann['segmentation']\n        if type(segm) == list:\n            # polygon -- a single object might consist of multiple parts\n            # we merge all parts into one mask rle code\n            rles = maskUtils.frPyObjects(segm, h, w)\n            rle = maskUtils.merge(rles)\n        elif type(segm['counts']) == list:\n            # uncompressed RLE\n            rle = maskUtils.frPyObjects(segm, h, w)\n        else:\n            # rle\n            rle = ann['segmentation']\n        return rle\n\n    def annToMask(self, ann):\n        \"\"\"\n        Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.\n        :return: binary mask (numpy 2D array)\n        \"\"\"\n        rle = self.annToRLE(ann)\n        m = maskUtils.decode(rle)\n        return m\n"
  },
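The COCO class above is an in-memory variant of pycocotools' API: its constructor takes an already-decoded annotation dict rather than a JSON file path, which is what lets the LMDB-backed datasets further down feed it records decoded straight from the database. A minimal sketch of driving it (the toy_dataset dict below is invented for illustration):

from lib.train.dataset.COCO_tool import COCO

# A tiny annotation dict in COCO format: one image, one polygon annotation.
toy_dataset = {
    'images': [{'id': 1, 'height': 480, 'width': 640, 'file_name': '000001.jpg'}],
    'annotations': [{'id': 10, 'image_id': 1, 'category_id': 3,
                     'bbox': [50.0, 60.0, 120.0, 80.0], 'area': 9600.0, 'iscrowd': 0,
                     'segmentation': [[50, 60, 50, 140, 170, 140, 170, 60]]}],
    'categories': [{'id': 3, 'name': 'car', 'supercategory': 'vehicle'}],
}

coco = COCO(toy_dataset)                        # builds the anns/imgs/cats indices
ann_ids = coco.getAnnIds(imgIds=[1], iscrowd=False)
anns = coco.loadAnns(ann_ids)
mask = coco.annToMask(anns[0])                  # (480, 640) binary numpy array
print(ann_ids, mask.shape, int(mask.sum()))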
  {
    "path": "lib/train/dataset/__init__.py",
    "content": "from .lasot import Lasot\nfrom .got10k import Got10k\nfrom .tracking_net import TrackingNet\nfrom .imagenetvid import ImagenetVID\nfrom .coco import MSCOCO\nfrom .coco_seq import MSCOCOSeq\nfrom .got10k_lmdb import Got10k_lmdb\nfrom .lasot_lmdb import Lasot_lmdb\nfrom .imagenetvid_lmdb import ImagenetVID_lmdb\nfrom .coco_seq_lmdb import MSCOCOSeq_lmdb\nfrom .tracking_net_lmdb import TrackingNet_lmdb\n"
  },
  {
    "path": "lib/train/dataset/base_image_dataset.py",
    "content": "import torch.utils.data\nfrom lib.train.data.image_loader import jpeg4py_loader\n\n\nclass BaseImageDataset(torch.utils.data.Dataset):\n    \"\"\" Base class for image datasets \"\"\"\n\n    def __init__(self, name, root, image_loader=jpeg4py_loader):\n        \"\"\"\n        args:\n            root - The root path to the dataset\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n        \"\"\"\n        self.name = name\n        self.root = root\n        self.image_loader = image_loader\n\n        self.image_list = []     # Contains the list of sequences.\n        self.class_list = []\n\n    def __len__(self):\n        \"\"\" Returns size of the dataset\n        returns:\n            int - number of samples in the dataset\n        \"\"\"\n        return self.get_num_images()\n\n    def __getitem__(self, index):\n        \"\"\" Not to be used! Check get_frames() instead.\n        \"\"\"\n        return None\n\n    def get_name(self):\n        \"\"\" Name of the dataset\n\n        returns:\n            string - Name of the dataset\n        \"\"\"\n        raise NotImplementedError\n\n    def get_num_images(self):\n        \"\"\" Number of sequences in a dataset\n\n        returns:\n            int - number of sequences in the dataset.\"\"\"\n        return len(self.image_list)\n\n    def has_class_info(self):\n        return False\n\n    def get_class_name(self, image_id):\n        return None\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_class_list(self):\n        return self.class_list\n\n    def get_images_in_class(self, class_name):\n        raise NotImplementedError\n\n    def has_segmentation_info(self):\n        return False\n\n    def get_image_info(self, seq_id):\n        \"\"\" Returns information about a particular image,\n\n        args:\n            seq_id - index of the image\n\n        returns:\n            Dict\n            \"\"\"\n        raise NotImplementedError\n\n    def get_image(self, image_id, anno=None):\n        \"\"\" Get a image\n\n        args:\n            image_id      - index of image\n            anno(None)  - The annotation for the sequence (see get_sequence_info). If None, they will be loaded.\n\n        returns:\n            image -\n            anno -\n            dict - A dict containing meta information about the sequence, e.g. class of the target object.\n\n        \"\"\"\n        raise NotImplementedError\n\n"
  },
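BaseImageDataset only stores the name/root/loader and sizes itself from self.image_list; everything else is a contract for subclasses (coco.py below is the real implementation). A hypothetical minimal subclass, for illustration only (ToyImages and its (path, box) sample format are invented here):

import os
import torch
from collections import OrderedDict
from lib.train.dataset.base_image_dataset import BaseImageDataset

class ToyImages(BaseImageDataset):
    """ Illustrative dataset over (relative_path, [x, y, w, h]) pairs. """

    def __init__(self, root, samples):
        super().__init__('toy_images', root)           # keeps the default jpeg4py loader
        self.samples = samples
        self.image_list = list(range(len(samples)))    # the base class sizes itself from this

    def get_name(self):
        return 'toy_images'

    def get_image_info(self, im_id):
        bbox = torch.Tensor(self.samples[im_id][1]).view(4,)
        valid = (bbox[2] > 0) & (bbox[3] > 0)
        return {'bbox': bbox, 'valid': valid, 'visible': valid.clone().byte()}

    def get_image(self, image_id, anno=None):
        frame = self.image_loader(os.path.join(self.root, self.samples[image_id][0]))
        if anno is None:
            anno = self.get_image_info(image_id)
        meta = OrderedDict({'object_class_name': None, 'motion_class': None,
                            'major_class': None, 'root_class': None, 'motion_adverb': None})
        return frame, anno, meta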
  {
    "path": "lib/train/dataset/base_video_dataset.py",
    "content": "import torch.utils.data\n# 2021.1.5 use jpeg4py_loader_w_failsafe as default\nfrom lib.train.data.image_loader import jpeg4py_loader_w_failsafe\n\n\nclass BaseVideoDataset(torch.utils.data.Dataset):\n    \"\"\" Base class for video datasets \"\"\"\n\n    def __init__(self, name, root, image_loader=jpeg4py_loader_w_failsafe):\n        \"\"\"\n        args:\n            root - The root path to the dataset\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n        \"\"\"\n        self.name = name\n        self.root = root\n        self.image_loader = image_loader\n\n        self.sequence_list = []     # Contains the list of sequences.\n        self.class_list = []\n\n    def __len__(self):\n        \"\"\" Returns size of the dataset\n        returns:\n            int - number of samples in the dataset\n        \"\"\"\n        return self.get_num_sequences()\n\n    def __getitem__(self, index):\n        \"\"\" Not to be used! Check get_frames() instead.\n        \"\"\"\n        return None\n\n    def is_video_sequence(self):\n        \"\"\" Returns whether the dataset is a video dataset or an image dataset\n\n        returns:\n            bool - True if a video dataset\n        \"\"\"\n        return True\n\n    def is_synthetic_video_dataset(self):\n        \"\"\" Returns whether the dataset contains real videos or synthetic\n\n        returns:\n            bool - True if a video dataset\n        \"\"\"\n        return False\n\n    def get_name(self):\n        \"\"\" Name of the dataset\n\n        returns:\n            string - Name of the dataset\n        \"\"\"\n        raise NotImplementedError\n\n    def get_num_sequences(self):\n        \"\"\" Number of sequences in a dataset\n\n        returns:\n            int - number of sequences in the dataset.\"\"\"\n        return len(self.sequence_list)\n\n    def has_class_info(self):\n        return False\n\n    def has_occlusion_info(self):\n        return False\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_class_list(self):\n        return self.class_list\n\n    def get_sequences_in_class(self, class_name):\n        raise NotImplementedError\n\n    def has_segmentation_info(self):\n        return False\n\n    def get_sequence_info(self, seq_id):\n        \"\"\" Returns information about a particular sequences,\n\n        args:\n            seq_id - index of the sequence\n\n        returns:\n            Dict\n            \"\"\"\n        raise NotImplementedError\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        \"\"\" Get a set of frames from a particular sequence\n\n        args:\n            seq_id      - index of sequence\n            frame_ids   - a list of frame numbers\n            anno(None)  - The annotation for the sequence (see get_sequence_info). If None, they will be loaded.\n\n        returns:\n            list - List of frames corresponding to frame_ids\n            list - List of dicts for each frame\n            dict - A dict containing meta information about the sequence, e.g. class of the target object.\n\n        \"\"\"\n        raise NotImplementedError\n\n"
  },
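Training code is expected to consume BaseVideoDataset subclasses through get_sequence_info/get_frames rather than __getitem__ (which deliberately returns None). A sketch of a sampler-style caller; sample_pair is an illustrative helper, not a function from this repo:

import random

def sample_pair(dataset, seq_id, num_frames=2):
    # Gather per-frame annotations once, then draw frames where the target is visible.
    info = dataset.get_sequence_info(seq_id)
    visible = info['visible']
    candidates = [i for i in range(len(visible)) if visible[i]]
    frame_ids = sorted(random.choices(candidates, k=num_frames))
    # Passing `info` back avoids a second call to get_sequence_info.
    frames, annos, meta = dataset.get_frames(seq_id, frame_ids, info)
    return frames, annos, meta

For image datasets wrapped as length-1 sequences (see coco_seq.py below), the same loop works unchanged because get_frames simply replicates the single image.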
  {
    "path": "lib/train/dataset/coco.py",
    "content": "import os\nfrom .base_image_dataset import BaseImageDataset\nimport torch\nimport random\nfrom collections import OrderedDict\nfrom lib.train.data import jpeg4py_loader\nfrom lib.train.admin import env_settings\nfrom pycocotools.coco import COCO\n\n\nclass MSCOCO(BaseImageDataset):\n    \"\"\" The COCO object detection dataset.\n\n    Publication:\n        Microsoft COCO: Common Objects in Context.\n        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n        ECCV, 2014\n        https://arxiv.org/pdf/1405.0312.pdf\n\n    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n    organized as follows.\n        - coco_root\n            - annotations\n                - instances_train2014.json\n                - instances_train2017.json\n            - images\n                - train2014\n                - train2017\n\n    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, min_area=None,\n                 split=\"train\", version=\"2014\"):\n        \"\"\"\n        args:\n            root - path to coco root folder\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            min_area - Objects with area less than min_area are filtered out. Default is 0.0\n            split - 'train' or 'val'.\n            version - version of coco dataset (2014 or 2017)\n        \"\"\"\n\n        root = env_settings().coco_dir if root is None else root\n        super().__init__('COCO', root, image_loader)\n\n        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n        self.coco_set = COCO(self.anno_path)\n\n        self.cats = self.coco_set.cats\n\n        self.class_list = self.get_class_list()  # the parent class thing would happen in the sampler\n\n        self.image_list = self._get_image_list(min_area=min_area)\n\n        if data_fraction is not None:\n            self.image_list = random.sample(self.image_list, int(len(self.image_list) * data_fraction))\n        self.im_per_class = self._build_im_per_class()\n\n    def _get_image_list(self, min_area=None):\n        ann_list = list(self.coco_set.anns.keys())\n        image_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n        if min_area is not None:\n            image_list = [a for a in image_list if self.coco_set.anns[a]['area'] > min_area]\n\n        return image_list\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_name(self):\n        return 'coco'\n\n    def has_class_info(self):\n        return True\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_class_list(self):\n        class_list = []\n        for cat_id in self.cats.keys():\n            class_list.append(self.cats[cat_id]['name'])\n        return class_list\n\n    def _build_im_per_class(self):\n        im_per_class = {}\n        for i, im in enumerate(self.image_list):\n            class_name = 
self.cats[self.coco_set.anns[im]['category_id']]['name']\n            if class_name not in im_per_class:\n                im_per_class[class_name] = [i]\n            else:\n                im_per_class[class_name].append(i)\n\n        return im_per_class\n\n    def get_images_in_class(self, class_name):\n        return self.im_per_class[class_name]\n\n    def get_image_info(self, im_id):\n        anno = self._get_anno(im_id)\n\n        bbox = torch.Tensor(anno['bbox']).view(4,)\n\n        mask = torch.Tensor(self.coco_set.annToMask(anno))\n\n        valid = (bbox[2] > 0) & (bbox[3] > 0)\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_anno(self, im_id):\n        anno = self.coco_set.anns[self.image_list[im_id]]\n\n        return anno\n\n    def _get_image(self, im_id):\n        path = self.coco_set.loadImgs([self.coco_set.anns[self.image_list[im_id]]['image_id']])[0]['file_name']\n        img = self.image_loader(os.path.join(self.img_pth, path))\n        return img\n\n    def get_meta_info(self, im_id):\n        try:\n            cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']]\n            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n                                       'motion_class': None,\n                                       'major_class': cat_dict_current['supercategory'],\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n    def get_class_name(self, im_id):\n        cat_dict_current = self.cats[self.coco_set.anns[self.image_list[im_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_image(self, image_id, anno=None):\n        frame = self._get_image(image_id)\n\n        if anno is None:\n            anno = self.get_image_info(image_id)\n\n        object_meta = self.get_meta_info(image_id)\n\n        return frame, anno, object_meta\n"
  },
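A usage sketch for the MSCOCO wrapper above; the root path is a placeholder for a local COCO installation laid out as described in the class docstring:

from lib.train.dataset import MSCOCO

dataset = MSCOCO(root='/data/coco', version='2017', split='train', min_area=100)
print(dataset.get_name(), len(dataset), dataset.get_num_classes())

# Each index addresses one non-crowd annotation, not one image file, so the
# same image can appear under several ids with different target boxes.
frame, anno, meta = dataset.get_image(0)
print(anno['bbox'], meta['object_class_name'])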
  {
    "path": "lib/train/dataset/coco_seq.py",
    "content": "import os\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nimport torch\nimport random\nfrom pycocotools.coco import COCO\nfrom collections import OrderedDict\nfrom lib.train.admin import env_settings\n\n\nclass MSCOCOSeq(BaseVideoDataset):\n    \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n    Publication:\n        Microsoft COCO: Common Objects in Context.\n        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n        ECCV, 2014\n        https://arxiv.org/pdf/1405.0312.pdf\n\n    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n    organized as follows.\n        - coco_root\n            - annotations\n                - instances_train2014.json\n                - instances_train2017.json\n            - images\n                - train2014\n                - train2017\n\n    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n        \"\"\"\n        args:\n            root - path to the coco dataset.\n            image_loader (default_image_loader) -  The function to read the images. If installed,\n                                                   jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n                                                   opencv's imread is used.\n            data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n                                  images  will be used\n            split - 'train' or 'val'.\n            version - version of coco dataset (2014 or 2017)\n        \"\"\"\n        root = env_settings().coco_dir if root is None else root\n        super().__init__('COCO', root, image_loader)\n\n        self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n        self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n        # Load the COCO set.\n        self.coco_set = COCO(self.anno_path)\n\n        self.cats = self.coco_set.cats\n\n        self.class_list = self.get_class_list()\n\n        self.sequence_list = self._get_sequence_list()\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n        self.seq_per_class = self._build_seq_per_class()\n\n    def _get_sequence_list(self):\n        ann_list = list(self.coco_set.anns.keys())\n        seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n        return seq_list\n\n    def is_video_sequence(self):\n        return False\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_name(self):\n        return 'coco'\n\n    def has_class_info(self):\n        return True\n\n    def get_class_list(self):\n        class_list = []\n        for cat_id in self.cats.keys():\n            class_list.append(self.cats[cat_id]['name'])\n        return class_list\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def _build_seq_per_class(self):\n        seq_per_class = {}\n        for i, seq in enumerate(self.sequence_list):\n            class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n            if class_name not in seq_per_class:\n                seq_per_class[class_name] = [i]\n            else:\n                seq_per_class[class_name].append(i)\n\n        return seq_per_class\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def get_sequence_info(self, seq_id):\n        anno = self._get_anno(seq_id)\n\n        bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n        mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n        '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\n        valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_anno(self, seq_id):\n        anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n        return anno\n\n    def _get_frames(self, seq_id):\n        path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n        img = self.image_loader(os.path.join(self.img_pth, path))\n        return img\n\n    def get_meta_info(self, seq_id):\n        try:\n            cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n                                       'motion_class': None,\n                                       'major_class': cat_dict_current['supercategory'],\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n\n    def get_class_name(self, seq_id):\n        cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n        # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n        # list containing these replicated images.\n        frame = self._get_frames(seq_id)\n\n        frame_list = [frame.copy() for _ in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n        object_meta = self.get_meta_info(seq_id)\n\n        return frame_list, anno_frames, object_meta\n"
  },
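Because MSCOCOSeq treats every image as a sequence of length 1, the annotation tensors carry a leading time dimension of one, and requesting several frame ids simply replicates the image. A small sketch (placeholder root):

from lib.train.dataset import MSCOCOSeq

dataset = MSCOCOSeq(root='/data/coco', version='2017', split='train')
info = dataset.get_sequence_info(0)
print(info['bbox'].shape)                       # torch.Size([1, 4])

frames, annos, meta = dataset.get_frames(seq_id=0, frame_ids=[0, 0, 0])
print(len(frames), len(annos['bbox']))          # 3 copies of the same image and box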
  {
    "path": "lib/train/dataset/coco_seq_lmdb.py",
    "content": "import os\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nimport torch\nimport random\nfrom collections import OrderedDict\nfrom lib.train.admin import env_settings\nfrom lib.train.dataset.COCO_tool import COCO\nfrom lib.utils.lmdb_utils import decode_img, decode_json\nimport time\n\nclass MSCOCOSeq_lmdb(BaseVideoDataset):\n    \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n    Publication:\n        Microsoft COCO: Common Objects in Context.\n        Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n        Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n        ECCV, 2014\n        https://arxiv.org/pdf/1405.0312.pdf\n\n    Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n    organized as follows.\n        - coco_root\n            - annotations\n                - instances_train2014.json\n                - instances_train2017.json\n            - images\n                - train2014\n                - train2017\n\n    Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n        \"\"\"\n        args:\n            root - path to the coco dataset.\n            image_loader (default_image_loader) -  The function to read the images. If installed,\n                                                   jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n                                                   opencv's imread is used.\n            data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n                                  images  will be used\n            split - 'train' or 'val'.\n            version - version of coco dataset (2014 or 2017)\n        \"\"\"\n        root = env_settings().coco_dir if root is None else root\n        super().__init__('COCO_lmdb', root, image_loader)\n        self.root = root\n        self.img_pth = 'images/{}{}/'.format(split, version)\n        self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)\n\n        # Load the COCO set.\n        print('loading annotations into memory...')\n        tic = time.time()\n        coco_json = decode_json(root, self.anno_path)\n        print('Done (t={:0.2f}s)'.format(time.time() - tic))\n\n        self.coco_set = COCO(coco_json)\n\n        self.cats = self.coco_set.cats\n\n        self.class_list = self.get_class_list()\n\n        self.sequence_list = self._get_sequence_list()\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n        self.seq_per_class = self._build_seq_per_class()\n\n    def _get_sequence_list(self):\n        ann_list = list(self.coco_set.anns.keys())\n        seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n        return seq_list\n\n    def is_video_sequence(self):\n        return False\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_name(self):\n        return 'coco_lmdb'\n\n    def has_class_info(self):\n        return True\n\n    def get_class_list(self):\n        class_list = []\n        for cat_id in self.cats.keys():\n            class_list.append(self.cats[cat_id]['name'])\n        return class_list\n\n    def has_segmentation_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def _build_seq_per_class(self):\n        seq_per_class = {}\n        for i, seq in enumerate(self.sequence_list):\n            class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n            if class_name not in seq_per_class:\n                seq_per_class[class_name] = [i]\n            else:\n                seq_per_class[class_name].append(i)\n\n        return seq_per_class\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def get_sequence_info(self, seq_id):\n        anno = self._get_anno(seq_id)\n\n        bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n        mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n        '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\n        valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n    def _get_anno(self, seq_id):\n        anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n        return anno\n\n    def _get_frames(self, seq_id):\n        path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n        # img = self.image_loader(os.path.join(self.img_pth, path))\n        img = decode_img(self.root, os.path.join(self.img_pth, path))\n        return img\n\n    def get_meta_info(self, seq_id):\n        try:\n            cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n            object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n                                       'motion_class': None,\n                                       'major_class': cat_dict_current['supercategory'],\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n\n    def get_class_name(self, seq_id):\n        cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n        # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n        # list containing these replicated images.\n        frame = self._get_frames(seq_id)\n\n        frame_list = [frame.copy() for _ in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n        object_meta = self.get_meta_info(seq_id)\n\n        return frame_list, anno_frames, object_meta\n"
  },
  {
    "path": "lib/train/dataset/got10k.py",
    "content": "import os\nimport os.path\nimport numpy as np\nimport torch\nimport csv\nimport pandas\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nfrom lib.train.admin import env_settings\n\n\nclass Got10k(BaseVideoDataset):\n    \"\"\" GOT-10k dataset.\n\n    Publication:\n        GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n        Lianghua Huang, Xin Zhao, and Kaiqi Huang\n        arXiv:1810.11981, 2018\n        https://arxiv.org/pdf/1810.11981.pdf\n\n    Download dataset from http://got-10k.aitestunion.com/downloads\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n                    not NOT the official got-10k validation split. To use the official validation split, provide that as\n                    the root folder instead.\n            seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n                        options can be used at the same time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().got10k_dir if root is None else root\n        super().__init__('GOT10k', root, image_loader)\n\n        # all folders inside the root\n        self.sequence_list = self._get_sequence_list()\n\n        # seq_id is the index of the folder inside the got10k root path\n        if split is not None:\n            if seq_ids is not None:\n                raise ValueError('Cannot set both split_name and seq_ids.')\n            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\n            elif split == 'val':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\n            elif split == 'train_full':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt')\n            elif split == 'vottrain':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\n            elif split == 'votval':\n                file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n            seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze(\"columns\").values.tolist()\n        elif seq_ids is None:\n            seq_ids = list(range(0, len(self.sequence_list)))\n\n        self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        
self.sequence_meta_info = self._load_meta_info()\n        self.seq_per_class = self._build_seq_per_class()\n\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def get_name(self):\n        return 'got10k'\n\n    def has_class_info(self):\n        return True\n\n    def has_occlusion_info(self):\n        return True\n\n    def _load_meta_info(self):\n        sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\n        return sequence_meta_info\n\n    def _read_meta(self, seq_path):\n        try:\n            with open(os.path.join(seq_path, 'meta_info.ini')) as f:\n                meta_info = f.readlines()\n            object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\n                                       'motion_class': meta_info[6].split(': ')[-1][:-1],\n                                       'major_class': meta_info[7].split(': ')[-1][:-1],\n                                       'root_class': meta_info[8].split(': ')[-1][:-1],\n                                       'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\n        except:\n            object_meta = OrderedDict({'object_class_name': None,\n                                       'motion_class': None,\n                                       'major_class': None,\n                                       'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n    def _build_seq_per_class(self):\n        seq_per_class = {}\n\n        for i, s in enumerate(self.sequence_list):\n            object_class = self.sequence_meta_info[s]['object_class_name']\n            if object_class in seq_per_class:\n                seq_per_class[object_class].append(i)\n            else:\n                seq_per_class[object_class] = [i]\n\n        return seq_per_class\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _get_sequence_list(self):\n        with open(os.path.join(self.root, 'list.txt')) as f:\n            dir_list = list(csv.reader(f))\n        dir_list = [dir_name[0] for dir_name in dir_list]\n        return dir_list\n\n    def _read_bb_anno(self, seq_path):\n        bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n        return torch.tensor(gt)\n\n    def _read_target_visible(self, seq_path):\n        # Read full occlusion and out_of_view\n        occlusion_file = os.path.join(seq_path, \"absence.label\")\n        cover_file = os.path.join(seq_path, \"cover.label\")\n\n        with open(occlusion_file, 'r', newline='') as f:\n            occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n        with open(cover_file, 'r', newline='') as f:\n            cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n\n        target_visible = ~occlusion & (cover>0).byte()\n\n        visible_ratio = cover.float() / 8\n        return target_visible, visible_ratio\n\n    def _get_sequence_path(self, seq_id):\n        return os.path.join(self.root, self.sequence_list[seq_id])\n\n    def get_sequence_info(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        bbox = self._read_bb_anno(seq_path)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible, visible_ratio = self._read_target_visible(seq_path)\n       
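 # A frame counts as visible only if the target is not absent, has nonzero cover (cover > 0), and its box is valid.\n       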
 visible = visible & valid.byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n    def _get_frame_path(self, seq_path, frame_id):\n        return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1))    # frames start from 1\n\n    def _get_frame(self, seq_path, frame_id):\n        return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n    def get_class_name(self, seq_id):\n        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n        return obj_meta['object_class_name']\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        seq_path = self._get_sequence_path(seq_id)\n        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        return frame_list, anno_frames, obj_meta\n"
  },
  {
    "path": "lib/train/dataset/got10k_lmdb.py",
    "content": "import os\nimport os.path\nimport numpy as np\nimport torch\nimport csv\nimport pandas\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nfrom lib.train.admin import env_settings\n\n'''2021.1.16 Gok10k for loading lmdb dataset'''\nfrom lib.utils.lmdb_utils import *\n\n\nclass Got10k_lmdb(BaseVideoDataset):\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n                    not NOT the official got-10k validation split. To use the official validation split, provide that as\n                    the root folder instead.\n            seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n                        options can be used at the same time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            use_lmdb - whether the dataset is stored in lmdb format\n        \"\"\"\n        root = env_settings().got10k_lmdb_dir if root is None else root\n        super().__init__('GOT10k_lmdb', root, image_loader)\n\n        # all folders inside the root\n        self.sequence_list = self._get_sequence_list()\n\n        # seq_id is the index of the folder inside the got10k root path\n        if split is not None:\n            if seq_ids is not None:\n                raise ValueError('Cannot set both split_name and seq_ids.')\n            train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt')\n            elif split == 'val':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt')\n            elif split == 'train_full':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt')\n            elif split == 'vottrain':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt')\n            elif split == 'votval':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n        elif seq_ids is None:\n            seq_ids = list(range(0, len(self.sequence_list)))\n\n        self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        self.sequence_meta_info = self._load_meta_info()\n        self.seq_per_class = self._build_seq_per_class()\n\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def get_name(self):\n        return 'got10k_lmdb'\n\n    def 
has_class_info(self):\n        return True\n\n    def has_occlusion_info(self):\n        return True\n\n    def _load_meta_info(self):\n        def _read_meta(meta_info):\n\n            object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1],\n                                       'motion_class': meta_info[6].split(': ')[-1],\n                                       'major_class': meta_info[7].split(': ')[-1],\n                                       'root_class': meta_info[8].split(': ')[-1],\n                                       'motion_adverb': meta_info[9].split(': ')[-1]})\n\n            return object_meta\n        sequence_meta_info = {}\n        for s in self.sequence_list:\n            try:\n                meta_str = decode_str(self.root, \"train/%s/meta_info.ini\" %s)\n                sequence_meta_info[s] = _read_meta(meta_str.split('\\n'))\n            except:\n                sequence_meta_info[s] = OrderedDict({'object_class_name': None,\n                                                     'motion_class': None,\n                                                     'major_class': None,\n                                                     'root_class': None,\n                                                     'motion_adverb': None})\n        return sequence_meta_info\n\n    def _build_seq_per_class(self):\n        seq_per_class = {}\n\n        for i, s in enumerate(self.sequence_list):\n            object_class = self.sequence_meta_info[s]['object_class_name']\n            if object_class in seq_per_class:\n                seq_per_class[object_class].append(i)\n            else:\n                seq_per_class[object_class] = [i]\n\n        return seq_per_class\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _get_sequence_list(self):\n        dir_str = decode_str(self.root, 'train/list.txt')\n        dir_list = dir_str.split('\\n')\n        return dir_list\n\n    def _read_bb_anno(self, seq_path):\n        bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n        gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1]  # the last line in got10k is empty\n        gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n        gt_arr = np.array(gt_list).astype(np.float32)\n\n        return torch.tensor(gt_arr)\n\n    def _read_target_visible(self, seq_path):\n        # full occlusion and out_of_view files\n        occlusion_file = os.path.join(seq_path, \"absence.label\")\n        cover_file = os.path.join(seq_path, \"cover.label\")\n        # Read these files\n        occ_list = list(map(int, decode_str(self.root, occlusion_file).split('\\n')[:-1]))  # the last line in got10k is empty\n        occlusion = torch.ByteTensor(occ_list)\n        cover_list = list(map(int, decode_str(self.root, cover_file).split('\\n')[:-1]))  # the last line in got10k is empty\n        cover = torch.ByteTensor(cover_list)\n\n        target_visible = ~occlusion & (cover>0).byte()\n\n        visible_ratio = cover.float() / 8\n        return target_visible, visible_ratio\n\n    def _get_sequence_path(self, seq_id):\n        return os.path.join(\"train\", self.sequence_list[seq_id])\n\n    def get_sequence_info(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        bbox = self._read_bb_anno(seq_path)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible, visible_ratio = self._read_target_visible(seq_path)\n        visible = visible & 
valid.byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n    def _get_frame_path(self, seq_path, frame_id):\n        return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1))    # frames start from 1\n\n    def _get_frame(self, seq_path, frame_id):\n        return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n    def get_class_name(self, seq_id):\n        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n        return obj_meta['object_class_name']\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        seq_path = self._get_sequence_path(seq_id)\n        obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        return frame_list, anno_frames, obj_meta\n"
  },
  {
    "path": "lib/train/dataset/imagenetvid.py",
    "content": "import os\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nimport xml.etree.ElementTree as ET\nimport json\nimport torch\nfrom collections import OrderedDict\nfrom lib.train.admin import env_settings\n\n\ndef get_target_to_image_ratio(seq):\n    anno = torch.Tensor(seq['anno'])\n    img_sz = torch.Tensor(seq['image_size'])\n    return (anno[0, 2:4].prod() / (img_sz.prod())).sqrt()\n\n\nclass ImagenetVID(BaseVideoDataset):\n    \"\"\" Imagenet VID dataset.\n\n    Publication:\n        ImageNet Large Scale Visual Recognition Challenge\n        Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n        Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\n        IJCV, 2015\n        https://arxiv.org/pdf/1409.0575.pdf\n\n    Download the dataset from http://image-net.org/\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n        \"\"\"\n        args:\n            root - path to the imagenet vid dataset.\n            image_loader (default_image_loader) -  The function to read the images. If installed,\n                                                   jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n                                                   opencv's imread is used.\n            min_length - Minimum allowed sequence length.\n            max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\n                                which cover complete image.\n        \"\"\"\n        root = env_settings().imagenet_dir if root is None else root\n        super().__init__(\"imagenetvid\", root, image_loader)\n\n        cache_file = os.path.join(root, 'cache.json')\n        if os.path.isfile(cache_file):\n            # If available, load the pre-processed cache file containing meta-info for each sequence\n            with open(cache_file, 'r') as f:\n                sequence_list_dict = json.load(f)\n\n            self.sequence_list = sequence_list_dict\n        else:\n            # Else process the imagenet annotations and generate the cache file\n            self.sequence_list = self._process_anno(root)\n\n            with open(cache_file, 'w') as f:\n                json.dump(self.sequence_list, f)\n\n        # Filter the sequences based on min_length and max_target_area in the first frame\n        self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n                              get_target_to_image_ratio(x) < max_target_area]\n\n    def get_name(self):\n        return 'imagenetvid'\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def get_sequence_info(self, seq_id):\n        bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n        valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n        visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n        return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n    def _get_frame(self, sequence, frame_id):\n        set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n        vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n        frame_number = frame_id + sequence['start_frame']\n        frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\n                                  
'{:06d}.JPEG'.format(frame_number))\n        return self.image_loader(frame_path)\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        sequence = self.sequence_list[seq_id]\n\n        frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        # Create anno dict\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        # added the class info to the meta info\n        object_meta = OrderedDict({'object_class': sequence['class_name'],\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n\n    def _process_anno(self, root):\n        # Builds individual tracklets\n        base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train')\n\n        all_sequences = []\n        for set in sorted(os.listdir(base_vid_anno_path)):\n            set_id = int(set.split('_')[-1])\n            for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\n\n                vid_id = int(vid.split('_')[-1])\n                anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\n\n                frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\n                image_size = [int(frame1_anno.find('size/width').text), int(frame1_anno.find('size/height').text)]\n\n                objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\n                           for f in anno_files]\n\n                tracklets = {}\n\n                # Find all tracklets along with start frame\n                for f_id, all_targets in enumerate(objects):\n                    for target in all_targets:\n                        tracklet_id = target.find('trackid').text\n                        if tracklet_id not in tracklets:\n                            tracklets[tracklet_id] = f_id\n\n                for tracklet_id, tracklet_start in tracklets.items():\n                    tracklet_anno = []\n                    target_visible = []\n                    class_name_id = None\n\n                    for f_id in range(tracklet_start, len(objects)):\n                        found = False\n                        for target in objects[f_id]:\n                            if target.find('trackid').text == tracklet_id:\n                                if not class_name_id:\n                                    class_name_id = target.find('name').text\n                                x1 = int(target.find('bndbox/xmin').text)\n                                y1 = int(target.find('bndbox/ymin').text)\n                                x2 = int(target.find('bndbox/xmax').text)\n                                y2 = int(target.find('bndbox/ymax').text)\n\n                                tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\n                                target_visible.append(target.find('occluded').text == '0')\n\n                                found = True\n                                break\n                        if not found:\n                            break\n\n                    new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\n                  
                  'start_frame': tracklet_start, 'anno': tracklet_anno,\n                                    'target_visible': target_visible, 'image_size': image_size}\n                    all_sequences.append(new_sequence)\n\n        return all_sequences\n"
  },
  {
    "path": "lib/train/dataset/imagenetvid_lmdb.py",
    "content": "import os\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nimport torch\nfrom collections import OrderedDict\nfrom lib.train.admin import env_settings\nfrom lib.utils.lmdb_utils import decode_img, decode_json\n\n\ndef get_target_to_image_ratio(seq):\n    anno = torch.Tensor(seq['anno'])\n    img_sz = torch.Tensor(seq['image_size'])\n    return (anno[0, 2:4].prod() / (img_sz.prod())).sqrt()\n\n\nclass ImagenetVID_lmdb(BaseVideoDataset):\n    \"\"\" Imagenet VID dataset.\n\n    Publication:\n        ImageNet Large Scale Visual Recognition Challenge\n        Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n        Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\n        IJCV, 2015\n        https://arxiv.org/pdf/1409.0575.pdf\n\n    Download the dataset from http://image-net.org/\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n        \"\"\"\n        args:\n            root - path to the imagenet vid dataset.\n            image_loader (default_image_loader) -  The function to read the images. If installed,\n                                                   jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n                                                   opencv's imread is used.\n            min_length - Minimum allowed sequence length.\n            max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\n                                which cover complete image.\n        \"\"\"\n        root = env_settings().imagenet_dir if root is None else root\n        super().__init__(\"imagenetvid_lmdb\", root, image_loader)\n\n        sequence_list_dict = decode_json(root, \"cache.json\")\n        self.sequence_list = sequence_list_dict\n\n        # Filter the sequences based on min_length and max_target_area in the first frame\n        self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n                              get_target_to_image_ratio(x) < max_target_area]\n\n    def get_name(self):\n        return 'imagenetvid_lmdb'\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def get_sequence_info(self, seq_id):\n        bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n        valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n        visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n        return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n    def _get_frame(self, sequence, frame_id):\n        set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n        vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n        frame_number = frame_id + sequence['start_frame']\n        frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name,\n                                  '{:06d}.JPEG'.format(frame_number))\n        return decode_img(self.root, frame_path)\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        sequence = self.sequence_list[seq_id]\n\n        frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        # Create anno dict\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for 
f_id in frame_ids]\n\n        # added the class info to the meta info\n        object_meta = OrderedDict({'object_class': sequence['class_name'],\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n\n"
  },
  {
    "path": "lib/train/dataset/lasot.py",
    "content": "import os\nimport os.path\nimport torch\nimport numpy as np\nimport pandas\nimport csv\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nfrom lib.train.admin import env_settings\n\n\nclass Lasot(BaseVideoDataset):\n    \"\"\" LaSOT dataset.\n\n    Publication:\n        LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n        Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n        CVPR, 2019\n        https://arxiv.org/pdf/1809.07845.pdf\n\n    Download the dataset from https://cis.temple.edu/lasot/download.html\n    \"\"\"\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the lasot dataset.\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n                    videos with subscripts -1, -3, and -5 from each class will be used for training.\n            split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n                    vid_ids or split option can be used at a time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().lasot_dir if root is None else root\n        super().__init__('LaSOT', root, image_loader)\n\n        # Keep a list of all classes\n        self.class_list = [f for f in os.listdir(self.root)]\n        self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n        self.sequence_list = self._build_sequence_list(vid_ids, split)\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        self.seq_per_class = self._build_class_list()\n\n    def _build_sequence_list(self, vid_ids=None, split=None):\n        if split is not None:\n            if vid_ids is not None:\n                raise ValueError('Cannot set both split_name and vid_ids.')\n            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            # sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n            sequence_list = pandas.read_csv(file_path, header=None).squeeze(\"columns\").values.tolist()\n        elif vid_ids is not None:\n            sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n        else:\n            raise ValueError('Set either split_name or vid_ids.')\n\n        return sequence_list\n\n    def _build_class_list(self):\n        seq_per_class = {}\n        for seq_id, seq_name in enumerate(self.sequence_list):\n            class_name = seq_name.split('-')[0]\n            if class_name in seq_per_class:\n                seq_per_class[class_name].append(seq_id)\n            else:\n                seq_per_class[class_name] = [seq_id]\n\n        return 
seq_per_class\n\n    def get_name(self):\n        return 'lasot'\n\n    def has_class_info(self):\n        return True\n\n    def has_occlusion_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return len(self.sequence_list)\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _read_bb_anno(self, seq_path):\n        bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n        return torch.tensor(gt)\n\n    def _read_target_visible(self, seq_path):\n        # Read full occlusion and out_of_view\n        occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\n        out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n        with open(occlusion_file, 'r', newline='') as f:\n            occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n        with open(out_of_view_file, 'r') as f:\n            out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n\n        target_visible = ~occlusion & ~out_of_view\n\n        return target_visible\n\n    def _get_sequence_path(self, seq_id):\n        seq_name = self.sequence_list[seq_id]\n        class_name = seq_name.split('-')[0]\n        vid_id = seq_name.split('-')[1]\n\n        return os.path.join(self.root, class_name, class_name + '-' + vid_id)\n\n    def get_sequence_info(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        bbox = self._read_bb_anno(seq_path)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = self._read_target_visible(seq_path) & valid.byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def _get_frame_path(self, seq_path, frame_id):\n        return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1))    # frames start from 1\n\n    def _get_frame(self, seq_path, frame_id):\n        return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n    def _get_class(self, seq_path):\n        raw_class = seq_path.split('/')[-2]\n        return raw_class\n\n    def get_class_name(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        obj_class = self._get_class(seq_path)\n\n        return obj_class\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        seq_path = self._get_sequence_path(seq_id)\n\n        obj_class = self._get_class(seq_path)\n        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        object_meta = OrderedDict({'object_class_name': obj_class,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n"
  },
  {
    "path": "lib/train/dataset/lasot_lmdb.py",
    "content": "import os\nimport os.path\nimport torch\nimport numpy as np\nimport pandas\nimport csv\nimport random\nfrom collections import OrderedDict\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.data import jpeg4py_loader\nfrom lib.train.admin import env_settings\n'''2021.1.16 Lasot for loading lmdb dataset'''\nfrom lib.utils.lmdb_utils import *\n\n\nclass Lasot_lmdb(BaseVideoDataset):\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the lasot dataset.\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n                    videos with subscripts -1, -3, and -5 from each class will be used for training.\n            split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n                    vid_ids or split option can be used at a time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().lasot_lmdb_dir if root is None else root\n        super().__init__('LaSOT_lmdb', root, image_loader)\n\n        self.sequence_list = self._build_sequence_list(vid_ids, split)\n        class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]\n        self.class_list = []\n        for ele in class_list:\n            if ele not in self.class_list:\n                self.class_list.append(ele)\n        # Keep a list of all classes\n        self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        self.seq_per_class = self._build_class_list()\n\n    def _build_sequence_list(self, vid_ids=None, split=None):\n        if split is not None:\n            if vid_ids is not None:\n                raise ValueError('Cannot set both split_name and vid_ids.')\n            ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n        elif vid_ids is not None:\n            sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n        else:\n            raise ValueError('Set either split_name or vid_ids.')\n\n        return sequence_list\n\n    def _build_class_list(self):\n        seq_per_class = {}\n        for seq_id, seq_name in enumerate(self.sequence_list):\n            class_name = seq_name.split('-')[0]\n            if class_name in seq_per_class:\n                seq_per_class[class_name].append(seq_id)\n            else:\n                seq_per_class[class_name] = [seq_id]\n\n        return seq_per_class\n\n    def get_name(self):\n        return 'lasot_lmdb'\n\n    def has_class_info(self):\n        return True\n\n    def has_occlusion_info(self):\n        return True\n\n    def get_num_sequences(self):\n        return 
len(self.sequence_list)\n\n    def get_num_classes(self):\n        return len(self.class_list)\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _read_bb_anno(self, seq_path):\n        bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n        gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1]  # the last line is empty\n        gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n        gt_arr = np.array(gt_list).astype(np.float32)\n        return torch.tensor(gt_arr)\n\n    def _read_target_visible(self, seq_path):\n        # Read full occlusion and out_of_view\n        occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\n        out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n        occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))\n        occlusion = torch.ByteTensor(occ_list)\n        out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))\n        out_of_view = torch.ByteTensor(out_view_list)\n\n        target_visible = ~occlusion & ~out_of_view\n\n        return target_visible\n\n    def _get_sequence_path(self, seq_id):\n        seq_name = self.sequence_list[seq_id]\n        class_name = seq_name.split('-')[0]\n        vid_id = seq_name.split('-')[1]\n\n        return os.path.join(class_name, class_name + '-' + vid_id)\n\n    def get_sequence_info(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        bbox = self._read_bb_anno(seq_path)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = self._read_target_visible(seq_path) & valid.byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def _get_frame_path(self, seq_path, frame_id):\n        return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1))    # frames start from 1\n\n    def _get_frame(self, seq_path, frame_id):\n        return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n    def _get_class(self, seq_path):\n        raw_class = seq_path.split('/')[-2]\n        return raw_class\n\n    def get_class_name(self, seq_id):\n        seq_path = self._get_sequence_path(seq_id)\n        obj_class = self._get_class(seq_path)\n\n        return obj_class\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        seq_path = self._get_sequence_path(seq_id)\n\n        obj_class = self._get_class(seq_path)\n        frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        object_meta = OrderedDict({'object_class_name': obj_class,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n"
  },
  {
    "path": "lib/train/dataset/tracking_net.py",
    "content": "import torch\nimport os\nimport os.path\nimport numpy as np\nimport pandas\nimport random\nfrom collections import OrderedDict\n\nfrom lib.train.data import jpeg4py_loader\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.admin import env_settings\n\n\ndef list_sequences(root, set_ids):\n    \"\"\" Lists all the videos in the input set_ids. Returns a list of tuples (set_id, video_name)\n\n    args:\n        root: Root directory to TrackingNet\n        set_ids: Sets (0-11) which are to be used\n\n    returns:\n        list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence\n    \"\"\"\n    sequence_list = []\n\n    for s in set_ids:\n        anno_dir = os.path.join(root, \"TRAIN_\" + str(s), \"anno\")\n\n        sequences_cur_set = [(s, os.path.splitext(f)[0]) for f in os.listdir(anno_dir) if f.endswith('.txt')]\n        sequence_list += sequences_cur_set\n\n    return sequence_list\n\n\nclass TrackingNet(BaseVideoDataset):\n    \"\"\" TrackingNet dataset.\n\n    Publication:\n        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n        Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n        ECCV, 2018\n        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root        - The path to the TrackingNet folder, containing the training sets.\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n                            sets (0 - 11) will be used.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().trackingnet_dir if root is None else root\n        super().__init__('TrackingNet', root, image_loader)\n\n        if set_ids is None:\n            set_ids = [i for i in range(12)]\n\n        self.set_ids = set_ids\n\n        # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n        # video_name for each sequence\n        self.sequence_list = list_sequences(self.root, self.set_ids)\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n        self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n        # we do not have the class_lists for the tracking net\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def _load_class_info(self):\n        ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n        class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n        with open(class_map_path, 'r') as f:\n            seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n        seq_per_class = {}\n        for i, seq in enumerate(self.sequence_list):\n            class_name = seq_to_class_map.get(seq[1], 'Unknown')\n            if class_name not in seq_per_class:\n                seq_per_class[class_name] = [i]\n            else:\n                seq_per_class[class_name].append(i)\n\n        return seq_to_class_map, seq_per_class\n\n    def get_name(self):\n        return 'trackingnet'\n\n    def has_class_info(self):\n        return True\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _read_bb_anno(self, seq_id):\n        set_id = self.sequence_list[seq_id][0]\n        vid_name = self.sequence_list[seq_id][1]\n        bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\n        gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\n                             low_memory=False).values\n        return torch.tensor(gt)\n\n    def get_sequence_info(self, seq_id):\n        bbox = self._read_bb_anno(seq_id)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = valid.clone().byte()\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def _get_frame(self, seq_id, frame_id):\n        set_id = self.sequence_list[seq_id][0]\n        vid_name = self.sequence_list[seq_id][1]\n        frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\n        return self.image_loader(frame_path)\n\n    def _get_class(self, seq_id):\n        seq_name = self.sequence_list[seq_id][1]\n        return self.seq_to_class_map[seq_name]\n\n    def get_class_name(self, seq_id):\n        obj_class = self._get_class(seq_id)\n\n        return obj_class\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        obj_class = self._get_class(seq_id)\n\n        object_meta = OrderedDict({'object_class_name': obj_class,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return frame_list, anno_frames, object_meta\n"
  },
  {
    "path": "lib/train/dataset/tracking_net_lmdb.py",
    "content": "import torch\nimport os\nimport os.path\nimport numpy as np\nimport random\nfrom collections import OrderedDict\n\nfrom lib.train.data import jpeg4py_loader\nfrom .base_video_dataset import BaseVideoDataset\nfrom lib.train.admin import env_settings\nimport json\nfrom lib.utils.lmdb_utils import decode_img, decode_str\n\n\ndef list_sequences(root):\n    \"\"\" Lists all the videos in the input set_ids. Returns a list of tuples (set_id, video_name)\n\n    args:\n        root: Root directory to TrackingNet\n\n    returns:\n        list - list of tuples (set_id, video_name) containing the set_id and video_name for each sequence\n    \"\"\"\n    fname = os.path.join(root, \"seq_list.json\")\n    with open(fname, \"r\") as f:\n        sequence_list = json.loads(f.read())\n    return sequence_list\n\n\nclass TrackingNet_lmdb(BaseVideoDataset):\n    \"\"\" TrackingNet dataset.\n\n    Publication:\n        TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n        Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n        ECCV, 2018\n        https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n    Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n    \"\"\"\n    def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root        - The path to the TrackingNet folder, containing the training sets.\n            image_loader (jpeg4py_loader) -  The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n                            sets (0 - 11) will be used.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n        \"\"\"\n        root = env_settings().trackingnet_lmdb_dir if root is None else root\n        super().__init__('TrackingNet_lmdb', root, image_loader)\n\n        if set_ids is None:\n            set_ids = [i for i in range(12)]\n\n        self.set_ids = set_ids\n\n        # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n        # video_name for each sequence\n        self.sequence_list = list_sequences(self.root)\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n        self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n        # we do not have the class_lists for the tracking net\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def _load_class_info(self):\n        ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n        class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n        with open(class_map_path, 'r') as f:\n            seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n        seq_per_class = {}\n        for i, seq in enumerate(self.sequence_list):\n            class_name = seq_to_class_map.get(seq[1], 'Unknown')\n            if class_name not in seq_per_class:\n                seq_per_class[class_name] = [i]\n            else:\n                seq_per_class[class_name].append(i)\n\n        return seq_to_class_map, seq_per_class\n\n    def get_name(self):\n        return 'trackingnet_lmdb'\n\n    def has_class_info(self):\n        return True\n\n    def get_sequences_in_class(self, class_name):\n        return self.seq_per_class[class_name]\n\n    def _read_bb_anno(self, seq_id):\n        set_id = self.sequence_list[seq_id][0]\n        vid_name = self.sequence_list[seq_id][1]\n        gt_str_list = decode_str(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n                                 os.path.join(\"anno\", vid_name + \".txt\")).split('\\n')[:-1]\n        gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n        gt_arr = np.array(gt_list).astype(np.float32)\n        return torch.tensor(gt_arr)\n\n    def get_sequence_info(self, seq_id):\n        bbox = self._read_bb_anno(seq_id)\n\n        valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n        visible = valid.clone().byte()\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def _get_frame(self, seq_id, frame_id):\n        set_id = self.sequence_list[seq_id][0]\n        vid_name = self.sequence_list[seq_id][1]\n        return decode_img(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n                          os.path.join(\"frames\", vid_name, str(frame_id) + \".jpg\"))\n\n    def _get_class(self, seq_id):\n        seq_name = self.sequence_list[seq_id][1]\n        return self.seq_to_class_map[seq_name]\n\n    def get_class_name(self, seq_id):\n        obj_class = self._get_class(seq_id)\n\n        return obj_class\n\n    def get_frames(self, seq_id, frame_ids, anno=None):\n        frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n        obj_class = self._get_class(seq_id)\n\n        object_meta = OrderedDict({'object_class_name': obj_class,\n                                   'motion_class': None,\n                                   'major_class': None,\n                                   'root_class': None,\n                                   'motion_adverb': None})\n\n        return 
frame_list, anno_frames, object_meta\n"
  },
  {
    "path": "lib/train/run_training.py",
    "content": "import os\nimport sys\nimport argparse\nimport importlib\nimport cv2 as cv\nimport torch.backends.cudnn\nimport torch.distributed as dist\nimport torch\nimport random\nimport numpy as np\ntorch.backends.cudnn.benchmark = False\n\nimport _init_paths\nimport lib.train.admin.settings as ws_settings\n\n\ndef init_seeds(seed):\n    random.seed(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed(seed)\n    torch.backends.cudnn.deterministic = True\n    torch.backends.cudnn.benchmark = False\n    torch.set_num_threads(4)\n    cv.setNumThreads(1)\n    cv.ocl.setUseOpenCL(False)\n\n\ndef run_training(script_name, config_name, cudnn_benchmark=True, local_rank=-1, save_dir=None, base_seed=None,\n                 use_lmdb=False, script_name_prv=None, config_name_prv=None, use_wandb=False,\n                 distill=None, script_teacher=None, config_teacher=None):\n    \"\"\"Run the train script.\n    args:\n        script_name: Name of emperiment in the \"experiments/\" folder.\n        config_name: Name of the yaml file in the \"experiments/<script_name>\".\n        cudnn_benchmark: Use cudnn benchmark or not (default is True).\n    \"\"\"\n    if save_dir is None:\n        print(\"save_dir dir is not given. Use the default dir instead.\")\n    # This is needed to avoid strange crashes related to opencv\n    torch.set_num_threads(4)\n    cv.setNumThreads(4)\n\n    torch.backends.cudnn.benchmark = cudnn_benchmark\n\n    print('script_name: {}.py  config_name: {}.yaml'.format(script_name, config_name))\n\n    '''2021.1.5 set seed for different process'''\n    if base_seed is not None:\n        if local_rank != -1:\n            init_seeds(base_seed + local_rank)\n        else:\n            init_seeds(base_seed)\n\n    settings = ws_settings.Settings()\n    settings.script_name = script_name\n    settings.config_name = config_name\n    settings.project_path = 'train/{}/{}'.format(script_name, config_name)\n    if script_name_prv is not None and config_name_prv is not None:\n        settings.project_path_prv = 'train/{}/{}'.format(script_name_prv, config_name_prv)\n    settings.local_rank = local_rank\n    settings.save_dir = os.path.abspath(save_dir)\n    settings.use_lmdb = use_lmdb\n    prj_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), \"../..\"))\n    settings.cfg_file = os.path.join(prj_dir, 'experiments/%s/%s.yaml' % (script_name, config_name))\n    settings.use_wandb = use_wandb\n    if distill:\n        settings.distill = distill\n        settings.script_teacher = script_teacher\n        settings.config_teacher = config_teacher\n        if script_teacher is not None and config_teacher is not None:\n            settings.project_path_teacher = 'train/{}/{}'.format(script_teacher, config_teacher)\n        settings.cfg_file_teacher = os.path.join(prj_dir, 'experiments/%s/%s.yaml' % (script_teacher, config_teacher))\n        expr_module = importlib.import_module('lib.train.train_script_distill')\n    else:\n        expr_module = importlib.import_module('lib.train.train_script')\n    expr_func = getattr(expr_module, 'run')\n\n    expr_func(settings)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Run a train scripts in train_settings.')\n    parser.add_argument('--script', type=str, required=True, help='Name of the train script.')\n    parser.add_argument('--config', type=str, required=True, help=\"Name of the config file.\")\n    parser.add_argument('--cudnn_benchmark', type=bool, default=False, help='Set cudnn 
benchmark on (1) or off (0) (default is on).')\n    parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed training')\n    parser.add_argument('--save_dir', type=str, help='the directory to save checkpoints and logs')\n    parser.add_argument('--seed', type=int, default=42, help='seed for random numbers')\n    parser.add_argument('--use_lmdb', type=int, choices=[0, 1], default=0)  # whether datasets are in lmdb format\n    parser.add_argument('--script_prv', type=str, default=None, help='Name of the train script of previous model.')\n    parser.add_argument('--config_prv', type=str, default=None, help=\"Name of the config file of previous model.\")\n    parser.add_argument('--use_wandb', type=int, choices=[0, 1], default=0)  # whether to use wandb\n    # for knowledge distillation\n    parser.add_argument('--distill', type=int, choices=[0, 1], default=0)  # whether to use knowledge distillation\n    parser.add_argument('--script_teacher', type=str, help='teacher script name')\n    parser.add_argument('--config_teacher', type=str, help='teacher yaml configure file name')\n\n    args = parser.parse_args()\n    if args.local_rank != -1:\n        dist.init_process_group(backend='nccl')\n        torch.cuda.set_device(args.local_rank)\n    else:\n        torch.cuda.set_device(0)\n    run_training(args.script, args.config, cudnn_benchmark=args.cudnn_benchmark,\n                 local_rank=args.local_rank, save_dir=args.save_dir, base_seed=args.seed,\n                 use_lmdb=args.use_lmdb, script_name_prv=args.script_prv, config_name_prv=args.config_prv,\n                 use_wandb=args.use_wandb,\n                 distill=args.distill, script_teacher=args.script_teacher, config_teacher=args.config_teacher)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
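  {
    "path": "examples/launch_training_example.py",
    "content": "\"\"\"Illustrative sketch added by the editor; not part of the original repo.\n\nShows how lib/train/run_training.py is typically launched, using only the\ncommand-line flags defined in its argparse setup. The --script/--config\nvalues below are hypothetical placeholders; use a real folder under\nexperiments/ and a yaml name inside it.\n\"\"\"\nimport subprocess\n\n# Single-GPU run: run_training.py resolves experiments/<script>/<config>.yaml\n# relative to the project root and then calls lib.train.train_script.run().\nsubprocess.run([\n    'python', 'lib/train/run_training.py',\n    '--script', 'artrack',          # experiment folder (a real branch name in train_script.py)\n    '--config', 'example_config',   # hypothetical yaml name\n    '--save_dir', './output',\n    '--seed', '42',\n], check=True)\n\n# Multi-GPU DDP run: torch.distributed.launch spawns one process per GPU and\n# passes --local_rank to each, which run_training.py uses to pick the device\n# and to call dist.init_process_group(backend='nccl'):\n#   python -m torch.distributed.launch --nproc_per_node 4 lib/train/run_training.py \\\n#       --script artrack --config example_config --save_dir ./output\n"
  },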
  {
    "path": "lib/train/train_script.py",
    "content": "import os\n# loss function related\nfrom lib.utils.box_ops import giou_loss\nfrom torch.nn.functional import l1_loss\nfrom torch.nn import BCEWithLogitsLoss\n# train pipeline related\nfrom lib.train.trainers import LTRTrainer, LTRSeqTrainer, LTRSeqTrainerV2\nfrom lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet\nfrom lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb\nfrom lib.train.data import sampler, opencv_loader, processing, LTRLoader, sequence_sampler, sequence_sampler_v2\n# distributed training related\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n# some more advanced functions\nfrom .base_functions import *\n# network related\nfrom lib.models.artrack import build_artrack\nfrom lib.models.artrackv2 import build_artrackv2\nfrom lib.models.artrack_seq import build_artrack_seq\nfrom lib.models.artrackv2_seq import build_artrackv2_seq\n# forward propagation related\nfrom lib.train.actors import ARTrackActor, ARTrackSeqActor, ARTrackV2Actor, ARTrackV2SeqActor\n# for import modules\nimport importlib\n\nfrom ..utils.focal_loss import FocalLoss\n\ndef names2datasets(name_list: list, settings, image_loader):\n    assert isinstance(name_list, list)\n    datasets = []\n    #settings.use_lmdb = True\n    for name in name_list:\n        assert name in [\"LASOT\", \"GOT10K_vottrain\", \"GOT10K_votval\", \"GOT10K_train_full\", \"GOT10K_official_val\",\n                        \"COCO17\", \"VID\", \"TRACKINGNET\"]\n        if name == \"LASOT\":\n            if settings.use_lmdb:\n                print(\"Building lasot dataset from lmdb\")\n                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))\n            else:\n                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))\n        if name == \"GOT10K_vottrain\":\n            if settings.use_lmdb:\n                print(\"Building got10k from lmdb\")\n                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))\n            else:\n                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))\n        if name == \"GOT10K_train_full\":\n            if settings.use_lmdb:\n                print(\"Building got10k_train_full from lmdb\")\n                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader))\n            else:\n                datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader))\n        if name == \"GOT10K_votval\":\n            if settings.use_lmdb:\n                print(\"Building got10k from lmdb\")\n                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader))\n            else:\n                datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader))\n        if name == \"GOT10K_official_val\":\n            if settings.use_lmdb:\n                raise ValueError(\"Not implement\")\n            else:\n                datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader))\n        if name == \"COCO17\":\n            if settings.use_lmdb:\n                print(\"Building COCO2017 from lmdb\")\n                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version=\"2017\", 
image_loader=image_loader))\n            else:\n                datasets.append(MSCOCOSeq(settings.env.coco_dir, version=\"2017\", image_loader=image_loader))\n        if name == \"VID\":\n            if settings.use_lmdb:\n                print(\"Building VID from lmdb\")\n                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader))\n            else:\n                datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader))\n        if name == \"TRACKINGNET\":\n            if settings.use_lmdb:\n                print(\"Building TrackingNet from lmdb\")\n                datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader))\n            else:\n                # raise ValueError(\"NOW WE CAN ONLY USE TRACKINGNET FROM LMDB\")\n                datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader))\n    return datasets\n\ndef slt_collate(batch):\n    ret = {}\n    for k in batch[0].keys():\n        here_list = []\n        for ex in batch:\n            here_list.append(ex[k])\n        ret[k] = here_list\n    return ret\n\nclass SLTLoader(torch.utils.data.dataloader.DataLoader):\n    \"\"\"\n    Data loader. Combines a dataset and a sampler, and provides\n    single- or multi-process iterators over the dataset.\n    \"\"\"\n\n    __initialized = False\n\n    def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n                 num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\n                 timeout=0, worker_init_fn=None):\n\n        if collate_fn is None:\n            collate_fn = slt_collate\n\n        super(SLTLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n                 num_workers, collate_fn, pin_memory, drop_last,\n                 timeout, worker_init_fn)\n\n        self.name = name\n        self.training = training\n        self.epoch_interval = epoch_interval\n        self.stack_dim = stack_dim\n\ndef run(settings):\n    settings.description = 'Training script for ARTrack, ARTrackV2, and their sequence-level variants'\n\n    # update the default configs with config file\n    if not os.path.exists(settings.cfg_file):\n        raise ValueError(\"%s doesn't exist.\" % settings.cfg_file)\n    config_module = importlib.import_module(\"lib.config.%s.config\" % settings.script_name)\n    cfg = config_module.cfg\n    config_module.update_config_from_file(settings.cfg_file)\n    if settings.local_rank in [-1, 0]:\n        print(\"New configuration is shown below.\")\n        for key in cfg.keys():\n            print(\"%s configuration:\" % key, cfg[key])\n            print('\\n')\n\n    # update settings based on cfg\n    update_settings(settings, cfg)\n\n    # Record the training log\n    log_dir = os.path.join(settings.save_dir, 'logs')\n    if settings.local_rank in [-1, 0]:\n        if not os.path.exists(log_dir):\n            os.makedirs(log_dir)\n    settings.log_file = os.path.join(log_dir, \"%s-%s.log\" % (settings.script_name, settings.config_name))\n\n    # Build dataloaders\n\n    if \"RepVGG\" in cfg.MODEL.BACKBONE.TYPE or \"swin\" in cfg.MODEL.BACKBONE.TYPE or \"LightTrack\" in cfg.MODEL.BACKBONE.TYPE:\n        cfg.ckpt_dir = settings.save_dir\n    bins = cfg.MODEL.BINS\n    search_size = cfg.DATA.SEARCH.SIZE\n    # Create network\n    if settings.script_name == \"artrack\":\n        net = 
build_artrack(cfg)\n        loader_train, loader_val = build_dataloaders(cfg, settings)\n    elif settings.script_name == \"artrack_seq\":\n        net = build_artrack_seq(cfg)\n        dataset_train = sequence_sampler.SequenceSampler(\n            datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader),\n            p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO,\n            samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH,\n            max_gap=cfg.DATA.MAX_GAP, max_interval=cfg.DATA.MAX_INTERVAL,\n            num_search_frames=cfg.DATA.SEARCH.NUMBER, num_template_frames=1,\n            frame_sample_mode='random_interval',\n            prob=cfg.DATA.INTERVAL_PROB)\n        loader_train = SLTLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE,\n                                 num_workers=cfg.TRAIN.NUM_WORKER,\n                                 shuffle=False, drop_last=True)\n    elif settings.script_name == \"artrackv2\":\n        net = build_artrackv2(cfg)\n        loader_train, loader_val = build_dataloaders(cfg, settings)\n    elif settings.script_name == \"artrackv2_seq\":\n        net = build_artrackv2_seq(cfg)\n        dataset_train = sequence_sampler_v2.SequenceSampler(\n            datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader),\n            p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO,\n            samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH,\n            max_gap=cfg.DATA.MAX_GAP, max_interval=cfg.DATA.MAX_INTERVAL,\n            num_search_frames=cfg.DATA.SEARCH.NUMBER, num_template_frames=1,\n            frame_sample_mode='random_interval',\n            prob=cfg.DATA.INTERVAL_PROB)\n        loader_train = SLTLoader('train', dataset_train, training=True, batch_size=cfg.TRAIN.BATCH_SIZE,\n                                 num_workers=cfg.TRAIN.NUM_WORKER,\n                                 shuffle=False, drop_last=True)\n    else:\n        raise ValueError(\"illegal script name\")\n\n    # wrap networks to distributed one\n    net.cuda()\n    if settings.local_rank != -1:\n        # net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net)  # add syncBN converter\n        net = DDP(net, device_ids=[settings.local_rank], find_unused_parameters=True)\n        settings.device = torch.device(\"cuda:%d\" % settings.local_rank)\n    else:\n        settings.device = torch.device(\"cuda:0\")\n    settings.deep_sup = getattr(cfg.TRAIN, \"DEEP_SUPERVISION\", False)\n    settings.distill = getattr(cfg.TRAIN, \"DISTILL\", False)\n    settings.distill_loss_type = getattr(cfg.TRAIN, \"DISTILL_LOSS_TYPE\", \"KL\")\n    # Loss functions and Actors\n    if settings.script_name == \"artrack\":\n        focal_loss = FocalLoss()\n        objective = {'giou': giou_loss, 'l1': l1_loss, 'focal': focal_loss}\n        loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT, 'focal': 2.}\n        actor = ARTrackActor(net=net, objective=objective, loss_weight=loss_weight, settings=settings, cfg=cfg, bins=bins, search_size=search_size)\n    elif settings.script_name == \"artrack_seq\":\n        focal_loss = FocalLoss()\n        objective = {'giou': giou_loss, 'l1': l1_loss, 'focal': focal_loss}\n        loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT, 'focal': 2.}\n        actor = ARTrackSeqActor(net=net, objective=objective, loss_weight=loss_weight, settings=settings, cfg=cfg, bins=bins, search_size=search_size)\n    elif settings.script_name == \"artrackv2\":\n        focal_loss = 
FocalLoss()\n        objective = {'giou': giou_loss, 'l1': l1_loss, 'focal': focal_loss}\n        loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT, 'focal': 2., 'score': cfg.TRAIN.SCORE_WEIGHT}\n        actor = ARTrackV2Actor(net=net, objective=objective, loss_weight=loss_weight, settings=settings, cfg=cfg, bins=bins, search_size=search_size)\n    elif settings.script_name == \"artrackv2_seq\":\n        focal_loss = FocalLoss()\n        objective = {'giou': giou_loss, 'l1': l1_loss, 'focal': focal_loss}\n        loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT, 'focal': 2., 'score_update': cfg.TRAIN.SCORE_WEIGHT}\n        actor = ARTrackV2SeqActor(net=net, objective=objective, loss_weight=loss_weight, settings=settings, cfg=cfg, bins=bins, search_size=search_size)\n    else:\n        raise ValueError(\"illegal script name\")\n\n    # if cfg.TRAIN.DEEP_SUPERVISION:\n    #     raise ValueError(\"Deep supervision is not supported now.\")\n\n    # Optimizer, parameters, and learning rates\n    if settings.script_name == 'artrack' or settings.script_name == 'artrack_seq':\n        optimizer, lr_scheduler = get_optimizer_scheduler(net, cfg)\n    elif settings.script_name == 'artrackv2' or settings.script_name == 'artrackv2_seq':\n        optimizer, lr_scheduler = get_optimizer_scheduler_v2(net, cfg)\n    use_amp = getattr(cfg.TRAIN, \"AMP\", False)\n    if settings.script_name == \"artrack\":\n        trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler, use_amp=use_amp)\n    elif settings.script_name == \"artrack_seq\":\n        trainer = LTRSeqTrainer(actor, [loader_train], optimizer, settings, lr_scheduler, use_amp=use_amp)\n    elif settings.script_name == \"artrackv2\":\n        trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler, use_amp=use_amp)\n    elif settings.script_name == \"artrackv2_seq\":\n        trainer = LTRSeqTrainerV2(actor, [loader_train], optimizer, settings, lr_scheduler, use_amp=use_amp)\n\n    # train process\n    trainer.train(cfg.TRAIN.EPOCH, load_latest=True, fail_safe=True)\n"
  },
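  {
    "path": "examples/slt_collate_example.py",
    "content": "\"\"\"Illustrative sketch added by the editor; not part of the original repo.\n\nDemonstrates the slt_collate behaviour that SLTLoader (defined in\nlib/train/train_script.py) installs as its default collate_fn: instead of\nstacking samples into batched tensors like PyTorch's default collate, it\ngathers each key's values into a plain list, which tolerates sequences of\ndifferent lengths.\n\"\"\"\n\n\ndef slt_collate(batch):\n    # Same logic as in lib/train/train_script.py.\n    ret = {}\n    for k in batch[0].keys():\n        ret[k] = [ex[k] for ex in batch]\n    return ret\n\n\nif __name__ == '__main__':\n    # Two sequences with different frame counts would break tensor stacking,\n    # but list collation handles them directly.\n    batch = [\n        {'num_frames': 3, 'seq_name': 'seq_a'},\n        {'num_frames': 5, 'seq_name': 'seq_b'},\n    ]\n    out = slt_collate(batch)\n    assert out == {'num_frames': [3, 5], 'seq_name': ['seq_a', 'seq_b']}\n    print(out)\n"
  },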
  {
    "path": "lib/train/train_script_distill.py",
    "content": "import os\n# loss function related\nfrom lib.utils.box_ops import giou_loss\nfrom torch.nn.functional import l1_loss\nfrom torch.nn import BCEWithLogitsLoss\n# train pipeline related\nfrom lib.train.trainers import LTRTrainer\n# distributed training related\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n# some more advanced functions\nfrom .base_functions import *\n# network related\nfrom lib.models.stark import build_starks, build_starkst\nfrom lib.models.stark import build_stark_lightning_x_trt\n# forward propagation related\nfrom lib.train.actors import STARKLightningXtrtdistillActor\n# for import modules\nimport importlib\n\n\ndef build_network(script_name, cfg):\n    # Create network\n    if script_name == \"stark_s\":\n        net = build_starks(cfg)\n    elif script_name == \"stark_st1\" or script_name == \"stark_st2\":\n        net = build_starkst(cfg)\n    elif script_name == \"stark_lightning_X_trt\":\n        net = build_stark_lightning_x_trt(cfg, phase=\"train\")\n    else:\n        raise ValueError(\"illegal script name\")\n    return net\n\n\ndef run(settings):\n    settings.description = 'Training script for STARK-S, STARK-ST stage1, and STARK-ST stage2'\n\n    # update the default configs with config file\n    if not os.path.exists(settings.cfg_file):\n        raise ValueError(\"%s doesn't exist.\" % settings.cfg_file)\n    config_module = importlib.import_module(\"lib.config.%s.config\" % settings.script_name)\n    cfg = config_module.cfg\n    config_module.update_config_from_file(settings.cfg_file)\n    if settings.local_rank in [-1, 0]:\n        print(\"New configuration is shown below.\")\n        for key in cfg.keys():\n            print(\"%s configuration:\" % key, cfg[key])\n            print('\\n')\n\n    # update the default teacher configs with teacher config file\n    if not os.path.exists(settings.cfg_file_teacher):\n        raise ValueError(\"%s doesn't exist.\" % settings.cfg_file_teacher)\n    config_module_teacher = importlib.import_module(\"lib.config.%s.config\" % settings.script_teacher)\n    cfg_teacher = config_module_teacher.cfg\n    config_module_teacher.update_config_from_file(settings.cfg_file_teacher)\n    if settings.local_rank in [-1, 0]:\n        print(\"New teacher configuration is shown below.\")\n        for key in cfg_teacher.keys():\n            print(\"%s configuration:\" % key, cfg_teacher[key])\n            print('\\n')\n\n    # update settings based on cfg\n    update_settings(settings, cfg)\n\n    # Record the training log\n    log_dir = os.path.join(settings.save_dir, 'logs')\n    if settings.local_rank in [-1, 0]:\n        if not os.path.exists(log_dir):\n            os.makedirs(log_dir)\n    settings.log_file = os.path.join(log_dir, \"%s-%s.log\" % (settings.script_name, settings.config_name))\n\n    # Build dataloaders\n    loader_train, loader_val = build_dataloaders(cfg, settings)\n\n    if \"RepVGG\" in cfg.MODEL.BACKBONE.TYPE or \"swin\" in cfg.MODEL.BACKBONE.TYPE:\n        cfg.ckpt_dir = settings.save_dir\n    \"\"\"turn on the distillation mode\"\"\"\n    cfg.TRAIN.DISTILL = True\n    cfg_teacher.TRAIN.DISTILL = True\n    net = build_network(settings.script_name, cfg)\n    net_teacher = build_network(settings.script_teacher, cfg_teacher)\n\n    # wrap networks to distributed one\n    net.cuda()\n    net_teacher.cuda()\n    net_teacher.eval()\n\n    if settings.local_rank != -1:\n        net = DDP(net, device_ids=[settings.local_rank], find_unused_parameters=True)\n        net_teacher = 
DDP(net_teacher, device_ids=[settings.local_rank], find_unused_parameters=True)\n        settings.device = torch.device(\"cuda:%d\" % settings.local_rank)\n    else:\n        settings.device = torch.device(\"cuda:0\")\n    # settings.deep_sup = getattr(cfg.TRAIN, \"DEEP_SUPERVISION\", False)\n    # settings.distill = getattr(cfg.TRAIN, \"DISTILL\", False)\n    settings.distill_loss_type = getattr(cfg.TRAIN, \"DISTILL_LOSS_TYPE\", \"L1\")\n    # Loss functions and Actors\n    if settings.script_name == \"stark_lightning_X_trt\":\n        objective = {'giou': giou_loss, 'l1': l1_loss}\n        loss_weight = {'giou': cfg.TRAIN.GIOU_WEIGHT, 'l1': cfg.TRAIN.L1_WEIGHT}\n        actor = STARKLightningXtrtdistillActor(net=net, objective=objective, loss_weight=loss_weight, settings=settings,\n                                               net_teacher=net_teacher)\n    else:\n        raise ValueError(\"illegal script name\")\n\n    # Optimizer, parameters, and learning rates\n    optimizer, lr_scheduler = get_optimizer_scheduler(net, cfg)\n    use_amp = getattr(cfg.TRAIN, \"AMP\", False)\n    trainer = LTRTrainer(actor, [loader_train, loader_val], optimizer, settings, lr_scheduler, use_amp=use_amp)\n\n    # train process\n    trainer.train(cfg.TRAIN.EPOCH, load_latest=True, fail_safe=True, distill=True)\n"
  },
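  {
    "path": "examples/distill_wiring_example.py",
    "content": "\"\"\"Illustrative sketch added by the editor; not part of the original repo.\n\nMinimal student/teacher wiring in the spirit of\nlib/train/train_script_distill.py: the teacher stays in eval mode and is\nnever updated, its outputs are produced under torch.no_grad(), and an L1\ndistillation term (the default DISTILL_LOSS_TYPE in that script) is added\nto the task loss. The tiny linear networks and the weight of 1.0 are\nhypothetical stand-ins.\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nstudent = nn.Linear(8, 4)\nteacher = nn.Linear(8, 4)\nteacher.eval()  # as in the training script: only the student is trained\n\nx = torch.randn(2, 8)\ntarget = torch.randn(2, 4)\n\nwith torch.no_grad():  # the teacher forward carries no gradient\n    t_out = teacher(x)\ns_out = student(x)\n\ntask_loss = F.l1_loss(s_out, target)\ndistill_loss = F.l1_loss(s_out, t_out)  # 'L1' distillation loss type\nloss = task_loss + 1.0 * distill_loss   # the weighting here is hypothetical\nloss.backward()\nprint('task:', task_loss.item(), 'distill:', distill_loss.item())\n"
  },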
  {
    "path": "lib/train/trainers/__init__.py",
    "content": "from .base_trainer import BaseTrainer\nfrom .ltr_trainer import LTRTrainer\nfrom .ltr_seq_trainer import LTRSeqTrainer\nfrom .ltr_seq_trainer_v2 import LTRSeqTrainerV2\n"
  },
  {
    "path": "lib/train/trainers/base_trainer.py",
    "content": "import os\nimport glob\nimport torch\nimport traceback\nfrom lib.train.admin import multigpu\nfrom torch.utils.data.distributed import DistributedSampler\n\n\nclass BaseTrainer:\n    \"\"\"Base trainer class. Contains functions for training and saving/loading checkpoints.\n    Trainer classes should inherit from this one and overload the train_epoch function.\"\"\"\n\n    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None):\n        \"\"\"\n        args:\n            actor - The actor for training the network\n            loaders - list of dataset loaders, e.g. [train_loader, val_loader]. In each epoch, the trainer runs one\n                        epoch for each loader.\n            optimizer - The optimizer used for training, e.g. Adam\n            settings - Training settings\n            lr_scheduler - Learning rate scheduler\n        \"\"\"\n        self.actor = actor\n        self.optimizer = optimizer\n        self.lr_scheduler = lr_scheduler\n        self.loaders = loaders\n\n        self.update_settings(settings)\n\n        self.epoch = 0\n        self.stats = {}\n\n        self.device = getattr(settings, 'device', None)\n        if self.device is None:\n            self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() and settings.use_gpu else \"cpu\")\n\n        self.actor.to(self.device)\n        self.settings = settings\n\n    def update_settings(self, settings=None):\n        \"\"\"Updates the trainer settings. Must be called to update internal settings.\"\"\"\n        if settings is not None:\n            self.settings = settings\n\n        if self.settings.env.workspace_dir is not None:\n            self.settings.env.workspace_dir = os.path.expanduser(self.settings.env.workspace_dir)\n            '''2021.1.4 New function: specify checkpoint dir'''\n            if self.settings.save_dir is None:\n                self._checkpoint_dir = os.path.join(self.settings.env.workspace_dir, 'checkpoints')\n            else:\n                self._checkpoint_dir = os.path.join(self.settings.save_dir, 'checkpoints')\n            print(\"checkpoints will be saved to %s\" % self._checkpoint_dir)\n\n            if self.settings.local_rank in [-1, 0]:\n                if not os.path.exists(self._checkpoint_dir):\n                    print(\"Training with multiple GPUs. checkpoints directory doesn't exist. 
\"\n                          \"Create checkpoints directory\")\n                    os.makedirs(self._checkpoint_dir)\n        else:\n            self._checkpoint_dir = None\n\n    def train(self, max_epochs, load_latest=False, fail_safe=True, load_previous_ckpt=False, distill=False):\n        \"\"\"Do training for the given number of epochs.\n        args:\n            max_epochs - Max number of training epochs,\n            load_latest - Bool indicating whether to resume from latest epoch.\n            fail_safe - Bool indicating whether the training to automatically restart in case of any crashes.\n        \"\"\"\n\n        epoch = -1\n        num_tries = 1\n        for i in range(num_tries):\n            try:\n                if load_latest:\n                    self.load_checkpoint()\n                if load_previous_ckpt:\n                    directory = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path_prv)\n                    self.load_state_dict(directory)\n                if distill:\n                    directory_teacher = '{}/{}'.format(self._checkpoint_dir, self.settings.project_path_teacher)\n                    self.load_state_dict(directory_teacher, distill=True)\n                for epoch in range(self.epoch+1, max_epochs+1):\n                    self.epoch = epoch\n\n                    self.train_epoch()\n\n                    if self.lr_scheduler is not None:\n                        if self.settings.scheduler_type != 'cosine':\n                            self.lr_scheduler.step()\n                        else:\n                            self.lr_scheduler.step(epoch - 1)\n                    # only save the last 10 checkpoints\n                    save_every_epoch = getattr(self.settings, \"save_every_epoch\", False)\n                    save_epochs = []\n                    if epoch > (max_epochs - 1) or save_every_epoch or epoch % 5 == 0 or epoch in save_epochs or epoch > (max_epochs - 5):\n                    # if epoch > (max_epochs - 10) or save_every_epoch or epoch % 100 == 0:\n                        if self._checkpoint_dir:\n                            if self.settings.local_rank in [-1, 0]:\n                                self.save_checkpoint()\n            except:\n                print('Training crashed at epoch {}'.format(epoch))\n                if fail_safe:\n                    self.epoch -= 1\n                    load_latest = True\n                    print('Traceback for the error!')\n                    print(traceback.format_exc())\n                    print('Restarting training from last epoch ...')\n                else:\n                    raise\n\n        print('Finished training!')\n\n    def train_epoch(self):\n        raise NotImplementedError\n\n    def save_checkpoint(self):\n        \"\"\"Saves a checkpoint of the network and other variables.\"\"\"\n\n        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net\n\n        actor_type = type(self.actor).__name__\n        net_type = type(net).__name__\n        state = {\n            'epoch': self.epoch,\n            'actor_type': actor_type,\n            'net_type': net_type,\n            'net': net.state_dict(),\n            'net_info': getattr(net, 'info', None),\n            'constructor': getattr(net, 'constructor', None),\n            'optimizer': self.optimizer.state_dict(),\n            'stats': self.stats,\n            'settings': self.settings\n        }\n\n        directory = '{}/{}'.format(self._checkpoint_dir, 
self.settings.project_path)\n        print(directory)\n        if not os.path.exists(directory):\n            print(\"directory doesn't exist. creating...\")\n            os.makedirs(directory)\n\n        # First save as a tmp file\n        tmp_file_path = '{}/{}_ep{:04d}.tmp'.format(directory, net_type, self.epoch)\n        torch.save(state, tmp_file_path)\n\n        file_path = '{}/{}_ep{:04d}.pth.tar'.format(directory, net_type, self.epoch)\n\n        # Now rename to actual checkpoint. os.rename seems to be atomic if files are on same filesystem. Not 100% sure\n        os.rename(tmp_file_path, file_path)\n\n    def load_checkpoint(self, checkpoint = None, fields = None, ignore_fields = None, load_constructor = False):\n        \"\"\"Loads a network checkpoint file.\n\n        Can be called in three different ways:\n            load_checkpoint():\n                Loads the latest epoch from the workspace. Use this to continue training.\n            load_checkpoint(epoch_num):\n                Loads the network at the given epoch number (int).\n            load_checkpoint(path_to_checkpoint):\n                Loads the file from the given absolute path (str).\n        \"\"\"\n\n        net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net\n\n        actor_type = type(self.actor).__name__\n        net_type = type(net).__name__\n\n        if checkpoint is None:\n            # Load most recent checkpoint\n            checkpoint_list = sorted(glob.glob('{}/{}/{}_ep*.pth.tar'.format(self._checkpoint_dir,\n                                                                             self.settings.project_path, net_type)))\n            if checkpoint_list:\n                checkpoint_path = checkpoint_list[-1]\n            else:\n                print('No matching checkpoint file found')\n                return\n        elif isinstance(checkpoint, int):\n            # Checkpoint is the epoch number\n            checkpoint_path = '{}/{}/{}_ep{:04d}.pth.tar'.format(self._checkpoint_dir, self.settings.project_path,\n                                                                 net_type, checkpoint)\n        elif isinstance(checkpoint, str):\n            # checkpoint is the path\n            if os.path.isdir(checkpoint):\n                checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint)))\n                if checkpoint_list:\n                    checkpoint_path = checkpoint_list[-1]\n                else:\n                    raise Exception('No checkpoint found')\n            else:\n                checkpoint_path = os.path.expanduser(checkpoint)\n        else:\n            raise TypeError\n\n        # Load network\n        print(checkpoint_path)\n        checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n        print(checkpoint_dict['net_type'])\n\n        assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'\n\n        if fields is None:\n            fields = checkpoint_dict.keys()\n        if ignore_fields is None:\n            ignore_fields = ['settings']\n\n            # Never load the scheduler. 
It exists in older checkpoints.\n        ignore_fields.extend(['lr_scheduler', 'constructor', 'net_type', 'actor_type', 'net_info'])\n\n        # Load all fields\n        for key in fields:\n            if key in ignore_fields:\n                continue\n            if key == 'net':\n                net.load_state_dict(checkpoint_dict[key])\n            elif key == 'optimizer':\n                self.optimizer.load_state_dict(checkpoint_dict[key])\n            else:\n                setattr(self, key, checkpoint_dict[key])\n\n        # Set the net info\n        if load_constructor and 'constructor' in checkpoint_dict and checkpoint_dict['constructor'] is not None:\n            net.constructor = checkpoint_dict['constructor']\n        if 'net_info' in checkpoint_dict and checkpoint_dict['net_info'] is not None:\n            net.info = checkpoint_dict['net_info']\n\n        # Update the epoch in lr scheduler\n        if 'epoch' in fields:\n            self.lr_scheduler.last_epoch = self.epoch\n        # 2021.1.10 Update the epoch in data_samplers\n            for loader in self.loaders:\n                if isinstance(loader.sampler, DistributedSampler):\n                    loader.sampler.set_epoch(self.epoch)\n        return True\n\n    def load_state_dict(self, checkpoint=None, distill=False):\n        \"\"\"Loads network weights (non-strictly) from a checkpoint.\n\n        checkpoint must be a path string: either a checkpoint file, or a directory, in which\n        case the latest '*_ep*.pth.tar' file in it is used. With distill=True the weights are\n        loaded into the teacher network instead of the student.\n        \"\"\"\n        if distill:\n            net = self.actor.net_teacher.module if multigpu.is_multi_gpu(self.actor.net_teacher) \\\n                else self.actor.net_teacher\n        else:\n            net = self.actor.net.module if multigpu.is_multi_gpu(self.actor.net) else self.actor.net\n\n        net_type = type(net).__name__\n\n        if isinstance(checkpoint, str):\n            # checkpoint is the path\n            if os.path.isdir(checkpoint):\n                checkpoint_list = sorted(glob.glob('{}/*_ep*.pth.tar'.format(checkpoint)))\n                if checkpoint_list:\n                    checkpoint_path = checkpoint_list[-1]\n                else:\n                    raise Exception('No checkpoint found')\n            else:\n                checkpoint_path = os.path.expanduser(checkpoint)\n        else:\n            raise TypeError\n\n        # Load network\n        print(\"Loading pretrained model from \", checkpoint_path)\n        checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')\n\n        assert net_type == checkpoint_dict['net_type'], 'Network is not of correct type.'\n\n        missing_k, unexpected_k = net.load_state_dict(checkpoint_dict[\"net\"], strict=False)\n        print(\"previous checkpoint is loaded.\")\n        print(\"missing keys: \", missing_k)\n        print(\"unexpected keys:\", unexpected_k)\n\n        return True\n"
  },
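  {
    "path": "examples/atomic_checkpoint_example.py",
    "content": "\"\"\"Illustrative sketch added by the editor; not part of the original repo.\n\nShows the atomic checkpoint-save pattern used by BaseTrainer.save_checkpoint\nin lib/train/trainers/base_trainer.py: write the state dict to a .tmp file\nfirst, then os.rename() it to the final '<NetType>_ep<epoch:04d>.pth.tar'\nname, so a crash mid-write cannot leave a truncated checkpoint that\nload_checkpoint would later pick up. The network and directory below are\nhypothetical stand-ins.\n\"\"\"\nimport os\nimport torch\nimport torch.nn as nn\n\nnet = nn.Linear(4, 2)        # hypothetical stand-in network\ndirectory = './checkpoints'  # hypothetical checkpoint directory\nos.makedirs(directory, exist_ok=True)\n\nepoch = 7\nnet_type = type(net).__name__\nstate = {'epoch': epoch, 'net_type': net_type, 'net': net.state_dict()}\n\n# 1) write to a temporary file\ntmp_file_path = '{}/{}_ep{:04d}.tmp'.format(directory, net_type, epoch)\ntorch.save(state, tmp_file_path)\n\n# 2) rename to the final name (atomic on POSIX when both paths are on the\n#    same filesystem)\nfile_path = '{}/{}_ep{:04d}.pth.tar'.format(directory, net_type, epoch)\nos.rename(tmp_file_path, file_path)\nprint('saved', file_path)\n"
  },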
  {
    "path": "lib/train/trainers/ltr_seq_trainer.py",
    "content": "import os\r\nimport datetime\r\nfrom collections import OrderedDict\r\nfrom torch.nn.utils import clip_grad_norm_\r\n# from lib.train.data.wandb_logger import WandbWriter\r\nfrom lib.train.trainers import BaseTrainer\r\nfrom lib.train.admin import AverageMeter, StatValue\r\nfrom memory_profiler import profile\r\n# from lib.train.admin import TensorboardWriter\r\nimport torch\r\nimport time\r\nimport numpy as np\r\nfrom torch.utils.data.distributed import DistributedSampler\r\nfrom torch.cuda.amp import autocast\r\nfrom torch.cuda.amp import GradScaler\r\n\r\nfrom lib.utils.misc import get_world_size\r\n\r\n\r\nclass LTRSeqTrainer(BaseTrainer):\r\n    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None, use_amp=False):\r\n        \"\"\"\r\n        args:\r\n            actor - The actor for training the network\r\n            loaders - list of dataset loaders, e.g. [train_loader, val_loader]. In each epoch, the trainer runs one\r\n                        epoch for each loader.\r\n            optimizer - The optimizer used for training, e.g. Adam\r\n            settings - Training settings\r\n            lr_scheduler - Learning rate scheduler\r\n        \"\"\"\r\n        super().__init__(actor, loaders, optimizer, settings, lr_scheduler)\r\n\r\n        self._set_default_settings()\r\n\r\n        # Initialize statistics variables\r\n        self.stats = OrderedDict({loader.name: None for loader in self.loaders})\r\n\r\n        # Initialize tensorboard and wandb\r\n        # self.wandb_writer = None\r\n        # if settings.local_rank in [-1, 0]:\r\n        #    tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path)\r\n        #    if not os.path.exists(tensorboard_writer_dir):\r\n        #        os.makedirs(tensorboard_writer_dir)\r\n        #    self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders])\r\n\r\n        #    if settings.use_wandb:\r\n        #        world_size = get_world_size()\r\n        #        cur_train_samples = self.loaders[0].dataset.samples_per_epoch * max(0, self.epoch - 1)\r\n        #        interval = (world_size * settings.batchsize)  # * interval\r\n        #        self.wandb_writer = WandbWriter(settings.project_path[6:], {}, tensorboard_writer_dir, cur_train_samples, interval)\r\n\r\n        self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True)\r\n        print(\"move_data\", self.move_data_to_gpu)\r\n        self.settings = settings\r\n        self.use_amp = use_amp\r\n        if use_amp:\r\n            self.scaler = GradScaler()\r\n\r\n    def _set_default_settings(self):\r\n        # Dict of all default values\r\n        default = {'print_interval': 10,\r\n                   'print_stats': None,\r\n                   'description': ''}\r\n\r\n        for param, default_value in default.items():\r\n            if getattr(self.settings, param, None) is None:\r\n                setattr(self.settings, param, default_value)\r\n\r\n        self.miou_list = []\r\n\r\n    def cycle_dataset(self, loader):\r\n        \"\"\"Do a cycle of training or validation.\"\"\"\r\n        torch.autograd.set_detect_anomaly(True)\r\n        self.actor.train(loader.training)\r\n        torch.set_grad_enabled(loader.training)\r\n\r\n        self._init_timing()\r\n\r\n        for i, data in enumerate(loader, 1):\r\n            self.actor.eval()\r\n            self.data_read_done_time = time.time()\r\n            with torch.no_grad():\r\n              
                explore_result = self.actor.explore(data)\r\n            if explore_result is None:\r\n                print(\"explore step returned no result, skipping this batch\")\r\n                # self._update_stats(stats, batch_size, loader)\r\n                continue\r\n            # get inputs\r\n            # print(data)\r\n\r\n            self.data_to_gpu_time = time.time()\r\n\r\n            data['epoch'] = self.epoch\r\n            data['settings'] = self.settings\r\n\r\n            stats = {}\r\n            reward_record = []\r\n            miou_record = []\r\n            e_miou_record = []\r\n            num_seq = len(data['num_frames'])\r\n\r\n            # Calculate reward tensor\r\n            # reward_tensor = torch.zeros(explore_result['baseline_iou'].size())\r\n            baseline_iou = explore_result['baseline_iou']\r\n            # explore_iou = explore_result['explore_iou']\r\n            for seq_idx in range(num_seq):\r\n                num_frames = data['num_frames'][seq_idx] - 1\r\n                b_miou = torch.mean(baseline_iou[:num_frames, seq_idx])\r\n                #    e_miou = torch.mean(explore_iou[:num_frames, seq_idx])\r\n                miou_record.append(b_miou.item())\r\n                #    e_miou_record.append(e_miou.item())\r\n\r\n                b_reward = b_miou.item()\r\n            #    e_reward = e_miou.item()\r\n            #    iou_gap = e_reward - b_reward\r\n            #    reward_record.append(iou_gap)\r\n            #    reward_tensor[:num_frames, seq_idx] = iou_gap\r\n\r\n            # Training mode\r\n            cursor = 0\r\n            bs_backward = 1\r\n\r\n            # print(self.actor.net.module.box_head.decoder.layers[2].mlpx.fc1.weight)\r\n            self.optimizer.zero_grad()\r\n            while cursor < num_seq:\r\n                # print(\"now is \", cursor , \"and all is \", num_seq)\r\n                model_inputs = {}\r\n                model_inputs['slt_loss_weight'] = 15\r\n                if cursor < num_seq:\r\n                    model_inputs['template_images'] = explore_result['template_images'][\r\n                                                      cursor:cursor + bs_backward].cuda()\r\n                else:\r\n                    model_inputs['template_images'] = explore_result['template_images_reverse'][\r\n                                                      cursor - num_seq:cursor - num_seq + bs_backward].cuda()\r\n                model_inputs['search_images'] = explore_result['search_images'][:, cursor:cursor + bs_backward].cuda()\r\n                model_inputs['search_anno'] = explore_result['search_anno'][:, cursor:cursor + bs_backward].cuda()\r\n                model_inputs['pre_seq'] = explore_result['pre_seq'][:, cursor:cursor + bs_backward].cuda()\r\n                model_inputs['x_feat'] = explore_result['x_feat'].squeeze(1)[:, cursor:cursor + bs_backward].cuda()\r\n                model_inputs['epoch'] = data['epoch']\r\n                # model_inputs['template_update'] = explore_result['template_update'].squeeze(1)[:,\r\n                #                                  cursor:cursor + bs_backward].cuda()\r\n                # print(\"this is cursor\")\r\n                # print(explore_result['pre_seq'].shape)\r\n                # print(explore_result['x_feat'].squeeze(1).shape)\r\n                # model_inputs['action_tensor'] = explore_result['action_tensor'][:, cursor:cursor + bs_backward].cuda()\r\n                # model_inputs['reward_tensor'] = reward_tensor[:, cursor:cursor + bs_backward].cuda()\r\n\r\n                loss, 
stats_cur = self.actor.compute_sequence_losses(model_inputs)\r\n                # for name, param in self.actor.net.named_parameters():\r\n                #    shape, c = (param.grad.shape, param.grad.sum()) if param.grad is not None else (None, None)\r\n                #    print(f'{name}: {param.shape} \\n\\t grad: {shape} \\n\\t {c}')\r\n                # print(\"i make this!\")\r\n                loss.backward()\r\n                # print(\"i made that?\")\r\n\r\n                for key, val in stats_cur.items():\r\n                    if key in stats:\r\n                        stats[key] += val * (bs_backward / num_seq)\r\n                    else:\r\n                        stats[key] = val * (bs_backward / num_seq)\r\n                cursor += bs_backward\r\n            grad_norm = clip_grad_norm_(self.actor.net.parameters(), 100)\r\n            stats['grad_norm'] = grad_norm\r\n            # print(self.actor.net.module.backbone.blocks[8].mlp.fc1.weight)\r\n            self.optimizer.step()\r\n            # print(self.optimizer)\r\n\r\n            miou = np.mean(miou_record)\r\n            self.miou_list.append(miou)\r\n            # stats['reward'] = np.mean(reward_record)\r\n            # stats['e_mIoU'] = np.mean(e_miou_record)\r\n            stats['mIoU'] = miou\r\n            stats['mIoU10'] = np.mean(self.miou_list[-10:])\r\n            stats['mIoU100'] = np.mean(self.miou_list[-100:])\r\n\r\n            batch_size = num_seq * np.max(data['num_frames'])\r\n            self._update_stats(stats, batch_size, loader)\r\n            self._print_stats(i, loader, batch_size)\r\n            torch.cuda.empty_cache()\r\n\r\n            # # forward pass\r\n            # if not self.use_amp:\r\n            #     loss, stats = self.actor(data)\r\n            # else:\r\n            #     with autocast():\r\n            #         loss, stats = self.actor(data)\r\n            #\r\n            # # backward pass and update weights\r\n            # if loader.training:\r\n            #     self.optimizer.zero_grad()\r\n            #     if not self.use_amp:\r\n            #         loss.backward()\r\n            #         if self.settings.grad_clip_norm > 0:\r\n            #             torch.nn.utils.clip_grad_norm_(self.actor.net.parameters(), self.settings.grad_clip_norm)\r\n            #         self.optimizer.step()\r\n            #     else:\r\n            #         self.scaler.scale(loss).backward()\r\n            #         self.scaler.step(self.optimizer)\r\n            #         self.scaler.update()\r\n\r\n            # update statistics\r\n            # batch_size = data['template_images'].shape[loader.stack_dim]\r\n            # self._update_stats(stats, batch_size, loader)\r\n\r\n            # print statistics\r\n            # self._print_stats(i, loader, batch_size)\r\n\r\n            # update wandb status\r\n            # if self.wandb_writer is not None and i % self.settings.print_interval == 0:\r\n            #    if self.settings.local_rank in [-1, 0]:\r\n            #        self.wandb_writer.write_log(self.stats, self.epoch)\r\n\r\n        # calculate ETA after every epoch\r\n        # epoch_time = self.prev_time - self.start_time\r\n        # print(\"Epoch Time: \" + str(datetime.timedelta(seconds=epoch_time)))\r\n        # print(\"Avg Data Time: %.5f\" % (self.avg_date_time / self.num_frames * batch_size))\r\n        # print(\"Avg GPU Trans Time: %.5f\" % (self.avg_gpu_trans_time / self.num_frames * batch_size))\r\n        # print(\"Avg Forward Time: %.5f\" % 
(self.avg_forward_time / self.num_frames * batch_size))\r\n\r\n    def train_epoch(self):\r\n        \"\"\"Do one epoch for each loader.\"\"\"\r\n        for loader in self.loaders:\r\n            if self.epoch % loader.epoch_interval == 0:\r\n                # 2021.1.10 Set epoch\r\n                if isinstance(loader.sampler, DistributedSampler):\r\n                    loader.sampler.set_epoch(self.epoch)\r\n                self.cycle_dataset(loader)\r\n\r\n        self._stats_new_epoch()\r\n        # if self.settings.local_rank in [-1, 0]:\r\n        #    self._write_tensorboard()\r\n\r\n    def _init_timing(self):\r\n        self.num_frames = 0\r\n        self.start_time = time.time()\r\n        self.prev_time = self.start_time\r\n        self.avg_date_time = 0\r\n        self.avg_gpu_trans_time = 0\r\n        self.avg_forward_time = 0\r\n\r\n    def _update_stats(self, new_stats: OrderedDict, batch_size, loader):\r\n        # Initialize stats if not initialized yet\r\n        if loader.name not in self.stats.keys() or self.stats[loader.name] is None:\r\n            self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()})\r\n\r\n        # add lr state\r\n        if loader.training:\r\n            lr_list = self.lr_scheduler.get_last_lr()\r\n            for i, lr in enumerate(lr_list):\r\n                var_name = 'LearningRate/group{}'.format(i)\r\n                if var_name not in self.stats[loader.name].keys():\r\n                    self.stats[loader.name][var_name] = StatValue()\r\n                self.stats[loader.name][var_name].update(lr)\r\n\r\n        for name, val in new_stats.items():\r\n            if name not in self.stats[loader.name].keys():\r\n                self.stats[loader.name][name] = AverageMeter()\r\n            self.stats[loader.name][name].update(val, batch_size)\r\n\r\n    def _print_stats(self, i, loader, batch_size):\r\n        self.num_frames += batch_size\r\n        current_time = time.time()\r\n        batch_fps = batch_size / (current_time - self.prev_time)\r\n        average_fps = self.num_frames / (current_time - self.start_time)\r\n        prev_frame_time_backup = self.prev_time\r\n        self.prev_time = current_time\r\n\r\n        self.avg_date_time += (self.data_read_done_time - prev_frame_time_backup)\r\n        self.avg_gpu_trans_time += (self.data_to_gpu_time - self.data_read_done_time)\r\n        self.avg_forward_time += current_time - self.data_to_gpu_time\r\n\r\n        if i % self.settings.print_interval == 0 or i == loader.__len__():\r\n            print_str = '[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__())\r\n            print_str += 'FPS: %.1f (%.1f)  ,  ' % (average_fps, batch_fps)\r\n\r\n            # 2021.12.14 add data time print\r\n            print_str += 'DataTime: %.3f (%.3f)  ,  ' % (\r\n            self.avg_date_time / self.num_frames * batch_size, self.avg_gpu_trans_time / self.num_frames * batch_size)\r\n            print_str += 'ForwardTime: %.3f  ,  ' % (self.avg_forward_time / self.num_frames * batch_size)\r\n            print_str += 'TotalTime: %.3f  ,  ' % ((current_time - self.start_time) / self.num_frames * batch_size)\r\n            # print_str += 'DataTime: %.3f (%.3f)  ,  ' % (self.data_read_done_time - prev_frame_time_backup, self.data_to_gpu_time - self.data_read_done_time)\r\n            # print_str += 'ForwardTime: %.3f  ,  ' % (current_time - self.data_to_gpu_time)\r\n            # print_str += 'TotalTime: %.3f  ,  ' % (current_time - 
prev_frame_time_backup)\r\n\r\n            for name, val in self.stats[loader.name].items():\r\n                if (self.settings.print_stats is None or name in self.settings.print_stats):\r\n                    if hasattr(val, 'avg'):\r\n                        print_str += '%s: %.5f  ,  ' % (name, val.avg)\r\n                    # else:\r\n                    #     print_str += '%s: %r  ,  ' % (name, val)\r\n\r\n            print(print_str[:-5])\r\n            log_str = print_str[:-5] + '\\n'\r\n            with open(self.settings.log_file, 'a') as f:\r\n                f.write(log_str)\r\n\r\n    def _stats_new_epoch(self):\r\n        # Record learning rate\r\n        for loader in self.loaders:\r\n            if loader.training:\r\n                try:\r\n                    lr_list = self.lr_scheduler.get_last_lr()\r\n                except Exception:\r\n                    lr_list = self.lr_scheduler._get_lr(self.epoch)\r\n                for i, lr in enumerate(lr_list):\r\n                    var_name = 'LearningRate/group{}'.format(i)\r\n                    if var_name not in self.stats[loader.name].keys():\r\n                        self.stats[loader.name][var_name] = StatValue()\r\n                    self.stats[loader.name][var_name].update(lr)\r\n\r\n        for loader_stats in self.stats.values():\r\n            if loader_stats is None:\r\n                continue\r\n            for stat_value in loader_stats.values():\r\n                if hasattr(stat_value, 'new_epoch'):\r\n                    stat_value.new_epoch()\r\n\r\n    # def _write_tensorboard(self):\r\n    #    if self.epoch == 1:\r\n    #        self.tensorboard_writer.write_info(self.settings.script_name, self.settings.description)\r\n\r\n    #    self.tensorboard_writer.write_epoch(self.stats, self.epoch)\r\n"
  },
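  {
    "path": "examples/grad_accumulation_example.py",
    "content": "\"\"\"Illustrative sketch added by the editor; not part of the original repo.\n\nShows the per-sequence gradient-accumulation pattern used in\nLTRSeqTrainer.cycle_dataset (lib/train/trainers/ltr_seq_trainer.py):\nzero_grad() once, call backward() on one small slice (bs_backward) of the\nsequence batch at a time so gradients accumulate, clip the global grad\nnorm, and take a single optimizer.step(). The clip value of 100 and\nbs_backward=1 mirror the trainer; the network, optimizer, and data are\nhypothetical stand-ins. Note the trainer scales only its logged stats by\nbs_backward / num_seq, not the loss itself.\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom torch.nn.utils import clip_grad_norm_\n\nnet = nn.Linear(8, 1)\noptimizer = torch.optim.Adam(net.parameters(), lr=1e-4)\n\nnum_seq, bs_backward = 4, 1\ninputs = torch.randn(num_seq, 8)\ntargets = torch.randn(num_seq, 1)\n\noptimizer.zero_grad()\ncursor = 0\nwhile cursor < num_seq:\n    sl = slice(cursor, cursor + bs_backward)\n    loss = nn.functional.l1_loss(net(inputs[sl]), targets[sl])\n    loss.backward()  # gradients accumulate across the slices\n    cursor += bs_backward\n\ngrad_norm = clip_grad_norm_(net.parameters(), 100)\noptimizer.step()\nprint('grad norm before clipping:', float(grad_norm))\n"
  },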
  {
    "path": "lib/train/trainers/ltr_seq_trainer_v2.py",
    "content": "import os\nimport datetime\nfrom collections import OrderedDict\nfrom torch.nn.utils import clip_grad_norm_\n# from lib.train.data.wandb_logger import WandbWriter\nfrom lib.train.trainers import BaseTrainer\nfrom lib.train.admin import AverageMeter, StatValue\nfrom memory_profiler import profile\n# from lib.train.admin import TensorboardWriter\nimport torch\nimport time\nimport numpy as np\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.cuda.amp import autocast\nfrom torch.cuda.amp import GradScaler\n\nfrom lib.utils.misc import get_world_size\n\n\nclass LTRSeqTrainerV2(BaseTrainer):\n    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None, use_amp=False):\n        \"\"\"\n        args:\n            actor - The actor for training the network\n            loaders - list of dataset loaders, e.g. [train_loader, val_loader]. In each epoch, the trainer runs one\n                        epoch for each loader.\n            optimizer - The optimizer used for training, e.g. Adam\n            settings - Training settings\n            lr_scheduler - Learning rate scheduler\n        \"\"\"\n        super().__init__(actor, loaders, optimizer, settings, lr_scheduler)\n\n        self._set_default_settings()\n\n        # Initialize statistics variables\n        self.stats = OrderedDict({loader.name: None for loader in self.loaders})\n\n        # Initialize tensorboard and wandb\n        # self.wandb_writer = None\n        # if settings.local_rank in [-1, 0]:\n        #    tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path)\n        #    if not os.path.exists(tensorboard_writer_dir):\n        #        os.makedirs(tensorboard_writer_dir)\n        #    self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders])\n\n        #    if settings.use_wandb:\n        #        world_size = get_world_size()\n        #        cur_train_samples = self.loaders[0].dataset.samples_per_epoch * max(0, self.epoch - 1)\n        #        interval = (world_size * settings.batchsize)  # * interval\n        #        self.wandb_writer = WandbWriter(settings.project_path[6:], {}, tensorboard_writer_dir, cur_train_samples, interval)\n\n        self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True)\n        print(\"move_data\", self.move_data_to_gpu)\n        self.settings = settings\n        self.use_amp = use_amp\n        if use_amp:\n            self.scaler = GradScaler()\n\n    def _set_default_settings(self):\n        # Dict of all default values\n        default = {'print_interval': 10,\n                   'print_stats': None,\n                   'description': ''}\n\n        for param, default_value in default.items():\n            if getattr(self.settings, param, None) is None:\n                setattr(self.settings, param, default_value)\n\n        self.miou_list = []\n\n    def cycle_dataset(self, loader):\n        \"\"\"Do a cycle of training or validation.\"\"\"\n        torch.autograd.set_detect_anomaly(True)\n        self.actor.train(loader.training)\n        torch.set_grad_enabled(loader.training)\n\n        self._init_timing()\n\n        for i, data in enumerate(loader, 1):\n            self.actor.eval()\n            self.data_read_done_time = time.time()\n            with torch.no_grad():\n                explore_result = self.actor.explore(data)\n            if explore_result == None:\n                print(\"this time i skip\")\n                continue\n      
      # get inputs\n            # print(data)\n\n            self.data_to_gpu_time = time.time()\n\n            data['epoch'] = self.epoch\n            data['settings'] = self.settings\n\n            stats = {}\n            reward_record = []\n            miou_record = []\n            e_miou_record = []\n            num_seq = len(data['num_frames'])\n\n            # Calculate reward tensor\n            # reward_tensor = torch.zeros(explore_result['baseline_iou'].size())\n            baseline_iou = explore_result['baseline_iou']\n            # explore_iou = explore_result['explore_iou']\n            for seq_idx in range(num_seq):\n                num_frames = data['num_frames'][seq_idx] - 1\n                b_miou = torch.mean(baseline_iou[:num_frames, seq_idx])\n                #    e_miou = torch.mean(explore_iou[:num_frames, seq_idx])\n                miou_record.append(b_miou.item())\n                #    e_miou_record.append(e_miou.item())\n\n                b_reward = b_miou.item()\n            #    e_reward = e_miou.item()\n            #    iou_gap = e_reward - b_reward\n            #    reward_record.append(iou_gap)\n            #    reward_tensor[:num_frames, seq_idx] = iou_gap\n\n            # Training mode\n            cursor = 0\n            bs_backward = 1\n\n            # print(self.actor.net.module.box_head.decoder.layers[2].mlpx.fc1.weight)\n            self.optimizer.zero_grad()\n            self.actor.train()\n            while cursor < num_seq * 2:\n                model_inputs = {}\n                model_inputs['slt_loss_weight'] = 15\n\n                model_inputs['search_images'] = explore_result['search_images'][:, cursor:cursor + bs_backward].cuda()\n                model_inputs['search_anno'] = explore_result['search_anno'][:, cursor:cursor + bs_backward].cuda()\n                model_inputs['pre_seq'] = explore_result['pre_seq'][:, cursor:cursor + bs_backward].cuda()\n                model_inputs['x_feat'] = explore_result['x_feat'].squeeze(1)[:, cursor:cursor + bs_backward].cuda()\n                model_inputs['template_images_z0'] = explore_result['template_images_z0'][:,\n                                                     cursor:cursor + bs_backward].cuda()\n                model_inputs['dz_feat_update'] = explore_result['dz_feat_update'][:, cursor:cursor + bs_backward].cuda()\n                model_inputs['target_in_search'] = explore_result['target_in_search'][:,\n                                                   cursor:cursor + bs_backward].cuda()\n                model_inputs['epoch'] = self.epoch\n\n                loss, stats_cur = self.actor.compute_sequence_losses(model_inputs)\n\n                loss.backward()\n\n                for key, val in stats_cur.items():\n                    if key in stats:\n                        stats[key] += val * (bs_backward / num_seq)\n                    else:\n                        stats[key] = val * (bs_backward / num_seq)\n                cursor += bs_backward\n            grad_norm = clip_grad_norm_(self.actor.net.parameters(), 100)\n            stats['grad_norm'] = grad_norm\n\n            self.optimizer.step()\n\n            miou = np.mean(miou_record)\n            self.miou_list.append(miou)\n\n            stats['mIoU'] = miou\n            stats['mIoU10'] = np.mean(self.miou_list[-10:])\n            stats['mIoU100'] = np.mean(self.miou_list[-100:])\n\n            batch_size = num_seq * np.max(data['num_frames'])\n            self._update_stats(stats, batch_size, loader)\n            self._print_stats(i, 
loader, batch_size)\n            torch.cuda.empty_cache()\n\n\n    def train_epoch(self):\n        \"\"\"Do one epoch for each loader.\"\"\"\n        for loader in self.loaders:\n            if self.epoch % loader.epoch_interval == 0:\n                # 2021.1.10 Set epoch\n                if isinstance(loader.sampler, DistributedSampler):\n                    loader.sampler.set_epoch(self.epoch)\n                self.cycle_dataset(loader)\n\n        self._stats_new_epoch()\n        # if self.settings.local_rank in [-1, 0]:\n        #    self._write_tensorboard()\n\n    def _init_timing(self):\n        self.num_frames = 0\n        self.start_time = time.time()\n        self.prev_time = self.start_time\n        self.avg_date_time = 0\n        self.avg_gpu_trans_time = 0\n        self.avg_forward_time = 0\n\n    def _update_stats(self, new_stats: OrderedDict, batch_size, loader):\n        # Initialize stats if not initialized yet\n        if loader.name not in self.stats.keys() or self.stats[loader.name] is None:\n            self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()})\n\n        # add lr state\n        if loader.training:\n            lr_list = self.lr_scheduler.get_last_lr()\n            for i, lr in enumerate(lr_list):\n                var_name = 'LearningRate/group{}'.format(i)\n                if var_name not in self.stats[loader.name].keys():\n                    self.stats[loader.name][var_name] = StatValue()\n                self.stats[loader.name][var_name].update(lr)\n\n        for name, val in new_stats.items():\n            if name not in self.stats[loader.name].keys():\n                self.stats[loader.name][name] = AverageMeter()\n            self.stats[loader.name][name].update(val, batch_size)\n\n    def _print_stats(self, i, loader, batch_size):\n        self.num_frames += batch_size\n        current_time = time.time()\n        batch_fps = batch_size / (current_time - self.prev_time)\n        average_fps = self.num_frames / (current_time - self.start_time)\n        prev_frame_time_backup = self.prev_time\n        self.prev_time = current_time\n\n        self.avg_date_time += (self.data_read_done_time - prev_frame_time_backup)\n        self.avg_gpu_trans_time += (self.data_to_gpu_time - self.data_read_done_time)\n        self.avg_forward_time += current_time - self.data_to_gpu_time\n\n        if i % self.settings.print_interval == 0 or i == loader.__len__():\n            print_str = '[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__())\n            print_str += 'FPS: %.1f (%.1f)  ,  ' % (average_fps, batch_fps)\n\n            # 2021.12.14 add data time print\n            print_str += 'DataTime: %.3f (%.3f)  ,  ' % (\n            self.avg_date_time / self.num_frames * batch_size, self.avg_gpu_trans_time / self.num_frames * batch_size)\n            print_str += 'ForwardTime: %.3f  ,  ' % (self.avg_forward_time / self.num_frames * batch_size)\n            print_str += 'TotalTime: %.3f  ,  ' % ((current_time - self.start_time) / self.num_frames * batch_size)\n            # print_str += 'DataTime: %.3f (%.3f)  ,  ' % (self.data_read_done_time - prev_frame_time_backup, self.data_to_gpu_time - self.data_read_done_time)\n            # print_str += 'ForwardTime: %.3f  ,  ' % (current_time - self.data_to_gpu_time)\n            # print_str += 'TotalTime: %.3f  ,  ' % (current_time - prev_frame_time_backup)\n\n            for name, val in self.stats[loader.name].items():\n                if (self.settings.print_stats is None 
or name in self.settings.print_stats):\n                    if hasattr(val, 'avg'):\n                        print_str += '%s: %.5f  ,  ' % (name, val.avg)\n                    # else:\n                    #     print_str += '%s: %r  ,  ' % (name, val)\n\n            print(print_str[:-5])\n            log_str = print_str[:-5] + '\\n'\n            with open(self.settings.log_file, 'a') as f:\n                f.write(log_str)\n\n    def _stats_new_epoch(self):\n        # Record learning rate\n        for loader in self.loaders:\n            if loader.training:\n                try:\n                    lr_list = self.lr_scheduler.get_last_lr()\n                except:\n                    lr_list = self.lr_scheduler._get_lr(self.epoch)\n                for i, lr in enumerate(lr_list):\n                    var_name = 'LearningRate/group{}'.format(i)\n                    if var_name not in self.stats[loader.name].keys():\n                        self.stats[loader.name][var_name] = StatValue()\n                    self.stats[loader.name][var_name].update(lr)\n\n        for loader_stats in self.stats.values():\n            if loader_stats is None:\n                continue\n            for stat_value in loader_stats.values():\n                if hasattr(stat_value, 'new_epoch'):\n                    stat_value.new_epoch()\n\n    # def _write_tensorboard(self):\n    #    if self.epoch == 1:\n    #        self.tensorboard_writer.write_info(self.settings.script_name, self.settings.description)\n\n    #    self.tensorboard_writer.write_epoch(self.stats, self.epoch)\n"
  },
  {
    "path": "lib/train/trainers/ltr_trainer.py",
    "content": "import os\nimport datetime\nfrom collections import OrderedDict\n\n#from lib.train.data.wandb_logger import WandbWriter\nfrom lib.train.trainers import BaseTrainer\nfrom lib.train.admin import AverageMeter, StatValue\n#from lib.train.admin import TensorboardWriter\nimport torch\nimport time\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.cuda.amp import autocast\nfrom torch.cuda.amp import GradScaler\n\nfrom lib.utils.misc import get_world_size\n\n\nclass LTRTrainer(BaseTrainer):\n    def __init__(self, actor, loaders, optimizer, settings, lr_scheduler=None, use_amp=False):\n        \"\"\"\n        args:\n            actor - The actor for training the network\n            loaders - list of dataset loaders, e.g. [train_loader, val_loader]. In each epoch, the trainer runs one\n                        epoch for each loader.\n            optimizer - The optimizer used for training, e.g. Adam\n            settings - Training settings\n            lr_scheduler - Learning rate scheduler\n        \"\"\"\n        super().__init__(actor, loaders, optimizer, settings, lr_scheduler)\n\n        self._set_default_settings()\n\n        # Initialize statistics variables\n        self.stats = OrderedDict({loader.name: None for loader in self.loaders})\n\n        # Initialize tensorboard and wandb\n        #self.wandb_writer = None\n        #if settings.local_rank in [-1, 0]:\n        #    tensorboard_writer_dir = os.path.join(self.settings.env.tensorboard_dir, self.settings.project_path)\n        #    if not os.path.exists(tensorboard_writer_dir):\n        #        os.makedirs(tensorboard_writer_dir)\n        #    self.tensorboard_writer = TensorboardWriter(tensorboard_writer_dir, [l.name for l in loaders])\n\n        #    if settings.use_wandb:\n        #        world_size = get_world_size()\n        #        cur_train_samples = self.loaders[0].dataset.samples_per_epoch * max(0, self.epoch - 1)\n        #        interval = (world_size * settings.batchsize)  # * interval\n        #        self.wandb_writer = WandbWriter(settings.project_path[6:], {}, tensorboard_writer_dir, cur_train_samples, interval)\n\n        self.move_data_to_gpu = getattr(settings, 'move_data_to_gpu', True)\n        print(\"move_data\", self.move_data_to_gpu)\n        self.settings = settings\n        self.use_amp = use_amp\n        if use_amp:\n            self.scaler = GradScaler()\n\n    def _set_default_settings(self):\n        # Dict of all default values\n        default = {'print_interval': 10,\n                   'print_stats': None,\n                   'description': ''}\n\n        for param, default_value in default.items():\n            if getattr(self.settings, param, None) is None:\n                setattr(self.settings, param, default_value)\n\n    def cycle_dataset(self, loader):\n        \"\"\"Do a cycle of training or validation.\"\"\"\n\n        self.actor.train(loader.training)\n        torch.set_grad_enabled(loader.training)\n\n        self._init_timing()\n\n        for i, data in enumerate(loader, 1):\n            self.data_read_done_time = time.time()\n            # get inputs\n            if self.move_data_to_gpu:\n                data = data.to(self.device)\n\n            self.data_to_gpu_time = time.time()\n\n            data['epoch'] = self.epoch\n            data['settings'] = self.settings\n            # forward pass\n            if not self.use_amp:\n                loss, stats = self.actor(data)\n            else:\n                with autocast():\n                
    loss, stats = self.actor(data)\n\n            # backward pass and update weights\n            if loader.training:\n                self.optimizer.zero_grad()\n                if not self.use_amp:\n                    loss.backward()\n                    if self.settings.grad_clip_norm > 0:\n                        torch.nn.utils.clip_grad_norm_(self.actor.net.parameters(), self.settings.grad_clip_norm)\n                    self.optimizer.step()\n                else:\n                    self.scaler.scale(loss).backward()\n                    if self.settings.grad_clip_norm > 0:\n                        # unscale gradients back to fp32 scale first, so clipping uses the same threshold as the non-AMP path\n                        self.scaler.unscale_(self.optimizer)\n                        torch.nn.utils.clip_grad_norm_(self.actor.net.parameters(), self.settings.grad_clip_norm)\n                    self.scaler.step(self.optimizer)\n                    self.scaler.update()\n\n            # update statistics\n            batch_size = data['template_images'].shape[loader.stack_dim]\n            self._update_stats(stats, batch_size, loader)\n\n            # print statistics\n            self._print_stats(i, loader, batch_size)\n\n            # update wandb status\n            #if self.wandb_writer is not None and i % self.settings.print_interval == 0:\n            #    if self.settings.local_rank in [-1, 0]:\n            #        self.wandb_writer.write_log(self.stats, self.epoch)\n\n        # calculate ETA after every epoch\n        epoch_time = self.prev_time - self.start_time\n        print("Epoch Time: " + str(datetime.timedelta(seconds=epoch_time)))\n        print("Avg Data Time: %.5f" % (self.avg_date_time / self.num_frames * batch_size))\n        print("Avg GPU Trans Time: %.5f" % (self.avg_gpu_trans_time / self.num_frames * batch_size))\n        print("Avg Forward Time: %.5f" % (self.avg_forward_time / self.num_frames * batch_size))\n\n    def train_epoch(self):\n        \"\"\"Do one epoch for each loader.\"\"\"\n        for loader in self.loaders:\n            if self.epoch % loader.epoch_interval == 0:\n                # 2021.1.10 Set epoch\n                if isinstance(loader.sampler, DistributedSampler):\n                    loader.sampler.set_epoch(self.epoch)\n                self.cycle_dataset(loader)\n\n        self._stats_new_epoch()\n        #if self.settings.local_rank in [-1, 0]:\n        #    self._write_tensorboard()\n\n    def _init_timing(self):\n        self.num_frames = 0\n        self.start_time = time.time()\n        self.prev_time = self.start_time\n        self.avg_date_time = 0\n        self.avg_gpu_trans_time = 0\n        self.avg_forward_time = 0\n\n    def _update_stats(self, new_stats: OrderedDict, batch_size, loader):\n        # Initialize stats if not initialized yet\n        if loader.name not in self.stats.keys() or self.stats[loader.name] is None:\n            self.stats[loader.name] = OrderedDict({name: AverageMeter() for name in new_stats.keys()})\n\n        # add lr state\n        if loader.training:\n            lr_list = self.lr_scheduler.get_last_lr()\n            for i, lr in enumerate(lr_list):\n                var_name = 'LearningRate/group{}'.format(i)\n                if var_name not in self.stats[loader.name].keys():\n                    self.stats[loader.name][var_name] = StatValue()\n                self.stats[loader.name][var_name].update(lr)\n\n        for name, val in new_stats.items():\n            if name not in self.stats[loader.name].keys():\n                self.stats[loader.name][name] = AverageMeter()\n            self.stats[loader.name][name].update(val, batch_size)\n\n    def _print_stats(self, i, loader, batch_size):\n        self.num_frames += batch_size\n        current_time = time.time()\n        batch_fps = batch_size / (current_time - self.prev_time)\n        
average_fps = self.num_frames / (current_time - self.start_time)\n        prev_frame_time_backup = self.prev_time\n        self.prev_time = current_time\n\n        self.avg_date_time += (self.data_read_done_time - prev_frame_time_backup)\n        self.avg_gpu_trans_time += (self.data_to_gpu_time - self.data_read_done_time)\n        self.avg_forward_time += current_time - self.data_to_gpu_time\n\n        if i % self.settings.print_interval == 0 or i == loader.__len__():\n            print_str = '[%s: %d, %d / %d] ' % (loader.name, self.epoch, i, loader.__len__())\n            print_str += 'FPS: %.1f (%.1f)  ,  ' % (average_fps, batch_fps)\n\n            # 2021.12.14 add data time print\n            print_str += 'DataTime: %.3f (%.3f)  ,  ' % (self.avg_date_time / self.num_frames * batch_size, self.avg_gpu_trans_time / self.num_frames * batch_size)\n            print_str += 'ForwardTime: %.3f  ,  ' % (self.avg_forward_time / self.num_frames * batch_size)\n            print_str += 'TotalTime: %.3f  ,  ' % ((current_time - self.start_time) / self.num_frames * batch_size)\n            # print_str += 'DataTime: %.3f (%.3f)  ,  ' % (self.data_read_done_time - prev_frame_time_backup, self.data_to_gpu_time - self.data_read_done_time)\n            # print_str += 'ForwardTime: %.3f  ,  ' % (current_time - self.data_to_gpu_time)\n            # print_str += 'TotalTime: %.3f  ,  ' % (current_time - prev_frame_time_backup)\n\n            for name, val in self.stats[loader.name].items():\n                if (self.settings.print_stats is None or name in self.settings.print_stats):\n                    if hasattr(val, 'avg'):\n                        print_str += '%s: %.5f  ,  ' % (name, val.avg)\n                    # else:\n                    #     print_str += '%s: %r  ,  ' % (name, val)\n\n            print(print_str[:-5])\n            log_str = print_str[:-5] + '\\n'\n            with open(self.settings.log_file, 'a') as f:\n                f.write(log_str)\n\n    def _stats_new_epoch(self):\n        # Record learning rate\n        for loader in self.loaders:\n            if loader.training:\n                try:\n                    lr_list = self.lr_scheduler.get_last_lr()\n                except:\n                    lr_list = self.lr_scheduler._get_lr(self.epoch)\n                for i, lr in enumerate(lr_list):\n                    var_name = 'LearningRate/group{}'.format(i)\n                    if var_name not in self.stats[loader.name].keys():\n                        self.stats[loader.name][var_name] = StatValue()\n                    self.stats[loader.name][var_name].update(lr)\n\n        for loader_stats in self.stats.values():\n            if loader_stats is None:\n                continue\n            for stat_value in loader_stats.values():\n                if hasattr(stat_value, 'new_epoch'):\n                    stat_value.new_epoch()\n\n    #def _write_tensorboard(self):\n    #    if self.epoch == 1:\n    #        self.tensorboard_writer.write_info(self.settings.script_name, self.settings.description)\n\n    #    self.tensorboard_writer.write_epoch(self.stats, self.epoch)\n"
  },
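  {
    "path": "examples/amp_step_demo.py",
    "content": "# NOTE: editor-added illustrative sketch; NOT part of the original ARTrack\n# release, and this examples/ path is hypothetical. It shows the AMP update\n# pattern used by LTRTrainer above, including the scaler.unscale_() call that\n# gradient clipping needs under AMP (clipping still-scaled gradients would\n# apply the wrong threshold).\nimport torch\nimport torch.nn.functional as F\nfrom torch.cuda.amp import autocast, GradScaler\n\n\ndef amp_step(model, optimizer, scaler, x, y, grad_clip_norm=0.1):\n    optimizer.zero_grad()\n    with autocast(enabled=torch.cuda.is_available()):\n        loss = F.mse_loss(model(x), y)\n    scaler.scale(loss).backward()\n    if grad_clip_norm > 0:\n        scaler.unscale_(optimizer)  # bring grads back to fp32 scale before clipping\n        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip_norm)\n    scaler.step(optimizer)\n    scaler.update()\n    return loss.item()\n\n\nif __name__ == '__main__':\n    device = 'cuda' if torch.cuda.is_available() else 'cpu'\n    model = torch.nn.Linear(4, 1).to(device)\n    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n    scaler = GradScaler(enabled=torch.cuda.is_available())  # no-op on CPU\n    x = torch.randn(8, 4, device=device)\n    y = torch.randn(8, 1, device=device)\n    print('loss:', amp_step(model, optimizer, scaler, x, y))\n"
  },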
  {
    "path": "lib/utils/__init__.py",
    "content": "from .tensor import TensorDict, TensorList\n"
  },
  {
    "path": "lib/utils/box_ops.py",
    "content": "import torch\nfrom torchvision.ops.boxes import box_area\nimport numpy as np\n\ndef box_xywh_to_cxywh(x):\n\tx1, y1, w, h = x.unbind(-1)\n\tb = [x1+0.5*w, y1+0.5*h, w, h]\n\treturn torch.stack(b, dim=-1)\n\ndef box_cxcywh_to_xyxy(x):\n    x_c, y_c, w, h = x.unbind(-1)\n    b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n         (x_c + 0.5 * w), (y_c + 0.5 * h)]\n    return torch.stack(b, dim=-1)\n\n\ndef box_xywh_to_xyxy(x):\n    x1, y1, w, h = x.unbind(-1)\n    b = [x1, y1, x1 + w, y1 + h]\n    return torch.stack(b, dim=-1)\n\n\ndef box_xyxy_to_xywh(x):\n    x1, y1, x2, y2 = x.unbind(-1)\n    b = [x1, y1, x2 - x1, y2 - y1]\n    return torch.stack(b, dim=-1)\n\n\ndef box_xyxy_to_cxcywh(x):\n    x0, y0, x1, y1 = x.unbind(-1)\n    b = [(x0 + x1) / 2, (y0 + y1) / 2,\n         (x1 - x0), (y1 - y0)]\n    return torch.stack(b, dim=-1)\n\n\n# modified from torchvision to also return the union\n'''Note that this function only supports shape (N,4)'''\n\n\ndef box_iou(boxes1, boxes2):\n    \"\"\"\n\n    :param boxes1: (N, 4) (x1,y1,x2,y2)\n    :param boxes2: (N, 4) (x1,y1,x2,y2)\n    :return:\n    \"\"\"\n    area1 = box_area(boxes1) # (N,)\n    area2 = box_area(boxes2) # (N,)\n\n    lt = torch.max(boxes1[:, :2], boxes2[:, :2])  # (N,2)\n    rb = torch.min(boxes1[:, 2:], boxes2[:, 2:])  # (N,2)\n\n    wh = (rb - lt).clamp(min=0)  # (N,2)\n    inter = wh[:, 0] * wh[:, 1]  # (N,)\n\n    union = area1 + area2 - inter\n\n    iou = inter / union\n    return iou, union\n\n\n'''Note that this implementation is different from DETR's'''\n\n\ndef generalized_box_iou(boxes1, boxes2):\n    \"\"\"\n    Generalized IoU from https://giou.stanford.edu/\n\n    The boxes should be in [x0, y0, x1, y1] format\n\n    boxes1: (N, 4)\n    boxes2: (N, 4)\n    \"\"\"\n    # degenerate boxes gives inf / nan results\n    # so do an early check\n    # try:\n    #assert (boxes1[:, 2:] >= boxes1[:, :2]).all()\n    # assert (boxes2[:, 2:] >= boxes2[:, :2]).all()\n    iou, union = box_iou(boxes1, boxes2) # (N,)\n\n    lt = torch.min(boxes1[:, :2], boxes2[:, :2])\n    rb = torch.max(boxes1[:, 2:], boxes2[:, 2:])\n\n    wh = (rb - lt).clamp(min=0)  # (N,2)\n    area = wh[:, 0] * wh[:, 1] # (N,)\n\n    return iou - (area - union) / area, iou\n\n\ndef giou_loss(boxes1, boxes2):\n    \"\"\"\n\n    :param boxes1: (N, 4) (x1,y1,x2,y2)\n    :param boxes2: (N, 4) (x1,y1,x2,y2)\n    :return:\n    \"\"\"\n    giou, iou = generalized_box_iou(boxes1, boxes2)\n    return (1 - giou).mean(), iou\n\n\ndef clip_box(box: list, H, W, margin=0):\n    x1, y1, w, h = box\n    x2, y2 = x1 + w, y1 + h\n    x1 = min(max(0, x1), W-margin)\n    x2 = min(max(margin, x2), W)\n    y1 = min(max(0, y1), H-margin)\n    y2 = min(max(margin, y2), H)\n    w = max(margin, x2-x1)\n    h = max(margin, y2-y1)\n    return [x1, y1, w, h]\n"
  },
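  {
    "path": "examples/box_ops_demo.py",
    "content": "# NOTE: editor-added illustrative sketch; NOT part of the original ARTrack\n# release, and this examples/ path is hypothetical (run from the repo root).\n# Exercises the box-format converters and giou_loss from lib/utils/box_ops.py\n# on toy boxes.\nimport torch\n\nfrom lib.utils.box_ops import box_xywh_to_xyxy, box_xyxy_to_cxcywh, giou_loss\n\nif __name__ == '__main__':\n    xywh = torch.tensor([[10., 10., 20., 40.]])\n    xyxy = box_xywh_to_xyxy(xywh)      # -> [10, 10, 30, 50]\n    print(box_xyxy_to_cxcywh(xyxy))    # -> [20, 30, 20, 40]\n    pred = torch.tensor([[12., 12., 28., 48.]])\n    loss, iou = giou_loss(pred, xyxy)  # both inputs are (x1, y1, x2, y2)\n    print('giou loss:', loss.item(), 'iou:', iou.item())\n"
  },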
  {
    "path": "lib/utils/ce_utils.py",
    "content": "import math\r\n\r\nimport torch\r\nimport torch.nn.functional as F\r\n\r\n\r\ndef generate_bbox_mask(bbox_mask, bbox):\r\n    b, h, w = bbox_mask.shape\r\n    for i in range(b):\r\n        bbox_i = bbox[i].cpu().tolist()\r\n        bbox_mask[i, int(bbox_i[1]):int(bbox_i[1] + bbox_i[3] - 1), int(bbox_i[0]):int(bbox_i[0] + bbox_i[2] - 1)] = 1\r\n    return bbox_mask\r\n\r\n\r\ndef generate_mask_cond(cfg, bs, device, gt_bbox):\r\n    template_size = cfg.DATA.TEMPLATE.SIZE\r\n    stride = cfg.MODEL.BACKBONE.STRIDE\r\n    template_feat_size = template_size // stride\r\n\r\n    if cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'ALL':\r\n        box_mask_z = None\r\n    elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'CTR_POINT':\r\n        if template_feat_size == 8:\r\n            index = slice(3, 4)\r\n        elif template_feat_size == 12:\r\n            index = slice(5, 6)\r\n        elif template_feat_size == 7:\r\n            index = slice(3, 4)\r\n        elif template_feat_size == 14:\r\n            index = slice(6, 7)\r\n        else:\r\n            raise NotImplementedError\r\n        box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device)\r\n        box_mask_z[:, index, index] = 1\r\n        box_mask_z = box_mask_z.flatten(1).to(torch.bool)\r\n    elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'CTR_REC':\r\n        # use fixed 4x4 region, 3:5 for 8x8\r\n        # use fixed 4x4 region 5:6 for 12x12\r\n        if template_feat_size == 8:\r\n            index = slice(3, 5)\r\n        elif template_feat_size == 12:\r\n            index = slice(5, 7)\r\n        elif template_feat_size == 7:\r\n            index = slice(3, 4)\r\n        else:\r\n            raise NotImplementedError\r\n        box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device)\r\n        box_mask_z[:, index, index] = 1\r\n        box_mask_z = box_mask_z.flatten(1).to(torch.bool)\r\n\r\n    elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'GT_BOX':\r\n        box_mask_z = torch.zeros([bs, template_size, template_size], device=device)\r\n        # box_mask_z_ori = data['template_seg'][0].view(-1, 1, *data['template_seg'].shape[2:])  # (batch, 1, 128, 128)\r\n        box_mask_z = generate_bbox_mask(box_mask_z, gt_bbox * template_size).unsqueeze(1).to(\r\n            torch.float)  # (batch, 1, 128, 128)\r\n        # box_mask_z_vis = box_mask_z.cpu().numpy()\r\n        box_mask_z = F.interpolate(box_mask_z, scale_factor=1. 
/ cfg.MODEL.BACKBONE.STRIDE, mode='bilinear',\r\n                                   align_corners=False)\r\n        box_mask_z = box_mask_z.flatten(1).to(torch.bool)\r\n        # box_mask_z_vis = box_mask_z[:, 0, ...].cpu().numpy()\r\n        # gaussian_maps_vis = generate_heatmap(data['template_anno'], self.cfg.DATA.TEMPLATE.SIZE, self.cfg.MODEL.STRIDE)[0].cpu().numpy()\r\n    else:\r\n        raise NotImplementedError\r\n\r\n    return box_mask_z\r\n\r\n\r\ndef adjust_keep_rate(epoch, warmup_epochs, total_epochs, ITERS_PER_EPOCH, base_keep_rate=0.5, max_keep_rate=1, iters=-1):\r\n    if epoch < warmup_epochs:\r\n        return 1\r\n    if epoch >= total_epochs:\r\n        return base_keep_rate\r\n    if iters == -1:\r\n        iters = epoch * ITERS_PER_EPOCH\r\n    total_iters = ITERS_PER_EPOCH * (total_epochs - warmup_epochs)\r\n    iters = iters - ITERS_PER_EPOCH * warmup_epochs\r\n    keep_rate = base_keep_rate + (max_keep_rate - base_keep_rate) \\\r\n        * (math.cos(iters / total_iters * math.pi) + 1) * 0.5\r\n\r\n    return keep_rate\r\n"
  },
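  {
    "path": "examples/keep_rate_demo.py",
    "content": "# NOTE: editor-added illustrative sketch; NOT part of the original ARTrack\n# release, and this examples/ path is hypothetical (run from the repo root).\n# Prints the cosine keep-rate schedule from lib/utils/ce_utils.py: the rate\n# stays at 1 during warmup, then decays towards base_keep_rate.\nfrom lib.utils.ce_utils import adjust_keep_rate\n\nif __name__ == '__main__':\n    for epoch in (0, 5, 10, 20, 40, 60):\n        kr = adjust_keep_rate(epoch, warmup_epochs=10, total_epochs=50,\n                              ITERS_PER_EPOCH=100, base_keep_rate=0.7)\n        print('epoch %2d -> keep rate %.4f' % (epoch, kr))\n"
  },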
  {
    "path": "lib/utils/focal_loss.py",
    "content": "from abc import ABC\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass FocalLoss(nn.Module, ABC):\r\n    def __init__(self, alpha=2, beta=4):\r\n        super(FocalLoss, self).__init__()\r\n        self.alpha = alpha\r\n        self.beta = beta\r\n\r\n    def forward(self, prediction, target):\r\n        positive_index = target.eq(1).float()\r\n        negative_index = target.lt(1).float()\r\n\r\n        negative_weights = torch.pow(1 - target, self.beta)\r\n        # clamp min value is set to 1e-12 to maintain the numerical stability\r\n        prediction = torch.clamp(prediction, 1e-12)\r\n\r\n        positive_loss = torch.log(prediction) * torch.pow(1 - prediction, self.alpha) * positive_index\r\n        negative_loss = torch.log(1 - prediction) * torch.pow(prediction,\r\n                                                              self.alpha) * negative_weights * negative_index\r\n\r\n        num_positive = positive_index.float().sum()\r\n        positive_loss = positive_loss.sum()\r\n        negative_loss = negative_loss.sum()\r\n\r\n        if num_positive == 0:\r\n            loss = -negative_loss\r\n        else:\r\n            loss = -(positive_loss + negative_loss) / num_positive\r\n\r\n        return loss\r\n\r\n\r\nclass LBHinge(nn.Module):\r\n    \"\"\"Loss that uses a 'hinge' on the lower bound.\r\n    This means that for samples with a label value smaller than the threshold, the loss is zero if the prediction is\r\n    also smaller than that threshold.\r\n    args:\r\n        error_matric:  What base loss to use (MSE by default).\r\n        threshold:  Threshold to use for the hinge.\r\n        clip:  Clip the loss if it is above this value.\r\n    \"\"\"\r\n    def __init__(self, error_metric=nn.MSELoss(), threshold=None, clip=None):\r\n        super().__init__()\r\n        self.error_metric = error_metric\r\n        self.threshold = threshold if threshold is not None else -100\r\n        self.clip = clip\r\n\r\n    def forward(self, prediction, label, target_bb=None):\r\n        negative_mask = (label < self.threshold).float()\r\n        positive_mask = (1.0 - negative_mask)\r\n\r\n        prediction = negative_mask * F.relu(prediction) + positive_mask * prediction\r\n\r\n        loss = self.error_metric(prediction, positive_mask * label)\r\n\r\n        if self.clip is not None:\r\n            loss = torch.min(loss, torch.tensor([self.clip], device=loss.device))\r\n        return loss"
  },
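  {
    "path": "examples/focal_loss_demo.py",
    "content": "# NOTE: editor-added illustrative sketch; NOT part of the original ARTrack\n# release, and this examples/ path is hypothetical (run from the repo root).\n# Runs the modified focal loss from lib/utils/focal_loss.py on a toy\n# CenterNet-style heatmap: exact 1s are positives, everything below 1 is a\n# (Gaussian-weighted) negative.\nimport torch\n\nfrom lib.utils.focal_loss import FocalLoss\n\nif __name__ == '__main__':\n    target = torch.zeros(1, 1, 8, 8)\n    target[0, 0, 4, 4] = 1.0   # one positive centre\n    target[0, 0, 4, 3] = 0.8   # soft Gaussian neighbour\n    pred = torch.full_like(target, 0.1)\n    pred[0, 0, 4, 4] = 0.9     # confident at the centre\n    print('focal loss:', FocalLoss()(pred, target).item())\n"
  },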
  {
    "path": "lib/utils/heapmap_utils.py",
    "content": "import numpy as np\r\nimport torch\r\n\r\n\r\ndef generate_heatmap(bboxes, patch_size=320, stride=16):\r\n    \"\"\"\r\n    Generate ground truth heatmap same as CenterNet\r\n    Args:\r\n        bboxes (torch.Tensor): shape of [num_search, bs, 4]\r\n\r\n    Returns:\r\n        gaussian_maps: list of generated heatmap\r\n\r\n    \"\"\"\r\n    gaussian_maps = []\r\n    heatmap_size = patch_size // stride\r\n    for single_patch_bboxes in bboxes:\r\n        bs = single_patch_bboxes.shape[0]\r\n        gt_scoremap = torch.zeros(bs, heatmap_size, heatmap_size)\r\n        classes = torch.arange(bs).to(torch.long)\r\n        bbox = single_patch_bboxes * heatmap_size\r\n        wh = bbox[:, 2:]\r\n        centers_int = (bbox[:, :2] + wh / 2).round()\r\n        CenterNetHeatMap.generate_score_map(gt_scoremap, classes, wh, centers_int, 0.7)\r\n        gaussian_maps.append(gt_scoremap.to(bbox.device))\r\n    return gaussian_maps\r\n\r\n\r\nclass CenterNetHeatMap(object):\r\n    @staticmethod\r\n    def generate_score_map(fmap, gt_class, gt_wh, centers_int, min_overlap):\r\n        radius = CenterNetHeatMap.get_gaussian_radius(gt_wh, min_overlap)\r\n        radius = torch.clamp_min(radius, 0)\r\n        radius = radius.type(torch.int).cpu().numpy()\r\n        for i in range(gt_class.shape[0]):\r\n            channel_index = gt_class[i]\r\n            CenterNetHeatMap.draw_gaussian(fmap[channel_index], centers_int[i], radius[i])\r\n\r\n    @staticmethod\r\n    def get_gaussian_radius(box_size, min_overlap):\r\n        \"\"\"\r\n        copyed from CornerNet\r\n        box_size (w, h), it could be a torch.Tensor, numpy.ndarray, list or tuple\r\n        notice: we are using a bug-version, please refer to fix bug version in CornerNet\r\n        \"\"\"\r\n        # box_tensor = torch.Tensor(box_size)\r\n        box_tensor = box_size\r\n        width, height = box_tensor[..., 0], box_tensor[..., 1]\r\n\r\n        a1 = 1\r\n        b1 = height + width\r\n        c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\r\n        sq1 = torch.sqrt(b1 ** 2 - 4 * a1 * c1)\r\n        r1 = (b1 + sq1) / 2\r\n\r\n        a2 = 4\r\n        b2 = 2 * (height + width)\r\n        c2 = (1 - min_overlap) * width * height\r\n        sq2 = torch.sqrt(b2 ** 2 - 4 * a2 * c2)\r\n        r2 = (b2 + sq2) / 2\r\n\r\n        a3 = 4 * min_overlap\r\n        b3 = -2 * min_overlap * (height + width)\r\n        c3 = (min_overlap - 1) * width * height\r\n        sq3 = torch.sqrt(b3 ** 2 - 4 * a3 * c3)\r\n        r3 = (b3 + sq3) / 2\r\n\r\n        return torch.min(r1, torch.min(r2, r3))\r\n\r\n    @staticmethod\r\n    def gaussian2D(radius, sigma=1):\r\n        # m, n = [(s - 1.) / 2. 
for s in shape]\r\n        m, n = radius\r\n        y, x = np.ogrid[-m: m + 1, -n: n + 1]\r\n\r\n        gauss = np.exp(-(x * x + y * y) / (2 * sigma * sigma))\r\n        gauss[gauss < np.finfo(gauss.dtype).eps * gauss.max()] = 0\r\n        return gauss\r\n\r\n    @staticmethod\r\n    def draw_gaussian(fmap, center, radius, k=1):\r\n        diameter = 2 * radius + 1\r\n        gaussian = CenterNetHeatMap.gaussian2D((radius, radius), sigma=diameter / 6)\r\n        gaussian = torch.Tensor(gaussian)\r\n        x, y = int(center[0]), int(center[1])\r\n        height, width = fmap.shape[:2]\r\n\r\n        left, right = min(x, radius), min(width - x, radius + 1)\r\n        top, bottom = min(y, radius), min(height - y, radius + 1)\r\n\r\n        masked_fmap = fmap[y - top: y + bottom, x - left: x + right]\r\n        masked_gaussian = gaussian[radius - top: radius + bottom, radius - left: radius + right]\r\n        if min(masked_gaussian.shape) > 0 and min(masked_fmap.shape) > 0:\r\n            masked_fmap = torch.max(masked_fmap, masked_gaussian * k)\r\n            fmap[y - top: y + bottom, x - left: x + right] = masked_fmap\r\n        # return fmap\r\n\r\n\r\ndef compute_grids(features, strides):\r\n    \"\"\"\r\n    grids with respect to the input image size\r\n    \"\"\"\r\n    grids = []\r\n    for level, feature in enumerate(features):\r\n        h, w = feature.size()[-2:]\r\n        shifts_x = torch.arange(\r\n            0, w * strides[level],\r\n            step=strides[level],\r\n            dtype=torch.float32, device=feature.device)\r\n        shifts_y = torch.arange(\r\n            0, h * strides[level],\r\n            step=strides[level],\r\n            dtype=torch.float32, device=feature.device)\r\n        shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)\r\n        shift_x = shift_x.reshape(-1)\r\n        shift_y = shift_y.reshape(-1)\r\n        grids_per_level = torch.stack((shift_x, shift_y), dim=1) + \\\r\n                          strides[level] // 2\r\n        grids.append(grids_per_level)\r\n    return grids\r\n\r\n\r\ndef get_center3x3(locations, centers, strides, range=3):\r\n    '''\r\n    Inputs:\r\n        locations: M x 2\r\n        centers: N x 2\r\n        strides: M\r\n    '''\r\n    range = (range - 1) / 2\r\n    M, N = locations.shape[0], centers.shape[0]\r\n    locations_expanded = locations.view(M, 1, 2).expand(M, N, 2)  # M x N x 2\r\n    centers_expanded = centers.view(1, N, 2).expand(M, N, 2)  # M x N x 2\r\n    strides_expanded = strides.view(M, 1, 1).expand(M, N, 2)  # M x N\r\n    centers_discret = ((centers_expanded / strides_expanded).int() * strides_expanded).float() + \\\r\n                      strides_expanded / 2  # M x N x 2\r\n    dist_x = (locations_expanded[:, :, 0] - centers_discret[:, :, 0]).abs()\r\n    dist_y = (locations_expanded[:, :, 1] - centers_discret[:, :, 1]).abs()\r\n    return (dist_x <= strides_expanded[:, :, 0] * range) & \\\r\n           (dist_y <= strides_expanded[:, :, 0] * range)\r\n\r\n\r\ndef get_pred(score_map_ctr, size_map, offset_map, feat_size):\r\n    max_score, idx = torch.max(score_map_ctr.flatten(1), dim=1, keepdim=True)\r\n\r\n    idx = idx.unsqueeze(1).expand(idx.shape[0], 2, 1)\r\n    size = size_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)\r\n    offset = offset_map.flatten(2).gather(dim=2, index=idx).squeeze(-1)\r\n\r\n    return size * feat_size, offset\r\n"
  },
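  {
    "path": "examples/heatmap_demo.py",
    "content": "# NOTE: editor-added illustrative sketch; NOT part of the original ARTrack\n# release, and this examples/ path is hypothetical (run from the repo root).\n# Builds a CenterNet-style ground-truth heatmap with generate_heatmap from\n# lib/utils/heapmap_utils.py; bboxes are normalized [x, y, w, h] with shape\n# [num_search, bs, 4], and a 320px patch at stride 16 gives a 20x20 map.\nimport torch\n\nfrom lib.utils.heapmap_utils import generate_heatmap\n\nif __name__ == '__main__':\n    bboxes = torch.tensor([[[0.4, 0.4, 0.2, 0.2]]])  # one frame, batch of 1\n    maps = generate_heatmap(bboxes, patch_size=320, stride=16)\n    print(maps[0].shape)                  # torch.Size([1, 20, 20])\n    print('peak value:', maps[0].max().item())\n"
  },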
  {
    "path": "lib/utils/image.py",
    "content": ""
  },
  {
    "path": "lib/utils/lmdb_utils.py",
    "content": "import lmdb\nimport numpy as np\nimport cv2\nimport json\n\nLMDB_ENVS = dict()\nLMDB_HANDLES = dict()\nLMDB_FILELISTS = dict()\n\n\ndef get_lmdb_handle(name):\n    global LMDB_HANDLES, LMDB_FILELISTS\n    item = LMDB_HANDLES.get(name, None)\n    if item is None:\n        env = lmdb.open(name, readonly=True, lock=False, readahead=False, meminit=False)\n        LMDB_ENVS[name] = env\n        item = env.begin(write=False)\n        LMDB_HANDLES[name] = item\n\n    return item\n\n\ndef decode_img(lmdb_fname, key_name):\n    handle = get_lmdb_handle(lmdb_fname)\n    binfile = handle.get(key_name.encode())\n    if binfile is None:\n        print(\"Illegal data detected. %s %s\" % (lmdb_fname, key_name))\n    s = np.frombuffer(binfile, np.uint8)\n    x = cv2.cvtColor(cv2.imdecode(s, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)\n    return x\n\n\ndef decode_str(lmdb_fname, key_name):\n    handle = get_lmdb_handle(lmdb_fname)\n    binfile = handle.get(key_name.encode())\n    string = binfile.decode()\n    return string\n\n\ndef decode_json(lmdb_fname, key_name):\n    return json.loads(decode_str(lmdb_fname, key_name))\n\n\nif __name__ == \"__main__\":\n    lmdb_fname = \"/data/sda/v-yanbi/iccv21/LittleBoy_clean/data/got10k_lmdb\"\n    '''Decode image'''\n    # key_name = \"test/GOT-10k_Test_000001/00000001.jpg\"\n    # img = decode_img(lmdb_fname, key_name)\n    # cv2.imwrite(\"001.jpg\", img)\n    '''Decode str'''\n    # key_name = \"test/list.txt\"\n    # key_name = \"train/GOT-10k_Train_000001/groundtruth.txt\"\n    key_name = \"train/GOT-10k_Train_000001/absence.label\"\n    str_ = decode_str(lmdb_fname, key_name)\n    print(str_)\n"
  },
  {
    "path": "lib/utils/merge.py",
    "content": "import torch\n\n\ndef merge_template_search(inp_list, return_search=False, return_template=False):\n    \"\"\"NOTICE: search region related features must be in the last place\"\"\"\n    seq_dict = {\"feat\": torch.cat([x[\"feat\"] for x in inp_list], dim=0),\n                \"mask\": torch.cat([x[\"mask\"] for x in inp_list], dim=1),\n                \"pos\": torch.cat([x[\"pos\"] for x in inp_list], dim=0)}\n    if return_search:\n        x = inp_list[-1]\n        seq_dict.update({\"feat_x\": x[\"feat\"], \"mask_x\": x[\"mask\"], \"pos_x\": x[\"pos\"]})\n    if return_template:\n        z = inp_list[0]\n        seq_dict.update({\"feat_z\": z[\"feat\"], \"mask_z\": z[\"mask\"], \"pos_z\": z[\"pos\"]})\n    return seq_dict\n\n\ndef get_qkv(inp_list):\n    \"\"\"The 1st element of the inp_list is about the template,\n    the 2nd (the last) element is about the search region\"\"\"\n    dict_x = inp_list[-1]\n    dict_c = {\"feat\": torch.cat([x[\"feat\"] for x in inp_list], dim=0),\n              \"mask\": torch.cat([x[\"mask\"] for x in inp_list], dim=1),\n              \"pos\": torch.cat([x[\"pos\"] for x in inp_list], dim=0)}  # concatenated dict\n    q = dict_x[\"feat\"] + dict_x[\"pos\"]\n    k = dict_c[\"feat\"] + dict_c[\"pos\"]\n    v = dict_c[\"feat\"]\n    key_padding_mask = dict_c[\"mask\"]\n    return q, k, v, key_padding_mask\n"
  },
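  {
    "path": "examples/merge_demo.py",
    "content": "# NOTE: editor-added illustrative sketch; NOT part of the original ARTrack\n# release, and this examples/ path is hypothetical (run from the repo root).\n# Feeds toy template/search feature dicts through get_qkv from\n# lib/utils/merge.py; feat/pos are (HW, B, C), mask is (B, HW), and the\n# search-region dict must come last, as the docstring requires.\nimport torch\n\nfrom lib.utils.merge import get_qkv\n\nif __name__ == '__main__':\n    B, C = 2, 8\n    template = {'feat': torch.randn(64, B, C), 'pos': torch.randn(64, B, C),\n                'mask': torch.zeros(B, 64, dtype=torch.bool)}\n    search = {'feat': torch.randn(256, B, C), 'pos': torch.randn(256, B, C),\n              'mask': torch.zeros(B, 256, dtype=torch.bool)}\n    q, k, v, key_padding_mask = get_qkv([template, search])\n    # q comes from the search region only; k/v concatenate template + search\n    print(q.shape, k.shape, v.shape, key_padding_mask.shape)\n"
  },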
  {
    "path": "lib/utils/misc.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nMisc functions, including distributed helpers.\n\nMostly copy-paste from torchvision references.\n\"\"\"\nimport os\nimport subprocess\nimport time\nfrom collections import defaultdict, deque\nimport datetime\nimport pickle\nfrom typing import Optional, List\n\nimport torch\nimport torch.distributed as dist\nfrom torch import Tensor\n\n# needed due to empty tensor bug in pytorch and torchvision 0.5\nimport torchvision\nvers = torchvision.__version__.split('.')\nif int(vers[0]) <= 0 and int(vers[1]) < 7:\n    from torchvision.ops import _new_empty_tensor\n    from torchvision.ops.misc import _output_size\n\n\nclass SmoothedValue(object):\n    \"\"\"Track a series of values and provide access to smoothed values over a\n    window or the global series average.\n    \"\"\"\n\n    def __init__(self, window_size=20, fmt=None):\n        if fmt is None:\n            fmt = \"{median:.4f} ({global_avg:.4f})\"\n        self.deque = deque(maxlen=window_size)\n        self.total = 0.0\n        self.count = 0\n        self.fmt = fmt\n\n    def update(self, value, n=1):\n        self.deque.append(value)\n        self.count += n\n        self.total += value * n\n\n    def synchronize_between_processes(self):\n        \"\"\"\n        Warning: does not synchronize the deque!\n        \"\"\"\n        if not is_dist_avail_and_initialized():\n            return\n        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')\n        dist.barrier()\n        dist.all_reduce(t)\n        t = t.tolist()\n        self.count = int(t[0])\n        self.total = t[1]\n\n    @property\n    def median(self):\n        d = torch.tensor(list(self.deque))\n        return d.median().item()\n\n    @property\n    def avg(self):\n        d = torch.tensor(list(self.deque), dtype=torch.float32)\n        return d.mean().item()\n\n    @property\n    def global_avg(self):\n        return self.total / self.count\n\n    @property\n    def max(self):\n        return max(self.deque)\n\n    @property\n    def value(self):\n        return self.deque[-1]\n\n    def __str__(self):\n        return self.fmt.format(\n            median=self.median,\n            avg=self.avg,\n            global_avg=self.global_avg,\n            max=self.max,\n            value=self.value)\n\n\ndef all_gather(data):\n    \"\"\"\n    Run all_gather on arbitrary picklable data (not necessarily tensors)\n    Args:\n        data: any picklable object\n    Returns:\n        list[data]: list of data gathered from each rank\n    \"\"\"\n    world_size = get_world_size()\n    if world_size == 1:\n        return [data]\n\n    # serialized to a Tensor\n    buffer = pickle.dumps(data)\n    storage = torch.ByteStorage.from_buffer(buffer)\n    tensor = torch.ByteTensor(storage).to(\"cuda\")\n\n    # obtain Tensor size of each rank\n    local_size = torch.tensor([tensor.numel()], device=\"cuda\")\n    size_list = [torch.tensor([0], device=\"cuda\") for _ in range(world_size)]\n    dist.all_gather(size_list, local_size)\n    size_list = [int(size.item()) for size in size_list]\n    max_size = max(size_list)\n\n    # receiving Tensor from all ranks\n    # we pad the tensor because torch all_gather does not support\n    # gathering tensors of different shapes\n    tensor_list = []\n    for _ in size_list:\n        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=\"cuda\"))\n    if local_size != max_size:\n        padding = 
torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=\"cuda\")\n        tensor = torch.cat((tensor, padding), dim=0)\n    dist.all_gather(tensor_list, tensor)\n\n    data_list = []\n    for size, tensor in zip(size_list, tensor_list):\n        buffer = tensor.cpu().numpy().tobytes()[:size]\n        data_list.append(pickle.loads(buffer))\n\n    return data_list\n\n\ndef reduce_dict(input_dict, average=True):\n    \"\"\"\n    Args:\n        input_dict (dict): all the values will be reduced\n        average (bool): whether to do average or sum\n    Reduce the values in the dictionary from all processes so that all processes\n    have the averaged results. Returns a dict with the same fields as\n    input_dict, after reduction.\n    \"\"\"\n    world_size = get_world_size()\n    if world_size < 2:\n        return input_dict\n    with torch.no_grad():\n        names = []\n        values = []\n        # sort the keys so that they are consistent across processes\n        for k in sorted(input_dict.keys()):\n            names.append(k)\n            values.append(input_dict[k])\n        values = torch.stack(values, dim=0)\n        dist.all_reduce(values)\n        if average:\n            values /= world_size\n        reduced_dict = {k: v for k, v in zip(names, values)}\n    return reduced_dict\n\n\nclass MetricLogger(object):\n    def __init__(self, delimiter=\"\\t\"):\n        self.meters = defaultdict(SmoothedValue)\n        self.delimiter = delimiter\n\n    def update(self, **kwargs):\n        for k, v in kwargs.items():\n            if isinstance(v, torch.Tensor):\n                v = v.item()\n            assert isinstance(v, (float, int))\n            self.meters[k].update(v)\n\n    def __getattr__(self, attr):\n        if attr in self.meters:\n            return self.meters[attr]\n        if attr in self.__dict__:\n            return self.__dict__[attr]\n        raise AttributeError(\"'{}' object has no attribute '{}'\".format(\n            type(self).__name__, attr))\n\n    def __str__(self):\n        loss_str = []\n        for name, meter in self.meters.items():\n            loss_str.append(\n                \"{}: {}\".format(name, str(meter))\n            )\n        return self.delimiter.join(loss_str)\n\n    def synchronize_between_processes(self):\n        for meter in self.meters.values():\n            meter.synchronize_between_processes()\n\n    def add_meter(self, name, meter):\n        self.meters[name] = meter\n\n    def log_every(self, iterable, print_freq, header=None):\n        i = 0\n        if not header:\n            header = ''\n        start_time = time.time()\n        end = time.time()\n        iter_time = SmoothedValue(fmt='{avg:.4f}')\n        data_time = SmoothedValue(fmt='{avg:.4f}')\n        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'\n        if torch.cuda.is_available():\n            log_msg = self.delimiter.join([\n                header,\n                '[{0' + space_fmt + '}/{1}]',\n                'eta: {eta}',\n                '{meters}',\n                'time: {time}',\n                'data: {data}',\n                'max mem: {memory:.0f}'\n            ])\n        else:\n            log_msg = self.delimiter.join([\n                header,\n                '[{0' + space_fmt + '}/{1}]',\n                'eta: {eta}',\n                '{meters}',\n                'time: {time}',\n                'data: {data}'\n            ])\n        MB = 1024.0 * 1024.0\n        for obj in iterable:\n            data_time.update(time.time() - end)\n 
           yield obj\n            iter_time.update(time.time() - end)\n            if i % print_freq == 0 or i == len(iterable) - 1:\n                eta_seconds = iter_time.global_avg * (len(iterable) - i)\n                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n                if torch.cuda.is_available():\n                    print(log_msg.format(\n                        i, len(iterable), eta=eta_string,\n                        meters=str(self),\n                        time=str(iter_time), data=str(data_time),\n                        memory=torch.cuda.max_memory_allocated() / MB))\n                else:\n                    print(log_msg.format(\n                        i, len(iterable), eta=eta_string,\n                        meters=str(self),\n                        time=str(iter_time), data=str(data_time)))\n            i += 1\n            end = time.time()\n        total_time = time.time() - start_time\n        total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n        print('{} Total time: {} ({:.4f} s / it)'.format(\n            header, total_time_str, total_time / len(iterable)))\n\n\ndef get_sha():\n    cwd = os.path.dirname(os.path.abspath(__file__))\n\n    def _run(command):\n        return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()\n    sha = 'N/A'\n    diff = \"clean\"\n    branch = 'N/A'\n    try:\n        sha = _run(['git', 'rev-parse', 'HEAD'])\n        subprocess.check_output(['git', 'diff'], cwd=cwd)\n        diff = _run(['git', 'diff-index', 'HEAD'])\n        diff = \"has uncommitted changes\" if diff else \"clean\"\n        branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n    except Exception:\n        pass\n    message = f\"sha: {sha}, status: {diff}, branch: {branch}\"\n    return message\n\n\ndef collate_fn(batch):\n    batch = list(zip(*batch))\n    batch[0] = nested_tensor_from_tensor_list(batch[0])\n    return tuple(batch)\n\n\ndef _max_by_axis(the_list):\n    # type: (List[List[int]]) -> List[int]\n    maxes = the_list[0] # get the first one\n    for sublist in the_list[1:]: # [h,w,3]\n        for index, item in enumerate(sublist): # index: 0,1,2\n            maxes[index] = max(maxes[index], item) # compare current max with the other elements in the whole\n    return maxes\n\n\nclass NestedTensor(object):\n    def __init__(self, tensors, mask: Optional[Tensor]):\n        self.tensors = tensors\n        self.mask = mask\n\n    def to(self, device):\n        # type: (Device) -> NestedTensor # noqa\n        cast_tensor = self.tensors.to(device)\n        mask = self.mask\n        if mask is not None:\n            assert mask is not None\n            cast_mask = mask.to(device)\n        else:\n            cast_mask = None\n        return NestedTensor(cast_tensor, cast_mask)\n\n    def decompose(self):\n        return self.tensors, self.mask\n\n    def __repr__(self):\n        return str(self.tensors)\n\n\ndef nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n    # TODO make this more general\n    if tensor_list[0].ndim == 3:\n        if torchvision._is_tracing():\n            # nested_tensor_from_tensor_list() does not export well to ONNX\n            # call _onnx_nested_tensor_from_tensor_list() instead\n            return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n        # TODO make it support different-sized images\n        max_size = _max_by_axis([list(img.shape) for img in tensor_list]) # [[3,h1,w1], [3,h2,w2], [3,h3,w3], ...]\n        # min_size = tuple(min(s) 
for s in zip(*[img.shape for img in tensor_list]))\n        batch_shape = [len(tensor_list)] + max_size # ()\n        b, c, h, w = batch_shape\n        dtype = tensor_list[0].dtype\n        device = tensor_list[0].device\n        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n        for img, pad_img, m in zip(tensor_list, tensor, mask):\n            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) # copy valid regions of the images to the largest padded base.\n            m[: img.shape[1], :img.shape[2]] = False\n    else:\n        raise ValueError('not supported')\n    return NestedTensor(tensor, mask)\n\n\n# _onnx_nested_tensor_from_tensor_list() is an implementation of\n# nested_tensor_from_tensor_list() that is supported by ONNX tracing.\n@torch.jit.unused\ndef _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:\n    max_size = []\n    for i in range(tensor_list[0].dim()):\n        max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)\n        max_size.append(max_size_i)\n    max_size = tuple(max_size)\n\n    # work around for\n    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n    # m[: img.shape[1], :img.shape[2]] = False\n    # which is not yet supported in onnx\n    padded_imgs = []\n    padded_masks = []\n    for img in tensor_list:\n        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]\n        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))\n        padded_imgs.append(padded_img)\n\n        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)\n        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), \"constant\", 1)\n        padded_masks.append(padded_mask.to(torch.bool))\n\n    tensor = torch.stack(padded_imgs)\n    mask = torch.stack(padded_masks)\n\n    return NestedTensor(tensor, mask=mask)\n\n\ndef setup_for_distributed(is_master):\n    \"\"\"\n    This function disables printing when not in master process\n    \"\"\"\n    import builtins as __builtin__\n    builtin_print = __builtin__.print\n\n    def print(*args, **kwargs):\n        force = kwargs.pop('force', False)\n        if is_master or force:\n            builtin_print(*args, **kwargs)\n\n    __builtin__.print = print\n\n\ndef is_dist_avail_and_initialized():\n    if not dist.is_available():\n        return False\n    if not dist.is_initialized():\n        return False\n    return True\n\n\ndef get_world_size():\n    if not is_dist_avail_and_initialized():\n        return 1\n    return dist.get_world_size()\n\n\ndef get_rank():\n    if not is_dist_avail_and_initialized():\n        return 0\n    return dist.get_rank()\n\n\ndef is_main_process():\n    return get_rank() == 0\n\n\ndef save_on_master(*args, **kwargs):\n    if is_main_process():\n        torch.save(*args, **kwargs)\n\n\ndef init_distributed_mode(args):\n    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:\n        args.rank = int(os.environ[\"RANK\"])\n        args.world_size = int(os.environ['WORLD_SIZE'])\n        args.gpu = int(os.environ['LOCAL_RANK'])\n    elif 'SLURM_PROCID' in os.environ:\n        args.rank = int(os.environ['SLURM_PROCID'])\n        args.gpu = args.rank % torch.cuda.device_count()\n    else:\n        print('Not using distributed mode')\n        args.distributed = False\n        return\n\n    args.distributed = True\n\n 
   torch.cuda.set_device(args.gpu)\n    args.dist_backend = 'nccl'\n    print('| distributed init (rank {}): {}'.format(\n        args.rank, args.dist_url), flush=True)\n    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n                                         world_size=args.world_size, rank=args.rank)\n    torch.distributed.barrier()\n    setup_for_distributed(args.rank == 0)\n\n\n@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    if target.numel() == 0:\n        return [torch.zeros([], device=output.device)]\n    maxk = max(topk)\n    batch_size = target.size(0)\n\n    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0)\n        res.append(correct_k.mul_(100.0 / batch_size))\n    return res\n\n\ndef interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n    \"\"\"\n    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n    This will eventually be supported natively by PyTorch, and this\n    class can go away.\n    \"\"\"\n    # reuse the version parsed at import time; float(__version__[:3]) misreads e.g. \"0.12\" as 0.1\n    if int(vers[0]) <= 0 and int(vers[1]) < 7:\n        if input.numel() > 0:\n            return torch.nn.functional.interpolate(\n                input, size, scale_factor, mode, align_corners\n            )\n\n        output_shape = _output_size(2, input, size, scale_factor)\n        output_shape = list(input.shape[:-2]) + list(output_shape)\n        return _new_empty_tensor(input, output_shape)\n    else:\n        # newer torchvision dropped ops.misc.interpolate; F.interpolate handles empty batches since PyTorch 1.5\n        return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)\n"
  },
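  {
    "path": "examples/nested_tensor_demo.py",
    "content": "# NOTE: editor-added illustrative sketch; NOT part of the original ARTrack\n# release, and this examples/ path is hypothetical (run from the repo root).\n# Pads two different-sized images into one batch with\n# nested_tensor_from_tensor_list from lib/utils/misc.py; in the returned\n# mask, True marks padded pixels.\nimport torch\n\nfrom lib.utils.misc import nested_tensor_from_tensor_list\n\nif __name__ == '__main__':\n    imgs = [torch.randn(3, 40, 60), torch.randn(3, 50, 30)]\n    nt = nested_tensor_from_tensor_list(imgs)\n    tensors, mask = nt.decompose()\n    print(tensors.shape)   # torch.Size([2, 3, 50, 60]) -- max H, max W\n    print(mask.shape, 'padded pixels per image:', mask.flatten(1).sum(1).tolist())\n"
  },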
  {
    "path": "lib/utils/tensor.py",
    "content": "import functools\nimport torch\nimport copy\nfrom collections import OrderedDict\n\n\nclass TensorDict(OrderedDict):\n    \"\"\"Container mainly used for dicts of torch tensors. Extends OrderedDict with pytorch functionality.\"\"\"\n\n    def concat(self, other):\n        \"\"\"Concatenates two dicts without copying internal data.\"\"\"\n        return TensorDict(self, **other)\n\n    def copy(self):\n        return TensorDict(super(TensorDict, self).copy())\n\n    def __deepcopy__(self, memodict={}):\n        return TensorDict(copy.deepcopy(list(self), memodict))\n\n    def __getattr__(self, name):\n        if not hasattr(torch.Tensor, name):\n            raise AttributeError('\\'TensorDict\\' object has not attribute \\'{}\\''.format(name))\n\n        def apply_attr(*args, **kwargs):\n            return TensorDict({n: getattr(e, name)(*args, **kwargs) if hasattr(e, name) else e for n, e in self.items()})\n        return apply_attr\n\n    def attribute(self, attr: str, *args):\n        return TensorDict({n: getattr(e, attr, *args) for n, e in self.items()})\n\n    def apply(self, fn, *args, **kwargs):\n        return TensorDict({n: fn(e, *args, **kwargs) for n, e in self.items()})\n\n    @staticmethod\n    def _iterable(a):\n        return isinstance(a, (TensorDict, list))\n\n\nclass TensorList(list):\n    \"\"\"Container mainly used for lists of torch tensors. Extends lists with pytorch functionality.\"\"\"\n\n    def __init__(self, list_of_tensors = None):\n        if list_of_tensors is None:\n            list_of_tensors = list()\n        super(TensorList, self).__init__(list_of_tensors)\n\n    def __deepcopy__(self, memodict={}):\n        return TensorList(copy.deepcopy(list(self), memodict))\n\n    def __getitem__(self, item):\n        if isinstance(item, int):\n            return super(TensorList, self).__getitem__(item)\n        elif isinstance(item, (tuple, list)):\n            return TensorList([super(TensorList, self).__getitem__(i) for i in item])\n        else:\n            return TensorList(super(TensorList, self).__getitem__(item))\n\n    def __add__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 + e2 for e1, e2 in zip(self, other)])\n        return TensorList([e + other for e in self])\n\n    def __radd__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 + e1 for e1, e2 in zip(self, other)])\n        return TensorList([other + e for e in self])\n\n    def __iadd__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] += e2\n        else:\n            for i in range(len(self)):\n                self[i] += other\n        return self\n\n    def __sub__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 - e2 for e1, e2 in zip(self, other)])\n        return TensorList([e - other for e in self])\n\n    def __rsub__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 - e1 for e1, e2 in zip(self, other)])\n        return TensorList([other - e for e in self])\n\n    def __isub__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] -= e2\n        else:\n            for i in range(len(self)):\n                self[i] -= other\n        return self\n\n    def __mul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 * e2 for e1, e2 in 
zip(self, other)])\n        return TensorList([e * other for e in self])\n\n    def __rmul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 * e1 for e1, e2 in zip(self, other)])\n        return TensorList([other * e for e in self])\n\n    def __imul__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] *= e2\n        else:\n            for i in range(len(self)):\n                self[i] *= other\n        return self\n\n    def __truediv__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 / e2 for e1, e2 in zip(self, other)])\n        return TensorList([e / other for e in self])\n\n    def __rtruediv__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 / e1 for e1, e2 in zip(self, other)])\n        return TensorList([other / e for e in self])\n\n    def __itruediv__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] /= e2\n        else:\n            for i in range(len(self)):\n                self[i] /= other\n        return self\n\n    def __matmul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 @ e2 for e1, e2 in zip(self, other)])\n        return TensorList([e @ other for e in self])\n\n    def __rmatmul__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 @ e1 for e1, e2 in zip(self, other)])\n        return TensorList([other @ e for e in self])\n\n    def __imatmul__(self, other):\n        if TensorList._iterable(other):\n            for i, e2 in enumerate(other):\n                self[i] @= e2\n        else:\n            for i in range(len(self)):\n                self[i] @= other\n        return self\n\n    def __mod__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 % e2 for e1, e2 in zip(self, other)])\n        return TensorList([e % other for e in self])\n\n    def __rmod__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e2 % e1 for e1, e2 in zip(self, other)])\n        return TensorList([other % e for e in self])\n\n    def __pos__(self):\n        return TensorList([+e for e in self])\n\n    def __neg__(self):\n        return TensorList([-e for e in self])\n\n    def __le__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 <= e2 for e1, e2 in zip(self, other)])\n        return TensorList([e <= other for e in self])\n\n    def __ge__(self, other):\n        if TensorList._iterable(other):\n            return TensorList([e1 >= e2 for e1, e2 in zip(self, other)])\n        return TensorList([e >= other for e in self])\n\n    def concat(self, other):\n        return TensorList(super(TensorList, self).__add__(other))\n\n    def copy(self):\n        return TensorList(super(TensorList, self).copy())\n\n    def unroll(self):\n        if not any(isinstance(t, TensorList) for t in self):\n            return self\n\n        new_list = TensorList()\n        for t in self:\n            if isinstance(t, TensorList):\n                new_list.extend(t.unroll())\n            else:\n                new_list.append(t)\n        return new_list\n\n    def list(self):\n        return list(self)\n\n    def attribute(self, attr: str, *args):\n        return TensorList([getattr(e, attr, *args) for e in self])\n\n    def apply(self, fn):\n       
 return TensorList([fn(e) for e in self])\n\n    def __getattr__(self, name):\n        if not hasattr(torch.Tensor, name):\n            raise AttributeError('\\'TensorList\\' object has no attribute \\'{}\\''.format(name))\n\n        def apply_attr(*args, **kwargs):\n            return TensorList([getattr(e, name)(*args, **kwargs) for e in self])\n\n        return apply_attr\n\n    @staticmethod\n    def _iterable(a):\n        return isinstance(a, (TensorList, list))\n\n\ndef tensor_operation(op):\n    def islist(a):\n        return isinstance(a, TensorList)\n\n    @functools.wraps(op)\n    def oplist(*args, **kwargs):\n        if len(args) == 0:\n            raise ValueError('Must be at least one argument without keyword (i.e. operand).')\n\n        if len(args) == 1:\n            if islist(args[0]):\n                return TensorList([op(a, **kwargs) for a in args[0]])\n        else:\n            # Multiple operands, assume max two\n            if islist(args[0]) and islist(args[1]):\n                return TensorList([op(a, b, *args[2:], **kwargs) for a, b in zip(*args[:2])])\n            if islist(args[0]):\n                return TensorList([op(a, *args[1:], **kwargs) for a in args[0]])\n            if islist(args[1]):\n                return TensorList([op(args[0], b, *args[2:], **kwargs) for b in args[1]])\n\n        # None of the operands are lists\n        return op(*args, **kwargs)\n\n    return oplist\n"
  },
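  {
    "path": "examples/tensorlist_demo.py",
    "content": "# NOTE: editor-added illustrative sketch; NOT part of the original ARTrack\n# release, and this examples/ path is hypothetical (run from the repo root).\n# Demonstrates the broadcasting arithmetic and attribute forwarding of the\n# TensorList / TensorDict containers from lib/utils/tensor.py.\nimport torch\n\nfrom lib.utils import TensorDict, TensorList\n\nif __name__ == '__main__':\n    tl = TensorList([torch.ones(2), torch.ones(3)])\n    print((tl * 2 + 1)[0])   # scalar ops map over every member tensor\n    print(tl.sum())          # unknown attributes forward to torch.Tensor\n    td = TensorDict({'a': torch.zeros(2, 2), 'b': torch.ones(2)})\n    print(td.to('cpu')['b']) # tensor methods apply to every dict value\n"
  },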
  {
    "path": "lib/utils/variable_hook.py",
    "content": "import torch\r\nfrom bytecode import Bytecode, Instr\r\n\r\n\r\nclass get_local(object):\r\n    cache = {}\r\n    is_activate = False\r\n\r\n    def __init__(self, varname):\r\n        self.varname = varname\r\n\r\n    def __call__(self, func):\r\n        if not type(self).is_activate:\r\n            return func\r\n\r\n        type(self).cache[func.__qualname__] = []\r\n        c = Bytecode.from_code(func.__code__)\r\n        extra_code = [\r\n            Instr('STORE_FAST', '_res'),\r\n            Instr('LOAD_FAST', self.varname),\r\n            Instr('STORE_FAST', '_value'),\r\n            Instr('LOAD_FAST', '_res'),\r\n            Instr('LOAD_FAST', '_value'),\r\n            Instr('BUILD_TUPLE', 2),\r\n            Instr('STORE_FAST', '_result_tuple'),\r\n            Instr('LOAD_FAST', '_result_tuple'),\r\n        ]\r\n        c[-1:-1] = extra_code\r\n        func.__code__ = c.to_code()\r\n\r\n        def wrapper(*args, **kwargs):\r\n            res, values = func(*args, **kwargs)\r\n            if isinstance(values, torch.Tensor):\r\n                type(self).cache[func.__qualname__].append(values.detach().cpu().numpy())\r\n            elif isinstance(values, list):  # list of Tensor\r\n                type(self).cache[func.__qualname__].append([value.detach().cpu().numpy() for value in values])\r\n            else:\r\n                raise NotImplementedError\r\n            return res\r\n\r\n        return wrapper\r\n\r\n    @classmethod\r\n    def clear(cls):\r\n        for key in cls.cache.keys():\r\n            cls.cache[key] = []\r\n\r\n    @classmethod\r\n    def activate(cls):\r\n        cls.is_activate = True\r\n"
  },
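  {
    "path": "examples/variable_hook_demo.py",
    "content": "# NOTE: editor-added illustrative sketch; NOT part of the original ARTrack\n# release, and this examples/ path is hypothetical (run from the repo root;\n# needs the third-party 'bytecode' package). Captures an intermediate local\n# tensor with the bytecode-rewriting get_local hook from\n# lib/utils/variable_hook.py. activate() must run before the decorated\n# function is defined, because the decorator checks is_activate at\n# decoration time.\nimport torch\n\nfrom lib.utils.variable_hook import get_local\n\nget_local.activate()\n\n\n@get_local('attn')\ndef toy_forward(x):\n    attn = torch.softmax(x @ x.t(), dim=-1)  # the local variable we capture\n    return attn @ x\n\n\nif __name__ == '__main__':\n    out = toy_forward(torch.randn(4, 8))\n    print(out.shape, get_local.cache['toy_forward'][0].shape)\n"
  },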
  {
    "path": "lib/vis/__init__.py",
    "content": ""
  },
  {
    "path": "lib/vis/plotting.py",
    "content": "import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport cv2\n\n\ndef draw_figure(fig):\n    fig.canvas.draw()\n    fig.canvas.flush_events()\n    plt.pause(0.001)\n\n\ndef show_tensor(a: torch.Tensor, fig_num = None, title = None, range=(None, None), ax=None):\n    \"\"\"Display a 2D tensor.\n    args:\n        fig_num: Figure number.\n        title: Title of figure.\n    \"\"\"\n    a_np = a.squeeze().cpu().clone().detach().numpy()\n    if a_np.ndim == 3:\n        a_np = np.transpose(a_np, (1, 2, 0))\n\n    if ax is None:\n        fig = plt.figure(fig_num)\n        plt.tight_layout()\n        plt.cla()\n        plt.imshow(a_np, vmin=range[0], vmax=range[1])\n        plt.axis('off')\n        plt.axis('equal')\n        if title is not None:\n            plt.title(title)\n        draw_figure(fig)\n    else:\n        ax.cla()\n        ax.imshow(a_np, vmin=range[0], vmax=range[1])\n        ax.set_axis_off()\n        ax.axis('equal')\n        if title is not None:\n            ax.set_title(title)\n        draw_figure(plt.gcf())\n\n\ndef plot_graph(a: torch.Tensor, fig_num = None, title = None):\n    \"\"\"Plot graph. Data is a 1D tensor.\n    args:\n        fig_num: Figure number.\n        title: Title of figure.\n    \"\"\"\n    a_np = a.squeeze().cpu().clone().detach().numpy()\n    if a_np.ndim > 1:\n        raise ValueError\n    fig = plt.figure(fig_num)\n    # plt.tight_layout()\n    plt.cla()\n    plt.plot(a_np)\n    if title is not None:\n        plt.title(title)\n    draw_figure(fig)\n\n\ndef show_image_with_boxes(im, boxes, iou_pred=None, disp_ids=None):\n    im_np = im.clone().cpu().squeeze().numpy()\n    im_np = np.ascontiguousarray(im_np.transpose(1, 2, 0).astype(np.uint8))\n\n    boxes = boxes.view(-1, 4).cpu().numpy().round().astype(int)\n\n    # Draw proposals\n    for i_ in range(boxes.shape[0]):\n        if disp_ids is None or disp_ids[i_]:\n            bb = boxes[i_, :]\n            disp_color = (i_*38 % 256, (255 - i_*97) % 256, (123 + i_*66) % 256)\n            cv2.rectangle(im_np, (bb[0], bb[1]), (bb[0] + bb[2], bb[1] + bb[3]),\n                          disp_color, 1)\n\n            if iou_pred is not None:\n                text_pos = (bb[0], bb[1] - 5)\n                cv2.putText(im_np, 'ID={} IOU = {:3.2f}'.format(i_, iou_pred[i_]), text_pos,\n                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1, bottomLeftOrigin=False)\n\n    im_tensor = torch.from_numpy(im_np.transpose(2, 0, 1)).float()\n\n    return im_tensor\n\n\n\ndef _pascal_color_map(N=256, normalized=False):\n    \"\"\"\n    Python implementation of the color map function for the PASCAL VOC data set.\n    Official Matlab version can be found in the PASCAL VOC devkit\n    http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit\n    \"\"\"\n\n    def bitget(byteval, idx):\n        return (byteval & (1 << idx)) != 0\n\n    dtype = 'float32' if normalized else 'uint8'\n    cmap = np.zeros((N, 3), dtype=dtype)\n    for i in range(N):\n        r = g = b = 0\n        c = i\n        for j in range(8):\n            r = r | (bitget(c, 0) << 7 - j)\n            g = g | (bitget(c, 1) << 7 - j)\n            b = b | (bitget(c, 2) << 7 - j)\n            c = c >> 3\n\n        cmap[i] = np.array([r, g, b])\n\n    cmap = cmap / 255 if normalized else cmap\n    return cmap\n\n\ndef overlay_mask(im, ann, alpha=0.5, colors=None, contour_thickness=None):\n    \"\"\" Overlay mask over image.\n    Source: 
https://github.com/albertomontesg/davis-interactive/blob/master/davisinteractive/utils/visualization.py\n    This function allows you to overlay a mask over an image with some\n    transparency.\n    # Arguments\n        im: Numpy Array. Array with the image. The shape must be (H, W, 3) and\n            the pixels must be represented as `np.uint8` data type.\n        ann: Numpy Array. Array with the mask. The shape must be (H, W) and the\n            values must be integers\n        alpha: Float. Proportion of alpha to apply at the overlaid mask.\n        colors: Numpy Array. Optional custom colormap. It must have shape (N, 3),\n            where N is the maximum number of colors to represent.\n        contour_thickness: Integer. Thickness of each object index contour drawn\n            over the overlay. This function requires the `opencv-python` package.\n    # Returns\n        Numpy Array: Image of the overlay with shape (H, W, 3) and data type\n            `np.uint8`.\n    \"\"\"\n    im, ann = np.asarray(im, dtype=np.uint8), np.asarray(ann, dtype=int)\n    if im.shape[:-1] != ann.shape:\n        raise ValueError('First two dimensions of `im` and `ann` must match')\n    if im.shape[-1] != 3:\n        raise ValueError('im must have three channels in the last dimension')\n\n    # Truth-testing a NumPy array is ambiguous, so compare against None explicitly\n    colors = _pascal_color_map() if colors is None else colors\n    colors = np.asarray(colors, dtype=np.uint8)\n\n    mask = colors[ann]\n    fg = im * alpha + (1 - alpha) * mask\n\n    img = im.copy()\n    img[ann > 0] = fg[ann > 0]\n\n    if contour_thickness:  # pragma: no cover\n        import cv2\n        for obj_id in np.unique(ann[ann > 0]):\n            contours = cv2.findContours((ann == obj_id).astype(\n                np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n            cv2.drawContours(img, contours[0], -1, colors[obj_id].tolist(),\n                             contour_thickness)\n    return img\n
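\n\n# Usage sketch (illustrative, not part of the original file):\n#   frame = np.asarray(rgb_image)    # (H, W, 3) uint8\n#   ids = np.asarray(object_mask)    # (H, W) integer object ids, 0 = background\n#   overlaid = overlay_mask(frame, ids, alpha=0.5, contour_thickness=2)\n"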
  },
  {
    "path": "lib/vis/utils.py",
    "content": "import torch\r\nimport numpy as np\r\n\r\n\r\ndef numpy_to_torch(a: np.ndarray):\r\n    return torch.from_numpy(a).float().permute(2, 0, 1).unsqueeze(0)"
  },
  {
    "path": "lib/vis/visdom_cus.py",
    "content": "import visdom\nimport visdom.server\nimport cv2\nimport torch\nimport copy\nimport numpy as np\nfrom collections import OrderedDict\nfrom enum import Enum\n\nfrom lib.vis.plotting import overlay_mask, show_image_with_boxes\nfrom lib.vis.utils import numpy_to_torch\n\n\nclass cv_colors(Enum):\n    WHITE = (255, 255, 255)[::-1]\n    RED = (0, 0, 255)[::-1]\n    GREEN = (0, 255, 0)[::-1]\n    BLUE = (255, 0, 0)[::-1]\n    PURPLE = (247, 44, 200)[::-1]\n    ORANGE = (44, 162, 247)[::-1]\n    MINT = (239, 255, 66)[::-1]\n    YELLOW = (2, 255, 250)[::-1]\n    BLACK = (0, 0, 0)[::-1]\n\n\ndef index_to_color(idx):\n    return {\n        0: cv_colors.GREEN.value,\n        1: cv_colors.BLUE.value,\n        2: cv_colors.RED.value,\n        3: cv_colors.MINT.value,\n        4: cv_colors.YELLOW.value,\n        5: cv_colors.WHITE.value,\n        6: cv_colors.BLACK.value,\n    }[idx]\n\n\nclass VisBase:\n    def __init__(self, visdom, show_data, title):\n        self.visdom = visdom\n        self.show_data = show_data\n        self.title = title\n        self.raw_data = None\n\n    def update(self, data, **kwargs):\n        self.save_data(data, **kwargs)\n\n        if self.show_data:\n            self.draw_data()\n\n    def save_data(self, data, **kwargs):\n        raise NotImplementedError\n\n    def draw_data(self):\n        raise NotImplementedError\n\n    def toggle_display(self, new_mode=None):\n        if new_mode is not None:\n            self.show_data = new_mode\n        else:\n            self.show_data = not self.show_data\n\n        if self.show_data:\n            self.draw_data()\n        else:\n            self.visdom.close(self.title)\n\n\nclass VisImage(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        data = data.float()\n        self.raw_data = data\n\n    def draw_data(self):\n        self.visdom.image(self.raw_data.clone(), opts={'title': self.title}, win=self.title)\n\n\nclass VisHeatmap(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data, **kwargs):\n        data = data.squeeze().flip(0)\n        if kwargs:\n            self.raw_data = [data, kwargs]\n        else:\n            self.raw_data = [data]\n        # self.raw_data = data\n\n    def draw_data(self):\n        if len(self.raw_data) == 2:\n            self.visdom.heatmap(self.raw_data[0].clone(), opts={'title': self.title + ' ' + self.raw_data[1]['caption'], **self.raw_data[1]}, win=self.title)\n        else:\n            self.visdom.heatmap(self.raw_data[0].clone(), opts={'title': self.title}, win=self.title)\n\n        # self.visdom.heatmap(self.raw_data.clone(), opts={'title': self.title}, win=self.title)\n\n\nclass VisFeaturemap(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n        self.block_list = None\n\n    def block_list_callback_handler(self, data):\n        self.block_list[data['propertyId']]['value'] = data['value']\n        self.visdom.properties(self.block_list, opts={'title': 'Featuremap UI'}, win='featuremap_ui')\n        self.draw_data()\n\n    def save_data(self, data):\n        data = data.view(-1, *data.shape[-2:])\n        data = data.flip(1)\n        if self.block_list is None:\n            self.block_list = []\n            self.draw_feat = []\n            for i in range(data.shape[0]):\n                
self.block_list.append({'type': 'checkbox', 'name': 'Channel {:04d}'.format(i), 'value': False})\n\n            self.visdom.properties(self.block_list, opts={'title': 'Featuremap UI'}, win='featuremap_ui')\n            self.visdom.register_event_handler(self.block_list_callback_handler, 'featuremap_ui')\n\n        self.raw_data = data\n\n    def draw_data(self):\n        if self.block_list is not None and self.show_data:\n            for i, d in enumerate(self.block_list):\n                if d['value']:\n                    fig_title = '{} ch: {:04d}'.format(self.title, i)\n                    self.visdom.heatmap(self.raw_data[i, :, :].clone(),\n                                        opts={'title': fig_title}, win=fig_title)\n\n\nclass VisCostVolume(VisBase):\n    def __init__(self, visdom, show_data, title, flip=False):\n        super().__init__(visdom, show_data, title)\n        self.show_slice = False\n        self.slice_pos = None\n        self.flip = flip\n\n    def show_cost_volume(self):\n        data = self.raw_data.clone()\n\n        # data_perm = data.permute(2, 0, 3, 1).contiguous()\n        data_perm = data.permute(0, 2, 1, 3).contiguous()\n        if self.flip:\n            data_perm = data_perm.permute(2, 3, 0, 1).contiguous()\n\n        data_perm = data_perm.view(data_perm.shape[0] * data_perm.shape[1], -1)\n        self.visdom.heatmap(data_perm.flip(0), opts={'title': self.title}, win=self.title)\n\n    def set_zoom_pos(self, slice_pos):\n        self.slice_pos = slice_pos\n\n    def toggle_show_slice(self, new_mode=None):\n        if new_mode is not None:\n            self.show_slice = new_mode\n        else:\n            self.show_slice = not self.show_slice\n\n    def show_cost_volume_slice(self):\n        slice_pos = self.slice_pos\n\n        # slice_pos: [row, col]\n        cost_volume_data = self.raw_data.clone()\n\n        if self.flip:\n            cost_volume_slice = cost_volume_data[:, :, slice_pos[0], slice_pos[1]]\n        else:\n            cost_volume_slice = cost_volume_data[slice_pos[0], slice_pos[1], :, :]\n        self.visdom.heatmap(cost_volume_slice.flip(0), opts={'title': self.title}, win=self.title)\n\n    def save_data(self, data):\n        data = data.view(data.shape[-2], data.shape[-1], data.shape[-2], data.shape[-1])\n        self.raw_data = data\n\n    def draw_data(self):\n        if self.show_slice:\n            self.show_cost_volume_slice()\n        else:\n            self.show_cost_volume()\n\n\nclass VisCostVolumeUI(VisBase):\n    def cv_ui_handler(self, data):\n        zoom_toggled = False\n        if data['event_type'] == 'KeyPress':\n            if data['key'] == 'ArrowRight':\n                self.zoom_pos[1] = min(self.zoom_pos[1] + 1, self.feat_shape[1] - 1)\n            elif data['key'] == 'ArrowLeft':\n                self.zoom_pos[1] = max(self.zoom_pos[1] - 1, 0)\n            elif data['key'] == 'ArrowUp':\n                self.zoom_pos[0] = max(self.zoom_pos[0] - 1, 0)\n            elif data['key'] == 'ArrowDown':\n                self.zoom_pos[0] = min(self.zoom_pos[0] + 1, self.feat_shape[0] - 1)\n            elif data['key'] == 'Enter':\n                self.zoom_mode = not self.zoom_mode\n                zoom_toggled = True\n\n        # Update image\n        self.show_image()\n\n        # Update cost volumes\n        for block_title, block in self.registered_blocks.items():\n            if isinstance(block, VisCostVolume):\n                block.set_zoom_pos(self.zoom_pos)\n                
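# Keep each registered cost-volume block in sync with the UI zoom position and slice mode before redrawing.\n                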
block.toggle_show_slice(self.zoom_mode)\n\n                if (self.zoom_mode or zoom_toggled) and block.show_data:\n                    block.draw_data()\n\n    def __init__(self, visdom, show_data, title, feat_shape, registered_blocks):\n        super().__init__(visdom, show_data, title)\n        self.feat_shape = feat_shape\n        self.zoom_mode = False\n        self.zoom_pos = [int((feat_shape[0] - 1) / 2), int((feat_shape[1] - 1) / 2)]\n        self.registered_blocks = registered_blocks\n\n        self.visdom.register_event_handler(self.cv_ui_handler, title)\n\n    def draw_grid(self, data):\n        stride_r = int(data.shape[1] / self.feat_shape[0])\n        stride_c = int(data.shape[2] / self.feat_shape[1])\n\n        # Draw grid\n        data[:, list(range(0, data.shape[1], stride_r)), :] = 0\n        data[:, :, list(range(0, data.shape[2], stride_c))] = 0\n\n        data[0, list(range(0, data.shape[1], stride_r)), :] = 255\n        data[0, :, list(range(0, data.shape[2], stride_c))] = 255\n\n        return data\n\n    def shade_cell(self, data):\n        stride_r = int(data.shape[1] / self.feat_shape[0])\n        stride_c = int(data.shape[2] / self.feat_shape[1])\n\n        r1 = self.zoom_pos[0] * stride_r\n        r2 = min((self.zoom_pos[0] + 1) * stride_r, data.shape[1])\n\n        c1 = self.zoom_pos[1] * stride_c\n        c2 = min((self.zoom_pos[1] + 1) * stride_c, data.shape[2])\n\n        factor = 0.8 if self.zoom_mode else 0.5\n        data[:, r1:r2, c1:c2] = data[:, r1:r2, c1:c2] * (1 - factor) + torch.tensor([255.0, 0.0, 0.0]).view(3, 1, 1).to(\n            data.device) * factor\n        return data\n\n    def show_image(self, data=None):\n        if data is None:\n            data = self.raw_data.clone()\n\n        data = self.draw_grid(data)\n        data = self.shade_cell(data)\n        self.visdom.image(data, opts={'title': self.title}, win=self.title)\n\n    def save_data(self, data):\n        # Ignore feat shape\n        data = data[0]\n        data = data.float()\n        self.raw_data = data\n\n    def draw_data(self):\n        self.show_image(self.raw_data.clone())\n\n\nclass VisInfoDict(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n        self.raw_data = OrderedDict()\n\n    def generate_display_text(self, data):\n        display_text = ''\n        for key, value in data.items():\n            key = key.replace('_', ' ')\n            if value is None:\n                display_text += '<b>{}</b>: {}<br>'.format(key, 'None')\n            elif isinstance(value, (str, int)):\n                display_text += '<b>{}</b>: {}<br>'.format(key, value)\n            else:\n                display_text += '<b>{}</b>: {:.2f}<br>'.format(key, value)\n\n        return display_text\n\n    def save_data(self, data):\n        for key, val in data.items():\n            self.raw_data[key] = val\n\n    def draw_data(self):\n        data = copy.deepcopy(self.raw_data)\n        display_text = self.generate_display_text(data)\n        self.visdom.text(display_text, opts={'title': self.title}, win=self.title)\n\n\nclass VisText(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        self.raw_data = data\n\n    def draw_data(self):\n        data = copy.deepcopy(self.raw_data)\n        self.visdom.text(data, opts={'title': self.title}, win=self.title)\n\n\nclass VisLinePlot(VisBase):\n    def __init__(self, visdom, 
show_data, title):\n        super().__init__(visdom, show_data, title)\n\n    def save_data(self, data):\n        self.raw_data = data\n\n    def draw_data(self):\n        if isinstance(self.raw_data, (list, tuple)):\n            data_y = self.raw_data[0].clone()\n            data_x = self.raw_data[1].clone()\n        else:\n            data_y = self.raw_data.clone()\n            data_x = torch.arange(data_y.shape[0])\n\n        self.visdom.line(data_y, data_x, opts={'title': self.title}, win=self.title)\n\n\nclass VisTracking(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n        self.count = 0\n\n    def save_data(self, data, **kwargs):\n        image = data[0]\n        boxes_masks = data[1:]\n\n        boxes, masks = [], []\n        for bm in boxes_masks:\n            if bm is None:\n                continue\n            if isinstance(bm, list):\n                boxes.append(torch.Tensor(bm))\n                continue\n            if len(bm.shape) > 1:\n                # Binarize segmentation if a float tensor is provided\n                if bm.dtype != np.uint8:\n                    bm = (bm > 0.5).astype(np.uint8)\n                masks.append(bm)\n                continue\n            boxes.append(bm.float())\n\n        if kwargs:\n            self.raw_data = [image, boxes, masks, kwargs]\n        else:\n            self.raw_data = [image, boxes, masks]\n\n    def draw_data(self):\n        disp_image = self.raw_data[0].copy()\n\n        resize_factor = 1\n        if max(disp_image.shape) > 480:\n            resize_factor = 480.0 / float(max(disp_image.shape))\n            disp_image = cv2.resize(disp_image, None, fx=resize_factor, fy=resize_factor)\n            for i, mask in enumerate(self.raw_data[2]):\n                self.raw_data[2][i] = cv2.resize(mask, None, fx=resize_factor, fy=resize_factor)\n\n        # If boxes carry a confidence score (x, y, w, h, score), split the score off\n        scores = None\n        if len(self.raw_data[1]) > 0 and self.raw_data[1][0].shape[0] == 5:\n            scores = [box[4].item() for box in self.raw_data[1]]\n            self.raw_data[1] = [box[:4] for box in self.raw_data[1]]\n\n        boxes = [resize_factor * b.clone() for b in self.raw_data[1]]\n\n        for i, disp_rect in enumerate(boxes):\n            # color = ((255 * ((i % 3) > 0)), 255 * ((i + 1) % 2), (255 * (i % 5)) // 4)\n            color = index_to_color(i % 7)\n            cv2.rectangle(disp_image,\n                          (int(disp_rect[0]), int(disp_rect[1])),\n                          (int(disp_rect[0] + disp_rect[2]), int(disp_rect[1] + disp_rect[3])), color, 2)\n            if scores is not None:\n                cv2.putText(disp_image, \"{:.3f}\".format(scores[i]), (int(disp_rect[0]), int(disp_rect[1])),\n                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)\n        for i, mask in enumerate(self.raw_data[2], 1):\n            disp_image = overlay_mask(disp_image, mask * i)\n\n        # import os\n        # write_img = disp_image.copy()\n        # write_img = write_img[:, :, ::-1]\n        # # cv2.imwrite(os.path.join('/home/yebotao/test', str(self.count).zfill(3) + '.jpg'), write_img)\n        # cv2.imwrite(os.path.join('/home/yebotao/test', self.raw_data[3]['caption'].split('_')[-1] + '.jpg'), write_img)\n        # self.count += 1\n\n        disp_image = numpy_to_torch(disp_image).squeeze(0)\n        disp_image = disp_image.float()\n\n        if len(self.raw_data) > 3:\n            self.visdom.image(disp_image, opts={'title': self.title, **self.raw_data[3]}, 
win=self.title)\n        else:\n            self.visdom.image(disp_image, opts={'title': self.title}, win=self.title)\n\n\nclass VisBBReg(VisBase):\n    def __init__(self, visdom, show_data, title):\n        super().__init__(visdom, show_data, title)\n        self.block_list = []\n\n    def block_list_callback_handler(self, data):\n        self.block_list[data['propertyId']]['value'] = data['value']\n        self.visdom.properties(self.block_list, opts={'title': 'BBReg Vis'}, win='bbreg_vis')\n        self.draw_data()\n\n    def save_data(self, data):\n        self.image = data[0].float()\n        self.init_boxes = data[1]\n        self.final_boxes = data[2]\n        self.final_ious = data[3]\n\n    def draw_data(self):\n        if len(self.block_list) == 0:\n            self.block_list.append({'type': 'checkbox', 'name': 'ID 0', 'value': True})\n            self.block_list.append({'type': 'checkbox', 'name': 'ID 1', 'value': True})\n            self.visdom.properties(self.block_list, opts={'title': 'BBReg Vis'}, win='bbreg_vis')\n            self.visdom.register_event_handler(self.block_list_callback_handler, 'bbreg_vis')\n\n        disp_image = self.image\n\n        ids = [x['value'] for x in self.block_list]\n        init_box_image = show_image_with_boxes(disp_image.clone(), self.init_boxes.clone(), disp_ids=ids)\n        final_box_image = show_image_with_boxes(disp_image.clone(), self.final_boxes.clone(), self.final_ious.clone(),\n                                                disp_ids=ids)\n\n        self.visdom.image(init_box_image, opts={'title': 'Init Boxes'}, win='Init Boxes')\n        self.visdom.image(final_box_image, opts={'title': 'Final Boxes'}, win='Final Boxes')\n\n\nclass Visdom:\n    def __init__(self, debug=0, ui_info=None, visdom_info=None, env=None):\n        self.debug = debug\n        # Fall back to an empty dict so the local-server defaults below apply\n        visdom_info = {} if visdom_info is None else visdom_info\n        if env is not None:\n            self.visdom = visdom.Visdom(server=visdom_info.get('server', '127.0.0.1'),\n                                        port=visdom_info.get('port', 8097), env=env)\n        else:\n            self.visdom = visdom.Visdom(server=visdom_info.get('server', '127.0.0.1'),\n                                        port=visdom_info.get('port', 8097))\n        self.registered_blocks = {}\n        self.blocks_list = []\n\n        self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list')\n        self.visdom.register_event_handler(self.block_list_callback_handler, 'block_list')\n\n        if ui_info is not None:\n            self.visdom.register_event_handler(ui_info['handler'], ui_info['win_id'])\n\n    def block_list_callback_handler(self, data):\n        field_name = self.blocks_list[data['propertyId']]['name']\n\n        self.registered_blocks[field_name].toggle_display(data['value'])\n\n        self.blocks_list[data['propertyId']]['value'] = data['value']\n\n        self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list')\n\n    def register(self, data, mode, debug_level=0, title='Data', **kwargs):\n        if title not in self.registered_blocks.keys():\n            show_data = self.debug >= debug_level\n\n            if title != 'Tracking':\n                self.blocks_list.append({'type': 'checkbox', 'name': title, 'value': show_data})\n\n            self.visdom.properties(self.blocks_list, opts={'title': 'Block List'}, win='block_list')\n\n            if mode == 'image':\n                self.registered_blocks[title] = VisImage(self.visdom, show_data, title)\n            elif mode == 'heatmap':\n   
             self.registered_blocks[title] = VisHeatmap(self.visdom, show_data, title)\n            elif mode == 'cost_volume':\n                self.registered_blocks[title] = VisCostVolume(self.visdom, show_data, title)\n            elif mode == 'cost_volume_flip':\n                self.registered_blocks[title] = VisCostVolume(self.visdom, show_data, title, flip=True)\n            elif mode == 'cost_volume_ui':\n                self.registered_blocks[title] = VisCostVolumeUI(self.visdom, show_data, title, data[1],\n                                                                self.registered_blocks)\n            elif mode == 'info_dict':\n                self.registered_blocks[title] = VisInfoDict(self.visdom, show_data, title)\n            elif mode == 'text':\n                self.registered_blocks[title] = VisText(self.visdom, show_data, title)\n            elif mode == 'lineplot':\n                self.registered_blocks[title] = VisLinePlot(self.visdom, show_data, title)\n            elif mode == 'Tracking':\n                self.registered_blocks[title] = VisTracking(self.visdom, show_data, title)\n            elif mode == 'bbreg':\n                self.registered_blocks[title] = VisBBReg(self.visdom, show_data, title)\n            elif mode == 'featmap':\n                self.registered_blocks[title] = VisFeaturemap(self.visdom, show_data, title)\n            else:\n                raise ValueError('Visdom Error: Unknown data mode {}'.format(mode))\n        # Update the stored data; the block redraws itself only if currently shown\n        self.registered_blocks[title].update(data, **kwargs)\n
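\n\n# Usage sketch (illustrative, not part of the original file):\n#   vis = Visdom(debug=1, visdom_info={'server': '127.0.0.1', 'port': 8097})\n#   vis.register(score_map, mode='heatmap', debug_level=1, title='Score Map')\n#   vis.register([frame, pred_box], mode='Tracking', title='Tracking')\n"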
  },
  {
    "path": "tracking/_init_paths.py",
    "content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path as osp\nimport sys\n\n\ndef add_path(path):\n    if path not in sys.path:\n        sys.path.insert(0, path)\n\n\nthis_dir = osp.dirname(__file__)\n\nprj_path = osp.join(this_dir, '..')\nadd_path(prj_path)\n"
  },
  {
    "path": "tracking/analysis_results.py",
    "content": "import _init_paths\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = [8, 8]\n\nfrom lib.test.analysis.plot_results import plot_results, print_results, print_per_sequence_results\nfrom lib.test.evaluation import get_dataset, trackerlist\n\ntrackers = []\ndataset_name = 'lasot_extension_subset'\n#dataset_name = 'lasot'\n\"\"\"stark\"\"\"\n# trackers.extend(trackerlist(name='stark_s', parameter_name='baseline', dataset_name=dataset_name,\n#                             run_ids=None, display_name='STARK-S50'))\n# trackers.extend(trackerlist(name='stark_st', parameter_name='baseline', dataset_name=dataset_name,\n#                             run_ids=None, display_name='STARK-ST50'))\n# trackers.extend(trackerlist(name='stark_st', parameter_name='baseline_R101', dataset_name=dataset_name,\n#                             run_ids=None, display_name='STARK-ST101'))\n\"\"\"TransT\"\"\"\n# trackers.extend(trackerlist(name='TransT_N2', parameter_name=None, dataset_name=None,\n#                             run_ids=None, display_name='TransT_N2', result_only=True))\n# trackers.extend(trackerlist(name='TransT_N4', parameter_name=None, dataset_name=None,\n#                             run_ids=None, display_name='TransT_N4', result_only=True))\n\"\"\"pytracking\"\"\"\n# trackers.extend(trackerlist('atom', 'default', None, range(0,5), 'ATOM'))\n# trackers.extend(trackerlist('dimp', 'dimp18', None, range(0,5), 'DiMP18'))\n# trackers.extend(trackerlist('dimp', 'dimp50', None, range(0,5), 'DiMP50'))\n# trackers.extend(trackerlist('dimp', 'prdimp18', None, range(0,5), 'PrDiMP18'))\n# trackers.extend(trackerlist('dimp', 'prdimp50', None, range(0,5), 'PrDiMP50'))\n\"\"\"ostrack\"\"\"\ntrackers.extend(trackerlist(name='artrack_seq', parameter_name='artrack_seq_256_full', dataset_name=dataset_name,\n                            run_ids=None, display_name='ARTrackSeq_256'))\n#trackers.extend(trackerlist(name='ostrack', parameter_name='vitb_384_mae_ce_32x4_ep300', dataset_name=dataset_name,\n#.                            run_ids=None, display_name='OSTrack384'))\n\n\ndataset = get_dataset(dataset_name)\n# dataset = get_dataset('otb', 'nfs', 'uav', 'tc128ce')\n# plot_results(trackers, dataset, 'OTB2015', merge_results=True, plot_types=('success', 'norm_prec'),\n#              skip_missing_seq=False, force_evaluation=True, plot_bin_gap=0.05)\nprint_results(trackers, dataset, dataset_name, merge_results=True, plot_types=('success', 'norm_prec', 'prec'))\n# print_results(trackers, dataset, 'UNO', merge_results=True, plot_types=('success', 'prec'))\n"
  },
  {
    "path": "tracking/analysis_results_ITP.py",
    "content": "import _init_paths\nimport argparse\nfrom lib.test.analysis.plot_results import print_results\nfrom lib.test.evaluation import get_dataset, trackerlist\n\n\ndef parse_args():\n    \"\"\"\n    args for evaluation.\n    \"\"\"\n    parser = argparse.ArgumentParser(description='Parse args for training')\n    # for train\n    parser.add_argument('--script', type=str, help='training script name')\n    parser.add_argument('--config', type=str, default='baseline', help='yaml configure file name')\n\n    args = parser.parse_args()\n\n    return args\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    trackers = []\n    trackers.extend(trackerlist(args.script, args.config, \"None\", None, args.config))\n\n    dataset = get_dataset('lasot')\n\n    print_results(trackers, dataset, 'LaSOT', merge_results=True, plot_types=('success', 'prec', 'norm_prec'))"
  },
  {
    "path": "tracking/convert_transt.py",
    "content": "import _init_paths\nimport os\nfrom lib.test.evaluation import get_dataset\nimport shutil\n\ntrackers = []\n# dataset_name = 'uav'\ndataset_name = 'nfs'\n\n\nroot_dir = \"/data/sda/v-yanbi/iccv21/STARK_Latest/Stark\"\nbase_dir = os.path.join(root_dir, \"test/tracking_results/TransT_N2\")\ndataset = get_dataset(dataset_name)\nfor x in dataset:\n    seq_name = x.name\n    file_name = \"%s.txt\" % (seq_name.replace(\"nfs_\", \"\"))\n    file_path = os.path.join(base_dir, file_name)\n    file_path_new = os.path.join(base_dir, \"%s.txt\" % seq_name)\n    if os.path.exists(file_path):\n        shutil.move(file_path, file_path_new)\n\n"
  },
  {
    "path": "tracking/create_default_local_file.py",
    "content": "import argparse\nimport os\nimport _init_paths\nfrom lib.train.admin import create_default_local_file_ITP_train\nfrom lib.test.evaluation import create_default_local_file_ITP_test\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Create default local file on ITP or PAI')\n    parser.add_argument(\"--workspace_dir\", type=str, required=True)  # workspace dir\n    parser.add_argument(\"--data_dir\", type=str, required=True)\n    parser.add_argument(\"--save_dir\", type=str, required=True)\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    workspace_dir = os.path.realpath(args.workspace_dir)\n    data_dir = os.path.realpath(args.data_dir)\n    save_dir = os.path.realpath(args.save_dir)\n    create_default_local_file_ITP_train(workspace_dir, data_dir)\n    create_default_local_file_ITP_test(workspace_dir, data_dir, save_dir)\n"
  },
  {
    "path": "tracking/download_pytracking_results.py",
    "content": "import os\nimport sys\nimport gdown\nimport re\nimport shutil\nimport argparse\nimport tempfile\nimport _init_paths\n\nfrom lib.test.evaluation.environment import env_settings\n\npytracking_results_link_dict = {\n    \"dimp\": {\n        \"prdimp50_003.zip\": \"1p13j3iwcOCubBi3ms0hLwqnP6-x0J8Mc\",\n        \"prdimp50_002.zip\": \"1PPKgrAepbuyM2kjfzYAozQKTL6AjcQOz\",\n        \"prdimp50_001.zip\": \"17NFBObEDeK6mW4Mk2vN5Ekk1SGbFvxRS\",\n        \"prdimp50_000.zip\": \"1r3Efq7AumML2yGQ_KV4zmf4ATKVE1bo6\",\n        \"prdimp18_004.zip\": \"1DF4ZJQAa4CwvN_OiT4te33AV0kpsO7JM\",\n        \"prdimp18_003.zip\": \"1RgwJAN4TxnzgVgsfvrHIg1OUXD1EBZkO\",\n        \"prdimp18_002.zip\": \"17lMllYhygCqgE81DoHX4BZar3xc3auzM\",\n        \"prdimp18_001.zip\": \"1Yg7DmGYOnn2k0MYtSjjKlGyzO1Uimj4G\",\n        \"prdimp18_000.zip\": \"1DuZJSBJ-23WJBQTOWSAaoPYSbGAJJN2Z\",\n        \"prdimp50_004.zip\": \"1f9bx9-dtx3B5_IvIJhjjJyp-cnXciqLO\",\n        \"dimp50_004.zip\": \"1Lj3p8mYCoIqxzdQXZkWFTw-MA8c6eeLa\",\n        \"dimp50_000.zip\": \"1LCgf5sg453Z4bY37A_W5mbXeG68U1fET\",\n        \"dimp18_000.zip\": \"17M7dJZ1oKrIY4-O5lL_mlQPEubUn034g\",\n        \"dimp18_001.zip\": \"1AsiliVgISyDTouYOQYVOXA0srj3YskhJ\",\n        \"dimp18_002.zip\": \"1I0GrBaPnySOyPWSvItHhXH8182tFCi_Y\",\n        \"dimp50_001.zip\": \"1XfPvwAcymW88J1rq7RlhyKmqsawJDK-K\",\n        \"dimp18_004.zip\": \"1EztF6bpROFwZ1PSJWgMB7bQ4G_Z08YIg\",\n        \"dimp18_003.zip\": \"1iuiFLv04WE7GfBjm8UkZXFq4gheG2Ru8\",\n        \"dimp50_003.zip\": \"1rLsgeQXyKpD6ryl9BjlIVdO3vd27ekwy\",\n        \"dimp50_002.zip\": \"1wj2jUwlpHgsP1hAcuxXAVriUPuEspsu4\",\n    },\n    \"atom\": {\n        \"default_004.zip\": \"1BapnQh_8iRM44DXj862eOZV4q8zQLdmT\",\n        \"default_003.zip\": \"1YpfOBLBEUQQiX0fWMPA5pnW3dm0NG3E5\",\n        \"default_000.zip\": \"1x6fKGZk3V839mX99Gl_pw7JUaiMaTxc5\",\n        \"default_002.zip\": \"1QIlQFv3p6MBTwsYdIMYmzUDBDQGxGsUC\",\n        \"default_001.zip\": \"1-K2--GNCURDKEgUuiEF18K4DcCLvDEVt\",\n    },\n    \"kys\": {\n        \"default_004.zip\": \"1QdfkA3d4MzKwdDiBOM1ZhDJWk9NmALxD\",\n        \"default_000.zip\": \"1SCs79_ePTc8zxPDzRAgAmbbRlnmE89SN\",\n        \"default_003.zip\": \"1TCzq38QW4YiMrgU5VR6NAEefJ85gwzfT\",\n        \"default_002.zip\": \"1_9u1ybCFxHu0yJmW5ZzDR4-isJMEUsDf\",\n        \"default_001.zip\": \"1utJhdosNj6vlI75dfzUxGM3Vy8OjWslT\",\n    },\n}\n\n\ndef _download_file(file_id, path):\n    link = 'https://drive.google.com/uc?id=' + file_id\n    gdown.download(link, path, quiet=True)\n\n\ndef download_results(download_path, trackers='pytracking'):\n    \"\"\"\n    Script to automatically download tracker results for PyTracking.\n    args:\n        download_path - Directory where the zipped results are downloaded\n        trackers - Tracker results which are to be downloaded.\n                   If set to 'pytracking', results for all pytracking based trackers will be downloaded.\n                   If set to 'external', results for available external trackers will be downloaded.\n                   If set to 'all', all available results are downloaded.\n                   If set to a name of a tracker (e.g. atom), all results for that tracker are downloaded.\n                   Otherwise, it can be set to a dict, where the keys are the names of the trackers for which results are\n                   downloaded. The value can be set to either 'all', in which case all available results for the\n                    tracker are downloaded. 
Else the value should be a list of parameter file names.\n    \"\"\"\n    print(\"Using download path '{}'\".format(download_path))\n\n    os.makedirs(download_path, exist_ok=True)\n\n    # Download links for external trackers were not included in this file; keep an\n    # empty mapping so the 'all' and 'external' modes degrade gracefully instead of\n    # raising a NameError.\n    external_results_link_dict = {}\n\n    if isinstance(trackers, str):\n        if trackers == 'all':\n            all_trackers = list(pytracking_results_link_dict.keys()) + list(external_results_link_dict.keys())\n            trackers = {k: 'all' for k in all_trackers}\n        elif trackers == 'pytracking':\n            trackers = {k: 'all' for k in pytracking_results_link_dict.keys()}\n        elif trackers == 'external':\n            trackers = {k: 'all' for k in external_results_link_dict.keys()}\n        elif trackers in pytracking_results_link_dict or trackers in external_results_link_dict:\n            trackers = {trackers: 'all'}\n        else:\n            raise Exception(\"tracker_list must be set to 'all', a tracker name, or be a dict\")\n    elif isinstance(trackers, dict):\n        pass\n    else:\n        raise Exception(\"tracker_list must be set to 'all', or be a dict\")\n\n    common_link_dict = pytracking_results_link_dict\n    # for k, v in external_results_link_dict.items():\n    #     common_link_dict[k] = v\n\n    for trk, runfiles in trackers.items():\n        trk_path = os.path.join(download_path, trk)\n        if not os.path.exists(trk_path):\n            os.makedirs(trk_path)\n\n        if runfiles == 'all':\n            for params, fileid in common_link_dict[trk].items():\n                print('Downloading: {}/{}'.format(trk, params))\n                _download_file(fileid, os.path.join(trk_path, params))\n        elif isinstance(runfiles, (list, tuple)):\n            for p in runfiles:\n                for params, fileid in common_link_dict[trk].items():\n                    if re.match(r'{}(|_(\\d\\d\\d)).zip'.format(p), params) is not None:\n                        print('Downloading: {}/{}'.format(trk, params))\n                        _download_file(fileid, os.path.join(trk_path, params))\n\n        else:\n            raise Exception(\"tracker_list values must either be set to 'all', or be a list of param names\")\n\n\n\ndef unpack_tracking_results(download_path, output_path=None):\n    \"\"\"\n    Unpacks zipped benchmark results. The directory 'download_path' should have the following structure\n    - root\n        - tracker1\n            - param1.zip\n            - param2.zip\n            .\n            .\n        - tracker2\n            - param1.zip\n            - param2.zip\n        .\n        .\n    args:\n        download_path - Path to the directory where the zipped results are stored\n        output_path - Path to the directory where the results will be unpacked. 
Set to env_settings().results_path\n                      by default\n    \"\"\"\n\n    if output_path is None:\n        output_path = env_settings().results_path\n\n    if not os.path.exists(output_path):\n        os.makedirs(output_path)\n\n    trackers = os.listdir(download_path)\n\n    for t in trackers:\n        runfiles = os.listdir(os.path.join(download_path, t))\n\n        for r in runfiles:\n            save_path = os.path.join(output_path, t)\n            if not os.path.exists(save_path):\n                os.makedirs(save_path)\n            shutil.unpack_archive(os.path.join(download_path, t, r), os.path.join(save_path, r[:-4]), 'zip')\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Download and unpack zipped results')\n    parser.add_argument('--tracker', type=str, default='pytracking',\n                        help='Name of tracker results to download, or \"pytracking\" (downloads results for PyTracking'\n                             ' based trackers), or \"external\" (downloads results for external trackers) or \"all\"')\n    parser.add_argument('--output_path', type=str, default=None,\n                        help='Path to the directory where the results will be unpacked.')\n    parser.add_argument('--temp_download_path', type=str, default=None,\n                        help='Temporary path used for downloading the Zip files.')\n    # argparse's type=bool treats any non-empty string as True, so parse\n    # truthy/falsy strings explicitly\n    parser.add_argument('--download', type=lambda v: str(v).lower() not in ('0', 'false', 'no'), default=True,\n                        help='Whether to download results or unpack existing downloaded files.')\n    args = parser.parse_args()\n\n    download_path = args.temp_download_path\n    if download_path is None:\n        download_path = '{}/pytracking_results/'.format(tempfile.gettempdir())\n\n    if args.download:\n        download_results(download_path, args.tracker)\n\n    unpack_tracking_results(download_path, args.output_path)\n\n\nif __name__ == '__main__':\n    main()\n
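\n# Example invocation (illustrative): python tracking/download_pytracking_results.py --tracker atom --output_path ./unpacked_results\n"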
  },
  {
    "path": "tracking/pre_read_datasets.py",
    "content": "import _init_paths\nimport multiprocessing as mp\nimport argparse\nimport os\nfrom lib.utils.lmdb_utils import decode_str\nimport time\nimport json\n\n\ndef parse_args():\n    \"\"\"\n    args for training.\n    \"\"\"\n    parser = argparse.ArgumentParser(description='Parse args for training')\n    parser.add_argument('--data_dir', type=str, help='directory where lmdb data is located')\n    parser.add_argument('--dataset_str', type=str, help=\"which datasets to use\")\n    args = parser.parse_args()\n\n    return args\n\n\ndef get_trknet_dict(trknet_dir):\n    with open(os.path.join(trknet_dir, \"seq_list.json\"), \"r\") as f:\n        seq_list = json.loads(f.read())\n    res_dict = {}\n    set_idx_pre = -1\n    for set_idx, seq_name in seq_list:\n        if set_idx != set_idx_pre:\n            res_dict[set_idx] = \"anno/%s.txt\" % seq_name\n            set_idx_pre = set_idx\n    return res_dict\n\n\ndef target(lmdb_dir, key_name):\n    _ = decode_str(lmdb_dir, key_name)\n\n\nif __name__ == \"__main__\":\n    args = parse_args()\n    data_dir = args.data_dir\n    dataset_str = args.dataset_str\n    key_dict = {\"got10k_lmdb\": \"train/list.txt\",\n                \"lasot_lmdb\": \"LaSOTBenchmark.json\",\n                \"coco_lmdb\": \"annotations/instances_train2017.json\",\n                \"vid_lmdb\": \"cache.json\"}\n    print(\"Ready to pre load datasets\")\n    start = time.time()\n    ps = []\n    datasets = []\n    if 'g' in dataset_str:\n        datasets.append(\"got10k_lmdb\")\n    if 'l' in dataset_str:\n        datasets.append(\"lasot_lmdb\")\n    if 'c' in dataset_str:\n        datasets.append(\"coco_lmdb\")\n    if 'v' in dataset_str:\n        datasets.append(\"vid_lmdb\")\n    for dataset in datasets:\n        lmdb_dir = os.path.join(data_dir, dataset)\n        p = mp.Process(target=target, args=(lmdb_dir, key_dict[dataset]))\n        print(\"add %s %s to job queue\" % (lmdb_dir, key_dict[dataset]))\n        ps.append(p)\n    # deal with trackingnet\n    if 't' in dataset_str:\n        trknet_dict = get_trknet_dict(os.path.join(data_dir, \"trackingnet_lmdb\"))\n        for set_idx, seq_path in trknet_dict.items():\n            lmdb_dir = os.path.join(data_dir, \"trackingnet_lmdb\", \"TRAIN_%d_lmdb\" % set_idx)\n            p = mp.Process(target=target, args=(lmdb_dir, seq_path))\n            print(\"add %s %s to job queue\" % (lmdb_dir, seq_path))\n            ps.append(p)\n    for p in ps:\n        p.start()\n    for p in ps:\n        p.join()\n\n    print(\"Pre read over\")\n    end = time.time()\n    hour = (end - start) / 3600\n    print(\"it takes %.2f hours to pre-read data\" % hour)\n"
  },
  {
    "path": "tracking/test.py",
    "content": "import os\nimport sys\nimport argparse\n\nprj_path = os.path.join(os.path.dirname(__file__), '..')\nif prj_path not in sys.path:\n    sys.path.append(prj_path)\n\nfrom lib.test.evaluation import get_dataset\nfrom lib.test.evaluation.running import run_dataset\nfrom lib.test.evaluation.tracker import Tracker\n\n\ndef run_tracker(tracker_name, tracker_param, run_id=None, dataset_name='otb', sequence=None, debug=0, threads=0,\n                num_gpus=8):\n    \"\"\"Run tracker on sequence or dataset.\n    args:\n        tracker_name: Name of tracking method.\n        tracker_param: Name of parameter file.\n        run_id: The run id.\n        dataset_name: Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).\n        sequence: Sequence number or name.\n        debug: Debug level.\n        threads: Number of threads.\n    \"\"\"\n\n    dataset = get_dataset(dataset_name)\n\n    if sequence is not None:\n        dataset = [dataset[sequence]]\n\n    trackers = [Tracker(tracker_name, tracker_param, dataset_name, run_id)]\n\n    run_dataset(dataset, trackers, debug, threads, num_gpus=num_gpus)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Run tracker on sequence or dataset.')\n    parser.add_argument('tracker_name', type=str, help='Name of tracking method.')\n    parser.add_argument('tracker_param', type=str, help='Name of config file.')\n    parser.add_argument('--runid', type=int, default=None, help='The run id.')\n    parser.add_argument('--dataset_name', type=str, default='otb', help='Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).')\n    parser.add_argument('--sequence', type=str, default=None, help='Sequence number or name.')\n    parser.add_argument('--debug', type=int, default=0, help='Debug level.')\n    parser.add_argument('--threads', type=int, default=0, help='Number of threads.')\n    parser.add_argument('--num_gpus', type=int, default=8)\n\n    args = parser.parse_args()\n\n    try:\n        seq_name = int(args.sequence)\n    except:\n        seq_name = args.sequence\n\n    run_tracker(args.tracker_name, args.tracker_param, args.runid, args.dataset_name, seq_name, args.debug,\n                args.threads, num_gpus=args.num_gpus)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tracking/test_exp.py",
    "content": "import os\nimport sys\nimport argparse\n\nprj_path = os.path.join(os.path.dirname(__file__), '..')\nif prj_path not in sys.path:\n    sys.path.append(prj_path)\n\nfrom lib.test.evaluation import get_dataset\nfrom lib.test.evaluation.running import run_dataset\nfrom lib.test.evaluation.tracker import Tracker\n\n\ndef run_tracker(tracker_name, tracker_param, run_id=None, dataset_name='otb', sequence=None, debug=0, threads=0,\n                num_gpus=8):\n    \"\"\"Run tracker on sequence or dataset.\n    args:\n        tracker_name: Name of tracking method.\n        tracker_param: Name of parameter file.\n        run_id: The run id.\n        dataset_name: Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).\n        sequence: Sequence number or name.\n        debug: Debug level.\n        threads: Number of threads.\n    \"\"\"\n\n    dataset = get_dataset(*dataset_name)\n\n    if sequence is not None:\n        dataset = [dataset[sequence]]\n\n    trackers = [Tracker(tracker_name, tracker_param, dataset_name, run_id)]\n\n    run_dataset(dataset, trackers, debug, threads, num_gpus=num_gpus)\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Run tracker on sequence or dataset.')\n    parser.add_argument('tracker_name', type=str, help='Name of tracking method.')\n    parser.add_argument('tracker_param', type=str, help='Name of config file.')\n    parser.add_argument('--runid', type=int, default=None, help='The run id.')\n    parser.add_argument('--dataset_name', type=str, default='otb', help='Name of dataset (otb, nfs, uav, tpl, vot, tn, gott, gotv, lasot).')\n    parser.add_argument('--sequence', type=str, default=None, help='Sequence number or name.')\n    parser.add_argument('--debug', type=int, default=0, help='Debug level.')\n    parser.add_argument('--threads', type=int, default=0, help='Number of threads.')\n    parser.add_argument('--num_gpus', type=int, default=8)\n\n    args = parser.parse_args()\n\n    try:\n        seq_name = int(args.sequence)\n    except:\n        seq_name = args.sequence\n\n    args.dataset_name = ['trackingnet', 'got10k_test', 'lasot']\n\n    run_tracker(args.tracker_name, args.tracker_param, args.runid, args.dataset_name, seq_name, args.debug,\n                args.threads, num_gpus=args.num_gpus)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "tracking/train.py",
    "content": "import os\nimport argparse\nimport random\nimport torch\n\n\ndef parse_args():\n    \"\"\"\n    args for training.\n    \"\"\"\n    parser = argparse.ArgumentParser(description='Parse args for training')\n    # for train\n    parser.add_argument('--script', type=str, help='training script name')\n    parser.add_argument('--config', type=str, default='baseline', help='yaml configure file name')\n    parser.add_argument('--save_dir', type=str, help='root directory to save checkpoints, logs, and tensorboard')\n    parser.add_argument('--mode', type=str, choices=[\"single\", \"multiple\", \"multi_node\"], default=\"multiple\",\n                        help=\"train on single gpu or multiple gpus\")\n    parser.add_argument('--nproc_per_node', type=int, help=\"number of GPUs per node\")  # specify when mode is multiple\n    parser.add_argument('--use_lmdb', type=int, choices=[0, 1], default=0)  # whether datasets are in lmdb format\n    parser.add_argument('--script_prv', type=str, help='training script name')\n    parser.add_argument('--config_prv', type=str, default='baseline', help='yaml configure file name')\n    parser.add_argument('--use_wandb', type=int, choices=[0, 1], default=0)  # whether to use wandb\n    # for knowledge distillation\n    parser.add_argument('--distill', type=int, choices=[0, 1], default=0)  # whether to use knowledge distillation\n    parser.add_argument('--script_teacher', type=str, help='teacher script name')\n    parser.add_argument('--config_teacher', type=str, help='teacher yaml configure file name')\n\n    # for multiple machines\n    parser.add_argument('--rank', type=int, help='Rank of the current process.')\n    parser.add_argument('--world-size', type=int, help='Number of processes participating in the job.')\n    parser.add_argument('--ip', type=str, default='127.0.0.1', help='IP of the current rank 0.')\n    parser.add_argument('--port', type=int, default='20000', help='Port of the current rank 0.')\n\n    args = parser.parse_args()\n\n    return args\n\n\ndef main():\n    torch.set_num_threads(8)\n    args = parse_args()\n    if args.mode == \"single\":\n        train_cmd = \"python lib/train/run_training.py --script %s --config %s --save_dir %s --use_lmdb %d \" \\\n                    \"--script_prv %s --config_prv %s --distill %d --script_teacher %s --config_teacher %s --use_wandb %d\"\\\n                    % (args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv,\n                       args.distill, args.script_teacher, args.config_teacher, args.use_wandb)\n    elif args.mode == \"multiple\":\n        train_cmd = \"python -m torch.distributed.launch --nproc_per_node %d --master_port %d lib/train/run_training.py \" \\\n                    \"--script %s --config %s --save_dir %s --use_lmdb %d --script_prv %s --config_prv %s --use_wandb %d \" \\\n                    \"--distill %d --script_teacher %s --config_teacher %s\" \\\n                    % (args.nproc_per_node, random.randint(10000, 50000), args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.use_wandb,\n                       args.distill, args.script_teacher, args.config_teacher)\n    elif args.mode == \"multi_node\":\n        train_cmd = \"python -m torch.distributed.launch --nproc_per_node %d --master_addr %s --master_port %d --nnodes %d --node_rank %d lib/train/run_training.py \" \\\n                    \"--script %s --config %s --save_dir %s --use_lmdb %d --script_prv %s --config_prv %s 
--use_wandb %d \" \\\n                    \"--distill %d --script_teacher %s --config_teacher %s\" \\\n                    % (args.nproc_per_node, args.ip, args.port, args.world_size, args.rank, args.script, args.config, args.save_dir, args.use_lmdb, args.script_prv, args.config_prv, args.use_wandb,\n                       args.distill, args.script_teacher, args.config_teacher)\n    else:\n        raise ValueError(\"mode should be 'single' or 'multiple'.\")\n    os.system(train_cmd)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  }
]